Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/Kconfig | 1
-rw-r--r--  drivers/accel/Makefile | 1
-rw-r--r--  drivers/accel/amdxdna/Kconfig | 18
-rw-r--r--  drivers/accel/amdxdna/Makefile | 23
-rw-r--r--  drivers/accel/amdxdna/TODO | 3
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c | 910
-rw-r--r--  drivers/accel/amdxdna/aie2_error.c | 360
-rw-r--r--  drivers/accel/amdxdna/aie2_message.c | 776
-rw-r--r--  drivers/accel/amdxdna/aie2_msg_priv.h | 370
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c | 928
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.h | 297
-rw-r--r--  drivers/accel/amdxdna/aie2_pm.c | 108
-rw-r--r--  drivers/accel/amdxdna/aie2_psp.c | 146
-rw-r--r--  drivers/accel/amdxdna/aie2_smu.c | 134
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.c | 380
-rw-r--r--  drivers/accel/amdxdna/aie2_solver.h | 155
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.c | 550
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.h | 162
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c | 622
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.h | 65
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.c | 562
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.h | 124
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.c | 61
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.h | 42
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c | 434
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h | 147
-rw-r--r--  drivers/accel/amdxdna/amdxdna_sysfs.c | 67
-rw-r--r--  drivers/accel/amdxdna/npu1_regs.c | 114
-rw-r--r--  drivers/accel/amdxdna/npu2_regs.c | 113
-rw-r--r--  drivers/accel/amdxdna/npu4_regs.c | 134
-rw-r--r--  drivers/accel/amdxdna/npu5_regs.c | 113
-rw-r--r--  drivers/accel/amdxdna/npu6_regs.c | 114
-rw-r--r--  drivers/accel/habanalabs/common/context.c | 3
-rw-r--r--  drivers/accel/habanalabs/common/device.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c | 4
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_ioctl.c | 11
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 16
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 86
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 9
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 1
-rw-r--r--  drivers/accel/qaic/sahara.c | 3
-rw-r--r--  drivers/acpi/Makefile | 4
-rw-r--r--  drivers/acpi/acpi_extlog.c | 14
-rw-r--r--  drivers/acpi/acpi_pad.c | 5
-rw-r--r--  drivers/acpi/acpi_video.c | 49
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 10
-rw-r--r--  drivers/acpi/arm64/gtdt.c | 12
-rw-r--r--  drivers/acpi/battery.c | 14
-rw-r--r--  drivers/acpi/bgrt.c | 6
-rw-r--r--  drivers/acpi/dptf/dptf_pch_fivr.c | 1
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 2
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 6
-rw-r--r--  drivers/acpi/fan.h | 1
-rw-r--r--  drivers/acpi/fan_core.c | 10
-rw-r--r--  drivers/acpi/mipi-disco-img.c | 3
-rw-r--r--  drivers/acpi/numa/hmat.c | 24
-rw-r--r--  drivers/acpi/numa/srat.c | 95
-rw-r--r--  drivers/acpi/osl.c | 22
-rw-r--r--  drivers/acpi/platform_profile.c | 705
-rw-r--r--  drivers/acpi/prmt.c | 4
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/property.c | 13
-rw-r--r--  drivers/acpi/resource.c | 30
-rw-r--r--  drivers/acpi/scan.c | 5
-rw-r--r--  drivers/acpi/sysfs.c | 8
-rw-r--r--  drivers/acpi/tables.c | 12
-rw-r--r--  drivers/acpi/utils.c | 7
-rw-r--r--  drivers/acpi/x86/utils.c | 13
-rw-r--r--  drivers/android/binder.c | 38
-rw-r--r--  drivers/android/binder_alloc.c | 366
-rw-r--r--  drivers/android/binder_alloc.h | 45
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 18
-rw-r--r--  drivers/android/binder_internal.h | 11
-rw-r--r--  drivers/android/binder_trace.h | 2
-rw-r--r--  drivers/android/binderfs.c | 3
-rw-r--r--  drivers/ata/ahci.c | 2
-rw-r--r--  drivers/ata/ahci.h | 21
-rw-r--r--  drivers/ata/ahci_brcm.c | 3
-rw-r--r--  drivers/ata/ahci_ceva.c | 6
-rw-r--r--  drivers/ata/ahci_st.c | 6
-rw-r--r--  drivers/ata/ata_generic.c | 2
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libahci.c | 1
-rw-r--r--  drivers/ata/libahci_platform.c | 38
-rw-r--r--  drivers/ata/libata-core.c | 14
-rw-r--r--  drivers/ata/libata-sata.c | 8
-rw-r--r--  drivers/ata/libata-scsi.c | 19
-rw-r--r--  drivers/ata/libata-sff.c | 18
-rw-r--r--  drivers/ata/pata_atp867x.c | 2
-rw-r--r--  drivers/ata/pata_macio.c | 10
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 5
-rw-r--r--  drivers/ata/pata_piccolo.c | 2
-rw-r--r--  drivers/ata/pata_rdc.c | 2
-rw-r--r--  drivers/ata/sata_gemini.c | 32
-rw-r--r--  drivers/ata/sata_gemini.h | 1
-rw-r--r--  drivers/ata/sata_mv.c | 4
-rw-r--r--  drivers/ata/sata_nv.c | 28
-rw-r--r--  drivers/ata/sata_sil24.c | 5
-rw-r--r--  drivers/ata/sata_sis.c | 2
-rw-r--r--  drivers/ata/sata_uli.c | 2
-rw-r--r--  drivers/ata/sata_vsc.c | 2
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 8
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/arch_topology.c | 26
-rw-r--r--  drivers/base/base.h | 1
-rw-r--r--  drivers/base/bus.c | 9
-rw-r--r--  drivers/base/class.c | 42
-rw-r--r--  drivers/base/core.c | 84
-rw-r--r--  drivers/base/cpu.c | 3
-rw-r--r--  drivers/base/devcoredump.c | 22
-rw-r--r--  drivers/base/devres.c | 23
-rw-r--r--  drivers/base/devtmpfs.c | 153
-rw-r--r--  drivers/base/driver.c | 9
-rw-r--r--  drivers/base/faux.c | 232
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c | 2
-rw-r--r--  drivers/base/firmware_loader/sysfs.c | 14
-rw-r--r--  drivers/base/init.c | 1
-rw-r--r--  drivers/base/memory.c | 4
-rw-r--r--  drivers/base/physical_location.c | 4
-rw-r--r--  drivers/base/power/main.c | 44
-rw-r--r--  drivers/base/power/runtime.c | 4
-rw-r--r--  drivers/base/power/sysfs.c | 1
-rw-r--r--  drivers/base/power/wakeirq.c | 26
-rw-r--r--  drivers/base/property.c | 38
-rw-r--r--  drivers/base/regmap/regcache-maple.c | 7
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 10
-rw-r--r--  drivers/base/regmap/regcache.c | 2
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 2
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c | 219
-rw-r--r--  drivers/base/regmap/regmap.c | 13
-rw-r--r--  drivers/base/swnode.c | 1
-rw-r--r--  drivers/base/test/Kconfig | 1
-rw-r--r--  drivers/base/test/platform-device-test.c | 41
-rw-r--r--  drivers/base/topology.c | 24
-rw-r--r--  drivers/block/amiflop.c | 1
-rw-r--r--  drivers/block/aoe/aoeblk.c | 1
-rw-r--r--  drivers/block/aoe/aoedev.c | 5
-rw-r--r--  drivers/block/ataflop.c | 6
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/block/loop.c | 193
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 1
-rw-r--r--  drivers/block/nbd.c | 123
-rw-r--r--  drivers/block/null_blk/main.c | 42
-rw-r--r--  drivers/block/null_blk/null_blk.h | 1
-rw-r--r--  drivers/block/ps3disk.c | 7
-rw-r--r--  drivers/block/rbd.c | 6
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 3
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 2
-rw-r--r--  drivers/block/rnull.rs | 30
-rw-r--r--  drivers/block/sunvdc.c | 13
-rw-r--r--  drivers/block/swim.c | 2
-rw-r--r--  drivers/block/swim3.c | 8
-rw-r--r--  drivers/block/ublk_drv.c | 8
-rw-r--r--  drivers/block/virtio_blk.c | 47
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 1
-rw-r--r--  drivers/block/z2ram.c | 1
-rw-r--r--  drivers/block/zram/zram_drv.c | 301
-rw-r--r--  drivers/block/zram/zram_drv.h | 5
-rw-r--r--  drivers/bluetooth/Kconfig | 12
-rw-r--r--  drivers/bluetooth/btbcm.c | 3
-rw-r--r--  drivers/bluetooth/btintel.c | 17
-rw-r--r--  drivers/bluetooth/btintel_pcie.c | 5
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 3
-rw-r--r--  drivers/bluetooth/btmtk.c | 4
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 4
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 3
-rw-r--r--  drivers/bluetooth/btqca.c | 200
-rw-r--r--  drivers/bluetooth/btqca.h | 5
-rw-r--r--  drivers/bluetooth/btrtl.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 133
-rw-r--r--  drivers/bluetooth/hci_qca.c | 33
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 8
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 36
-rw-r--r--  drivers/bus/mhi/host/boot.c | 1
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 64
-rw-r--r--  drivers/bus/moxtet.c | 2
-rw-r--r--  drivers/bus/simple-pm-bus.c | 22
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/cdx/cdx.c | 9
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 3
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 5
-rw-r--r--  drivers/char/ipmi/ssif_bmc.c | 5
-rw-r--r--  drivers/char/misc.c | 41
-rw-r--r--  drivers/char/random.c | 8
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c | 15
-rw-r--r--  drivers/char/virtio_console.c | 8
-rw-r--r--  drivers/clk/analogbits/wrpll-cln28hpc.c | 2
-rw-r--r--  drivers/clk/at91/Makefile | 1
-rw-r--r--  drivers/clk/at91/clk-master.c | 2
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c | 2
-rw-r--r--  drivers/clk/at91/pmc.c | 1
-rw-r--r--  drivers/clk/at91/sama7d65.c | 1375
-rw-r--r--  drivers/clk/at91/sckc.c | 24
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 3
-rw-r--r--  drivers/clk/bcm/clk-raspberrypi.c | 33
-rw-r--r--  drivers/clk/clk-en7523.c | 25
-rw-r--r--  drivers/clk/clk-ep93xx.c | 6
-rw-r--r--  drivers/clk/clk-lmk04832.c | 4
-rw-r--r--  drivers/clk/clk-loongson2.c | 9
-rw-r--r--  drivers/clk/clk-nomadik.c | 5
-rw-r--r--  drivers/clk/clk-stm32f4.c | 155
-rw-r--r--  drivers/clk/clk-versaclock3.c | 67
-rw-r--r--  drivers/clk/clk-xgene.c | 4
-rw-r--r--  drivers/clk/clk.c | 4
-rw-r--r--  drivers/clk/davinci/pll.c | 32
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c | 5
-rw-r--r--  drivers/clk/imx/clk-imx93.c | 32
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-aud.c | 10
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-bdp.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-img.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-mm.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-vdec.c | 1
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-apbc.c | 4
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-apbcp.c | 4
-rw-r--r--  drivers/clk/mmp/clk-pxa1908-mpmu.c | 4
-rw-r--r--  drivers/clk/mmp/pwr-island.c | 2
-rw-r--r--  drivers/clk/qcom/Kconfig | 65
-rw-r--r--  drivers/clk/qcom/Makefile | 7
-rw-r--r--  drivers/clk/qcom/apss-ipq-pll.c | 3
-rw-r--r--  drivers/clk/qcom/camcc-x1e80100.c | 7
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 181
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 13
-rw-r--r--  drivers/clk/qcom/clk-rcg.c | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 198
-rw-r--r--  drivers/clk/qcom/clk-rpm.c | 27
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 50
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 81
-rw-r--r--  drivers/clk/qcom/clk-spmi-pmic-div.c | 13
-rw-r--r--  drivers/clk/qcom/dispcc-qcm2290.c | 2
-rw-r--r--  drivers/clk/qcom/dispcc-sm6115.c | 2
-rw-r--r--  drivers/clk/qcom/dispcc-sm6350.c | 7
-rw-r--r--  drivers/clk/qcom/dispcc-sm8750.c | 1961
-rw-r--r--  drivers/clk/qcom/gcc-ipq5424.c | 57
-rw-r--r--  drivers/clk/qcom/gcc-ipq6018.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-mdm9607.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-qcs615.c | 3034
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 43
-rw-r--r--  drivers/clk/qcom/gcc-sm6350.c | 22
-rw-r--r--  drivers/clk/qcom/gcc-sm8550.c | 8
-rw-r--r--  drivers/clk/qcom/gcc-sm8650.c | 8
-rw-r--r--  drivers/clk/qcom/gcc-sm8750.c | 3274
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c | 2
-rw-r--r--  drivers/clk/qcom/gpucc-x1p42100.c | 587
-rw-r--r--  drivers/clk/qcom/ipq-cmn-pll.c | 435
-rw-r--r--  drivers/clk/qcom/lpasscc-sm6115.c | 85
-rw-r--r--  drivers/clk/qcom/mmcc-msm8960.c | 61
-rw-r--r--  drivers/clk/qcom/tcsrcc-sm8750.c | 141
-rw-r--r--  drivers/clk/ralink/clk-mtmips.c | 1
-rw-r--r--  drivers/clk/renesas/Kconfig | 7
-rw-r--r--  drivers/clk/renesas/Makefile | 1
-rw-r--r--  drivers/clk/renesas/r8a779g0-cpg-mssr.c | 4
-rw-r--r--  drivers/clk/renesas/r8a779h0-cpg-mssr.c | 4
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c | 29
-rw-r--r--  drivers/clk/renesas/r9a08g045-cpg.c | 47
-rw-r--r--  drivers/clk/renesas/r9a09g047-cpg.c | 150
-rw-r--r--  drivers/clk/renesas/r9a09g057-cpg.c | 181
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.c | 196
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.h | 39
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3588.c | 120
-rw-r--r--  drivers/clk/rockchip/clk.c | 102
-rw-r--r--  drivers/clk/rockchip/clk.h | 40
-rw-r--r--  drivers/clk/rockchip/gate-link.c | 85
-rw-r--r--  drivers/clk/samsung/Makefile | 1
-rw-r--r--  drivers/clk/samsung/clk-exynos990.c | 1343
-rw-r--r--  drivers/clk/samsung/clk-gs101.c | 8
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 21
-rw-r--r--  drivers/clk/samsung/clk-pll.h | 3
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c | 2
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7100-audio.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-aon.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-isp.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-stg.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-sys.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-vout.c | 14
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh71x0.c | 12
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh71x0.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100.c | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 13
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 28
-rw-r--r--  drivers/clk/thead/clk-th1520-ap.c | 13
-rw-r--r--  drivers/clk/ti/clk.c | 5
-rw-r--r--  drivers/clk/ti/mux.c | 2
-rw-r--r--  drivers/clk/xilinx/clk-xlnx-clock-wizard.c | 121
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 2
-rw-r--r--  drivers/clocksource/jcore-pit.c | 15
-rw-r--r--  drivers/cpufreq/Kconfig | 6
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 9
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 12
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 41
-rw-r--r--  drivers/cpufreq/airoha-cpufreq.c | 152
-rw-r--r--  drivers/cpufreq/amd-pstate-trace.h | 52
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c | 12
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 501
-rw-r--r--  drivers/cpufreq/amd-pstate.h | 3
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c | 56
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq.c | 68
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 66
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 34
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 45
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/Makefile | 3
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c | 1
-rw-r--r--  drivers/cpuidle/governors/teo.c | 276
-rw-r--r--  drivers/crypto/Kconfig | 17
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/bcm/spu.c | 7
-rw-r--r--  drivers/crypto/caam/blob_gen.c | 3
-rw-r--r--  drivers/crypto/ccp/dbc.c | 53
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 14
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 13
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 291
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 3
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 161
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 11
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 13
-rw-r--r--  drivers/crypto/hisilicon/zip/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/zip/dae_main.c | 262
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 8
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 52
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 2
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 36
-rw-r--r--  drivers/crypto/n2_asm.S | 96
-rw-r--r--  drivers/crypto/n2_core.c | 2168
-rw-r--r--  drivers/crypto/n2_core.h | 232
-rw-r--r--  drivers/crypto/omap-aes.c | 34
-rw-r--r--  drivers/crypto/omap-aes.h | 6
-rw-r--r--  drivers/crypto/omap-des.c | 40
-rw-r--r--  drivers/crypto/qce/aead.c | 2
-rw-r--r--  drivers/crypto/qce/core.c | 129
-rw-r--r--  drivers/crypto/qce/core.h | 9
-rw-r--r--  drivers/crypto/qce/dma.c | 22
-rw-r--r--  drivers/crypto/qce/dma.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 2
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c | 7
-rw-r--r--  drivers/crypto/tegra/tegra-se-hash.c | 7
-rw-r--r--  drivers/cxl/core/hdm.c | 2
-rw-r--r--  drivers/cxl/core/pci.c | 10
-rw-r--r--  drivers/cxl/core/pmem.c | 15
-rw-r--r--  drivers/cxl/core/region.c | 23
-rw-r--r--  drivers/cxl/core/regs.c | 56
-rw-r--r--  drivers/cxl/core/trace.h | 259
-rw-r--r--  drivers/cxl/cxl.h | 7
-rw-r--r--  drivers/cxl/pci.c | 6
-rw-r--r--  drivers/cxl/port.c | 2
-rw-r--r--  drivers/devfreq/devfreq-event.c | 8
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c | 3
-rw-r--r--  drivers/devfreq/exynos-bus.c | 5
-rw-r--r--  drivers/dma/Kconfig | 6
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amd/Kconfig | 28
-rw-r--r--  drivers/dma/amd/Makefile | 2
-rw-r--r--  drivers/dma/amd/ae4dma/Makefile | 10
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma-dev.c | 157
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma-pci.c | 158
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma.h | 100
-rw-r--r--  drivers/dma/amd/ptdma/Makefile (renamed from drivers/dma/ptdma/Makefile) | 0
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-debugfs.c (renamed from drivers/dma/ptdma/ptdma-debugfs.c) | 79
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dev.c (renamed from drivers/dma/ptdma/ptdma-dev.c) | 0
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dmaengine.c (renamed from drivers/dma/ptdma/ptdma-dmaengine.c) | 226
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-pci.c (renamed from drivers/dma/ptdma/ptdma-pci.c) | 0
-rw-r--r--  drivers/dma/amd/ptdma/ptdma.h (renamed from drivers/dma/ptdma/ptdma.h) | 4
-rw-r--r--  drivers/dma/amd/qdma/qdma.c | 22
-rw-r--r--  drivers/dma/bcm2835-dma.c | 22
-rw-r--r--  drivers/dma/fsl-edma-common.c | 36
-rw-r--r--  drivers/dma/fsl-edma-common.h | 3
-rw-r--r--  drivers/dma/fsl-edma-main.c | 115
-rw-r--r--  drivers/dma/idxd/cdev.c | 5
-rw-r--r--  drivers/dma/idxd/idxd.h | 15
-rw-r--r--  drivers/dma/idxd/init.c | 481
-rw-r--r--  drivers/dma/idxd/irq.c | 85
-rw-r--r--  drivers/dma/idxd/registers.h | 1
-rw-r--r--  drivers/dma/idxd/sysfs.c | 10
-rw-r--r--  drivers/dma/ioat/dca.c | 8
-rw-r--r--  drivers/dma/mv_xor.c | 5
-rw-r--r--  drivers/dma/ptdma/Kconfig | 13
-rw-r--r--  drivers/dma/qcom/gpi.c | 31
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 4
-rw-r--r--  drivers/dma/sun4i-dma.c | 208
-rw-r--r--  drivers/dma/tegra210-adma.c | 100
-rw-r--r--  drivers/dma/ti/edma.c | 7
-rw-r--r--  drivers/dma/ti/k3-udma.c | 16
-rw-r--r--  drivers/dma/xilinx/xdma.c | 8
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 20
-rw-r--r--  drivers/dpll/dpll_core.c | 2
-rw-r--r--  drivers/edac/Kconfig | 47
-rw-r--r--  drivers/edac/Makefile | 6
-rw-r--r--  drivers/edac/amd64_edac.c | 53
-rw-r--r--  drivers/edac/cell_edac.c | 281
-rw-r--r--  drivers/edac/debugfs.c | 5
-rwxr-xr-x  drivers/edac/ecs.c | 205
-rw-r--r--  drivers/edac/edac_device.c | 185
-rw-r--r--  drivers/edac/edac_mc.c | 2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 6
-rw-r--r--  drivers/edac/i10nm_base.c | 35
-rw-r--r--  drivers/edac/i5000_edac.c | 8
-rw-r--r--  drivers/edac/i5400_edac.c | 3
-rw-r--r--  drivers/edac/i7300_edac.c | 7
-rw-r--r--  drivers/edac/ie31200_edac.c | 641
-rw-r--r--  drivers/edac/igen6_edac.c | 41
-rw-r--r--  drivers/edac/loongson_edac.c | 157
-rwxr-xr-x  drivers/edac/mem_repair.c | 359
-rw-r--r--  drivers/edac/pnd2_edac.c | 4
-rw-r--r--  drivers/edac/qcom_edac.c | 4
-rwxr-xr-x  drivers/edac/scrub.c | 209
-rw-r--r--  drivers/edac/skx_base.c | 11
-rw-r--r--  drivers/edac/skx_common.c | 80
-rw-r--r--  drivers/edac/skx_common.h | 14
-rw-r--r--  drivers/edac/xgene_edac.c | 17
-rw-r--r--  drivers/extcon/extcon-fsa9480.c | 2
-rw-r--r--  drivers/extcon/extcon-ptn5150.c | 2
-rw-r--r--  drivers/extcon/extcon-rtk-type-c.c | 2
-rw-r--r--  drivers/firewire/core-device.c | 4
-rw-r--r--  drivers/firewire/device-attribute-test.c | 2
-rw-r--r--  drivers/firewire/ohci.c | 44
-rw-r--r--  drivers/firewire/sbp2.c | 10
-rw-r--r--  drivers/firmware/Kconfig | 2
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 4
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 74
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 12
-rw-r--r--  drivers/firmware/arm_scmi/transports/mailbox.c | 1
-rw-r--r--  drivers/firmware/arm_scmi/transports/smc.c | 1
-rw-r--r--  drivers/firmware/arm_scmi/transports/virtio.c | 1
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c | 5
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c | 9
-rw-r--r--  drivers/firmware/cirrus/Kconfig | 18
-rw-r--r--  drivers/firmware/cirrus/Makefile | 2
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c | 24
-rw-r--r--  drivers/firmware/cirrus/test/Makefile | 23
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_bin.c | 199
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c | 752
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c | 367
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_utils.c | 13
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c | 473
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin.c | 2556
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c | 600
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c | 688
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c | 3282
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c | 1851
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c | 2669
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c | 2211
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c | 1347
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_tests.c | 14
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 2
-rw-r--r--  drivers/firmware/efi/cper-x86.c | 2
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 4
-rw-r--r--  drivers/firmware/efi/efi.c | 9
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 9
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c | 49
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 20
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 323
-rw-r--r--  drivers/firmware/efi/libstub/kaslr.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/mem.c | 20
-rw-r--r--  drivers/firmware/efi/libstub/pci.c | 34
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 11
-rw-r--r--  drivers/firmware/efi/libstub/relocate.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 164
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 57
-rw-r--r--  drivers/firmware/efi/sysfb_efi.c | 2
-rw-r--r--  drivers/firmware/google/cbmem.c | 10
-rw-r--r--  drivers/firmware/google/gsmi.c | 6
-rw-r--r--  drivers/firmware/google/memconsole.c | 4
-rw-r--r--  drivers/firmware/google/vpd.c | 8
-rw-r--r--  drivers/firmware/imx/Kconfig | 1
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 1
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 5
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 18
-rw-r--r--  drivers/firmware/qcom/qcom_scm-smc.c | 6
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 271
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h | 4
-rw-r--r--  drivers/firmware/stratix10-svc.c | 9
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c | 117
-rw-r--r--  drivers/fpga/dfl-afu-error.c | 59
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 278
-rw-r--r--  drivers/fpga/dfl-afu-region.c | 51
-rw-r--r--  drivers/fpga/dfl-afu.h | 26
-rw-r--r--  drivers/fpga/dfl-fme-br.c | 24
-rw-r--r--  drivers/fpga/dfl-fme-error.c | 98
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 95
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 86
-rw-r--r--  drivers/fpga/dfl.c | 445
-rw-r--r--  drivers/fpga/dfl.h | 140
-rw-r--r--  drivers/gpio/Kconfig | 5
-rw-r--r--  drivers/gpio/gpio-aggregator.c | 20
-rw-r--r--  drivers/gpio/gpio-altera.c | 9
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 71
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 1
-rw-r--r--  drivers/gpio/gpio-mxc.c | 3
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 22
-rw-r--r--  drivers/gpio/gpio-rcar.c | 31
-rw-r--r--  drivers/gpio/gpio-regmap.c | 2
-rw-r--r--  drivers/gpio/gpio-sim.c | 20
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 15
-rw-r--r--  drivers/gpio/gpio-tps65219.c | 12
-rw-r--r--  drivers/gpio/gpio-tqmx86.c | 206
-rw-r--r--  drivers/gpio/gpio-twl6040.c | 6
-rw-r--r--  drivers/gpio/gpio-vf610.c | 4
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 32
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 14
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 15
-rw-r--r--  drivers/gpio/gpiolib.c | 119
-rw-r--r--  drivers/gpio/gpiolib.h | 9
-rw-r--r--  drivers/gpu/drm/Kconfig | 84
-rw-r--r--  drivers/gpu/drm/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 285
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 212
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 345
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 209
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v12_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 708
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 304
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_14.h | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 1118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 3232
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 202
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 1136
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 62
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 344
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 564
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 55
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 136
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c | 140
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 243
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 311
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 125
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 179
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 201
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 124
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c | 80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 109
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 122
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 209
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h | 401
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 77
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 1420
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 135
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 225
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 428
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c | 307
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c | 1178
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h) | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c | 549
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 144
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 257
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 836
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 179
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 120
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 101
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 177
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 219
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_pcie.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 14
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h | 29
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h | 37
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 17
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h | 47
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 108
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 89
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 272
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 94
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 4
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 80
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c | 28
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h | 3
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/chrontel-ch7033.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6263.c | 41
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 337
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 187
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 11
-rw-r--r--  drivers/gpu/drm/bridge/sii9234.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 29
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c | 1030
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ti-dlpc3433.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c | 149
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 2
-rw-r--r--  drivers/gpu/drm/clients/Kconfig | 123
-rw-r--r--  drivers/gpu/drm/clients/Makefile | 8
-rw-r--r--  drivers/gpu/drm/clients/drm_client_internal.h | 25
-rw-r--r--  drivers/gpu/drm/clients/drm_client_setup.c (renamed from drivers/gpu/drm/drm_client_setup.c) | 34
-rw-r--r--  drivers/gpu/drm/clients/drm_fbdev_client.c (renamed from drivers/gpu/drm/drm_fbdev_client.c) | 4
-rw-r--r--  drivers/gpu/drm/clients/drm_log.c | 420
-rw-r--r--  drivers/gpu/drm/display/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/display/Makefile | 2
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c | 170
-rw-r--r--  drivers/gpu/drm/display/drm_dp_cec.c | 14
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 127
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 140
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_audio_helper.c | 190
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_state_helper.c | 91
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 4
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 9
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 181
-rw-r--r--  drivers/gpu/drm/drm_draw.c | 233
-rw-r--r--  drivers/gpu/drm/drm_draw_internal.h | 56
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 32
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_fbdev_dma.c | 217
-rw-r--r--  drivers/gpu/drm/drm_file.c | 23
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 9
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 3
-rw-r--r--  drivers/gpu/drm/drm_of.c | 4
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 3
-rw-r--r--  drivers/gpu/drm/drm_panel_backlight_quirks.c | 94
-rw-r--r--  drivers/gpu/drm/drm_panic.c | 269
-rw-r--r--  drivers/gpu/drm/drm_panic_qr.rs | 18
-rw-r--r--  drivers/gpu/drm/drm_print.c | 23
-rw-r--r--  drivers/gpu/drm/drm_vblank_work.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 13
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 41
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gud/gud_drv.c | 3
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile | 3
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c | 164
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h | 63
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h | 19
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c | 220
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h | 28
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c | 332
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h | 76
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c | 118
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 17
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 19
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 41
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 20
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 36
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_display_sr.c | 97
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_display_sr.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm_regs.h | 257
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 177
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 163
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt_regs.h | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 667
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 138
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 274
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 155
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 305
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 952
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 567
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reset.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_snapshot.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 317
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 1275
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 55
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 935
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_test.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt_common.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hti.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_load_detect.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 184
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.h | 19
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c195
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c174
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c88
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c3
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c287
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h11
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c55
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c863
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h5
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c28
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c23
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c17
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c52
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c84
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_active.c18
-rw-r--r--drivers/gpu/drm/i915/i915_active.h1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c133
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h1
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c12
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c7
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c53
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h380
-rw-r--r--drivers/gpu/drm/i915/i915_request.c3
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c141
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.h14
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h28
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.c44
-rw-r--r--drivers/gpu/drm/i915/intel_cpu_info.h13
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c9
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h5
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c22
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c10
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore_trace.h49
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c28
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h3
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c1
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.h1
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c6
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c134
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h3
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c3
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c3
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c3
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c3
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c10
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c69
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c81
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp_reg.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c22
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c203
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h26
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c54
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c24
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h31
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h252
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c61
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c75
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c395
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c175
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c277
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c159
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h35
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c26
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h11
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.c10
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c53
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h18
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c67
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c3
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c79
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h4
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c2
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml5
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml11
-rw-r--r--drivers/gpu/drm/msm/registers/display/mdss.xml11
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c3
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c3
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/log.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c8
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c8
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c1
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c136
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c9
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c4
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c12
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c90
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h37
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c7
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c142
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c23
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c26
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c22
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c68
-rw-r--r--drivers/gpu/drm/radeon/Kconfig1
-rw-r--r--drivers/gpu/drm/radeon/r300.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c45
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c10
-rw-r--r--drivers/gpu/drm/radeon/rs400.c18
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c21
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c24
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c18
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c6
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c487
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c147
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c598
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h28
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c219
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h2
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h4
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c11
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/stm/drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c32
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c523
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c17
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c661
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h102
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c28
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c5
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h5
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c34
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/tiny/Makefile2
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c6
-rw-r--r--drivers/gpu/drm/tiny/bochs.c9
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c (renamed from drivers/gpu/drm/tiny/cirrus.c)10
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c4
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c3
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c3
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c4
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c4
-rw-r--r--drivers/gpu/drm/tiny/repaper.c3
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c3
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c4
-rw-r--r--drivers/gpu/drm/tiny/st7586.c3
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c18
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c4
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c52
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c54
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c23
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h8
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c57
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h12
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h29
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c49
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c19
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c3
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c8
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c106
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c96
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h54
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c216
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h6
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c640
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h217
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c737
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c105
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c871
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h297
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c91
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c125
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c179
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c35
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c314
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c10
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h63
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c409
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c49
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c21
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c4
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug4
-rw-r--r--drivers/gpu/drm/xe/Makefile6
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h20
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h38
-rw-r--r--drivers/gpu/drm/xe/abi/guc_capture_abi.h2
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h20
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h16
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h)0
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c13
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c25
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c116
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c12
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c18
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h19
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h8
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c190
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h35
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c14
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c159
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h9
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_device.c19
-rw-r--r--drivers/gpu/drm/xe/xe_device.h3
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h65
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c85
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h1
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c2
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c24
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c10
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c37
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c47
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c80
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c29
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c350
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c26
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c35
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c43
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c132
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c157
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c8
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c194
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h7
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c22
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c354
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h8
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c53
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h6
-rw-r--r--drivers/gpu/drm/xe/xe_macros.h12
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c26
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c2
-rw-r--r--drivers/gpu/drm/xe/xe_module.c2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c202
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c4
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c26
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c101
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_query.c5
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c53
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c37
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c5
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h15
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c263
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h14
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h11
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h17
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h52
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c54
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c61
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c180
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h22
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c233
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h11
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c6
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules3
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c6
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig9
-rw-r--r--drivers/gpu/drm/xlnx/Makefile1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c48
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c59
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h7
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c447
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c39
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h16
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c3
-rw-r--r--drivers/gpu/host1x/dev.c8
-rw-r--r--drivers/gpu/host1x/intr.c2
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c4
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c24
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c38
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h24
-rw-r--r--drivers/hid/hid-apple.c13
-rw-r--r--drivers/hid/hid-appleir.c2
-rw-r--r--drivers/hid/hid-asus.c26
-rw-r--r--drivers/hid/hid-core.c29
-rw-r--r--drivers/hid/hid-corsair-void.c86
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c37
-rw-r--r--drivers/hid/hid-lenovo.c104
-rw-r--r--drivers/hid/hid-magicmouse.c8
-rw-r--r--drivers/hid/hid-multitouch.c16
-rw-r--r--drivers/hid/hid-nintendo.c18
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-roccat-arvo.c20
-rw-r--r--drivers/hid/hid-roccat-common.h22
-rw-r--r--drivers/hid/hid-roccat-isku.c22
-rw-r--r--drivers/hid/hid-roccat-kone.c22
-rw-r--r--drivers/hid/hid-roccat-koneplus.c42
-rw-r--r--drivers/hid/hid-roccat-konepure.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c38
-rw-r--r--drivers/hid/hid-roccat-lua.c10
-rw-r--r--drivers/hid/hid-roccat-pyra.c50
-rw-r--r--drivers/hid/hid-roccat-ryos.c4
-rw-r--r--drivers/hid/hid-roccat-savu.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c21
-rw-r--r--drivers/hid/hid-steam.c49
-rw-r--r--drivers/hid/hid-steelseries.c120
-rw-r--r--drivers/hid/hid-thrustmaster.c8
-rw-r--r--drivers/hid/hid-topre.c7
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/hid-winwing.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c14
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c15
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c7
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c21
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c19
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c30
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h3
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig42
-rw-r--r--drivers/hid/intel-thc-hid/Makefile22
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c969
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h186
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c166
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c224
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h20
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c989
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h172
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c165
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c414
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h25
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c1578
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h116
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c969
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h146
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h881
-rw-r--r--drivers/hid/surface-hid/Kconfig2
-rw-r--r--drivers/hid/usbhid/Kconfig3
-rw-r--r--drivers/hid/wacom.h8
-rw-r--r--drivers/hid/wacom_sys.c43
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hv/channel_mgmt.c61
-rw-r--r--drivers/hv/connection.c4
-rw-r--r--drivers/hv/hv_balloon.c22
-rw-r--r--drivers/hv/hv_common.c19
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h16
-rw-r--r--drivers/hv/vmbus_drv.c44
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c41
-rw-r--r--drivers/hwmon/ad7314.c10
-rw-r--r--drivers/hwmon/asus-ec-sensors.c13
-rw-r--r--drivers/hwmon/asus_atk0110.c15
-rw-r--r--drivers/hwmon/chipcap2.c63
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c8
-rw-r--r--drivers/hwmon/drivetemp.c8
-rw-r--r--drivers/hwmon/hwmon.c29
-rw-r--r--drivers/hwmon/isl28022.c11
-rw-r--r--drivers/hwmon/k10temp.c7
-rw-r--r--drivers/hwmon/lm75.c339
-rw-r--r--drivers/hwmon/ltc2991.c2
-rw-r--r--drivers/hwmon/nct6683.c6
-rw-r--r--drivers/hwmon/nct6775-core.c10
-rw-r--r--drivers/hwmon/ntc_thermistor.c66
-rw-r--r--drivers/hwmon/occ/p9_sbe.c4
-rw-r--r--drivers/hwmon/peci/dimmtemp.c10
-rw-r--r--drivers/hwmon/pmbus/Kconfig30
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c10
-rw-r--r--drivers/hwmon/pmbus/crps.c74
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c7
-rw-r--r--drivers/hwmon/pmbus/max15301.c1
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.h4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c94
-rw-r--r--drivers/hwmon/pmbus/tps25990.c436
-rw-r--r--drivers/hwmon/pwm-fan.c26
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c364
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c22
-rw-r--r--drivers/hwmon/spd5118.c8
-rw-r--r--drivers/hwmon/tmp108.c13
-rw-r--r--drivers/hwmon/tmp513.c7
-rw-r--r--drivers/hwmon/xgene-hwmon.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c113
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c81
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c55
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-self-hosted-trace.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c15
-rw-r--r--drivers/hwtracing/intel_th/core.c3
-rw-r--r--drivers/hwtracing/intel_th/msu.c13
-rw-r--r--drivers/hwtracing/intel_th/pci.c15
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/busses/Kconfig5
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c12
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c12
-rw-r--r--drivers/i2c/busses/i2c-amd-asf-plat.c1
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-amd756.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c112
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c5
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c35
-rw-r--r--drivers/i2c/busses/i2c-i801.c131
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c811
-rw-r--r--drivers/i2c/busses/i2c-imx.c103
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-keba.c8
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c16
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c434
-rw-r--r--drivers/i2c/busses/i2c-omap.c26
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c56
-rw-r--r--drivers/i2c/busses/i2c-rcar.c20
-rw-r--r--drivers/i2c/busses/i2c-riic.c134
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/i2c/busses/i2c-xiic.c281
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c22
-rw-r--r--drivers/i2c/i2c-core-base.c12
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c8
-rw-r--r--drivers/i2c/i2c-slave-testunit.c19
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i3c/master.c14
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/dw-i3c-master.c15
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c3
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c17
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c148
-rw-r--r--drivers/idle/Makefile5
-rw-r--r--drivers/idle/intel_idle.c25
-rw-r--r--drivers/iio/accel/adxl345.h81
-rw-r--r--drivers/iio/accel/adxl345_core.c417
-rw-r--r--drivers/iio/accel/adxl345_i2c.c2
-rw-r--r--drivers/iio/accel/adxl345_spi.c7
-rw-r--r--drivers/iio/accel/bma220_spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c14
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c2
-rw-r--r--drivers/iio/accel/fxls8962af.h2
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c4
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c4
-rw-r--r--drivers/iio/accel/kionix-kx022a.c169
-rw-r--r--drivers/iio/accel/kionix-kx022a.h14
-rw-r--r--drivers/iio/adc/ad4000.c313
-rw-r--r--drivers/iio/adc/ad4695.c102
-rw-r--r--drivers/iio/adc/ad7124.c220
-rw-r--r--drivers/iio/adc/ad7173.c129
-rw-r--r--drivers/iio/adc/ad7192.c6
-rw-r--r--drivers/iio/adc/ad7606.c50
-rw-r--r--drivers/iio/adc/ad7606.h2
-rw-r--r--drivers/iio/adc/ad7625.c8
-rw-r--r--drivers/iio/adc/ad7791.c1
-rw-r--r--drivers/iio/adc/ad7793.c3
-rw-r--r--drivers/iio/adc/ad7944.c2
-rw-r--r--drivers/iio/adc/ad9467.c15
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c194
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c68
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c21
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/max1118.c2
-rw-r--r--drivers/iio/adc/max11410.c2
-rw-r--r--drivers/iio/adc/max1363.c30
-rw-r--r--drivers/iio/adc/mcp3911.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c47
-rw-r--r--drivers/iio/adc/pac1921.c95
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/rtq6056.c2
-rw-r--r--drivers/iio/adc/rzg2l_adc.c429
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c13
-rw-r--r--drivers/iio/adc/ti-adc081c.c2
-rw-r--r--drivers/iio/adc/ti-adc084s021.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads1119.c6
-rw-r--r--drivers/iio/adc/ti-ads124s08.c4
-rw-r--r--drivers/iio/adc/ti-ads1298.c2
-rw-r--r--drivers/iio/adc/ti-ads131e08.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/ti-lmp92064.c2
-rw-r--r--drivers/iio/adc/ti-tsc2046.c6
-rw-r--r--drivers/iio/adc/vf610_adc.c100
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c19
-rw-r--r--drivers/iio/chemical/bme680.h2
-rw-r--r--drivers/iio/chemical/bme680_core.c124
-rw-r--r--drivers/iio/chemical/bme680_i2c.c1
-rw-r--r--drivers/iio/chemical/bme680_spi.c1
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/ens160_core.c2
-rw-r--r--drivers/iio/chemical/scd30_core.c2
-rw-r--r--drivers/iio/chemical/scd4x.c2
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c4
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c12
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad3552r-common.c5
-rw-r--r--drivers/iio/dac/ad3552r-hs.c6
-rw-r--r--drivers/iio/dac/ad3552r.c6
-rw-r--r--drivers/iio/dac/ad3552r.h8
-rw-r--r--drivers/iio/dac/ad5624r.h4
-rw-r--r--drivers/iio/dac/ad5686-spi.c6
-rw-r--r--drivers/iio/dac/ad5686.c62
-rw-r--r--drivers/iio/dac/ad5686.h6
-rw-r--r--drivers/iio/dac/ad5696-i2c.c6
-rw-r--r--drivers/iio/dac/ad7293.c68
-rw-r--r--drivers/iio/dac/ad8801.c81
-rw-r--r--drivers/iio/dac/ltc2632.c69
-rw-r--r--drivers/iio/dac/ltc2688.c44
-rw-r--r--drivers/iio/dac/max5821.c36
-rw-r--r--drivers/iio/dac/mcp4725.c2
-rw-r--r--drivers/iio/dac/rohm-bd79703.c162
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/iio/filter/admv8818.c14
-rw-r--r--drivers/iio/gyro/adxrs290.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c9
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/hts221.h2
-rw-r--r--drivers/iio/imu/adis16480.c75
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c2
-rw-r--r--drivers/iio/imu/bno055/bno055.c10
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c22
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c25
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig18
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c6
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-gts-helper.c77
-rw-r--r--drivers/iio/inkern.c13
-rw-r--r--drivers/iio/light/Kconfig32
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/apds9306.c4
-rw-r--r--drivers/iio/light/as73211.c26
-rw-r--r--drivers/iio/light/bh1745.c4
-rw-r--r--drivers/iio/light/cm3232.c18
-rw-r--r--drivers/iio/light/hid-sensor-prox.c8
-rw-r--r--drivers/iio/light/isl29125.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/light/opt4060.c1343
-rw-r--r--drivers/iio/light/rohm-bu27008.c1635
-rw-r--r--drivers/iio/light/rohm-bu27034.c75
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/st_uvis25.h2
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/light/vcnl4035.c2
-rw-r--r--drivers/iio/light/veml3235.c274
-rw-r--r--drivers/iio/light/veml6030.c76
-rw-r--r--drivers/iio/magnetometer/af8133j.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843.h2
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/multiplexer/iio-mux.c84
-rw-r--r--drivers/iio/pressure/bmp280-core.c39
-rw-r--r--drivers/iio/pressure/bmp280.h8
-rw-r--r--drivers/iio/pressure/hsc030pa.h2
-rw-r--r--drivers/iio/pressure/ms5611_core.c2
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c80
-rw-r--r--drivers/iio/pressure/zpa2326.c2
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/aw96103.c2
-rw-r--r--drivers/iio/proximity/hx9023s.c96
-rw-r--r--drivers/iio/proximity/mb1232.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx_common.h2
-rw-r--r--drivers/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/iio/temperature/tmp006.c4
-rw-r--r--drivers/iio/test/Kconfig2
-rw-r--r--drivers/iio/test/iio-test-rescale.c4
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c4
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c69
-rw-r--r--drivers/infiniband/core/cache.c35
-rw-r--r--drivers/infiniband/core/device.c116
-rw-r--r--drivers/infiniband/core/ud_header.c83
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c42
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c49
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c358
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h24
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c120
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c8
-rw-r--r--drivers/infiniband/hw/efa/efa.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c28
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c71
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c65
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h135
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c62
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c301
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c568
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h166
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h14
-rw-r--r--drivers/infiniband/hw/hfi1/init.c5
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c31
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c14
-rw-r--r--drivers/infiniband/hw/hns/Kconfig20
-rw-r--r--drivers/infiniband/hw/hns/Makefile9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c77
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c20
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h4
-rw-r--r--drivers/infiniband/hw/irdma/protos.h4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c71
-rw-r--r--drivers/infiniband/hw/mana/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c286
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c13
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c8
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c37
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c33
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c71
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h1
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c83
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c73
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c66
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c25
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/ff-core.c91
-rw-r--r--drivers/input/ff-memless.c18
-rw-r--r--drivers/input/input-mt.c34
-rw-r--r--drivers/input/input-poller.c4
-rw-r--r--drivers/input/input.c339
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/xpad.c44
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c3
-rw-r--r--drivers/input/keyboard/lm8323.c3
-rw-r--r--drivers/input/misc/Kconfig12
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ideapad_slidebar.c4
-rw-r--r--drivers/input/misc/iqs7222.c50
-rw-r--r--drivers/input/misc/max77693-haptic.c3
-rw-r--r--drivers/input/misc/mma8450.c16
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c8
-rw-r--r--drivers/input/misc/qnap-mcu-input.c153
-rw-r--r--drivers/input/misc/regulator-haptic.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/mouse/synaptics.c56
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h111
-rw-r--r--drivers/input/serio/i8042.c17
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c3
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c26
-rw-r--r--drivers/input/touchscreen/imagis.c9
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/interconnect/icc-clk.c10
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c2
-rw-r--r--drivers/interconnect/qcom/sm8750.c1705
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/amd/amd_iommu.h9
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h42
-rw-r--r--drivers/iommu/amd/init.c285
-rw-r--r--drivers/iommu/amd/iommu.c533
-rw-r--r--drivers/iommu/amd/pasid.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c8
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c15
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c298
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h31
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c8
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c121
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c43
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cache.c11
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/dmar.c1
-rw-r--r--drivers/iommu/intel/iommu.c60
-rw-r--r--drivers/iommu/intel/irq_remapping.c9
-rw-r--r--drivers/iommu/intel/pasid.c22
-rw-r--r--drivers/iommu/intel/pasid.h6
-rw-r--r--drivers/iommu/intel/prq.c4
-rw-r--r--drivers/iommu/io-pgfault.c1
-rw-r--r--drivers/iommu/io-pgtable-arm.c227
-rw-r--r--drivers/iommu/iommu.c39
-rw-r--r--drivers/iommu/iommufd/fault.c44
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c10
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h29
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c2
-rw-r--r--drivers/iommu/iommufd/main.c32
-rw-r--r--drivers/iommu/iommufd/selftest.c45
-rw-r--r--drivers/iommu/msm_iommu.c51
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c3
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/riscv/iommu-pci.c8
-rw-r--r--drivers/iommu/riscv/iommu-platform.c108
-rw-r--r--drivers/iommu/riscv/iommu.c14
-rw-r--r--drivers/iommu/riscv/iommu.h1
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig27
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c28
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c57
-rw-r--r--drivers/irqchip/irq-gic-v2m.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c25
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3.c55
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c14
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c1
-rw-r--r--drivers/irqchip/irq-jcore-aic.c2
-rw-r--r--drivers/irqchip/irq-keystone.c11
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c16
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c1
-rw-r--r--drivers/irqchip/irq-msi-lib.c11
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c3
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c1
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c1
-rw-r--r--drivers/irqchip/irq-partition-percpu.c2
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c53
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c198
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c24
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c16
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c215
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c151
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h12
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c249
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c86
-rw-r--r--drivers/irqchip/irq-thead-c900-aclint-sswi.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c1
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/irqchip/irqchip.c4
-rw-r--r--drivers/irqchip/qcom-pdc.c67
-rw-r--r--drivers/isdn/mISDN/core.c14
-rw-r--r--drivers/isdn/mISDN/core.h1
-rw-r--r--drivers/leds/Kconfig44
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/led-triggers.c4
-rw-r--r--drivers/leds/leds-cht-wcove.c6
-rw-r--r--drivers/leds/leds-lp8860.c2
-rw-r--r--drivers/leds/leds-lp8864.c296
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-qnap-mcu.c227
-rw-r--r--drivers/leds/leds-st1202.c415
-rw-r--r--drivers/leds/leds-turris-omnia.c336
-rw-r--r--drivers/leds/leds-upboard.c126
-rw-r--r--drivers/leds/leds.h4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c8
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c2
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c4
-rw-r--r--drivers/macintosh/mac_hid.c2
-rw-r--r--drivers/macintosh/smu.c6
-rw-r--r--drivers/mailbox/Kconfig24
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/exynos-mailbox.c157
-rw-r--r--drivers/mailbox/mailbox-mchp-ipc-sbi.c504
-rw-r--r--drivers/mailbox/mailbox-mpfs.c2
-rw-r--r--drivers/mailbox/mailbox-th1520.c6
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c1
-rw-r--r--drivers/mailbox/qcom-ipcc.c16
-rw-r--r--drivers/mailbox/tegra-hsp.c6
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c2
-rw-r--r--drivers/md/Kconfig13
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-crypt.c42
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c25
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-linear.c5
-rw-r--r--drivers/md/dm-ps-io-affinity.c2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c5
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-stripe.c5
-rw-r--r--drivers/md/dm-table.c29
-rw-r--r--drivers/md/dm-vdo/dedupe.c1
-rw-r--r--drivers/md/dm-verity-fec.c6
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm.c31
-rw-r--r--drivers/md/md-autodetect.c8
-rw-r--r--drivers/md/md-bitmap.c121
-rw-r--r--drivers/md/md-bitmap.h7
-rw-r--r--drivers/md/md-linear.c352
-rw-r--r--drivers/md/md.c38
-rw-r--r--drivers/md/md.h5
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c54
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1.c40
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c32
-rw-r--r--drivers/md/raid10.h1
-rw-r--r--drivers/md/raid5-cache.c20
-rw-r--r--drivers/md/raid5.c111
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/cec/core/cec-adap.c5
-rw-r--r--drivers/media/cec/core/cec-core.c5
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c3
-rw-r--r--drivers/media/cec/core/cec-pin.c6
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c7
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h4
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c13
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c8
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c18
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c15
-rw-r--r--drivers/media/i2c/ds90ub913.c26
-rw-r--r--drivers/media/i2c/ds90ub953.c56
-rw-r--r--drivers/media/i2c/ds90ub960.c188
-rw-r--r--drivers/media/i2c/imx208.c2
-rw-r--r--drivers/media/i2c/imx290.c81
-rw-r--r--drivers/media/i2c/imx296.c2
-rw-r--r--drivers/media/i2c/imx412.c42
-rw-r--r--drivers/media/i2c/ov2740.c58
-rw-r--r--drivers/media/i2c/ov5640.c1
-rw-r--r--drivers/media/i2c/ov9282.c2
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c17
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c15
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c29
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-cpd.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c1
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h3
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_in.c12
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c20
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.h4
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c14
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c4
-rw-r--r--drivers/media/platform/broadcom/bcm2835-unicam.c42
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c6
-rw-r--r--drivers/media/platform/marvell/mcam-core.c7
-rw-r--r--drivers/media/platform/marvell/mmp-driver.c21
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c77
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c537
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h1
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c4
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c7
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c14
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h1
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c13
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h7
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss.c379
-rw-r--r--drivers/media/platform/qcom/camss/camss.h5
-rw-r--r--drivers/media/platform/qcom/venus/Kconfig1
-rw-r--r--drivers/media/platform/qcom/venus/core.c113
-rw-r--r--drivers/media/platform/qcom/venus/core.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c11
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c18
-rw-r--r--drivers/media/platform/qcom/venus/venc.c18
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c91
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c3
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c131
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.c9
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-param.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c10
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-core.c13
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c7
-rw-r--r--drivers/media/platform/st/stm32/Kconfig14
-rw-r--r--drivers/media/platform/st/stm32/Makefile1
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c1137
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/Makefile2
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c128
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c119
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c122
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c540
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c440
-rw-r--r--drivers/media/platform/verisilicon/hantro.h9
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c32
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c21
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c10
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c9
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/wl128x/Kconfig15
-rw-r--r--drivers/media/radio/wl128x/Makefile7
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h229
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c1676
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h389
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c820
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h45
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c413
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.h24
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c604
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.h20
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/imon_raw.c2
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/pwm-ir-tx.c3
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c8
-rw-r--r--drivers/media/tuners/fc0013.c64
-rw-r--r--drivers/media/tuners/fc0013.h11
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c18
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c12
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c88
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c368
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvc_status.c8
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c164
-rw-r--r--drivers/media/usb/uvc/uvc_video.c59
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h15
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/memory/omap-gpmc.c53
-rw-r--r--drivers/memory/tegra/tegra20-emc.c8
-rw-r--r--drivers/memory/ti-aemif.c192
-rw-r--r--drivers/memstick/core/memstick.c46
-rw-r--r--drivers/memstick/core/ms_block.c3
-rw-r--r--drivers/memstick/core/mspro_block.c3
-rw-r--r--drivers/message/fusion/mptfc.c14
-rw-r--r--drivers/message/fusion/mptsas.c22
-rw-r--r--drivers/message/fusion/mptscsih.c10
-rw-r--r--drivers/message/fusion/mptscsih.h5
-rw-r--r--drivers/message/fusion/mptspi.c19
-rw-r--r--drivers/mfd/Kconfig25
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/axp20x.c7
-rw-r--r--drivers/mfd/cs42l43-i2c.c8
-rw-r--r--drivers/mfd/cs42l43-sdw.c10
-rw-r--r--drivers/mfd/cs42l43.c37
-rw-r--r--drivers/mfd/cs42l43.h1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/qnap-mcu.c338
-rw-r--r--drivers/mfd/stpmic1.c6
-rw-r--r--drivers/mfd/syscon.c122
-rw-r--r--drivers/mfd/tps65219.c15
-rw-r--r--drivers/mfd/upboard-fpga.c325
-rw-r--r--drivers/mfd/vexpress-sysreg.c1
-rw-r--r--drivers/misc/Kconfig4
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/c2port/core.c29
-rw-r--r--drivers/misc/cxl/Kconfig6
-rw-r--r--drivers/misc/cxl/of.c2
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c8
-rw-r--r--drivers/misc/ds1682.c8
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c2
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/eeprom/max6875.c4
-rw-r--r--drivers/misc/fastrpc.c66
-rw-r--r--drivers/misc/keba/cp500.c69
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c7
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c2
-rw-r--r--drivers/misc/misc_minor_kunit.c69
-rw-r--r--drivers/misc/ntsync.c1001
-rw-r--r--drivers/misc/ocxl/sysfs.c4
-rw-r--r--drivers/misc/pch_phub.c8
-rw-r--r--drivers/misc/pci_endpoint_test.c352
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c4
-rw-r--r--drivers/misc/sram.c8
-rw-r--r--drivers/misc/vcpu_stall_detector.c3
-rw-r--r--drivers/mmc/core/core.c7
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c4
-rw-r--r--drivers/mmc/host/bcm2835.c20
-rw-r--r--drivers/mmc/host/cqhci-crypto.c38
-rw-r--r--drivers/mmc/host/cqhci.h8
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c8
-rw-r--r--drivers/mmc/host/dw_mmc.c3
-rw-r--r--drivers/mmc/host/mtk-sd.c52
-rw-r--r--drivers/mmc/host/mxcmmc.c8
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c3
-rw-r--r--drivers/mmc/host/sdhci-acpi.c20
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c10
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c3
-rw-r--r--drivers/mmc/host/sdhci-msm.c147
-rw-r--r--drivers/mmc/host/sdhci_am654.c30
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c1
-rw-r--r--drivers/mtd/devices/mchp48l640.c28
-rw-r--r--drivers/mtd/devices/phram.c13
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c6
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c21
-rw-r--r--drivers/mtd/mtd_blkdevs.c7
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/qpic_common.c759
-rw-r--r--drivers/mtd/nand/raw/Kconfig12
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c5
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c44
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c137
-rw-r--r--drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c1029
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c1773
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c4
-rw-r--r--drivers/mtd/nand/spi/ato.c4
-rw-r--r--drivers/mtd/nand/spi/core.c40
-rw-r--r--drivers/mtd/nand/spi/esmt.c4
-rw-r--r--drivers/mtd/nand/spi/foresee.c14
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c16
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/mtd/nand/spi/micron.c8
-rw-r--r--drivers/mtd/nand/spi/paragon.c4
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c147
-rw-r--r--drivers/mtd/nand/spi/toshiba.c4
-rw-r--r--drivers/mtd/nand/spi/winbond.c27
-rw-r--r--drivers/mtd/nand/spi/xtx.c4
-rw-r--r--drivers/mtd/spi-nor/atmel.c4
-rw-r--r--drivers/mtd/spi-nor/core.c21
-rw-r--r--drivers/mtd/spi-nor/core.h6
-rw-r--r--drivers/mtd/spi-nor/macronix.c9
-rw-r--r--drivers/mtd/spi-nor/spansion.c10
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/mtd/spi-nor/sysfs.c8
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/cdev.c70
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/wl.c21
-rw-r--r--drivers/mux/core.c2
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_debugfs.c9
-rw-r--r--drivers/net/bonding/bond_main.c19
-rw-r--r--drivers/net/bonding/bond_options.c55
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c5
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c10
-rw-r--r--drivers/net/can/dev/dev.c2
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c18
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/kvaser_pciefd.c81
-rw-r--r--drivers/net/can/m_can/m_can.c31
-rw-r--r--drivers/net/can/m_can/m_can.h1
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c30
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c28
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c15
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c8
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c6
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c133
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c38
-rw-r--r--drivers/net/can/usb/ucan.c43
-rw-r--r--drivers/net/dsa/b53/b53_common.c14
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c118
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c4
-rw-r--r--drivers/net/dsa/mt7530.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c119
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c3
-rw-r--r--drivers/net/dsa/ocelot/felix.c9
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c10
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c7
-rw-r--r--drivers/net/dsa/qca/qca8k.h3
-rw-r--r--drivers/net/dsa/realtek/Kconfig6
-rw-r--r--drivers/net/dsa/realtek/Makefile3
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb-leds.c177
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c265
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.h107
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c8
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c19
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c19
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c4
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h3
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c39
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c160
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c93
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c64
-rw-r--r--drivers/net/ethernet/cortina/gemini.c5
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c3
-rw-r--r--drivers/net/ethernet/ec_bhf.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c197
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c28
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c433
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h29
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h23
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c13
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c66
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c35
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c604
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h22
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c74
-rw-r--r--drivers/net/ethernet/google/gve/gve.h10
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h29
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c160
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c134
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c199
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c22
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c120
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c458
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c231
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c104
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c23
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c550
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h101
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c495
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c176
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c155
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c427
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c154
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c32
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c15
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c25
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c270
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c144
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2658
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h81
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c461
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1074
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c25
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c142
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c118
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c41
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c68
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c23
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c29
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1056
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c114
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c10
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c1
-rw-r--r--drivers/net/ethernet/marvell/skge.c5
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c571
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c292
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c369
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1377
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c450
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c211
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h20
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c543
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c160
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c53
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c72
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c12
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c238
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h16
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c21
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c46
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h17
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c68
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c48
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h145
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c57
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h5
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c10
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c32
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c40
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h22
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c69
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c159
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c38
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h1
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c24
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c23
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c119
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h48
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c375
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/niu.c32
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c509
-rw-r--r--drivers/net/ethernet/ti/cpsw.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c21
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c5
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c181
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h10
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c11
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c44
-rw-r--r--drivers/net/geneve.c28
-rw-r--r--drivers/net/gtp.c25
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c18
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c21
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c6
-rw-r--r--drivers/net/loopback.c19
-rw-r--r--drivers/net/mctp/mctp-i2c.c8
-rw-r--r--drivers/net/mctp/mctp-i3c.c8
-rw-r--r--drivers/net/mdio/mdio-octeon.c25
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/netconsole.c62
-rw-r--r--drivers/net/netdevsim/ethtool.c14
-rw-r--r--drivers/net/netdevsim/hwstats.c29
-rw-r--r--drivers/net/netdevsim/netdev.c268
-rw-r--r--drivers/net/netdevsim/netdevsim.h9
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c23
-rw-r--r--drivers/net/netkit.c66
-rw-r--r--drivers/net/pcs/pcs-lynx.c39
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c25
-rw-r--r--drivers/net/pcs/pcs-xpcs.c44
-rw-r--r--drivers/net/pfcp.c15
-rw-r--r--drivers/net/phy/Kconfig14
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c2
-rw-r--r--drivers/net/phy/adin1100.c2
-rw-r--r--drivers/net/phy/air_en8811h.c2
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c2
-rw-r--r--drivers/net/phy/ax88796b.c2
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm54140.c2
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm84881.c12
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c385
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83869.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c114
-rw-r--r--drivers/net/phy/dp83tg720.c163
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/intel-xway.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c35
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c54
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c2
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c2
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1309
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c53
-rw-r--r--drivers/net/phy/microchip_t1s.c2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/mxl-gpy.c2
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c72
-rw-r--r--drivers/net/phy/nxp-cbtx.c2
-rw-r--r--drivers/net/phy/nxp-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-c45.c14
-rw-r--r--drivers/net/phy/phy.c172
-rw-r--r--drivers/net/phy/phy_device.c83
-rw-r--r--drivers/net/phy/phy_link_topology.c2
-rw-r--r--drivers/net/phy/phylink.c591
-rw-r--r--drivers/net/phy/qcom/at803x.c2
-rw-r--r--drivers/net/phy/qcom/qca807x.c4
-rw-r--r--drivers/net/phy/qcom/qca808x.c2
-rw-r--r--drivers/net/phy/qcom/qca83xx.c2
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek/Kconfig11
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c79
-rw-r--r--drivers/net/phy/realtek/realtek_main.c (renamed from drivers/net/phy/realtek.c)58
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c8
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/teranetics.c2
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c28
-rw-r--r--drivers/net/pse-pd/pd692x0.c224
-rw-r--r--drivers/net/pse-pd/pse_core.c181
-rw-r--r--drivers/net/pse-pd/pse_regulator.c23
-rw-r--r--drivers/net/pse-pd/tps23881.c449
-rw-r--r--drivers/net/tap.c6
-rw-r--r--drivers/net/team/team_core.c11
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/ipheth.c69
-rw-r--r--drivers/net/usb/lan78xx.c942
-rw-r--r--drivers/net/usb/rtl8150.c22
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c14
-rw-r--r--drivers/net/vrf.c49
-rw-r--r--drivers/net/vxlan/vxlan_core.c194
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c2
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c5
-rw-r--r--drivers/net/wan/framer/framer-core.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c132
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c747
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h179
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1183
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h373
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c84
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h33
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c156
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c327
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c12
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h14
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c2713
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c225
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h26
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c489
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h21
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c491
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h172
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c54
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c28
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c27
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c38
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c31
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c225
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c291
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c27
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c406
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c138
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c236
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c216
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c903
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c504
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h177
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c9
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c73
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c21
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c68
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c267
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig6
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c47
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h9
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h9
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c193
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h163
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c332
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h85
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c173
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h43
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c301
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c323
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h37
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c42
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c50
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c54
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c57
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c45
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c56
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c23
-rw-r--r--drivers/nfc/nfcmrvl/uart.c9
-rw-r--r--drivers/nfc/st21nfca/dep.c18
-rw-r--r--drivers/nfc/st21nfca/i2c.c1
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/ntb/test/ntb_pingpong.c3
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/claim.c9
-rw-r--r--drivers/nvme/host/apple.c60
-rw-r--r--drivers/nvme/host/core.c104
-rw-r--r--drivers/nvme/host/fc.c69
-rw-r--r--drivers/nvme/host/ioctl.c15
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/nvme.h39
-rw-r--r--drivers/nvme/host/pci.c70
-rw-r--r--drivers/nvme/host/sysfs.c2
-rw-r--r--drivers/nvme/host/tcp.c153
-rw-r--r--drivers/nvme/target/Kconfig11
-rw-r--r--drivers/nvme/target/Makefile2
-rw-r--r--drivers/nvme/target/admin-cmd.c389
-rw-r--r--drivers/nvme/target/configfs.c49
-rw-r--r--drivers/nvme/target/core.c306
-rw-r--r--drivers/nvme/target/discovery.c17
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c101
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c3
-rw-r--r--drivers/nvme/target/nvmet.h111
-rw-r--r--drivers/nvme/target/passthru.c18
-rw-r--r--drivers/nvme/target/pci-epf.c2610
-rw-r--r--drivers/nvme/target/rdma.c33
-rw-r--r--drivers/nvme/target/tcp.c15
-rw-r--r--drivers/nvme/target/zns.c3
-rw-r--r--drivers/nvmem/core.c37
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c38
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c1
-rw-r--r--drivers/nvmem/rmem.c95
-rw-r--r--drivers/of/address.c56
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/fdt.c41
-rw-r--r--drivers/of/fdt_address.c21
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/kobj.c4
-rw-r--r--drivers/of/of_private.h24
-rw-r--r--drivers/of/of_reserved_mem.c11
-rw-r--r--drivers/of/of_test.c119
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c23
-rw-r--r--drivers/of/property.c35
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi18
-rw-r--r--drivers/of/unittest.c31
-rw-r--r--drivers/opp/core.c99
-rw-r--r--drivers/opp/debugfs.c10
-rw-r--r--drivers/opp/of.c4
-rw-r--r--drivers/opp/opp.h1
-rw-r--r--drivers/parport/parport_serial.c12
-rw-r--r--drivers/pci/Kconfig6
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c27
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c449
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c13
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c52
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c56
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h19
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c69
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/pci-host-common.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c18
-rw-r--r--drivers/pci/controller/pci-mvebu.c1
-rw-r--r--drivers/pci/controller/pcie-apple.c75
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c117
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c5
-rw-r--r--drivers/pci/controller/pcie-rockchip.c219
-rw-r--r--drivers/pci/controller/pcie-rockchip.h35
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c50
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c96
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c17
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h6
-rw-r--r--drivers/pci/devres.c74
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c25
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c37
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c6
-rw-r--r--drivers/pci/iov.c8
-rw-r--r--drivers/pci/msi/api.c6
-rw-r--r--drivers/pci/msi/msi.c167
-rw-r--r--drivers/pci/of.c22
-rw-r--r--drivers/pci/of_property.c4
-rw-r--r--drivers/pci/p2pdma.c6
-rw-r--r--drivers/pci/pci-driver.c14
-rw-r--r--drivers/pci/pci-sysfs.c150
-rw-r--r--drivers/pci/pci.c275
-rw-r--r--drivers/pci/pci.h32
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c15
-rw-r--r--drivers/pci/pcie/aspm.c30
-rw-r--r--drivers/pci/pcie/bwctrl.c25
-rw-r--r--drivers/pci/pcie/dpc.c22
-rw-r--r--drivers/pci/pcie/tlp.c115
-rw-r--r--drivers/pci/probe.c110
-rw-r--r--drivers/pci/quirks.c21
-rw-r--r--drivers/pci/switch/switchtec.c26
-rw-r--r--drivers/pci/tph.c46
-rw-r--r--drivers/pci/vpd.c14
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c72
-rw-r--r--drivers/perf/arm-ccn.c5
-rw-r--r--drivers/perf/arm-cmn.c9
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c32
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c81
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h57
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c97
-rw-r--r--drivers/perf/arm_pmu.c8
-rw-r--r--drivers/perf/arm_pmuv3.c13
-rw-r--r--drivers/perf/arm_spe_pmu.c22
-rw-r--r--drivers/perf/arm_v7_pmu.c50
-rw-r--r--drivers/perf/dwc_pcie_pmu.c121
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c33
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c42
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c61
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c48
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c44
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c53
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c160
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h49
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c43
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c45
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c534
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c66
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/perf/thunderx2_pmu.c5
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c1
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c62
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c11
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c44
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h3
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c28
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c40
-rw-r--r--drivers/phy/phy-core.c23
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c100
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c278
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c55
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c284
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c148
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/samsung/Kconfig1
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c25
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c6
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c38
-rw-r--r--drivers/phy/tegra/Kconfig5
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c11
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c15
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c3
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c3
-rw-r--r--drivers/pinctrl/core.c50
-rw-r--r--drivers/pinctrl/mediatek/Kconfig7
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c1556
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c57
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c42
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c3
-rw-r--r--drivers/pinctrl/pinconf-generic.c8
-rw-r--r--drivers/pinctrl/pinctrl-amd.c30
-rw-r--r--drivers/pinctrl/pinctrl-amd.h7
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c42
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c11
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c20
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c200
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h3
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c2
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm6
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c34
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1620
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c10
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c190
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/spacemit/Kconfig3
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c81
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c12
-rw-r--r--drivers/platform/chrome/Kconfig7
-rw-r--r--drivers/platform/chrome/Makefile4
-rw-r--r--drivers/platform/chrome/cros_ec.c5
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c3
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c203
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c69
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c2
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c4
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c10
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c46
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h1
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c2
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c10
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c79
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.c373
-rw-r--r--drivers/platform/chrome/cros_typec_altmode.h51
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c5
-rw-r--r--drivers/platform/cznic/Kconfig1
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h130
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c20
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c113
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c5
-rw-r--r--drivers/platform/surface/surface_platform_profile.c44
-rw-r--r--drivers/platform/x86/acer-wmi.c550
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c19
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c48
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h3
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c48
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c332
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c394
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h24
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c30
-rw-r--r--drivers/platform/x86/amd/pmf/core.c24
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c66
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h44
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c77
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c60
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c82
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c55
-rw-r--r--drivers/platform/x86/asus-wmi.h3
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/dell/Makefile1
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c522
-rw-r--r--drivers/platform/x86/dell/dcdbas.c10
-rw-r--r--drivers/platform/x86/dell/dcdbas.h8
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c60
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c256
-rw-r--r--drivers/platform/x86/dell/dell-pc.c69
-rw-r--r--drivers/platform/x86/dell/dell-smo8800-ids.h27
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c16
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c7
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c17
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c20
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c42
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h5
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c14
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c449
-rw-r--r--drivers/platform/x86/hp/hp_accel.c4
-rw-r--r--drivers/platform/x86/ideapad-laptop.c43
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c43
-rw-r--r--drivers/platform/x86/intel/Kconfig1
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c79
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h9
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c14
-rw-r--r--drivers/platform/x86/intel/int3472/common.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c109
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c11
-rw-r--r--drivers/platform/x86/intel/pmt/class.c4
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c33
-rw-r--r--drivers/platform/x86/intel/sdsi.c34
-rw-r--r--drivers/platform/x86/intel/vsec.c7
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c69
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c5
-rw-r--r--drivers/platform/x86/msi-laptop.c6
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/quickstart.c1
-rw-r--r--drivers/platform/x86/serdev_helpers.h60
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c12
-rw-r--r--drivers/platform/x86/think-lmi.c13
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c94
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/wmi-bmof.c75
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile2
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c31
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c8
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c16
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c261
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h13
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c2
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c8
-rw-r--r--drivers/pmdomain/core.c15
-rw-r--r--drivers/pmdomain/imx/gpcv2.c2
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c1
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c3
-rw-r--r--drivers/pmdomain/mediatek/Kconfig12
-rw-r--r--drivers/pmdomain/mediatek/Makefile8
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c144
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c81
-rw-r--r--drivers/power/reset/Kconfig4
-rw-r--r--drivers/power/reset/as3722-poweroff.c2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c1
-rw-r--r--drivers/power/reset/gpio-poweroff.c8
-rw-r--r--drivers/power/reset/keystone-reset.c18
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c8
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c29
-rw-r--r--drivers/power/supply/88pm860x_battery.c4
-rw-r--r--drivers/power/supply/Kconfig9
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/ab8500_btemp.c5
-rw-r--r--drivers/power/supply/ab8500_chargalg.c14
-rw-r--r--drivers/power/supply/ab8500_charger.c5
-rw-r--r--drivers/power/supply/ab8500_fg.c33
-rw-r--r--drivers/power/supply/apm_power.c6
-rw-r--r--drivers/power/supply/axp20x_battery.c31
-rw-r--r--drivers/power/supply/bq2415x_charger.c36
-rw-r--r--drivers/power/supply/bq24190_charger.c29
-rw-r--r--drivers/power/supply/bq24257_charger.c8
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/charger-manager.c3
-rw-r--r--drivers/power/supply/cpcap-charger.c3
-rw-r--r--drivers/power/supply/cros_charge-control.c200
-rw-r--r--drivers/power/supply/da9030_battery.c3
-rw-r--r--drivers/power/supply/da9150-fg.c4
-rw-r--r--drivers/power/supply/ds2760_battery.c8
-rw-r--r--drivers/power/supply/ds2780_battery.c24
-rw-r--r--drivers/power/supply/ds2781_battery.c24
-rw-r--r--drivers/power/supply/ds2782_battery.c89
-rw-r--r--drivers/power/supply/gpio-charger.c13
-rw-r--r--drivers/power/supply/ip5xxx_power.c572
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c440
-rw-r--r--drivers/power/supply/max17042_battery.c203
-rw-r--r--drivers/power/supply/max1720x_battery.c66
-rw-r--r--drivers/power/supply/mm8013.c2
-rw-r--r--drivers/power/supply/olpc_battery.c11
-rw-r--r--drivers/power/supply/power_supply.h31
-rw-r--r--drivers/power/supply/power_supply_core.c260
-rw-r--r--drivers/power/supply/power_supply_hwmon.c50
-rw-r--r--drivers/power/supply/power_supply_sysfs.c192
-rw-r--r--drivers/power/supply/sbs-battery.c5
-rw-r--r--drivers/power/supply/stc3117_fuel_gauge.c612
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/test_power.c113
-rw-r--r--drivers/power/supply/ug3105_battery.c4
-rw-r--r--drivers/powercap/idle_inject.c3
-rw-r--r--drivers/powercap/intel_rapl_common.c6
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/pps/Makefile3
-rw-r--r--drivers/pps/clients/pps-gpio.c10
-rw-r--r--drivers/pps/clients/pps-ktimer.c4
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/pps/clients/pps_parport.c4
-rw-r--r--drivers/pps/generators/Kconfig22
-rw-r--r--drivers/pps/generators/Makefile4
-rw-r--r--drivers/pps/generators/pps_gen-dummy.c96
-rw-r--r--drivers/pps/generators/pps_gen.c344
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/pps/generators/sysfs.c75
-rw-r--r--drivers/pps/kapi.c10
-rw-r--r--drivers/pps/kc.c10
-rw-r--r--drivers/pps/pps.c127
-rw-r--r--drivers/ptp/ptp_chardev.c4
-rw-r--r--drivers/ptp/ptp_clock.c10
-rw-r--r--drivers/ptp/ptp_ocp.c18
-rw-r--r--drivers/ptp/ptp_vmclock.c47
-rw-r--r--drivers/pwm/core.c15
-rw-r--r--drivers/pwm/pwm-dwc.c14
-rw-r--r--drivers/pwm/pwm-lpss-pci.c9
-rw-r--r--drivers/pwm/pwm-microchip-core.c2
-rw-r--r--drivers/pwm/pwm-stm32-lp.c8
-rw-r--r--drivers/pwm/pwm-stm32.c7
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/ras/amd/atl/Kconfig1
-rw-r--r--drivers/ras/amd/atl/internal.h1
-rw-r--r--drivers/regulator/bd96801-regulator.c130
-rw-r--r--drivers/regulator/core.c304
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/of_regulator.c17
-rw-r--r--drivers/regulator/pca9450-regulator.c111
-rw-r--r--drivers/regulator/rtq2208-regulator.c216
-rw-r--r--drivers/regulator/tps6287x-regulator.c57
-rw-r--r--drivers/regulator/tps65219-regulator.c39
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c17
-rw-r--r--drivers/remoteproc/mtk_scp.c12
-rw-r--r--drivers/remoteproc/omap_remoteproc.c24
-rw-r--r--drivers/remoteproc/remoteproc_core.c14
-rw-r--r--drivers/remoteproc/st_remoteproc.c54
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c88
-rw-r--r--drivers/reset/amlogic/reset-meson-aux.c97
-rw-r--r--drivers/reset/reset-microchip-sparx5.c19
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c1
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/class.c3
-rw-r--r--drivers/rtc/rtc-88pm80x.c2
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c6
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cadence.c2
-rw-r--r--drivers/rtc/rtc-cmos.c7
-rw-r--r--drivers/rtc/rtc-cpcap.c2
-rw-r--r--drivers/rtc/rtc-cros-ec.c2
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-loongson.c17
-rw-r--r--drivers/rtc/rtc-lp8788.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-max77686.c2
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-max8997.c2
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mv.c4
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-mxc_v2.c2
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-palmas.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c82
-rw-r--r--drivers/rtc/rtc-pcf85063.c11
-rw-r--r--drivers/rtc/rtc-pic32.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c2
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-rc5t583.c2
-rw-r--r--drivers/rtc/rtc-rc5t619.c2
-rw-r--r--drivers/rtc/rtc-renesas-rtca3.c2
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-s5m.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sc27xx.c4
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-spear.c4
-rw-r--r--drivers/rtc/rtc-stm32.c22
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-sunplus.c4
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-test.c2
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tps6594.c2
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c2
-rw-r--r--drivers/rtc/rtc-xgene.c4
-rw-r--r--drivers/rtc/rtc-zynqmp.c8
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/sclp.c12
-rw-r--r--drivers/s390/char/sclp.h18
-rw-r--r--drivers/s390/char/sclp_config.c4
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/char/sclp_pci.c19
-rw-r--r--drivers/s390/char/sclp_sd.c4
-rw-r--r--drivers/s390/char/vmlogrdr.c8
-rw-r--r--drivers/s390/cio/chp.c31
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_setup.c21
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c128
-rw-r--r--drivers/s390/net/ism_drv.c14
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c15
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/scsi/3w-9xxx.c9
-rw-r--r--drivers/scsi/3w-sas.c21
-rw-r--r--drivers/scsi/3w-xxxx.c10
-rw-r--r--drivers/scsi/53c700.c19
-rw-r--r--drivers/scsi/BusLogic.c9
-rw-r--r--drivers/scsi/BusLogic.h3
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/advansys.c25
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l3
-rw-r--r--drivers/scsi/am53c974.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c12
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c7
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c20
-rw-r--r--drivers/scsi/cxlflash/Kconfig6
-rw-r--r--drivers/scsi/cxlflash/main.c4
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2
-rw-r--r--drivers/scsi/dc395x.c14
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c2
-rw-r--r--drivers/scsi/esas2r/esas2r.h12
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c32
-rw-r--r--drivers/scsi/esp_scsi.c14
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fdomain_pci.c2
-rw-r--r--drivers/scsi/fnic/Makefile5
-rw-r--r--drivers/scsi/fnic/fdls_disc.c4997
-rw-r--r--drivers/scsi/fnic/fdls_fc.h253
-rw-r--r--drivers/scsi/fnic/fip.c1005
-rw-r--r--drivers/scsi/fnic/fip.h159
-rw-r--r--drivers/scsi/fnic/fnic.h288
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c12
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c11
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c1742
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h434
-rw-r--r--drivers/scsi/fnic/fnic_fip.h48
-rw-r--r--drivers/scsi/fnic/fnic_io.h14
-rw-r--r--drivers/scsi/fnic/fnic_isr.c28
-rw-r--r--drivers/scsi/fnic/fnic_main.c761
-rw-r--r--drivers/scsi/fnic/fnic_pci_subsys_devid.c131
-rw-r--r--drivers/scsi/fnic/fnic_res.c77
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1161
-rw-r--r--drivers/scsi/fnic/fnic_stats.h49
-rw-r--r--drivers/scsi/fnic/fnic_trace.c97
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c16
-rw-r--r--drivers/scsi/hpsa.c20
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c20
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c8
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c4
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c48
-rw-r--r--drivers/scsi/ips.c6
-rw-r--r--drivers/scsi/ips.h3
-rw-r--r--drivers/scsi/isci/remote_device.c29
-rw-r--r--drivers/scsi/isci/remote_device.h17
-rw-r--r--drivers/scsi/iscsi_tcp.c6
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c11
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c210
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h85
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c22
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c19
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c22
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvumi.c5
-rw-r--r--drivers/scsi/myrb.c23
-rw-r--r--drivers/scsi/myrs.c13
-rw-r--r--drivers/scsi/ncr53c8xx.c9
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c78
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c59
-rw-r--r--drivers/scsi/pmcraid.c24
-rw-r--r--drivers/scsi/ps3rom.c5
-rw-r--r--drivers/scsi/qedf/qedf_attr.c10
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c5
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla1280.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c80
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c124
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c11
-rw-r--r--drivers/scsi/qlogicpti.c5
-rw-r--r--drivers/scsi/scsi_debug.c36
-rw-r--r--drivers/scsi/scsi_error.c26
-rw-r--r--drivers/scsi/scsi_ioctl.c35
-rw-r--r--drivers/scsi/scsi_lib.c57
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_scan.c49
-rw-r--r--drivers/scsi/scsi_sysctl.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c20
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c41
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c3
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c20
-rw-r--r--drivers/scsi/snic/snic_main.c14
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/stex.c6
-rw-r--r--drivers/scsi/storvsc_drv.c29
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c17
-rw-r--r--drivers/scsi/virtio_scsi.c5
-rw-r--r--drivers/scsi/xen-scsifront.c11
-rw-r--r--drivers/slimbus/core.c17
-rw-r--r--drivers/slimbus/messaging.c7
-rw-r--r--drivers/soc/atmel/soc.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c6
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c4
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx8m.c26
-rw-r--r--drivers/soc/imx/soc-imx9.c128
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c23
-rw-r--r--drivers/soc/loongson/loongson2_guts.c5
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c18
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c19
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c4
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c58
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/pmic_glink.c72
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c11
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c2
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c2
-rw-r--r--drivers/soc/qcom/smem_state.c3
-rw-r--r--drivers/soc/qcom/smp2p.c2
-rw-r--r--drivers/soc/qcom/socinfo.c3
-rw-r--r--drivers/soc/renesas/Kconfig5
-rw-r--r--drivers/soc/samsung/exynos-pmu.c2
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c20
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c17
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soundwire/amd_manager.c5
-rw-r--r--drivers/soundwire/bus.c65
-rw-r--r--drivers/soundwire/bus.h3
-rw-r--r--drivers/soundwire/bus_type.c3
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c316
-rw-r--r--drivers/soundwire/irq.c12
-rw-r--r--drivers/soundwire/irq.h5
-rw-r--r--drivers/soundwire/mipi_disco.c40
-rw-r--r--drivers/soundwire/qcom.c2
-rw-r--r--drivers/soundwire/stream.c71
-rw-r--r--drivers/spi/Kconfig12
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel-quadspi.c984
-rw-r--r--drivers/spi/spi-amd.c26
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c7
-rw-r--r--drivers/spi/spi-cadence-quadspi.c57
-rw-r--r--drivers/spi/spi-dw-core.c10
-rw-r--r--drivers/spi/spi-fsl-qspi.c12
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-kspi2.c431
-rw-r--r--drivers/spi/spi-mem.c64
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c26
-rw-r--r--drivers/spi/spi-microchip-core.c41
-rw-r--r--drivers/spi/spi-mt65xx.c7
-rw-r--r--drivers/spi/spi-mxic.c3
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c12
-rw-r--r--drivers/spi/spi-omap2-mcspi.c11
-rw-r--r--drivers/spi/spi-pxa2xx.c88
-rw-r--r--drivers/spi/spi-rockchip-sfc.c233
-rw-r--r--drivers/spi/spi-sc18is602.c34
-rw-r--r--drivers/spi/spi-sn-f-ospi.c11
-rw-r--r--drivers/spi/spi-ti-qspi.c19
-rw-r--r--drivers/spi/spi-zynq-qspi.c26
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c13
-rw-r--r--drivers/spi/spi.c49
-rw-r--r--drivers/spi/spidev.c30
-rw-r--r--drivers/spmi/hisi-spmi-controller.c3
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c3
-rw-r--r--drivers/staging/gpib/Kconfig8
-rw-r--r--drivers/staging/gpib/agilent_82350b/Makefile2
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c142
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.h12
-rw-r--r--drivers/staging/gpib/agilent_82357a/Makefile2
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.c161
-rw-r--r--drivers/staging/gpib/cb7210/Makefile2
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.c596
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.h9
-rw-r--r--drivers/staging/gpib/cec/Makefile2
-rw-r--r--drivers/staging/gpib/cec/cec.h4
-rw-r--r--drivers/staging/gpib/cec/cec_gpib.c64
-rw-r--r--drivers/staging/gpib/common/gpib_os.c180
-rw-r--r--drivers/staging/gpib/common/iblib.c2
-rw-r--r--drivers/staging/gpib/eastwood/Makefile2
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.c194
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.h6
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c282
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.h6
-rw-r--r--drivers/staging/gpib/gpio/Makefile2
-rw-r--r--drivers/staging/gpib/gpio/gpib_bitbang.c67
-rw-r--r--drivers/staging/gpib/hp_82335/Makefile2
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.c81
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.h3
-rw-r--r--drivers/staging/gpib/hp_82341/Makefile2
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.c135
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.h2
-rw-r--r--drivers/staging/gpib/include/amcc5920.h2
-rw-r--r--drivers/staging/gpib/include/gpibP.h14
-rw-r--r--drivers/staging/gpib/include/gpib_types.h3
-rw-r--r--drivers/staging/gpib/include/nec7210.h5
-rw-r--r--drivers/staging/gpib/include/tms9914.h5
-rw-r--r--drivers/staging/gpib/ines/Makefile2
-rw-r--r--drivers/staging/gpib/ines/ines.h11
-rw-r--r--drivers/staging/gpib/ines/ines_gpib.c546
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/Makefile2
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c84
-rw-r--r--drivers/staging/gpib/nec7210/nec7210.c16
-rw-r--r--drivers/staging/gpib/ni_usb/Makefile2
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c89
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.h2
-rw-r--r--drivers/staging/gpib/pc2/Makefile2
-rw-r--r--drivers/staging/gpib/pc2/pc2_gpib.c262
-rw-r--r--drivers/staging/gpib/tms9914/Makefile2
-rw-r--r--drivers/staging/gpib/tms9914/tms9914.c8
-rw-r--r--drivers/staging/gpib/tnt4882/Makefile2
-rw-r--r--drivers/staging/gpib/tnt4882/mite.c71
-rw-r--r--drivers/staging/gpib/tnt4882/mite.h15
-rw-r--r--drivers/staging/gpib/tnt4882/tnt4882_gpib.c697
-rw-r--r--drivers/staging/greybus/camera.c17
-rw-r--r--drivers/staging/iio/frequency/ad9832.c2
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c4
-rw-r--r--drivers/staging/media/imx/imx-media-of.c8
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/staging/media/max96712/max96712.c4
-rw-r--r--drivers/staging/rtl8723bs/Makefile1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c55
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h5
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h18
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1286
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c2
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c3
-rw-r--r--drivers/target/iscsi/Kconfig4
-rw-r--r--drivers/target/iscsi/iscsi_target.c168
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c50
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c21
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c58
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_stat.c4
-rw-r--r--drivers/tee/optee/smc_abi.c5
-rw-r--r--drivers/tee/optee/supp.c35
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c57
-rw-r--r--drivers/thermal/gov_power_allocator.c32
-rw-r--r--drivers/thermal/gov_user_space.c12
-rw-r--r--drivers/thermal/intel/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c4
-rw-r--r--drivers/thermal/intel/intel_tcc.c2
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_of.c51
-rw-r--r--drivers/thunderbolt/ctl.c11
-rw-r--r--drivers/thunderbolt/ctl.h1
-rw-r--r--drivers/thunderbolt/debugfs.c67
-rw-r--r--drivers/thunderbolt/eeprom.c78
-rw-r--r--drivers/thunderbolt/path.c4
-rw-r--r--drivers/thunderbolt/retimer.c2
-rw-r--r--drivers/thunderbolt/tb.c196
-rw-r--r--drivers/thunderbolt/tb.h5
-rw-r--r--drivers/thunderbolt/test.c90
-rw-r--r--drivers/thunderbolt/tunnel.c411
-rw-r--r--drivers/thunderbolt/tunnel.h63
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/n_gsm.c39
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c4
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c3
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_dma.c16
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c11
-rw-r--r--drivers/tty/serial/8250/8250_pci.c76
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c60
-rw-r--r--drivers/tty/serial/8250/8250_platform.c9
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c116
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c10
-rw-r--r--drivers/tty/serial/altera_uart.c7
-rw-r--r--drivers/tty/serial/amba-pl011.c125
-rw-r--r--drivers/tty/serial/atmel_serial.c18
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/imx.c12
-rw-r--r--drivers/tty/serial/kgdb_nmi.c101
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c37
-rw-r--r--drivers/tty/serial/serial_core.c265
-rw-r--r--drivers/tty/serial/serial_port.c12
-rw-r--r--drivers/tty/serial/sh-sci.c98
-rw-r--r--drivers/tty/serial/stm32-usart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c16
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/vt/selection.c14
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/ufs/core/ufs-sysfs.c7
-rw-r--r--drivers/ufs/core/ufs_bsg.c8
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c26
-rw-r--r--drivers/ufs/core/ufshcd.c417
-rw-r--r--drivers/ufs/host/ufs-qcom.c168
-rw-r--r--drivers/ufs/host/ufshcd-pci.c2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c28
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c2
-rw-r--r--drivers/uio/uio_hv_generic.c86
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c13
-rw-r--r--drivers/usb/cdns3/core.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c25
-rw-r--r--drivers/usb/chipidea/host.c13
-rw-r--r--drivers/usb/chipidea/otg_fsm.c3
-rw-r--r--drivers/usb/class/cdc-acm.c28
-rw-r--r--drivers/usb/class/usblp.c9
-rw-r--r--drivers/usb/common/common.c14
-rw-r--r--drivers/usb/common/usb-conn-gpio.c3
-rw-r--r--drivers/usb/core/config.c19
-rw-r--r--drivers/usb/core/driver.c7
-rw-r--r--drivers/usb/core/generic.c12
-rw-r--r--drivers/usb/core/hcd-pci.c15
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c63
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/core/sysfs.c12
-rw-r--r--drivers/usb/core/usb-acpi.c3
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/hcd_queue.c3
-rw-r--r--drivers/usb/dwc3/core.c140
-rw-r--r--drivers/usb/dwc3/core.h9
-rw-r--r--drivers/usb/dwc3/drd.c4
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c84
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c13
-rw-r--r--drivers/usb/dwc3/dwc3-st.c6
-rw-r--r--drivers/usb/dwc3/gadget.c159
-rw-r--r--drivers/usb/fotg210/fotg210-core.c5
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c3
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/composite.c17
-rw-r--r--drivers/usb/gadget/configfs.c6
-rw-r--r--drivers/usb/gadget/function/f_ecm.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c19
-rw-r--r--drivers/usb/gadget/function/f_ncm.c6
-rw-r--r--drivers/usb/gadget/function/f_tcm.c719
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/storage_common.h2
-rw-r--r--drivers/usb/gadget/function/tcm.h28
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/u_serial.c3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c3
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c3
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c13
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c3
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c3
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/sl811-hcd.c3
-rw-r--r--drivers/usb/host/xhci-caps.h6
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-dbgtty.c98
-rw-r--r--drivers/usb/host/xhci-debugfs.c25
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-pci.c17
-rw-r--r--drivers/usb/host/xhci-plat.c6
-rw-r--r--drivers/usb/host/xhci-ring.c18
-rw-r--r--drivers/usb/host/xhci-tegra.c7
-rw-r--r--drivers/usb/host/xhci.c9
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c43
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c3
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c3
-rw-r--r--drivers/usb/musb/da8xx.c3
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/musb_host.c3
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c3
-rw-r--r--drivers/usb/phy/phy-generic.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c3
-rw-r--r--drivers/usb/phy/phy-tahvo.c3
-rw-r--r--drivers/usb/phy/phy.c26
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/roles/class.c5
-rw-r--r--drivers/usb/serial/ch341.c35
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/option.c73
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/storage/Kconfig3
-rw-r--r--drivers/usb/storage/scsiglue.c15
-rw-r--r--drivers/usb/storage/shuttle_usbat.c4
-rw-r--r--drivers/usb/storage/transport.c8
-rw-r--r--drivers/usb/storage/uas.c10
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/typec/altmodes/Kconfig9
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c2
-rw-r--r--drivers/usb/typec/altmodes/thunderbolt.c388
-rw-r--r--drivers/usb/typec/bus.c6
-rw-r--r--drivers/usb/typec/class.c47
-rw-r--r--drivers/usb/typec/hd3ss3220.c207
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c24
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c4
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c42
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c133
-rw-r--r--drivers/usb/typec/ucsi/Kconfig13
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c333
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h3
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c6
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c3
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c13
-rw-r--r--drivers/usb/usbip/vhci_rx.c6
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c8
-rw-r--r--drivers/usb/usbip/vudc_tx.c2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c4
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa.h32
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_hw.c38
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c99
-rw-r--r--drivers/vdpa/solidrun/snet_main.c57
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c9
-rw-r--r--drivers/vfio/group.c16
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c14
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c169
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c40
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c38
-rw-r--r--drivers/vfio/pci/virtio/migrate.c6
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c10
-rw-r--r--drivers/vhost/net.c5
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c8
-rw-r--r--drivers/video/fbdev/efifb.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c52
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c17
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h1
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c6
-rw-r--r--drivers/video/fbdev/sm501fb.c5
-rw-r--r--drivers/video/fbdev/udlfb.c8
-rw-r--r--drivers/video/fbdev/vga16fb.c7
-rw-r--r--drivers/video/hdmi.c28
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c8
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig1
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c544
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--drivers/virtio/virtio.c113
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c107
-rw-r--r--drivers/virtio/virtio_pci_common.c41
-rw-r--r--drivers/w1/masters/ds2482.c26
-rw-r--r--drivers/w1/slaves/w1_ds2406.c10
-rw-r--r--drivers/w1/slaves/w1_ds2408.c42
-rw-r--r--drivers/w1/slaves/w1_ds2413.c14
-rw-r--r--drivers/w1/slaves/w1_ds2430.c10
-rw-r--r--drivers/w1/slaves/w1_ds2431.c10
-rw-r--r--drivers/w1/slaves/w1_ds2433.c24
-rw-r--r--drivers/w1/slaves/w1_ds2438.c34
-rw-r--r--drivers/w1/slaves/w1_ds2780.c8
-rw-r--r--drivers/w1/slaves/w1_ds2781.c8
-rw-r--r--drivers/w1/slaves/w1_ds2805.c10
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c18
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c4
-rw-r--r--drivers/w1/w1.c12
-rw-r--r--drivers/watchdog/da9052_wdt.c13
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/rti_wdt.c4
-rw-r--r--drivers/watchdog/rzv2h_wdt.c16
-rw-r--r--drivers/watchdog/softdog.c8
-rw-r--r--drivers/watchdog/sp805_wdt.c3
-rw-r--r--drivers/watchdog/watchdog_dev.c6
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c4
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/pvcalls-front.c14
-rw-r--r--drivers/xen/pvcalls-front.h2
-rw-r--r--drivers/xen/swiotlb-xen.c22
-rw-r--r--drivers/zorro/zorro-sysfs.c10
4586 files changed, 210714 insertions, 71399 deletions
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 64065fb8922b..5b9490367a39 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -24,6 +24,7 @@ menuconfig DRM_ACCEL
different device files, called accel/accel* (in /dev, sysfs
and debugfs).

+source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index ab3df932937f..a301fb6089d4 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
diff --git a/drivers/accel/amdxdna/Kconfig b/drivers/accel/amdxdna/Kconfig
new file mode 100644
index 000000000000..f39d7a87296c
--- /dev/null
+++ b/drivers/accel/amdxdna/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_AMDXDNA
+ tristate "AMD AI Engine"
+ depends on AMD_IOMMU
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on X86_64
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ select FW_LOADER
+ select HMM_MIRROR
+ help
+	  Choose this option to enable support for the NPU integrated into AMD
+	  client CPUs such as the AMD Ryzen AI 300 Series. The AMD NPU can be
+	  used to accelerate machine learning applications.
+
+ If "M" is selected, the driver module will be amdxdna.
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
new file mode 100644
index 000000000000..0e9adf6890a0
--- /dev/null
+++ b/drivers/accel/amdxdna/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+amdxdna-y := \
+ aie2_ctx.o \
+ aie2_error.o \
+ aie2_message.o \
+ aie2_pci.o \
+ aie2_pm.o \
+ aie2_psp.o \
+ aie2_smu.o \
+ aie2_solver.o \
+ amdxdna_ctx.o \
+ amdxdna_gem.o \
+ amdxdna_mailbox.o \
+ amdxdna_mailbox_helper.o \
+ amdxdna_pci_drv.o \
+ amdxdna_sysfs.o \
+ npu1_regs.o \
+ npu2_regs.o \
+ npu4_regs.o \
+ npu5_regs.o \
+ npu6_regs.o
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
new file mode 100644
index 000000000000..5119bccd1917
--- /dev/null
+++ b/drivers/accel/amdxdna/TODO
@@ -0,0 +1,3 @@
+- Add import and export BO support
+- Add debugfs support
+- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
new file mode 100644
index 000000000000..5f43db02b240
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static bool force_cmdlist;
+module_param(force_cmdlist, bool, 0600);
+MODULE_PARM_DESC(force_cmdlist, "Force using command list (default false)");
+
+#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
+static void aie2_job_release(struct kref *ref)
+{
+ struct amdxdna_sched_job *job;
+
+ job = container_of(ref, struct amdxdna_sched_job, refcnt);
+ amdxdna_sched_job_cleanup(job);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+ kfree(job);
+}
+
+static void aie2_job_put(struct amdxdna_sched_job *job)
+{
+ kref_put(&job->refcnt, aie2_job_release);
+}
+
+/* bad_job is only used by aie2_sched_job_timedout(); all other callers pass NULL */
+static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
+ struct drm_sched_job *bad_job)
+{
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+}
+
+static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_gem_obj *heap = hwctx->priv->heap;
+ int ret;
+
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
+ goto out;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
+ goto out;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
+ goto out;
+ }
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
+ goto out;
+ }
+
+out:
+ drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+}
+
+void aie2_restart_ctx(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ if (hwctx->status != HWCTX_STAT_STOP)
+ continue;
+
+ hwctx->status = hwctx->old_status;
+ XDNA_DBG(xdna, "Resetting %s", hwctx->name);
+ aie2_hwctx_restart(xdna, hwctx);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *fence, *out_fence = NULL;
+ int ret;
+
+ fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
+ if (!fence)
+ return NULL;
+
+ ret = dma_fence_chain_find_seqno(&fence, seq);
+ if (ret)
+ goto out;
+
+ out_fence = dma_fence_get(dma_fence_chain_contained(fence));
+
+out:
+ dma_fence_put(fence);
+ return out_fence;
+}
+
+static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
+{
+ struct dma_fence *fence;
+
+ fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
+ if (!fence)
+ return;
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+}
+
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ /*
+	 * A command timeout is unlikely. But if it happens, it doesn't
+	 * break the system. aie2_hwctx_stop() will destroy the mailbox
+	 * and abort all commands.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ /*
+	 * The resume path cannot guarantee that the mailbox channel can be
+	 * regenerated. If that happens, submitting a message to this
+	 * mailbox channel will return an error.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ hwctx->status = hwctx->old_status;
+ aie2_hwctx_restart(xdna, hwctx);
+}
+
+static void
+aie2_sched_notify(struct amdxdna_sched_job *job)
+{
+ struct dma_fence *fence = job->fence;
+
+ trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+ job->hwctx->priv->completed++;
+ dma_fence_signal(fence);
+
+ up(&job->hwctx->priv->job_sem);
+ job->job_done = true;
+ dma_fence_put(fence);
+ mmput_async(job->mm);
+ aie2_job_put(job);
+}
+
+static int
+aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ u32 ret = 0;
+ u32 status;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ u32 ret = 0;
+ u32 status;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ struct cmd_chain_resp *resp;
+ struct amdxdna_dev *xdna;
+ u32 fail_cmd_status;
+ u32 fail_cmd_idx;
+ u32 ret = 0;
+
+ cmd_abo = job->cmd_bo;
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ resp = (struct cmd_chain_resp *)data;
+ xdna = job->hwctx->client->xdna;
+ XDNA_DBG(xdna, "Status 0x%x", resp->status);
+ if (resp->status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ goto out;
+ }
+
+ /* Slow path to handle error, read from ringbuf on BAR */
+ fail_cmd_idx = resp->fail_cmd_idx;
+ fail_cmd_status = resp->fail_cmd_status;
+ XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+ amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+ struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+
+ cc->error_index = fail_cmd_idx;
+ if (cc->error_index >= cc->command_count)
+ cc->error_index = 0;
+ }
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static struct dma_fence *
+aie2_sched_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct dma_fence *fence;
+ int ret;
+
+ if (!mmget_not_zero(job->mm))
+ return ERR_PTR(-ESRCH);
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+ if (unlikely(!cmd_abo)) {
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ goto out;
+ }
+
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
+ ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else if (force_cmdlist)
+ ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else
+ ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
+
+out:
+ if (ret) {
+ dma_fence_put(job->fence);
+ aie2_job_put(job);
+ mmput(job->mm);
+ fence = ERR_PTR(ret);
+ }
+ trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
+
+ return fence;
+}
+
+static void aie2_sched_job_free(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+
+ trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
+ if (!job->job_done)
+ up(&hwctx->priv->job_sem);
+
+ drm_sched_job_cleanup(sched_job);
+ aie2_job_put(job);
+}
+
+static enum drm_gpu_sched_stat
+aie2_sched_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct amdxdna_dev *xdna;
+
+ xdna = hwctx->client->xdna;
+ trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ mutex_lock(&xdna->dev_lock);
+ aie2_hwctx_stop(xdna, hwctx, sched_job);
+
+ aie2_hwctx_restart(xdna, hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+const struct drm_sched_backend_ops sched_ops = {
+ .run_job = aie2_sched_job_run,
+ .free_job = aie2_sched_job_free,
+ .timedout_job = aie2_sched_job_timedout,
+};
+
+static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int start, end, first, last;
+ u32 width = 1, entries = 0;
+ int i;
+
+ if (!hwctx->num_tiles) {
+ XDNA_ERR(xdna, "Number of tiles is zero");
+ return -EINVAL;
+ }
+
+ ndev = xdna->dev_handle;
+ if (unlikely(!ndev->metadata.core.row_count)) {
+ XDNA_WARN(xdna, "Core tile row count is zero");
+ return -EINVAL;
+ }
+
+ hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
+ if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
+ XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
+ return -EINVAL;
+ }
+
+ if (ndev->priv->col_align == COL_ALIGN_NATURE)
+ width = hwctx->num_col;
+
+ /*
+	 * In range [start, end], find the columns that are multiples of width.
+ * 'first' is the first column,
+ * 'last' is the last column,
+ * 'entries' is the total number of columns.
+ */
+ start = xdna->dev_info->first_col;
+ end = ndev->total_col - hwctx->num_col;
+ if (start > 0 && end == 0) {
+ XDNA_DBG(xdna, "Force start from col 0");
+ start = 0;
+ }
+ first = start + (width - start % width) % width;
+ last = end - end % width;
+ if (last >= first)
+ entries = (last - first) / width + 1;
+ XDNA_DBG(xdna, "start %d end %d first %d last %d",
+ start, end, first, last);
+
+ if (unlikely(!entries)) {
+ XDNA_ERR(xdna, "Start %d end %d width %d",
+ start, end, width);
+ return -EINVAL;
+ }
+
+ hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
+ if (!hwctx->col_list)
+ return -ENOMEM;
+
+ hwctx->col_list_len = entries;
+ hwctx->col_list[0] = first;
+ for (i = 1; i < entries; i++)
+ hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
+
+ print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
+ entries * sizeof(*hwctx->col_list), false);
+ return 0;
+}
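The first/last/entries arithmetic above is easy to sanity-check in isolation.
A minimal userspace sketch with made-up inputs (not driver code):

	#include <stdio.h>

	int main(void)
	{
		int start = 1, end = 6, width = 2;	/* hypothetical inputs */
		int first = start + (width - start % width) % width;	/* 2 */
		int last = end - end % width;				/* 6 */
		int entries = last >= first ? (last - first) / width + 1 : 0;

		for (int i = 0; i < entries; i++)
			printf("col_list[%d] = %d\n", i, first + i * width);
		return 0;	/* prints 2, 4, 6 */
	}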
+
+static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct alloc_requests *xrs_req;
+ int ret;
+
+ xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
+ if (!xrs_req)
+ return -ENOMEM;
+
+ xrs_req->cdo.start_cols = hwctx->col_list;
+ xrs_req->cdo.cols_len = hwctx->col_list_len;
+ xrs_req->cdo.ncols = hwctx->num_col;
+ xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
+
+ xrs_req->rqos.gops = hwctx->qos.gops;
+ xrs_req->rqos.fps = hwctx->qos.fps;
+ xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
+ xrs_req->rqos.latency = hwctx->qos.latency;
+ xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
+ xrs_req->rqos.priority = hwctx->qos.priority;
+
+ xrs_req->rid = (uintptr_t)hwctx;
+
+ ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
+
+ kfree(xrs_req);
+ return ret;
+}
+
+static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ int ret;
+
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
+}
+
+static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct drm_file *filp = hwctx->client->filp;
+ struct drm_syncobj *syncobj;
+ u32 hdl;
+ int ret;
+
+ hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
+
+ ret = drm_syncobj_create(&syncobj, 0, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
+ return ret;
+ }
+ ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
+ if (ret) {
+ drm_syncobj_put(syncobj);
+ XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
+ return ret;
+ }
+ hwctx->priv->syncobj = syncobj;
+ hwctx->syncobj_hdl = hdl;
+
+ return 0;
+}
+
+static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
+{
+ /*
+ * The syncobj_hdl is owned by user space and will be cleaned up
+ * separately.
+ */
+ drm_syncobj_put(hwctx->priv->syncobj);
+}
+
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct drm_gpu_scheduler *sched;
+ struct amdxdna_hwctx_priv *priv;
+ struct amdxdna_gem_obj *heap;
+ struct amdxdna_dev_hdl *ndev;
+ int i, ret;
+
+ priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hwctx->priv = priv;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ XDNA_ERR(xdna, "The client dev heap object not exist");
+ mutex_unlock(&client->mm_lock);
+ ret = -ENOENT;
+ goto free_priv;
+ }
+ drm_gem_object_get(to_gobj(heap));
+ mutex_unlock(&client->mm_lock);
+ priv->heap = heap;
+ sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
+
+ ret = amdxdna_gem_pin(heap);
+ if (ret) {
+ XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
+ goto put_heap;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ struct amdxdna_gem_obj *abo;
+ struct amdxdna_drm_create_bo args = {
+ .flags = 0,
+ .type = AMDXDNA_BO_DEV,
+ .vaddr = 0,
+ .size = MAX_CHAIN_CMDBUF_SIZE,
+ };
+
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto free_cmd_bufs;
+ }
+
+ XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
+ i, abo->mem.dev_addr, abo->mem.size);
+ priv->cmd_buf[i] = abo;
+ }
+
+ sched = &priv->sched;
+ mutex_init(&priv->io_lock);
+
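+	/*
+	 * Teach lockdep that io_lock can be taken under fs_reclaim, so
+	 * any memory allocation while holding io_lock gets flagged.
+	 */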
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->io_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
+ HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ NULL, NULL, hwctx->name, xdna->ddev.dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
+ goto free_cmd_bufs;
+ }
+
+ ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to initial sched entiry. ret %d", ret);
+ goto free_sched;
+ }
+
+ ret = aie2_hwctx_col_list(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
+ goto free_entity;
+ }
+
+ ret = aie2_alloc_resource(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
+ goto free_col_list;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ ret = aie2_ctx_syncobj_create(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ hwctx->status = HWCTX_STAT_INIT;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num++;
+
+ XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
+
+ return 0;
+
+release_resource:
+ aie2_release_resource(hwctx);
+free_col_list:
+ kfree(hwctx->col_list);
+free_entity:
+ drm_sched_entity_destroy(&priv->entity);
+free_sched:
+ drm_sched_fini(&priv->sched);
+free_cmd_bufs:
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ if (!priv->cmd_buf[i])
+ continue;
+ drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
+ }
+ amdxdna_gem_unpin(heap);
+put_heap:
+ drm_gem_object_put(to_gobj(heap));
+free_priv:
+ kfree(priv);
+ return ret;
+}
+
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+ int idx;
+
+ xdna = hwctx->client->xdna;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num--;
+ drm_sched_wqueue_stop(&hwctx->priv->sched);
+
+	/* Now the scheduler will not send commands to the device. */
+ aie2_release_resource(hwctx);
+
+ /*
+ * All submitted commands are aborted.
+	 * Restart the scheduler queues to clean up jobs. If aie2_sched_job_run()
+	 * is called now, it will return -ENODEV.
+ */
+ drm_sched_wqueue_start(&hwctx->priv->sched);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+ drm_sched_fini(&hwctx->priv->sched);
+ aie2_ctx_syncobj_destroy(hwctx);
+
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+
+ for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
+ drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
+ amdxdna_gem_unpin(hwctx->priv->heap);
+ drm_gem_object_put(to_gobj(hwctx->priv->heap));
+
+ mutex_destroy(&hwctx->priv->io_lock);
+ kfree(hwctx->col_list);
+ kfree(hwctx->priv);
+ kfree(hwctx->cus);
+}
+
+static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
+{
+ struct amdxdna_hwctx_param_config_cu *config = buf;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 total_size;
+ int ret;
+
+ XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
+ if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
+ return -EINVAL;
+
+ if (hwctx->status != HWCTX_STAT_INIT) {
+ XDNA_ERR(xdna, "Not support re-config CU");
+ return -EINVAL;
+ }
+
+ if (!config->num_cus) {
+ XDNA_ERR(xdna, "Number of CU is zero");
+ return -EINVAL;
+ }
+
+ total_size = struct_size(config, cu_configs, config->num_cus);
+ if (total_size > size) {
+ XDNA_ERR(xdna, "CU config larger than size");
+ return -EINVAL;
+ }
+
+ hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
+ goto free_cus;
+ }
+
+	wmb(); /* To avoid locking in command submit when checking status */
+ hwctx->status = HWCTX_STAT_READY;
+
+ return 0;
+
+free_cus:
+ kfree(hwctx->cus);
+ hwctx->cus = NULL;
+ return ret;
+}
+
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ switch (type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ return aie2_hwctx_cu_config(hwctx, buf, size);
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ return -EOPNOTSUPP;
+ default:
+ XDNA_DBG(xdna, "Not supported type %d", type);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aie2_populate_range(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct mm_struct *mm = abo->mem.notifier.mm;
+ struct hmm_range range = { 0 };
+ unsigned long timeout;
+ int ret;
+
+ XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
+ abo->mem.userptr, abo->mem.size);
+ range.notifier = &abo->mem.notifier;
+ range.start = abo->mem.userptr;
+ range.end = abo->mem.userptr + abo->mem.size;
+ range.hmm_pfns = abo->mem.pfns;
+ range.default_flags = HMM_PFN_REQ_FAULT;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(&range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto put_mm;
+ }
+
+ if (ret == -EBUSY)
+ goto again;
+
+ goto put_mm;
+ }
+
+ down_read(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
+ up_read(&xdna->notifier_lock);
+ goto again;
+ }
+ abo->mem.map_invalid = false;
+ up_read(&xdna->notifier_lock);
+
+put_mm:
+ mmput(mm);
+ return ret;
+}
+
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct ww_acquire_ctx acquire_ctx;
+ struct dma_fence_chain *chain;
+ struct amdxdna_gem_obj *abo;
+ unsigned long timeout = 0;
+ int ret, i;
+
+ ret = down_interruptible(&hwctx->priv->job_sem);
+ if (ret) {
+ XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
+ return ret;
+ }
+
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
+ XDNA_ERR(xdna, "Alloc fence chain failed");
+ ret = -ENOMEM;
+ goto up_sem;
+ }
+
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
+ goto free_chain;
+ }
+
+retry:
+ ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
+ goto cleanup_job;
+ }
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ goto cleanup_job;
+ }
+ }
+
+ down_read(&xdna->notifier_lock);
+ for (i = 0; i < job->bo_cnt; i++) {
+ abo = to_xdna_obj(job->bos[i]);
+ if (abo->mem.map_invalid) {
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (!timeout) {
+ timeout = jiffies +
+ msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ } else if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto cleanup_job;
+ }
+
+ ret = aie2_populate_range(abo);
+ if (ret)
+ goto cleanup_job;
+ goto retry;
+ }
+ }
+
+ mutex_lock(&hwctx->priv->io_lock);
+ drm_sched_job_arm(&job->base);
+ job->out_fence = dma_fence_get(&job->base.s_fence->finished);
+ for (i = 0; i < job->bo_cnt; i++)
+ dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
+ job->seq = hwctx->priv->seq++;
+ kref_get(&job->refcnt);
+ drm_sched_entity_push_job(&job->base);
+
+ *seq = job->seq;
+ drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
+ mutex_unlock(&hwctx->priv->io_lock);
+
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+
+ aie2_job_put(job);
+
+ return 0;
+
+cleanup_job:
+ drm_sched_job_cleanup(&job->base);
+free_chain:
+ dma_fence_chain_free(chain);
+up_sem:
+ up(&hwctx->priv->job_sem);
+ job->job_done = true;
+ return ret;
+}
+
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+ unsigned long cur_seq)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct drm_gem_object *gobj = to_gobj(abo);
+ long ret;
+
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (!ret || ret == -ERESTARTSYS)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
+}
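The invalidate/submit handshake above follows a simple protocol: the notifier
marks the mapping invalid under the write side of notifier_lock and waits for
in-flight work, while submission re-checks the flag under the read side and
repopulates before retrying. A rough userspace stand-in (names and the
repopulate() helper are hypothetical; the locking shape is the point):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_rwlock_t notifier_lock = PTHREAD_RWLOCK_INITIALIZER;
	static bool map_invalid;

	static void invalidate(void)		/* mmu-notifier side */
	{
		pthread_rwlock_wrlock(&notifier_lock);
		map_invalid = true;
		pthread_rwlock_unlock(&notifier_lock);
		/* the driver then waits on the reservation fences */
	}

	static int repopulate(void)		/* aie2_populate_range() stand-in */
	{
		pthread_rwlock_rdlock(&notifier_lock);
		map_invalid = false;		/* cleared once the range is stable */
		pthread_rwlock_unlock(&notifier_lock);
		return 0;
	}

	static int submit(void)			/* aie2_cmd_submit() stand-in */
	{
		for (;;) {
			pthread_rwlock_rdlock(&notifier_lock);
			if (!map_invalid) {
				/* push the job while the mapping is known-valid */
				pthread_rwlock_unlock(&notifier_lock);
				return 0;
			}
			pthread_rwlock_unlock(&notifier_lock);
			if (repopulate())
				return -1;
		}
	}

	int main(void)
	{
		invalidate();
		return submit();	/* repopulates once, then succeeds */
	}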
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
new file mode 100644
index 000000000000..b1defaa8513b
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+struct async_event {
+ struct amdxdna_dev_hdl *ndev;
+ struct async_event_msg_resp resp;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+};
+
+struct async_events {
+ struct workqueue_struct *wq;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+ u32 event_cnt;
+ struct async_event event[] __counted_by(event_cnt);
+};
+
+/*
+ * The enum, structs and lookup tables below are ported from the XAIE util
+ * header file.
+ *
+ * This data is defined by the AIE device and is used to decode error
+ * messages from the device.
+ */
+
+enum aie_module_type {
+ AIE_MEM_MOD = 0,
+ AIE_CORE_MOD,
+ AIE_PL_MOD,
+};
+
+enum aie_error_category {
+ AIE_ERROR_SATURATION = 0,
+ AIE_ERROR_FP,
+ AIE_ERROR_STREAM,
+ AIE_ERROR_ACCESS,
+ AIE_ERROR_BUS,
+ AIE_ERROR_INSTRUCTION,
+ AIE_ERROR_ECC,
+ AIE_ERROR_LOCK,
+ AIE_ERROR_DMA,
+ AIE_ERROR_MEM_PARITY,
+	/* Unknown is not from XAIE, added for better categorization */
+ AIE_ERROR_UNKNOWN,
+};
+
+/* Don't pack, unless the XAIE side changes */
+struct aie_error {
+ __u8 row;
+ __u8 col;
+ __u32 mod_type;
+ __u8 event_id;
+};
+
+struct aie_err_info {
+ u32 err_cnt;
+ u32 ret_code;
+ u32 rsvd;
+ struct aie_error payload[] __counted_by(err_cnt);
+};
+
+struct aie_event_category {
+ u8 event_id;
+ enum aie_error_category category;
+};
+
+#define EVENT_CATEGORY(id, cat) { id, cat }
+static const struct aie_event_category aie_ml_mem_event_cat[] = {
+ EVENT_CATEGORY(88U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(90U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(91U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(92U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(93U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(94U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(95U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(96U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(97U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(98U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(99U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(100U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(101U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_core_event_cat[] = {
+ EVENT_CATEGORY(55U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(56U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(57U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(58U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(59U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(60U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(62U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(64U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(65U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(66U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(67U, AIE_ERROR_LOCK),
+ EVENT_CATEGORY(70U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(71U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(72U, AIE_ERROR_BUS),
+};
+
+static const struct aie_event_category aie_ml_mem_tile_event_cat[] = {
+ EVENT_CATEGORY(130U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(132U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(133U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(134U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(135U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(136U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(137U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(138U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(139U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
+ EVENT_CATEGORY(64U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(65U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(66U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(67U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(68U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(69U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(70U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(71U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(72U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(73U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
+};
+
+static enum aie_error_category
+aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
+{
+ const struct aie_event_category *lut;
+ int num_entry;
+ int i;
+
+ switch (mod_type) {
+ case AIE_PL_MOD:
+ lut = aie_ml_shim_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_shim_tile_event_cat);
+ break;
+ case AIE_CORE_MOD:
+ lut = aie_ml_core_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_core_event_cat);
+ break;
+ case AIE_MEM_MOD:
+ if (row == 1) {
+ lut = aie_ml_mem_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_tile_event_cat);
+ } else {
+ lut = aie_ml_mem_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_event_cat);
+ }
+ break;
+ default:
+ return AIE_ERROR_UNKNOWN;
+ }
+
+ for (i = 0; i < num_entry; i++) {
+ if (event_id != lut[i].event_id)
+ continue;
+
+ return lut[i].category;
+ }
+
+ return AIE_ERROR_UNKNOWN;
+}
+
+static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+	u32 err_col = 0; /* assume that the AIE has fewer than 32 columns */
+ int i;
+
+ /* Get err column bitmap */
+ for (i = 0; i < num_err; i++) {
+ struct aie_error *err = &errs[i];
+ enum aie_error_category cat;
+
+ cat = aie_get_error_category(err->row, err->event_id, err->mod_type);
+ XDNA_ERR(ndev->xdna, "Row: %d, Col: %d, module %d, event ID %d, category %d",
+ err->row, err->col, err->mod_type,
+ err->event_id, cat);
+
+ if (err->col >= 32) {
+ XDNA_WARN(ndev->xdna, "Invalid column number");
+ break;
+ }
+
+ err_col |= (1 << err->col);
+ }
+
+ return err_col;
+}
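The column bitmap built above gates whether the async event gets re-armed in
the worker below; a toy reconstruction with invented error records (only the
bit math is real):

	#include <stdio.h>
	#include <stdint.h>

	struct err { uint8_t row, col; };

	int main(void)
	{
		const struct err errs[] = { { 0, 1 }, { 1, 1 }, { 2, 4 } };
		uint32_t err_col = 0;

		for (size_t i = 0; i < sizeof(errs) / sizeof(errs[0]); i++) {
			if (errs[i].col >= 32)	/* same <32-column assumption */
				break;
			err_col |= 1u << errs[i].col;
		}
		printf("err_col = 0x%x\n", err_col);	/* 0x12 */
		return 0;
	}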
+
+static int aie2_error_async_cb(void *handle, const u32 *data, size_t size)
+{
+ struct async_event_msg_resp *resp;
+ struct async_event *e = handle;
+
+ if (data) {
+ resp = (struct async_event_msg_resp *)data;
+ e->resp.type = resp->type;
+		wmb(); /* Update status last, so no locking is needed here */
+ e->resp.status = resp->status;
+ }
+ queue_work(e->wq, &e->work);
+ return 0;
+}
+
+static int aie2_error_event_send(struct async_event *e)
+{
+ drm_clflush_virt_range(e->buf, e->size); /* device can access */
+ return aie2_register_asyn_event_msg(e->ndev, e->addr, e->size, e,
+ aie2_error_async_cb);
+}
+
+static void aie2_error_worker(struct work_struct *err_work)
+{
+ struct aie_err_info *info;
+ struct amdxdna_dev *xdna;
+ struct async_event *e;
+ u32 max_err;
+ u32 err_col;
+
+ e = container_of(err_work, struct async_event, work);
+
+ xdna = e->ndev->xdna;
+
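+	/* MAX_AIE2_STATUS_CODE is the sentinel: no new response has arrived */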
+ if (e->resp.status == MAX_AIE2_STATUS_CODE)
+ return;
+
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+
+ print_hex_dump_debug("AIE error: ", DUMP_PREFIX_OFFSET, 16, 4,
+ e->buf, 0x100, false);
+
+ info = (struct aie_err_info *)e->buf;
+ XDNA_DBG(xdna, "Error count %d return code %d", info->err_cnt, info->ret_code);
+
+ max_err = (e->size - sizeof(*info)) / sizeof(struct aie_error);
+ if (unlikely(info->err_cnt > max_err)) {
+ WARN_ONCE(1, "Error count too large %d\n", info->err_cnt);
+ return;
+ }
+ err_col = aie2_error_backtrack(e->ndev, info->payload, info->err_cnt);
+ if (!err_col) {
+ XDNA_WARN(xdna, "Did not get error column");
+ return;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+	/* Re-send this event to the firmware */
+ if (aie2_error_event_send(e))
+ XDNA_WARN(xdna, "Unable to register async event");
+ mutex_unlock(&xdna->dev_lock);
+}
+
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_event *e;
+ int i, ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ for (i = 0; i < ndev->async_events->event_cnt; i++) {
+ e = &ndev->async_events->event[i];
+ ret = aie2_error_event_send(e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_events *events;
+
+ events = ndev->async_events;
+
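+	/*
+	 * Drop dev_lock while destroying the workqueue: the error worker
+	 * takes dev_lock, and destroy_workqueue() flushes pending work.
+	 */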
+ mutex_unlock(&xdna->dev_lock);
+ destroy_workqueue(events->wq);
+ mutex_lock(&xdna->dev_lock);
+
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+ kfree(events);
+}
+
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 total_col = ndev->total_col;
+ u32 total_size = ASYNC_BUF_SIZE * total_col;
+ struct async_events *events;
+ int i, ret;
+
+ events = kzalloc(struct_size(events, event, total_col), GFP_KERNEL);
+ if (!events)
+ return -ENOMEM;
+
+ events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!events->buf) {
+ ret = -ENOMEM;
+ goto free_events;
+ }
+ events->size = total_size;
+ events->event_cnt = total_col;
+
+ events->wq = alloc_ordered_workqueue("async_wq", 0);
+ if (!events->wq) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ for (i = 0; i < events->event_cnt; i++) {
+ struct async_event *e = &events->event[i];
+ u32 offset = i * ASYNC_BUF_SIZE;
+
+ e->ndev = ndev;
+ e->wq = events->wq;
+ e->buf = &events->buf[offset];
+ e->addr = events->addr + offset;
+ e->size = ASYNC_BUF_SIZE;
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+ INIT_WORK(&e->work, aie2_error_worker);
+ }
+
+ ndev->async_events = events;
+
+ XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x",
+ events->event_cnt, events->size);
+ return 0;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+free_events:
+ kfree(events);
+ return ret;
+}
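The allocation above relies on the flexible-array + struct_size() pattern,
with __counted_by() letting the compiler bounds-check event[]. A userspace
sketch of the same shape (struct_size() is simplified here, without the
kernel helper's overflow checking):

	#include <stdio.h>
	#include <stdlib.h>

	struct event { int id; };

	struct events {
		unsigned int event_cnt;
		struct event event[];	/* kernel adds __counted_by(event_cnt) */
	};

	#define struct_size(p, member, n) \
		(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

	int main(void)
	{
		unsigned int n = 4;
		struct events *ev = calloc(1, struct_size(ev, event, n));

		if (!ev)
			return 1;
		ev->event_cnt = n;	/* set the counter before using event[] */
		for (unsigned int i = 0; i < n; i++)
			ev->event[i].id = i;
		printf("%zu bytes for %u events\n", struct_size(ev, event, n), n);
		free(ev);
		return 0;
	}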
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
new file mode 100644
index 000000000000..9e2c9a44f76a
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+#define DECLARE_AIE2_MSG(name, op) \
+ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
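DECLARE_XDNA_MSG_COMMON() itself lives in amdxdna_mailbox_helper.h, which is
not part of this hunk. From how req, resp, hdl and msg are used below, it
plausibly declares something of this shape; treat the details as an
assumption rather than the actual helper:

	/* Assumed expansion, inferred from the call sites in this file */
	#define DECLARE_XDNA_MSG_COMMON(name, op, status)	\
		struct name##_req req = { 0 };			\
		struct name##_resp resp = { status };		\
		struct xdna_notify hdl = {			\
			.data = (u32 *)&resp,			\
			.size = sizeof(resp),			\
		};						\
		struct xdna_mailbox_msg msg = {			\
			.opcode = op,				\
			.send_data = (u8 *)&req,		\
			.send_size = sizeof(req),		\
			.handle = &hdl,				\
		}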
+
+static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ struct xdna_mailbox_msg *msg)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ if (!ndev->mgmt_chann)
+ return -ENODEV;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
+ if (ret == -ETIME) {
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ }
+
+ if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+ msg->opcode, *hdl->data);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
+{
+ DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ req.value = value;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
+{
+ DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
+ return ret;
+ }
+
+ *value = resp.value;
+ return 0;
+}
+
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
+{
+ DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
+
+ req.pasid = pasid;
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
+{
+ DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
+ resp.major, resp.minor);
+
+ version->major = resp.major;
+ version->minor = resp.minor;
+
+ return 0;
+}
+
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
+{
+ DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ metadata->size = resp.info.size;
+ metadata->cols = resp.info.cols;
+ metadata->rows = resp.info.rows;
+
+ metadata->version.major = resp.info.major;
+ metadata->version.minor = resp.info.minor;
+
+ metadata->core.row_count = resp.info.core_rows;
+ metadata->core.row_start = resp.info.core_row_start;
+ metadata->core.dma_channel_count = resp.info.core_dma_channels;
+ metadata->core.lock_count = resp.info.core_locks;
+ metadata->core.event_reg_count = resp.info.core_events;
+
+ metadata->mem.row_count = resp.info.mem_rows;
+ metadata->mem.row_start = resp.info.mem_row_start;
+ metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
+ metadata->mem.lock_count = resp.info.mem_locks;
+ metadata->mem.event_reg_count = resp.info.mem_events;
+
+ metadata->shim.row_count = resp.info.shim_rows;
+ metadata->shim.row_start = resp.info.shim_row_start;
+ metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
+ metadata->shim.lock_count = resp.info.shim_locks;
+ metadata->shim.event_reg_count = resp.info.shim_events;
+
+ return 0;
+}
+
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver)
+{
+ DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ fw_ver->major = resp.major;
+ fw_ver->minor = resp.minor;
+ fw_ver->sub = resp.sub;
+ fw_ver->build = resp.build;
+
+ return 0;
+}
+
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_mailbox_chann_res x2i;
+ struct xdna_mailbox_chann_res i2x;
+ struct cq_pair *cq_pair;
+ u32 intr_reg;
+ int ret;
+
+ req.aie_type = 1;
+ req.start_col = hwctx->start_col;
+ req.num_col = hwctx->num_col;
+ req.num_cq_pairs_requested = 1;
+ req.pasid = hwctx->client->pasid;
+ req.context_priority = 2;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ hwctx->fw_ctx_id = resp.context_id;
+ WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+
+ cq_pair = &resp.cq_pair[0];
+ x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
+ x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
+ x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
+ x2i.rb_size = cq_pair->x2i_q.buf_size;
+
+ i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
+ i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
+ i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
+ i2x.rb_size = cq_pair->i2x_q.buf_size;
+
+ ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
+ if (ret == -EINVAL) {
+ XDNA_ERR(xdna, "not able to create channel");
+ goto out_destroy_context;
+ }
+
+ intr_reg = i2x.mb_head_ptr_reg + 4;
+ hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
+ intr_reg, ret);
+ if (!hwctx->priv->mbox_chann) {
+ XDNA_ERR(xdna, "not able to create channel");
+ ret = -EINVAL;
+ goto out_destroy_context;
+ }
+
+ XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
+ hwctx->name, ret, resp.msix_id);
+ XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
+ hwctx->fw_ctx_id, hwctx->client->pasid);
+
+ return 0;
+
+out_destroy_context:
+ aie2_destroy_context(ndev, hwctx);
+ return ret;
+}
+
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ if (hwctx->fw_ctx_id == -1)
+ return 0;
+
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+
+ req.context_id = hwctx->fw_ctx_id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);
+
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+ XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
+ hwctx->fw_ctx_id);
+ hwctx->priv->mbox_chann = NULL;
+ hwctx->fw_ctx_id = -1;
+
+ return ret;
+}
+
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
+{
+ DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ req.context_id = context_id;
+ req.buf_addr = addr;
+ req.buf_size = size;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
+ context_id, addr, size);
+
+ return 0;
+}
+
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
+ u32 size, u32 *cols_filled)
+{
+ DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ dma_addr_t dma_addr;
+ u32 aie_bitmap = 0;
+ u8 *buff_addr;
+ int ret, idx;
+
+ buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!buff_addr)
+ return -ENOMEM;
+
+ /* Go through each hardware context and mark the AIE columns that are active */
+ list_for_each_entry(client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ }
+
+ *cols_filled = 0;
+ req.dump_buff_addr = dma_addr;
+ req.dump_buff_size = size;
+ req.num_cols = hweight32(aie_bitmap);
+ req.aie_bitmap = aie_bitmap;
+
+ drm_clflush_virt_range(buff_addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
+ goto fail;
+ }
+
+ if (resp.status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ XDNA_DBG(xdna, "Query NPU status completed");
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto fail;
+ }
+
+ if (copy_to_user(buf, buff_addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy NPU status to user space");
+ goto fail;
+ }
+
+ *cols_filled = aie_bitmap;
+
+fail:
+ dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t))
+{
+ struct async_event_msg_req req = { 0 };
+ struct xdna_mailbox_msg msg = {
+ .send_data = (u8 *)&req,
+ .send_size = sizeof(req),
+ .handle = handle,
+ .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
+ .notify_cb = cb,
+ };
+
+ req.buf_addr = addr;
+ req.buf_size = size;
+
+ XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
+ return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
+}
+
+int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 shift = xdna->dev_info->dev_mem_buf_shift;
+ DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct drm_gem_object *gobj;
+ struct amdxdna_gem_obj *abo;
+ int ret, i;
+
+ if (!chann)
+ return -ENODEV;
+
+ if (hwctx->cus->num_cus > MAX_NUM_CUS) {
+ XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwctx->cus->num_cus; i++) {
+ struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
+
+ if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -EINVAL;
+ }
+ abo = to_xdna_obj(gobj);
+
+ if (abo->type != AMDXDNA_BO_DEV) {
+ drm_gem_object_put(gobj);
+ XDNA_ERR(xdna, "Invalid BO type");
+ return -EINVAL;
+ }
+
+ req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
+ abo->mem.dev_addr >> shift);
+ req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
+ XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
+ abo->mem.dev_addr, req.cfgs[i]);
+ drm_gem_object_put(gobj);
+ }
+ req.num_cus = hwctx->cus->num_cus;
+
+ ret = xdna_send_msg_wait(xdna, chann, &msg);
+ if (ret == -ETIME)
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+
+ if (resp.status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
+ return 0;
+ }
+
+ XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
+ msg.opcode, resp.status, ret);
+ return ret;
+}
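The cfgs[] packing above is plain mask arithmetic; a userspace sketch with
invented field masks (the real masks live in aie2_msg_priv.h):

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified FIELD_PREP(): shift into place, clamp to the mask */
	#define FIELD_PREP(mask, val) \
		(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	#define CFG_CU_PDI_ADDR	0x0000ffffu	/* hypothetical bits 15:0 */
	#define CFG_CU_FUNC	0xff000000u	/* hypothetical bits 31:24 */

	int main(void)
	{
		uint64_t dev_addr = 0x40000;	/* made-up dev BO address */
		unsigned int shift = 12, cu_func = 3;
		uint32_t cfg;

		cfg = FIELD_PREP(CFG_CU_PDI_ADDR, dev_addr >> shift);
		cfg |= FIELD_PREP(CFG_CU_FUNC, cu_func);
		printf("cfg = 0x%08x\n", cfg);	/* 0x03000040 */
		return 0;
	}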
+
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ union {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu;
+ } req;
+ struct xdna_mailbox_msg msg;
+ u32 payload_len;
+ void *payload;
+ int cu_idx;
+ int ret;
+ u32 op;
+
+ if (!chann)
+ return -ENODEV;
+
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (!payload) {
+ XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ return -EINVAL;
+ }
+
+ cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
+ if (cu_idx < 0) {
+ XDNA_DBG(xdna, "Invalid cu idx");
+ return -EINVAL;
+ }
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ switch (op) {
+ case ERT_START_CU:
+ if (unlikely(payload_len > sizeof(req.ebuf.payload)))
+ XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
+ req.ebuf.cu_idx = cu_idx;
+ memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
+ msg.send_size = sizeof(req.ebuf);
+ msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
+ break;
+ case ERT_START_NPU: {
+ struct amdxdna_cmd_start_npu *sn = payload;
+
+ if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
+ XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
+ req.dpu.inst_buf_addr = sn->buffer;
+ req.dpu.inst_size = sn->buffer_size;
+ req.dpu.inst_prop_cnt = sn->prop_count;
+ req.dpu.cu_idx = cu_idx;
+ memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
+ msg.send_size = sizeof(req.dpu);
+ msg.opcode = MSG_OP_EXEC_DPU;
+ break;
+ }
+ default:
+ XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+ return -EINVAL;
+ }
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ u32 payload_len;
+ void *payload;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+
+ if (!slot_cf_has_space(offset, payload_len))
+ return -ENOSPC;
+
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = payload_len / sizeof(u32);
+ memcpy(buf->args, payload, payload_len);
+	/* Exact buf size hints the firmware to copy only what is needed */
+ *size = sizeof(*buf) + payload_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ struct amdxdna_cmd_start_npu *sn;
+ u32 payload_len;
+ void *payload;
+ u32 arg_sz;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+ sn = payload;
+ arg_sz = payload_len - sizeof(*sn);
+ if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (!slot_dpu_has_space(offset, arg_sz))
+ return -ENOSPC;
+
+ buf->inst_buf_addr = sn->buffer;
+ buf->inst_size = sn->buffer_size;
+ buf->inst_prop_cnt = sn->prop_count;
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(buf->args, sn->prop_args, arg_sz);
+
+	/* Exact buf size hints the firmware to copy only what is needed */
+ *size += sizeof(*buf) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ u32 this_op = amdxdna_cmd_get_op(abo);
+ void *cmd_buf = cmdbuf_abo->mem.kva;
+ int ret;
+
+ if (this_op != op) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ break;
+ case ERT_START_NPU:
+ ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+done:
+ if (ret) {
+ XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
+ op, ret);
+ }
+ return ret;
+}
+
+static inline struct amdxdna_gem_obj *
+aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+{
+ int idx = get_job_idx(job->seq);
+
+ return job->hwctx->priv->cmd_buf[idx];
+}
+
+static void
+aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
+ struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+{
+ req->buf_addr = cmdbuf_abo->mem.dev_addr;
+ req->buf_size = size;
+ req->count = cnt;
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+ XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
+ req->buf_addr, size, cnt);
+}
+
+static inline u32
+aie2_cmd_op_to_msg_op(u32 op)
+{
+ switch (op) {
+ case ERT_START_CU:
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
+ return MSG_OP_MAX_OPCODE;
+ }
+}
+
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_cmd_chain *payload;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 payload_len;
+ u32 offset = 0;
+ u32 size;
+ int ret;
+ u32 op;
+ u32 i;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (op != ERT_CMD_CHAIN || !payload ||
+ payload_len < struct_size(payload, data, payload->command_count))
+ return -EINVAL;
+
+ for (i = 0; i < payload->command_count; i++) {
+ u32 boh = (u32)(payload->data[i]);
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
+ if (!abo) {
+ XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ return -ENOENT;
+ }
+
+		/* All sub-cmds should have the same op; use the first one. */
+ if (i == 0)
+ op = amdxdna_cmd_get_op(abo);
+
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ amdxdna_gem_put_obj(abo);
+ if (ret)
+ return -EINVAL;
+
+ offset += size;
+ }
+
+ /* The offset is the accumulated total size of the cmd buffer */
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 size;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ if (ret)
+ return ret;
+
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct xdna_mailbox_msg msg;
+ struct sync_bo_req req;
+ int ret = 0;
+
+ req.src_addr = 0;
+ req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.size = abo->mem.size;
+
+ /* Device to Host */
+ req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
+ FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);
+
+ XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
+ req.size, req.src_addr, req.dst_addr);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_SYNC_BO;
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
new file mode 100644
index 000000000000..4e02e744b470
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MSG_PRIV_H_
+#define _AIE2_MSG_PRIV_H_
+
+enum aie2_msg_opcode {
+ MSG_OP_CREATE_CONTEXT = 0x2,
+ MSG_OP_DESTROY_CONTEXT = 0x3,
+ MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_EXECUTE_BUFFER_CF = 0xC,
+ MSG_OP_QUERY_COL_STATUS = 0xD,
+ MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
+ MSG_OP_QUERY_AIE_VERSION = 0xF,
+ MSG_OP_EXEC_DPU = 0x10,
+ MSG_OP_CONFIG_CU = 0x11,
+ MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
+ MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_MAX_XRT_OPCODE,
+ MSG_OP_SUSPEND = 0x101,
+ MSG_OP_RESUME = 0x102,
+ MSG_OP_ASSIGN_MGMT_PASID = 0x103,
+ MSG_OP_INVOKE_SELF_TEST = 0x104,
+ MSG_OP_MAP_HOST_BUFFER = 0x106,
+ MSG_OP_GET_FIRMWARE_VERSION = 0x108,
+ MSG_OP_SET_RUNTIME_CONFIG = 0x10A,
+ MSG_OP_GET_RUNTIME_CONFIG = 0x10B,
+ MSG_OP_REGISTER_ASYNC_EVENT_MSG = 0x10C,
+ MSG_OP_MAX_DRV_OPCODE,
+ MSG_OP_GET_PROTOCOL_VERSION = 0x301,
+ MSG_OP_MAX_OPCODE
+};
+
+enum aie2_msg_status {
+ AIE2_STATUS_SUCCESS = 0x0,
+ /* AIE Error codes */
+ AIE2_STATUS_AIE_SATURATION_ERROR = 0x1000001,
+ AIE2_STATUS_AIE_FP_ERROR = 0x1000002,
+ AIE2_STATUS_AIE_STREAM_ERROR = 0x1000003,
+ AIE2_STATUS_AIE_ACCESS_ERROR = 0x1000004,
+ AIE2_STATUS_AIE_BUS_ERROR = 0x1000005,
+ AIE2_STATUS_AIE_INSTRUCTION_ERROR = 0x1000006,
+ AIE2_STATUS_AIE_ECC_ERROR = 0x1000007,
+ AIE2_STATUS_AIE_LOCK_ERROR = 0x1000008,
+ AIE2_STATUS_AIE_DMA_ERROR = 0x1000009,
+ AIE2_STATUS_AIE_MEM_PARITY_ERROR = 0x100000a,
+ AIE2_STATUS_AIE_PWR_CFG_ERROR = 0x100000b,
+ AIE2_STATUS_AIE_BACKTRACK_ERROR = 0x100000c,
+ AIE2_STATUS_MAX_AIE_STATUS_CODE,
+ /* MGMT ERT Error codes */
+ AIE2_STATUS_MGMT_ERT_SELF_TEST_FAILURE = 0x2000001,
+ AIE2_STATUS_MGMT_ERT_HASH_MISMATCH,
+ AIE2_STATUS_MGMT_ERT_NOAVAIL,
+ AIE2_STATUS_MGMT_ERT_INVALID_PARAM,
+ AIE2_STATUS_MGMT_ERT_ENTER_SUSPEND_FAILURE,
+ AIE2_STATUS_MGMT_ERT_BUSY,
+ AIE2_STATUS_MGMT_ERT_APPLICATION_ACTIVE,
+ MAX_MGMT_ERT_STATUS_CODE,
+ /* APP ERT Error codes */
+ AIE2_STATUS_APP_ERT_FIRST_ERROR = 0x3000001,
+ AIE2_STATUS_APP_INVALID_INSTR,
+ AIE2_STATUS_APP_LOAD_PDI_FAIL,
+ MAX_APP_ERT_STATUS_CODE,
+ /* NPU RTOS Error Codes */
+ AIE2_STATUS_INVALID_INPUT_BUFFER = 0x4000001,
+ AIE2_STATUS_INVALID_COMMAND,
+ AIE2_STATUS_INVALID_PARAM,
+ AIE2_STATUS_INVALID_OPERATION = 0x4000006,
+ AIE2_STATUS_ASYNC_EVENT_MSGS_FULL,
+ AIE2_STATUS_MAX_RTOS_STATUS_CODE,
+ MAX_AIE2_STATUS_CODE
+};
+
+struct assign_mgmt_pasid_req {
+ __u16 pasid;
+ __u16 reserved;
+} __packed;
+
+struct assign_mgmt_pasid_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct map_host_buffer_req {
+ __u32 context_id;
+ __u64 buf_addr;
+ __u64 buf_size;
+} __packed;
+
+struct map_host_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define MAX_CQ_PAIRS 2
+struct cq_info {
+ __u32 head_addr;
+ __u32 tail_addr;
+ __u32 buf_addr;
+ __u32 buf_size;
+};
+
+struct cq_pair {
+ struct cq_info x2i_q;
+ struct cq_info i2x_q;
+};
+
+struct create_ctx_req {
+ __u32 aie_type;
+ __u8 start_col;
+ __u8 num_col;
+ __u16 reserved;
+ __u8 num_cq_pairs_requested;
+ __u8 reserved1;
+ __u16 pasid;
+ __u32 pad[2];
+ __u32 sec_comm_target_type;
+ __u32 context_priority;
+} __packed;
+
+struct create_ctx_resp {
+ enum aie2_msg_status status;
+ __u32 context_id;
+ __u16 msix_id;
+ __u8 num_cq_pairs_allocated;
+ __u8 reserved;
+ struct cq_pair cq_pair[MAX_CQ_PAIRS];
+} __packed;
+
+struct destroy_ctx_req {
+ __u32 context_id;
+} __packed;
+
+struct destroy_ctx_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct execute_buffer_req {
+ __u32 cu_idx;
+ __u32 payload[19];
+} __packed;
+
+struct exec_dpu_req {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 payload[35];
+} __packed;
+
+struct execute_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct aie_tile_info {
+ __u32 size;
+ __u16 major;
+ __u16 minor;
+ __u16 cols;
+ __u16 rows;
+ __u16 core_rows;
+ __u16 mem_rows;
+ __u16 shim_rows;
+ __u16 core_row_start;
+ __u16 mem_row_start;
+ __u16 shim_row_start;
+ __u16 core_dma_channels;
+ __u16 mem_dma_channels;
+ __u16 shim_dma_channels;
+ __u16 core_locks;
+ __u16 mem_locks;
+ __u16 shim_locks;
+ __u16 core_events;
+ __u16 mem_events;
+ __u16 shim_events;
+ __u16 reserved;
+};
+
+struct aie_tile_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_tile_info_resp {
+ enum aie2_msg_status status;
+ struct aie_tile_info info;
+} __packed;
+
+struct aie_version_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_version_info_resp {
+ enum aie2_msg_status status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct aie_column_info_req {
+ __u64 dump_buff_addr;
+ __u32 dump_buff_size;
+ __u32 num_cols;
+ __u32 aie_bitmap;
+} __packed;
+
+struct aie_column_info_resp {
+ enum aie2_msg_status status;
+ __u32 size;
+} __packed;
+
+struct suspend_req {
+ __u32 place_holder;
+} __packed;
+
+struct suspend_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct resume_req {
+ __u32 place_holder;
+} __packed;
+
+struct resume_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct check_header_hash_req {
+ __u64 hash_high;
+ __u64 hash_low;
+} __packed;
+
+struct check_header_hash_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct query_error_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct query_error_resp {
+ enum aie2_msg_status status;
+ __u32 num_err;
+ __u32 has_next_err;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct protocol_version_req {
+ __u32 reserved;
+} __packed;
+
+struct protocol_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+} __packed;
+
+struct firmware_version_req {
+ __u32 reserved;
+} __packed;
+
+struct firmware_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+ __u32 sub;
+ __u32 build;
+} __packed;
+
+#define MAX_NUM_CUS 32
+#define AIE2_MSG_CFG_CU_PDI_ADDR GENMASK(16, 0)
+#define AIE2_MSG_CFG_CU_FUNC GENMASK(24, 17)
+struct config_cu_req {
+ __u32 num_cus;
+ __u32 cfgs[MAX_NUM_CUS];
+} __packed;
+
+struct config_cu_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct set_runtime_cfg_req {
+ __u32 type;
+ __u64 value;
+} __packed;
+
+struct set_runtime_cfg_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct get_runtime_cfg_req {
+ __u32 type;
+} __packed;
+
+struct get_runtime_cfg_resp {
+ enum aie2_msg_status status;
+ __u64 value;
+} __packed;
+
+enum async_event_type {
+ ASYNC_EVENT_TYPE_AIE_ERROR,
+ ASYNC_EVENT_TYPE_EXCEPTION,
+ MAX_ASYNC_EVENT_TYPE
+};
+
+#define ASYNC_BUF_SIZE SZ_8K
+struct async_event_msg_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct async_event_msg_resp {
+ enum aie2_msg_status status;
+ enum async_event_type type;
+} __packed;
+
+#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
+#define slot_cf_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+struct cmd_chain_slot_execbuf_cf {
+ __u32 cu_idx;
+ __u32 arg_cnt;
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+#define slot_dpu_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_dpu, args[0]))
+struct cmd_chain_slot_dpu {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 arg_cnt;
+#define MAX_DPU_ARGS_SIZE (34 * sizeof(__u32))
+ __u32 args[] __counted_by(arg_cnt);
+};
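+
+/*
+ * Worked example: the has_space macros above check that one more slot
+ * still fits in the 4K chain buffer. With offset = 4000 and
+ * payload_size = 64, 4096 - (4000 + 64) = 32, which is greater than
+ * the 8-byte cu_idx/arg_cnt header of cmd_chain_slot_execbuf_cf, so
+ * slot_cf_has_space() evaluates true.
+ */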
+
+struct cmd_chain_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 count;
+} __packed;
+
+struct cmd_chain_resp {
+ enum aie2_msg_status status;
+ __u32 fail_cmd_idx;
+ enum aie2_msg_status fail_cmd_status;
+} __packed;
+
+#define AIE2_MSG_SYNC_BO_SRC_TYPE GENMASK(3, 0)
+#define AIE2_MSG_SYNC_BO_DST_TYPE GENMASK(7, 4)
+struct sync_bo_req {
+ __u64 src_addr;
+ __u64 dst_addr;
+ __u32 size;
+#define SYNC_BO_DEV_MEM 0
+#define SYNC_BO_HOST_MEM 2
+ __u32 type;
+} __packed;
+
+struct sync_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
new file mode 100644
index 000000000000..5a058e565b01
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static int aie2_max_col = XRS_MAX_COL;
+module_param(aie2_max_col, uint, 0600);
+MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
+
+/*
+ * The management mailbox channel is allocated by firmware.
+ * The related register and ring buffer information is in the SRAM BAR.
+ * This struct is the register layout.
+ */
+#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
+struct mgmt_mbox_chann_info {
+ __u32 x2i_tail;
+ __u32 x2i_head;
+ __u32 x2i_buf;
+ __u32 x2i_buf_sz;
+ __u32 i2x_tail;
+ __u32 i2x_head;
+ __u32 i2x_buf;
+ __u32 i2x_buf_sz;
+ __u32 magic;
+ __u32 msi_id;
+ __u32 prot_major;
+ __u32 prot_minor;
+ __u32 rsvd[4];
+};
+
+static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ /*
+ * The mailbox behavior supported by the driver is defined by
+ * ndev->priv->protocol_major and protocol_minor.
+ *
+ * When protocol_major and fw_major differ, the driver and firmware
+ * are incompatible.
+ */
+ if (ndev->priv->protocol_major != fw_major) {
+ XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
+ fw_major, fw_minor);
+ return -EINVAL;
+ }
+
+ /*
+ * When protocol_minor is greater than fw_minor, the driver relies
+ * on operations the installed firmware does not support.
+ */
+ if (ndev->priv->protocol_minor > fw_minor) {
+ XDNA_ERR(xdna, "Firmware minor version smaller than supported");
+ return -EINVAL;
+ }
+ return 0;
+}
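+
+/*
+ * Worked example: assuming a driver built with protocol 6.1, firmware
+ * reporting 5.x or 7.x is rejected (major mismatch), 6.0 is rejected
+ * (the driver needs minor >= 1), while 6.1 or 6.5 is accepted.
+ */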
+
+static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
+ XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
+ XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
+ XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
+ XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
+ XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
+ XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
+}
+
+static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
+{
+ struct mgmt_mbox_chann_info info_regs;
+ struct xdna_mailbox_chann_res *i2x;
+ struct xdna_mailbox_chann_res *x2i;
+ u32 addr, off;
+ u32 *reg;
+ int ret;
+ int i;
+
+ /*
+ * Once the firmware is alive, it writes the management channel
+ * information into the SRAM BAR and writes the address of that
+ * information at the FW_ALIVE_OFF offset in the SRAM BAR.
+ *
+ * Reading a non-zero value from FW_ALIVE_OFF implies that the
+ * firmware is alive.
+ */
+ ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
+ addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret || !addr)
+ return -ETIME;
+
+ off = AIE2_SRAM_OFF(ndev, addr);
+ reg = (u32 *)&info_regs;
+ for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
+ reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
+
+ if (info_regs.magic != MGMT_MBOX_MAGIC) {
+ XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i2x = &ndev->mgmt_i2x;
+ x2i = &ndev->mgmt_x2i;
+
+ i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
+ i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
+ i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
+ i2x->rb_size = info_regs.i2x_buf_sz;
+
+ x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
+ x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
+ x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
+ x2i->rb_size = info_regs.x2i_buf_sz;
+
+ ndev->mgmt_chan_idx = info_regs.msi_id;
+ ndev->mgmt_prot_major = info_regs.prot_major;
+ ndev->mgmt_prot_minor = info_regs.prot_minor;
+
+ ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
+
+done:
+ aie2_dump_chann_info_debug(ndev);
+
+ /* Must clear address at FW_ALIVE_OFF */
+ writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
+
+ return ret;
+}
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val)
+{
+ const struct rt_config *cfg;
+ u32 value;
+ int ret;
+
+ for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
+ if (cfg->category != category)
+ continue;
+
+ value = val ? *val : cfg->value;
+ ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
+ cfg->type, value);
+ return ret;
+ }
+ }
+
+ return 0;
+}
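+
+/*
+ * For illustration, the per-device rt_config table is zero-terminated
+ * and scanned by category. A hypothetical table
+ *
+ *   static const struct rt_config example_rt_cfg[] = {
+ *           { .type = 1, .value = 1, .category = AIE2_RT_CFG_INIT },
+ *           { .type = 2, .value = 0, .category = AIE2_RT_CFG_CLK_GATING },
+ *           { 0 },
+ *   };
+ *
+ * would make aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL) send only
+ * the first entry; a non-NULL val overrides the table value.
+ */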
+
+static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_suspend_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Suspend firmware failed");
+ return ret;
+ }
+
+ ret = aie2_resume_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Resume firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Runtime config failed");
+ return ret;
+ }
+
+ ret = aie2_assign_mgmt_pasid(ndev, 0);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Can not assign PASID");
+ return ret;
+ }
+
+ ret = aie2_xdna_reset(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Reset firmware failed");
+ return ret;
+ }
+
+ if (!ndev->async_events)
+ return 0;
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Send async events failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "query firmware version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_version(ndev, &ndev->version);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
+{
+ if (aie2_suspend_fw(ndev))
+ XDNA_ERR(ndev->xdna, "Suspend_fw failed");
+ XDNA_DBG(ndev->xdna, "Firmware suspended");
+}
+
+static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ hwctx->start_col = action->part.start_col;
+ hwctx->num_col = action->part.ncols;
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "create context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_unload(void *cb_arg)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_dev_hdl *ndev;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ ndev = xdna->dev_handle;
+ ndev->dft_dpm_level = dpm_level;
+ if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
+ return 0;
+
+ return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+}
+
+static struct xrs_action_ops aie2_xrs_actions = {
+ .load = aie2_xrs_load,
+ .unload = aie2_xrs_unload,
+ .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
+};
+
+static void aie2_hw_stop(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+ aie2_smu_fini(ndev);
+ pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
+}
+
+static int aie2_hw_start(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ struct xdna_mailbox_res mbox_res;
+ u32 xdna_mailbox_intr_reg;
+ int mgmt_mb_irq, ret;
+
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ ret = aie2_smu_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
+ goto disable_dev;
+ }
+
+ ret = aie2_psp_start(ndev->psp_hdl);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
+ goto fini_smu;
+ }
+
+ ret = aie2_get_mgmt_chann_info(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "firmware is not alive");
+ goto stop_psp;
+ }
+
+ mbox_res.ringbuf_base = ndev->sram_base;
+ mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
+ mbox_res.mbox_base = ndev->mbox_base;
+ mbox_res.mbox_size = MBOX_SIZE(ndev);
+ mbox_res.name = "xdna_mailbox";
+ ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
+ if (!ndev->mbox) {
+ XDNA_ERR(xdna, "failed to create mailbox device");
+ ret = -ENODEV;
+ goto stop_psp;
+ }
+
+ mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
+ if (mgmt_mb_irq < 0) {
+ ret = mgmt_mb_irq;
+ XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
+ goto stop_psp;
+ }
+
+ xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
+ ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
+ &ndev->mgmt_x2i,
+ &ndev->mgmt_i2x,
+ xdna_mailbox_intr_reg,
+ mgmt_mb_irq);
+ if (!ndev->mgmt_chann) {
+ XDNA_ERR(xdna, "failed to create management mailbox channel");
+ ret = -EINVAL;
+ goto stop_psp;
+ }
+
+ ret = aie2_pm_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ndev->dev_status = AIE2_DEV_START;
+
+ return 0;
+
+destroy_mgmt_chann:
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+fini_smu:
+ aie2_smu_fini(ndev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static int aie2_init(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
+ struct init_config xrs_cfg = { 0 };
+ struct amdxdna_dev_hdl *ndev;
+ struct psp_config psp_conf;
+ const struct firmware *fw;
+ unsigned long bars = 0;
+ int i, nvec, ret;
+
+ ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->priv = xdna->dev_info->dev_priv;
+ ndev->xdna = xdna;
+
+ ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
+ ndev->priv->fw_path, ret);
+ return ret;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
+ goto release_fw;
+ }
+
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ set_bit(PSP_REG_BAR(ndev, i), &bars);
+
+ set_bit(xdna->dev_info->sram_bar, &bars);
+ set_bit(xdna->dev_info->smu_bar, &bars);
+ set_bit(xdna->dev_info->mbox_bar, &bars);
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!test_bit(i, &bars))
+ continue;
+ tbl[i] = pcim_iomap(pdev, i, 0);
+ if (!tbl[i]) {
+ XDNA_ERR(xdna, "map bar %d failed", i);
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ }
+
+ ndev->sram_base = tbl[xdna->dev_info->sram_bar];
+ ndev->smu_base = tbl[xdna->dev_info->smu_bar];
+ ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
+ goto release_fw;
+ }
+
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ XDNA_ERR(xdna, "does not get number of interrupt vector");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
+ goto release_fw;
+ }
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (ret) {
+ XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
+ goto free_irq;
+ }
+
+ psp_conf.fw_size = fw->size;
+ psp_conf.fw_buf = fw->data;
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
+ ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
+ if (!ndev->psp_hdl) {
+ XDNA_ERR(xdna, "failed to create psp");
+ ret = -ENOMEM;
+ goto disable_sva;
+ }
+ xdna->dev_handle = ndev;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "start npu failed, ret %d", ret);
+ goto disable_sva;
+ }
+
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
+ goto stop_hw;
+ }
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
+ xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
+ for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
+ xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
+ xrs_cfg.sys_eff_factor = 1;
+ xrs_cfg.ddev = &xdna->ddev;
+ xrs_cfg.actions = &aie2_xrs_actions;
+ xrs_cfg.total_col = ndev->total_col;
+
+ xdna->xrs_hdl = xrsm_init(&xrs_cfg);
+ if (!xdna->xrs_hdl) {
+ XDNA_ERR(xdna, "Initialize resolver failed");
+ ret = -EINVAL;
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
+ goto async_event_free;
+ }
+
+ /* Issue a command to make sure firmware handled async events */
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(xdna, "Re-query firmware version failed");
+ goto async_event_free;
+ }
+
+ release_firmware(fw);
+ return 0;
+
+async_event_free:
+ aie2_error_async_events_free(ndev);
+stop_hw:
+ aie2_hw_stop(xdna);
+disable_sva:
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+free_irq:
+ pci_free_irq_vectors(pdev);
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void aie2_fini(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ aie2_hw_stop(xdna);
+ aie2_error_async_events_free(ndev);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ pci_free_irq_vectors(pdev);
+}
+
+static int aie2_get_aie_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_status status;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret;
+
+ ndev = xdna->dev_handle;
+ if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
+ return -EFAULT;
+ }
+
+ if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
+ status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
+ return -EINVAL;
+ }
+
+ ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
+ status.buffer_size, &status.cols_filled);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_aie_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_metadata *meta;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->col_size = ndev->metadata.size;
+ meta->cols = ndev->metadata.cols;
+ meta->rows = ndev->metadata.rows;
+
+ meta->version.major = ndev->metadata.version.major;
+ meta->version.minor = ndev->metadata.version.minor;
+
+ meta->core.row_count = ndev->metadata.core.row_count;
+ meta->core.row_start = ndev->metadata.core.row_start;
+ meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
+ meta->core.lock_count = ndev->metadata.core.lock_count;
+ meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
+
+ meta->mem.row_count = ndev->metadata.mem.row_count;
+ meta->mem.row_start = ndev->metadata.mem.row_start;
+ meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
+ meta->mem.lock_count = ndev->metadata.mem.lock_count;
+ meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
+
+ meta->shim.row_count = ndev->metadata.shim.row_count;
+ meta->shim.row_start = ndev->metadata.shim.row_start;
+ meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
+ meta->shim.lock_count = ndev->metadata.shim.lock_count;
+ meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
+ ret = -EFAULT;
+
+ kfree(meta);
+ return ret;
+}
+
+static int aie2_get_aie_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ version.major = ndev->version.major;
+ version.minor = ndev->version.minor;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_firmware_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_firmware_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ version.major = xdna->fw_ver.major;
+ version.minor = xdna->fw_ver.minor;
+ version.patch = xdna->fw_ver.sub;
+ version.build = xdna->fw_ver.build;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_power_mode mode = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ mode.power_mode = ndev->pw_mode;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_clock_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_clock_metadata *clock;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return -ENOMEM;
+
+ snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
+ "MP-NPU Clock");
+ clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
+ snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
+ clock->h_clock.freq_mhz = ndev->hclk_freq;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
+ ret = -EFAULT;
+
+ kfree(clock);
+ return ret;
+}
+
+static int aie2_get_hwctx_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drm_query_hwctx *tmp;
+ struct amdxdna_client *tmp_client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ bool overflow = false;
+ u32 req_bytes = 0;
+ u32 hw_i = 0;
+ int ret = 0;
+ int idx;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ buf = u64_to_user_ptr(args->buffer);
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&tmp_client->hwctx_srcu);
+ amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
+ req_bytes += sizeof(*tmp);
+ if (args->buffer_size < req_bytes) {
+ /* Continue iterating to get the required size */
+ overflow = true;
+ continue;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+ tmp->pid = tmp_client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+
+ if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
+ ret = -EFAULT;
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ goto out;
+ }
+ hw_i++;
+ }
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ }
+
+ if (overflow) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
+ args->buffer_size, req_bytes);
+ ret = -EINVAL;
+ }
+
+out:
+ kfree(tmp);
+ args->buffer_size = req_bytes;
+ return ret;
+}
+
+static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_QUERY_AIE_STATUS:
+ ret = aie2_get_aie_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_METADATA:
+ ret = aie2_get_aie_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_VERSION:
+ ret = aie2_get_aie_version(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
+ ret = aie2_get_clock_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
+ ret = aie2_get_hwctx_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
+ ret = aie2_get_firmware_version(client, args);
+ break;
+ case DRM_AMDXDNA_GET_POWER_MODE:
+ ret = aie2_get_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_set_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_drm_set_power_mode power_state;
+ enum amdxdna_power_mode_type power_mode;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
+ sizeof(power_state))) {
+ XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
+ return -EFAULT;
+ }
+
+ if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
+ return -EINVAL;
+
+ power_mode = power_state.power_mode;
+ if (power_mode > POWER_MODE_TURBO) {
+ XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
+ return -EINVAL;
+ }
+
+ return aie2_pm_set_mode(xdna->dev_handle, power_mode);
+}
+
+static int aie2_set_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_SET_POWER_MODE:
+ ret = aie2_set_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+const struct amdxdna_dev_ops aie2_ops = {
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_start,
+ .suspend = aie2_hw_stop,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
+ .hmm_invalidate = aie2_hmm_invalidate,
+ .hwctx_suspend = aie2_hwctx_suspend,
+ .hwctx_resume = aie2_hwctx_resume,
+};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
new file mode 100644
index 000000000000..f2d95531ddc2
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_PCI_H_
+#define _AIE2_PCI_H_
+
+#include <drm/amdxdna_accel.h>
+#include <linux/semaphore.h>
+
+#include "amdxdna_mailbox.h"
+
+#define AIE2_INTERVAL 20000 /* us */
+#define AIE2_TIMEOUT 1000000 /* us */
+
+/* Firmware determines device memory base address and size */
+#define AIE2_DEVM_BASE 0x4000000
+#define AIE2_DEVM_SIZE SZ_64M
+
+#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))
+
+#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
+#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
+
+#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
+#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
+#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)
+
+#define SMU_REG(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
+})
+#define SRAM_GET_ADDR(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
+})
+
+#define CHAN_SLOT_SZ SZ_8K
+#define MBOX_SIZE(ndev) \
+({ \
+ typeof(ndev) _ndev = (ndev); \
+ ((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
+ pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
+})
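+
+/*
+ * Note: SMU_REG(), SRAM_GET_ADDR() and MBOX_SIZE() are GNU statement
+ * expressions with a typeof() local so their ndev argument is
+ * evaluated exactly once; a call such as MBOX_SIZE(lookup_ndev())
+ * (hypothetical) would not repeat the lookup's side effects.
+ */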
+
+enum aie2_smu_reg_idx {
+ SMU_CMD_REG = 0,
+ SMU_ARG_REG,
+ SMU_INTR_REG,
+ SMU_RESP_REG,
+ SMU_OUT_REG,
+ SMU_MAX_REGS /* Keep this at the end */
+};
+
+enum aie2_sram_reg_idx {
+ MBOX_CHANN_OFF = 0,
+ FW_ALIVE_OFF,
+ SRAM_MAX_INDEX /* Keep this at the end */
+};
+
+enum psp_reg_idx {
+ PSP_CMD_REG = 0,
+ PSP_ARG0_REG,
+ PSP_ARG1_REG,
+ PSP_ARG2_REG,
+ PSP_NUM_IN_REGS, /* number of input registers */
+ PSP_INTR_REG = PSP_NUM_IN_REGS,
+ PSP_STATUS_REG,
+ PSP_RESP_REG,
+ PSP_MAX_REGS /* Keep this at the end */
+};
+
+struct amdxdna_client;
+struct amdxdna_fw_ver;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+struct psp_config {
+ const void *fw_buf;
+ u32 fw_size;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+struct aie_version {
+ u16 major;
+ u16 minor;
+};
+
+struct aie_tile_metadata {
+ u16 row_count;
+ u16 row_start;
+ u16 dma_channel_count;
+ u16 lock_count;
+ u16 event_reg_count;
+};
+
+struct aie_metadata {
+ u32 size;
+ u16 cols;
+ u16 rows;
+ struct aie_version version;
+ struct aie_tile_metadata core;
+ struct aie_tile_metadata mem;
+ struct aie_tile_metadata shim;
+};
+
+enum rt_config_category {
+ AIE2_RT_CFG_INIT,
+ AIE2_RT_CFG_CLK_GATING,
+};
+
+struct rt_config {
+ u32 type;
+ u32 value;
+ u32 category;
+};
+
+struct dpm_clk_freq {
+ u32 npuclk;
+ u32 hclk;
+};
+
+/*
+ * Define the maximum number of pending commands in a hardware context.
+ * Must be power of 2!
+ */
+#define HWCTX_MAX_CMDS 4
+#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
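+/*
+ * Note: because HWCTX_MAX_CMDS is a power of 2, the mask in
+ * get_job_idx() is a cheap modulo, e.g. seq = 5 -> 5 & 3 = 1, so job
+ * slots are reused as a 4-deep ring while sequence numbers grow.
+ */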
+struct amdxdna_hwctx_priv {
+ struct amdxdna_gem_obj *heap;
+ void *mbox_chann;
+
+ struct drm_gpu_scheduler sched;
+ struct drm_sched_entity entity;
+
+ struct mutex io_lock; /* protect seq and cmd order */
+ struct wait_queue_head job_free_wq;
+ u32 num_pending;
+ u64 seq;
+ struct semaphore job_sem;
+ bool job_done;
+
+ /* Completed job counter */
+ u64 completed;
+
+ struct amdxdna_gem_obj *cmd_buf[HWCTX_MAX_CMDS];
+ struct drm_syncobj *syncobj;
+};
+
+enum aie2_dev_status {
+ AIE2_DEV_UNINIT,
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
+
+struct amdxdna_dev_hdl {
+ struct amdxdna_dev *xdna;
+ const struct amdxdna_dev_priv *priv;
+ void __iomem *sram_base;
+ void __iomem *smu_base;
+ void __iomem *mbox_base;
+ struct psp_device *psp_hdl;
+
+ struct xdna_mailbox_chann_res mgmt_x2i;
+ struct xdna_mailbox_chann_res mgmt_i2x;
+ u32 mgmt_chan_idx;
+ u32 mgmt_prot_major;
+ u32 mgmt_prot_minor;
+
+ u32 total_col;
+ struct aie_version version;
+ struct aie_metadata metadata;
+
+ /* Power management and clock */
+ enum amdxdna_power_mode_type pw_mode;
+ u32 dpm_level;
+ u32 dft_dpm_level;
+ u32 max_dpm_level;
+ u32 clk_gating;
+ u32 npuclk_freq;
+ u32 hclk_freq;
+
+ /* Mailbox and the management channel */
+ struct mailbox *mbox;
+ struct mailbox_channel *mgmt_chann;
+ struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
+ u32 hwctx_num;
+};
+
+#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
+ [reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
+
+struct aie2_bar_off_pair {
+ int bar_idx;
+ u32 offset;
+};
+
+struct aie2_hw_ops {
+ int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+};
+
+struct amdxdna_dev_priv {
+ const char *fw_path;
+ u64 protocol_major;
+ u64 protocol_minor;
+ const struct rt_config *rt_config;
+ const struct dpm_clk_freq *dpm_clk_tbl;
+
+#define COL_ALIGN_NONE 0
+#define COL_ALIGN_NATURE 1
+ u32 col_align;
+ u32 mbox_dev_addr;
+ /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
+ u32 mbox_size;
+ u32 sram_dev_addr;
+ struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
+ struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
+ struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
+ struct aie2_hw_ops hw_ops;
+};
+
+extern const struct amdxdna_dev_ops aie2_ops;
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val);
+
+/* aie2 npu hw config */
+extern const struct dpm_clk_freq npu1_dpm_clk_table[];
+extern const struct dpm_clk_freq npu4_dpm_clk_table[];
+extern const struct rt_config npu1_default_rt_cfg[];
+extern const struct rt_config npu4_default_rt_cfg[];
+
+/* aie2_smu.c */
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+
+/* aie2_pm.c */
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
+
+/* aie2_psp.c */
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
+int aie2_psp_start(struct psp_device *psp);
+void aie2_psp_stop(struct psp_device *psp);
+
+/* aie2_error.c */
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_msg_thread(void *data);
+
+/* aie2_message.c */
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver);
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t));
+int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+
+/* aie2_hwctx.c */
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+void aie2_restart_ctx(struct amdxdna_client *client);
+
+#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
new file mode 100644
index 000000000000..426c38fce848
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define AIE2_CLK_GATING_ENABLE 1
+#define AIE2_CLK_GATING_DISABLE 0
+
+static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
+ if (ret)
+ return ret;
+
+ ndev->clk_gating = val;
+ return 0;
+}
+
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ if (ndev->dev_status != AIE2_DEV_UNINIT) {
+ /* Resume device */
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
+ ndev->max_dpm_level++;
+ ndev->max_dpm_level--;
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = POWER_MODE_DEFAULT;
+ ndev->dft_dpm_level = ndev->max_dpm_level;
+
+ return 0;
+}
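+
+/*
+ * Worked example: the scan above walks dpm_clk_tbl[] until the
+ * zero-terminated entry and then steps back one. For a hypothetical
+ * table { {400, 800}, {600, 1200}, {0, 0} } it stops at index 2 and
+ * yields max_dpm_level = 1.
+ */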
+
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 clk_gating, dpm_level;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (ndev->pw_mode == target)
+ return 0;
+
+ switch (target) {
+ case POWER_MODE_TURBO:
+ if (ndev->hwctx_num) {
+ XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
+ return -EINVAL;
+ }
+
+ clk_gating = AIE2_CLK_GATING_DISABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_HIGH:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_DEFAULT:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->dft_dpm_level;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, clk_gating);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = target;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
new file mode 100644
index 000000000000..dc3a072ce3b6
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+#define PSP_STATUS_READY BIT(31)
+
+/* PSP commands */
+#define PSP_VALIDATE 1
+#define PSP_START 2
+#define PSP_RELEASE_TMR 3
+
+/* PSP special arguments */
+#define PSP_START_COPY_FW 1
+
+/* PSP response error code */
+#define PSP_ERROR_CANCEL 0xFFFF0002
+#define PSP_ERROR_BAD_STATE 0xFFFF0007
+
+#define PSP_FW_ALIGN 0x10000
+#define PSP_POLL_INTERVAL 20000 /* us */
+#define PSP_POLL_TIMEOUT 1000000 /* us */
+
+#define PSP_REG(p, reg) ((p)->psp_regs[reg])
+
+struct psp_device {
+ struct drm_device *ddev;
+ struct psp_config conf;
+ u32 fw_buf_sz;
+ u64 fw_paddr;
+ void *fw_buffer;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+static int psp_exec(struct psp_device *psp, u32 *reg_vals)
+{
+ u32 resp_code;
+ int ret, i;
+ u32 ready;
+
+ /* Write command and argument registers */
+ for (i = 0; i < PSP_NUM_IN_REGS; i++)
+ writel(reg_vals[i], PSP_REG(psp, i));
+
+ /* clear and set PSP INTR register to kick off */
+ writel(0, PSP_REG(psp, PSP_INTR_REG));
+ writel(1, PSP_REG(psp, PSP_INTR_REG));
+
+ /* The PSP should now be busy; wait for ready so we know the task is done. */
+ ret = readx_poll_timeout(readl, PSP_REG(psp, PSP_STATUS_REG), ready,
+ FIELD_GET(PSP_STATUS_READY, ready),
+ PSP_POLL_INTERVAL, PSP_POLL_TIMEOUT);
+ if (ret) {
+ drm_err(psp->ddev, "PSP is not ready, ret 0x%x", ret);
+ return ret;
+ }
+
+ resp_code = readl(PSP_REG(psp, PSP_RESP_REG));
+ if (resp_code) {
+ drm_err(psp->ddev, "fw return error 0x%x", resp_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void aie2_psp_stop(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS] = { PSP_RELEASE_TMR, };
+ int ret;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret)
+ drm_err(psp->ddev, "release tmr failed, ret %d", ret);
+}
+
+int aie2_psp_start(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS];
+ int ret;
+
+ reg_vals[0] = PSP_VALIDATE;
+ reg_vals[1] = lower_32_bits(psp->fw_paddr);
+ reg_vals[2] = upper_32_bits(psp->fw_paddr);
+ reg_vals[3] = psp->fw_buf_sz;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to validate fw, ret %d", ret);
+ return ret;
+ }
+
+ memset(reg_vals, 0, sizeof(reg_vals));
+ reg_vals[0] = PSP_START;
+ reg_vals[1] = PSP_START_COPY_FW;
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to start fw, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
+{
+ struct psp_device *psp;
+ u64 offset;
+
+ psp = drmm_kzalloc(ddev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->ddev = ddev;
+ memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
+
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ if (!psp->fw_buffer) {
+ drm_err(ddev, "no memory for fw buffer");
+ return NULL;
+ }
+
+ /*
+ * The AMD Platform Security Processor (PSP) requires a host
+ * physical address to load the NPU firmware.
+ */
+ psp->fw_paddr = virt_to_phys(psp->fw_buffer);
+ offset = ALIGN(psp->fw_paddr, PSP_FW_ALIGN) - psp->fw_paddr;
+ psp->fw_paddr += offset;
+ memcpy(psp->fw_buffer + offset, conf->fw_buf, conf->fw_size);
+
+ return psp;
+}
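+
+/*
+ * Worked example: the buffer is over-allocated by one PSP_FW_ALIGN so
+ * the image can start on a 64K boundary inside it. If virt_to_phys()
+ * yields 0x12345678 (hypothetical), ALIGN(0x12345678, 0x10000) is
+ * 0x12350000, offset is 0xa988, and the firmware is copied to
+ * fw_buffer + 0xa988 with fw_paddr = 0x12350000.
+ */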
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
new file mode 100644
index 000000000000..73388443c676
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define SMU_RESULT_OK 1
+
+/* SMU commands */
+#define AIE2_SMU_POWER_ON 0x3
+#define AIE2_SMU_POWER_OFF 0x4
+#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
+#define AIE2_SMU_SET_HCLK_FREQ 0x6
+#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
+#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+
+static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
+ u32 reg_arg, u32 *out)
+{
+ u32 resp;
+ int ret;
+
+ writel(0, SMU_REG(ndev, SMU_RESP_REG));
+ writel(reg_arg, SMU_REG(ndev, SMU_ARG_REG));
+ writel(reg_cmd, SMU_REG(ndev, SMU_CMD_REG));
+
+ /* Clear and set SMU_INTR_REG to kick off */
+ writel(0, SMU_REG(ndev, SMU_INTR_REG));
+ writel(1, SMU_REG(ndev, SMU_INTR_REG));
+
+ ret = readx_poll_timeout(readl, SMU_REG(ndev, SMU_RESP_REG), resp,
+ resp, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d timed out", reg_cmd);
+ return ret;
+ }
+
+ if (out)
+ *out = readl(SMU_REG(ndev, SMU_OUT_REG));
+
+ if (resp != SMU_RESULT_OK) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ u32 freq;
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ }
+ ndev->npuclk_freq = freq;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ }
+ ndev->hclk_freq = freq;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
+ ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ndev->priv->hw_ops.set_dpm(ndev, 0);
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret)
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.c b/drivers/accel/amdxdna/aie2_solver.c
new file mode 100644
index 000000000000..2013d1f13aae
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include "aie2_solver.h"
+
+struct partition_node {
+ struct list_head list;
+ u32 nshared; /* # shared requests */
+ u32 start_col; /* start column */
+ u32 ncols; /* # columns */
+ bool exclusive; /* can not be shared if set */
+};
+
+struct solver_node {
+ struct list_head list;
+ u64 rid; /* Request ID from consumer */
+
+ struct partition_node *pt_node;
+ void *cb_arg;
+ u32 dpm_level;
+ u32 cols_len;
+ u32 start_cols[] __counted_by(cols_len);
+};
+
+struct solver_rgroup {
+ u32 rgid;
+ u32 nnode;
+ u32 npartition_node;
+
+ DECLARE_BITMAP(resbit, XRS_MAX_COL);
+ struct list_head node_list;
+ struct list_head pt_node_list;
+};
+
+struct solver_state {
+ struct solver_rgroup rgp;
+ struct init_config cfg;
+ struct xrs_action_ops *actions;
+};
+
+static u32 calculate_gops(struct aie_qos *rqos)
+{
+ u32 service_rate = 0;
+
+ if (rqos->latency)
+ service_rate = (1000 / rqos->latency);
+
+ if (rqos->fps > service_rate)
+ return rqos->fps * rqos->gops;
+
+ return service_rate * rqos->gops;
+}
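+
+/*
+ * Worked example (hypothetical QoS values): with latency = 4 ms,
+ * fps = 30 and gops = 2, the service rate is 1000 / 4 = 250
+ * invocations/s; since 250 > 30 the demand is 250 * 2 = 500 GOPs
+ * rather than the fps-based 30 * 2 = 60.
+ */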
+
+/*
+ * qos_meet() - Check whether the QoS request can be met.
+ */
+static int qos_meet(struct solver_state *xrs, struct aie_qos *rqos, u32 cgops)
+{
+ u32 request_gops = calculate_gops(rqos) * xrs->cfg.sys_eff_factor;
+
+ if (request_gops <= cgops)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * sanity_check() - Do a basic sanity check on the allocation request.
+ */
+static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 cu_clk_freq;
+
+ if (cdop->ncols > xrs->cfg.total_col)
+ return -EINVAL;
+
+ /*
+ * Check that we can find at least one CDO group that meets the
+ * GOPs requirement.
+ */
+ cu_clk_freq = xrs->cfg.clk_list.cu_clk_list[xrs->cfg.clk_list.num_levels - 1];
+
+ if (qos_meet(xrs, rqos, cdop->qos_cap.opc * cu_clk_freq / 1000))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
+{
+ /*
+ * gops is retrieved from the xmodel, so it is always set.
+ * fps and latency are the configurable parameters from the application.
+ */
+ if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
+ return true;
+
+ return false;
+}
+
+static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
+{
+ struct solver_rgroup *rgp = &xrs->rgp;
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 freq, max_dpm_level, level;
+ struct solver_node *node;
+
+ max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
+ /* If no QoS parameters are passed, set it to the max DPM level */
+ if (!is_valid_qos_dpm_params(rqos)) {
+ level = max_dpm_level;
+ goto set_dpm;
+ }
+
+ /* Find one CDO group that meets the GOPs requirement. */
+ for (level = 0; level < max_dpm_level; level++) {
+ freq = xrs->cfg.clk_list.cu_clk_list[level];
+ if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
+ break;
+ }
+
+ /* Raise to the highest DPM level required by any existing session */
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->dpm_level > level)
+ level = node->dpm_level;
+ }
+
+set_dpm:
+ *dpm_level = level;
+ return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
+}
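+
+/*
+ * Note: the first loop above picks the lowest DPM level whose clock
+ * meets the request's GOPs demand; the second raises it to the
+ * highest level any live session already needs, so adding a light
+ * workload never down-clocks a heavy one.
+ */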
+
+static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
+{
+ struct solver_node *node;
+
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->rid == rid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void remove_partition_node(struct solver_rgroup *rgp,
+ struct partition_node *pt_node)
+{
+ pt_node->nshared--;
+ if (pt_node->nshared > 0)
+ return;
+
+ list_del(&pt_node->list);
+ rgp->npartition_node--;
+
+ bitmap_clear(rgp->resbit, pt_node->start_col, pt_node->ncols);
+ kfree(pt_node);
+}
+
+static void remove_solver_node(struct solver_rgroup *rgp,
+ struct solver_node *node)
+{
+ list_del(&node->list);
+ rgp->nnode--;
+
+ if (node->pt_node)
+ remove_partition_node(rgp, node->pt_node);
+
+ kfree(node);
+}
+
+static int get_free_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node;
+ u32 ncols = req->cdo.ncols;
+ u32 col, i;
+
+ for (i = 0; i < snode->cols_len; i++) {
+ col = snode->start_cols[i];
+ if (find_next_bit(xrs->rgp.resbit, XRS_MAX_COL, col) >= col + ncols)
+ break;
+ }
+
+ if (i == snode->cols_len)
+ return -ENODEV;
+
+ pt_node = kzalloc(sizeof(*pt_node), GFP_KERNEL);
+ if (!pt_node)
+ return -ENOMEM;
+
+ pt_node->nshared = 1;
+ pt_node->start_col = col;
+ pt_node->ncols = ncols;
+
+ /*
+ * Always set exclusive to false for now.
+ */
+ pt_node->exclusive = false;
+
+ list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
+ xrs->rgp.npartition_node++;
+ bitmap_set(xrs->rgp.resbit, pt_node->start_col, pt_node->ncols);
+
+ snode->pt_node = pt_node;
+
+ return 0;
+}
+
+static int allocate_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node, *rpt_node = NULL;
+ int idx, ret;
+
+ ret = get_free_partition(xrs, snode, req);
+ if (!ret)
+ return ret;
+
+ /* Try to get a shareable partition */
+ list_for_each_entry(pt_node, &xrs->rgp.pt_node_list, list) {
+ if (pt_node->exclusive)
+ continue;
+
+ if (rpt_node && pt_node->nshared >= rpt_node->nshared)
+ continue;
+
+ for (idx = 0; idx < snode->cols_len; idx++) {
+ if (snode->start_cols[idx] != pt_node->start_col)
+ continue;
+
+ if (req->cdo.ncols != pt_node->ncols)
+ continue;
+
+ rpt_node = pt_node;
+ break;
+ }
+ }
+
+ if (!rpt_node)
+ return -ENODEV;
+
+ rpt_node->nshared++;
+ snode->pt_node = rpt_node;
+
+ return 0;
+}
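+
+/*
+ * Example of the fallback above, with a hypothetical state: if columns
+ * 0-3 are already held by a non-exclusive partition with nshared = 1 and
+ * the request also asks for 4 columns starting at column 0,
+ * get_free_partition() finds no free columns, the loop selects that
+ * partition as rpt_node (it has the lowest share count among matching
+ * candidates), and the new node shares it with nshared bumped to 2.
+ */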
+
+static struct solver_node *create_solver_node(struct solver_state *xrs,
+ struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct solver_node *node;
+ int ret;
+
+ node = kzalloc(struct_size(node, start_cols, cdop->cols_len), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->rid = req->rid;
+ node->cols_len = cdop->cols_len;
+ memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
+
+ ret = allocate_partition(xrs, node, req);
+ if (ret)
+ goto free_node;
+
+ list_add_tail(&node->list, &xrs->rgp.node_list);
+ xrs->rgp.nnode++;
+ return node;
+
+free_node:
+ kfree(node);
+ return ERR_PTR(ret);
+}
+
+static void fill_load_action(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct xrs_action_load *action)
+{
+ action->rid = snode->rid;
+ action->part.start_col = snode->pt_node->start_col;
+ action->part.ncols = snode->pt_node->ncols;
+}
+
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
+{
+ struct xrs_action_load load_act;
+ struct solver_node *snode;
+ struct solver_state *xrs;
+ u32 dpm_level;
+ int ret;
+
+ xrs = (struct solver_state *)hdl;
+
+ ret = sanity_check(xrs, req);
+ if (ret) {
+ drm_err(xrs->cfg.ddev, "invalid request");
+ return ret;
+ }
+
+ if (rg_search_node(&xrs->rgp, req->rid)) {
+ drm_err(xrs->cfg.ddev, "rid %lld is in-use", req->rid);
+ return -EEXIST;
+ }
+
+ snode = create_solver_node(xrs, req);
+ if (IS_ERR(snode))
+ return PTR_ERR(snode);
+
+ fill_load_action(xrs, snode, &load_act);
+ ret = xrs->cfg.actions->load(cb_arg, &load_act);
+ if (ret)
+ goto free_node;
+
+ ret = set_dpm_level(xrs, req, &dpm_level);
+ if (ret)
+ goto free_node;
+
+ snode->dpm_level = dpm_level;
+ snode->cb_arg = cb_arg;
+
+ drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",
+ snode->pt_node->start_col, snode->pt_node->ncols);
+
+ return 0;
+
+free_node:
+ remove_solver_node(&xrs->rgp, snode);
+
+ return ret;
+}
+
+int xrs_release_resource(void *hdl, u64 rid)
+{
+ struct solver_state *xrs = hdl;
+ struct solver_node *node;
+
+ node = rg_search_node(&xrs->rgp, rid);
+ if (!node) {
+ drm_err(xrs->cfg.ddev, "node does not exist");
+ return -ENODEV;
+ }
+
+ xrs->cfg.actions->unload(node->cb_arg);
+ remove_solver_node(&xrs->rgp, node);
+
+ return 0;
+}
+
+void *xrsm_init(struct init_config *cfg)
+{
+ struct solver_rgroup *rgp;
+ struct solver_state *xrs;
+
+ xrs = drmm_kzalloc(cfg->ddev, sizeof(*xrs), GFP_KERNEL);
+ if (!xrs)
+ return NULL;
+
+ memcpy(&xrs->cfg, cfg, sizeof(*cfg));
+
+ rgp = &xrs->rgp;
+ INIT_LIST_HEAD(&rgp->node_list);
+ INIT_LIST_HEAD(&rgp->pt_node_list);
+
+ return xrs;
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.h b/drivers/accel/amdxdna/aie2_solver.h
new file mode 100644
index 000000000000..a2e3c52229e9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_SOLVER_H
+#define _AIE2_SOLVER_H
+
+#define XRS_MAX_COL 128
+
+/*
+ * Structure used to describe a partition. A partition is a column-based
+ * allocation unit described by its start column and number of columns.
+ */
+struct aie_part {
+ u32 start_col;
+ u32 ncols;
+};
+
+/*
+ * The QoS capabilities of a given AIE partition.
+ */
+struct aie_qos_cap {
+ u32 opc; /* operations per cycle */
+ u32 dma_bw; /* DMA bandwidth */
+};
+
+/*
+ * QoS requirement of a resource allocation.
+ */
+struct aie_qos {
+ u32 gops; /* Giga operations */
+ u32 fps; /* Frames per second */
+ u32 dma_bw; /* DMA bandwidth */
+ u32 latency; /* Frame response latency */
+ u32 exec_time; /* Frame execution time */
+ u32 priority; /* Request priority */
+};
+
+/*
+ * Structure used to describe a relocatable CDO (Configuration Data Object).
+ */
+struct cdo_parts {
+ u32 *start_cols; /* Start column array */
+ u32 cols_len; /* Length of start column array */
+ u32 ncols; /* # of columns */
+ struct aie_qos_cap qos_cap; /* CDO QoS capabilities */
+};
+
+/*
+ * Structure used to describe a request to allocate.
+ */
+struct alloc_requests {
+ u64 rid;
+ struct cdo_parts cdo;
+ struct aie_qos rqos; /* Requested QoS */
+};
+
+/*
+ * Load callback argument
+ */
+struct xrs_action_load {
+ u32 rid;
+ struct aie_part part;
+};
+
+/*
+ * Define the available power levels
+ *
+ * POWER_LEVEL_MIN:
+ * Lowest power level. Usually set when all actions are unloaded.
+ *
+ * POWER_LEVEL_n:
+ * Power levels 0 - n; each step is an increase in system frequencies
+ */
+enum power_level {
+ POWER_LEVEL_MIN = 0x0,
+ POWER_LEVEL_0 = 0x1,
+ POWER_LEVEL_1 = 0x2,
+ POWER_LEVEL_2 = 0x3,
+ POWER_LEVEL_3 = 0x4,
+ POWER_LEVEL_4 = 0x5,
+ POWER_LEVEL_5 = 0x6,
+ POWER_LEVEL_6 = 0x7,
+ POWER_LEVEL_7 = 0x8,
+ POWER_LEVEL_NUM,
+};
+
+/*
+ * Structure used to describe the frequency table.
+ * Resource solver chooses the frequency from the table
+ * to meet the QoS requirements.
+ */
+struct clk_list_info {
+ u32 num_levels; /* available power levels */
+ u32 cu_clk_list[POWER_LEVEL_NUM]; /* available AIE clock frequencies in MHz */
+};
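+
+/*
+ * For illustration, a hypothetical table (not real hardware data) with
+ * num_levels = 4 and cu_clk_list = {400, 600, 800, 1000} describes four
+ * selectable AIE clock frequencies in MHz; the solver walks the list from
+ * the lowest entry upward until the QoS request is met.
+ */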
+
+struct xrs_action_ops {
+ int (*load)(void *cb_arg, struct xrs_action_load *action);
+ int (*unload)(void *cb_arg);
+ int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
+};
+
+/*
+ * Structure used to describe information for solver during initialization.
+ */
+struct init_config {
+ u32 total_col;
+ u32 sys_eff_factor; /* system efficiency factor */
+ u32 latency_adj; /* latency adjustment in ms */
+ struct clk_list_info clk_list; /* List of frequencies available in system */
+ struct drm_device *ddev;
+ struct xrs_action_ops *actions;
+};
+
+/*
+ * xrsm_init() - Register resource solver. Resource solver client needs
+ * to call this function to register itself.
+ *
+ * @cfg: The system metrics for resource solver to use
+ *
+ * Return: A resource solver handle
+ *
+ * Note: We should only create one handle per AIE array to be managed.
+ */
+void *xrsm_init(struct init_config *cfg);
+
+/*
+ * xrs_allocate_resource() - Request to allocate resources for a given context
+ * based on the partition metadata in the request. (See struct alloc_requests)
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @req: Input to the resource solver, including the request ID
+ * and partition metadata
+ * @cb_arg: Callback argument pointer
+ *
+ * Return: 0 when successful, or a standard error number when failing.
+ *
+ * Note:
+ * There is no lock mechanism inside the resource solver, so it is
+ * the caller's responsibility to lock down XCLBINs and grab the
+ * necessary locks.
+ */
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg);
+
+/*
+ * xrs_release_resource() - Request to free resources for a given context.
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @rid: The Request ID to identify the requesting context
+ */
+int xrs_release_resource(void *hdl, u64 rid);
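+
+/*
+ * A minimal usage sketch of the three entry points above; the values and
+ * names (my_ops, my_cb_arg) are placeholders, not a real configuration:
+ *
+ *	struct init_config cfg = {
+ *		.total_col = 4,
+ *		.sys_eff_factor = 1,
+ *		.ddev = ddev,
+ *		.actions = &my_ops,
+ *	};
+ *	void *hdl = xrsm_init(&cfg);
+ *	struct alloc_requests req = { .rid = 1 };	// plus cdo and rqos
+ *
+ *	if (hdl && !xrs_allocate_resource(hdl, &req, my_cb_arg)) {
+ *		// ... partition is loaded via actions->load() ...
+ *		xrs_release_resource(hdl, 1);
+ *	}
+ */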
+#endif /* _AIE2_SOLVER_H */
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
new file mode 100644
index 000000000000..d11b1c83d9c3
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define MAX_HWCTX_ID 255
+#define MAX_ARG_COUNT 4095
+
+struct amdxdna_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* for base */
+ struct amdxdna_hwctx *hwctx;
+};
+
+static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
+{
+ return KBUILD_MODNAME;
+}
+
+static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct amdxdna_fence *xdna_fence;
+
+ xdna_fence = container_of(fence, struct amdxdna_fence, base);
+
+ return xdna_fence->hwctx->name;
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = amdxdna_fence_get_driver_name,
+ .get_timeline_name = amdxdna_fence_get_timeline_name,
+};
+
+static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->hwctx = hwctx;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
+ return &fence->base;
+}
+
+void amdxdna_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_suspend(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+void amdxdna_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_resume(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
+ struct srcu_struct *ss)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ synchronize_srcu(ss);
+
+ /* At this point, the user is not able to submit new commands */
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ kfree(hwctx->name);
+ kfree(hwctx);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, count;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ num_masks = 0;
+ else
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+ if (unlikely(count <= num_masks)) {
+ *size = 0;
+ return NULL;
+ }
+ *size = (count - num_masks) * sizeof(u32);
+ }
+ return &cmd->data[num_masks];
+}
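+
+/*
+ * Worked example for the payload math above, with hypothetical header
+ * fields: EXTRA_CU_MASK = 1 gives 1 + 1 = 2 CU mask words, and COUNT = 10
+ * leaves a payload of (10 - 2) * sizeof(u32) = 32 bytes starting at
+ * cmd->data[2]. A count less than or equal to the mask words means there
+ * is no payload, hence the NULL return.
+ */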
+
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, i;
+ u32 *cu_mask;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ return -1;
+
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+ cu_mask = cmd->data;
+ for (i = 0; i < num_masks; i++) {
+ if (cu_mask[i])
+ return ffs(cu_mask[i]) - 1;
+ }
+
+ return -1;
+}
+
+/*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * This guarantees that the hwctx and its resources are released even if the
+ * user never calls amdxdna_drm_destroy_hwctx_ioctl.
+ */
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ XDNA_DBG(client->xdna, "PID %d close HW context %d",
+ client->pid, hwctx->id);
+ xa_erase(&client->hwctx_xa, hwctx->id);
+ mutex_unlock(&client->hwctx_lock);
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+ mutex_lock(&client->hwctx_lock);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_create_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
+ if (!hwctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
+ XDNA_ERR(xdna, "Access QoS info failed");
+ ret = -EFAULT;
+ goto free_hwctx;
+ }
+
+ hwctx->client = client;
+ hwctx->fw_ctx_id = -1;
+ hwctx->num_tiles = args->num_tiles;
+ hwctx->mem_size = args->mem_size;
+ hwctx->max_opc = args->max_opc;
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+ goto free_hwctx;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ if (!hwctx->name) {
+ ret = -ENOMEM;
+ goto rm_id;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ mutex_unlock(&xdna->dev_lock);
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto free_name;
+ }
+ args->handle = hwctx->id;
+ args->syncobj_handle = hwctx->syncobj_hdl;
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
+ drm_dev_exit(idx);
+ return 0;
+
+free_name:
+ kfree(hwctx->name);
+rm_id:
+ xa_erase(&client->hwctx_xa, hwctx->id);
+free_hwctx:
+ kfree(hwctx);
+exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_destroy_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret = 0, idx;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = xa_erase(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ ret = -EINVAL;
+ XDNA_DBG(xdna, "PID %d HW context %d not exist",
+ client->pid, args->handle);
+ goto out;
+ }
+
+ /*
+ * The pushed jobs are handled by the DRM scheduler during destroy.
+ * SRCU is used to synchronize with the exec command ioctls.
+ */
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+
+ XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
+out:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+ u32 buf_size;
+ void *buf;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!xdna->dev_info->ops->hwctx_config)
+ return -EOPNOTSUPP;
+
+ val = args->param_val;
+ buf_size = args->param_val_size;
+
+ switch (args->param_type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ /* For types where param_val is a pointer */
+ if (buf_size > PAGE_SIZE) {
+ XDNA_ERR(xdna, "Config CU param buffer too large");
+ return -E2BIG;
+ }
+
+ /* Hwctx needs to keep buf */
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ break;
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ /* For types where param_val is a plain value */
+ buf = NULL;
+ buf_size = 0;
+ break;
+ default:
+ XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ mutex_unlock(&xdna->dev_lock);
+ kfree(buf);
+ return ret;
+}
+
+static void
+amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ if (!job->bos[i])
+ break;
+ drm_gem_object_put(job->bos[i]);
+ }
+}
+
+static int
+amdxdna_arg_bos_lookup(struct amdxdna_client *client,
+ struct amdxdna_sched_job *job,
+ u32 *bo_hdls, u32 bo_cnt)
+{
+ struct drm_gem_object *gobj;
+ int i, ret;
+
+ job->bo_cnt = bo_cnt;
+ for (i = 0; i < job->bo_cnt; i++) {
+ struct amdxdna_gem_obj *abo;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
+ if (!gobj) {
+ ret = -ENOENT;
+ goto put_shmem_bo;
+ }
+ abo = to_xdna_obj(gobj);
+
+ mutex_lock(&abo->lock);
+ if (abo->pinned) {
+ mutex_unlock(&abo->lock);
+ job->bos[i] = gobj;
+ continue;
+ }
+
+ ret = amdxdna_gem_pin_nolock(abo);
+ if (ret) {
+ mutex_unlock(&abo->lock);
+ drm_gem_object_put(gobj);
+ goto put_shmem_bo;
+ }
+ abo->pinned = true;
+ mutex_unlock(&abo->lock);
+
+ job->bos[i] = gobj;
+ }
+
+ return 0;
+
+put_shmem_bo:
+ amdxdna_arg_bos_put(job);
+ return ret;
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
+{
+ trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_arg_bos_put(job);
+ amdxdna_gem_put_obj(job->cmd_bo);
+}
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_sched_job *job;
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
+ job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
+ job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
+ if (!job->cmd_bo) {
+ XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
+ ret = -EINVAL;
+ goto free_job;
+ }
+ } else {
+ job->cmd_bo = NULL;
+ }
+
+ ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
+ if (ret) {
+ XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
+ goto cmd_put;
+ }
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
+ client->pid, hwctx_hdl);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_ERR(xdna, "HW Context is not ready");
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ job->hwctx = hwctx;
+ job->mm = current->mm;
+
+ job->fence = amdxdna_fence_create(hwctx);
+ if (!job->fence) {
+ XDNA_ERR(xdna, "Failed to create fence");
+ ret = -ENOMEM;
+ goto unlock_srcu;
+ }
+ kref_init(&job->refcnt);
+
+ ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
+ if (ret)
+ goto put_fence;
+
+ /*
+ * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
+ * resources after synchronize_srcu(). The submitted jobs are handled
+ * by the queue, e.g. the DRM scheduler, in the device layer, so it is
+ * safe to unlock SRCU here.
+ */
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
+
+ return 0;
+
+put_fence:
+ dma_fence_put(job->fence);
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_arg_bos_put(job);
+cmd_put:
+ amdxdna_gem_put_obj(job->cmd_bo);
+free_job:
+ kfree(job);
+ return ret;
+}
+
+/*
+ * The submit command ioctl submits a command to the firmware. One firmware
+ * command may contain multiple command BOs for processing as a whole.
+ * The returned command sequence number can be used with the wait command ioctl.
+ */
+static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
+ struct amdxdna_drm_exec_cmd *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ u32 *arg_bo_hdls;
+ u32 cmd_bo_hdl;
+ int ret;
+
+ if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
+ return -EINVAL;
+ }
+
+ /* Only support single command for now. */
+ if (args->cmd_count != 1) {
+ XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
+ return -EINVAL;
+ }
+
+ cmd_bo_hdl = (u32)args->cmd_handles;
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
+
+ ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ args->arg_count, args->hwctx, &args->seq);
+ if (ret)
+ XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
+
+free_cmd_bo_hdls:
+ kfree(arg_bo_hdls);
+ if (!ret)
+ XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
+ return ret;
+}
+
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_exec_cmd *args = data;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ switch (args->type) {
+ case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
+ return amdxdna_drm_submit_execbuf(client, args);
+ }
+
+ XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
+ return -EINVAL;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
new file mode 100644
index 000000000000..80b0304193ec
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_CTX_H_
+#define _AMDXDNA_CTX_H_
+
+#include <linux/bitfield.h>
+
+#include "amdxdna_gem.h"
+
+struct amdxdna_hwctx_priv;
+
+enum ert_cmd_opcode {
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+};
+
+enum ert_cmd_state {
+ ERT_CMD_STATE_INVALID,
+ ERT_CMD_STATE_NEW,
+ ERT_CMD_STATE_QUEUED,
+ ERT_CMD_STATE_RUNNING,
+ ERT_CMD_STATE_COMPLETED,
+ ERT_CMD_STATE_ERROR,
+ ERT_CMD_STATE_ABORT,
+ ERT_CMD_STATE_SUBMITTED,
+ ERT_CMD_STATE_TIMEOUT,
+ ERT_CMD_STATE_NORESPONSE,
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_start_npu {
+ u64 buffer; /* instruction buffer address */
+ u32 buffer_size; /* size of buffer in bytes */
+ u32 prop_count; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
+ */
+struct amdxdna_cmd_chain {
+ u32 command_count;
+ u32 submit_index;
+ u32 error_index;
+ u32 reserved[3];
+ u64 data[] __counted_by(command_count);
+};
+
+/* Exec buffer command header format */
+#define AMDXDNA_CMD_STATE GENMASK(3, 0)
+#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
+#define AMDXDNA_CMD_COUNT GENMASK(22, 12)
+#define AMDXDNA_CMD_OPCODE GENMASK(27, 23)
+struct amdxdna_cmd {
+ u32 header;
+ u32 data[];
+};
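+
+/*
+ * Example of composing a header with the fields above (illustrative
+ * values only):
+ *
+ *	u32 header = FIELD_PREP(AMDXDNA_CMD_STATE, ERT_CMD_STATE_NEW) |
+ *		     FIELD_PREP(AMDXDNA_CMD_EXTRA_CU_MASK, 0) |
+ *		     FIELD_PREP(AMDXDNA_CMD_COUNT, 16) |
+ *		     FIELD_PREP(AMDXDNA_CMD_OPCODE, ERT_START_CU);
+ *
+ * FIELD_GET() with the same masks recovers each field, as the inline
+ * helpers below do.
+ */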
+
+struct amdxdna_hwctx {
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx_priv *priv;
+ char *name;
+
+ u32 id;
+ u32 max_opc;
+ u32 num_tiles;
+ u32 mem_size;
+ u32 fw_ctx_id;
+ u32 col_list_len;
+ u32 *col_list;
+ u32 start_col;
+ u32 num_col;
+#define HWCTX_STAT_INIT 0
+#define HWCTX_STAT_READY 1
+#define HWCTX_STAT_STOP 2
+ u32 status;
+ u32 old_status;
+
+ struct amdxdna_qos_info qos;
+ struct amdxdna_hwctx_param_config_cu *cus;
+ u32 syncobj_hdl;
+};
+
+#define drm_job_to_xdna_job(j) \
+ container_of(j, struct amdxdna_sched_job, base)
+
+struct amdxdna_sched_job {
+ struct drm_sched_job base;
+ struct kref refcnt;
+ struct amdxdna_hwctx *hwctx;
+ struct mm_struct *mm;
+ /* The fence to notify the DRM scheduler that the job is done by hardware */
+ struct dma_fence *fence;
+ /* The user can wait on this fence */
+ struct dma_fence *out_fence;
+ bool job_done;
+ u64 seq;
+ struct amdxdna_gem_obj *cmd_bo;
+ size_t bo_cnt;
+ struct drm_gem_object *bos[] __counted_by(bo_cnt);
+};
+
+static inline u32
+amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
+}
+
+static inline void
+amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ cmd->header &= ~AMDXDNA_CMD_STATE;
+ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
+}
+
+static inline enum ert_cmd_state
+amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+
+static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
+{
+ return GENMASK(hwctx->start_col + hwctx->num_col - 1,
+ hwctx->start_col);
+}
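+
+/*
+ * Worked example for amdxdna_hwctx_col_map() above: start_col = 2 and
+ * num_col = 3 yield GENMASK(4, 2) = 0x1c, i.e. one bit set per occupied
+ * column (illustrative values).
+ */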
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+void amdxdna_hwctx_suspend(struct amdxdna_client *client);
+void amdxdna_hwctx_resume(struct amdxdna_client *client);
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq);
+
+int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
+ u64 seq, u32 timeout);
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_CTX_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
new file mode 100644
index 000000000000..606433d73236
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iosys-map.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+
+static int
+amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+{
+ struct amdxdna_client *client = abo->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_mem *mem = &abo->mem;
+ u64 offset;
+ u32 align;
+ int ret;
+
+ align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
+ ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ mem->size, align,
+ 0, DRM_MM_INSERT_BEST);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ return ret;
+ }
+
+ mem->dev_addr = abo->mm_node.start;
+ offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
+ mem->userptr = abo->dev_heap->mem.userptr + offset;
+ mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
+ mem->nr_pages = mem->size >> PAGE_SHIFT;
+
+ if (use_vmap) {
+ mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!mem->kva) {
+ XDNA_ERR(xdna, "Failed to vmap");
+ drm_mm_remove_node(&abo->mm_node);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap_unlocked(gobj, &map);
+ mutex_destroy(&abo->lock);
+ drm_gem_shmem_free(&abo->base);
+}
+
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
+};
+
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
+ mem.notifier);
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+ XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
+ abo->mem.userptr, abo->mem.size, abo->type);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+ .invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return;
+
+ mmu_interval_notifier_remove(&abo->mem.notifier);
+ kvfree(abo->mem.pfns);
+ abo->mem.pfns = NULL;
+}
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
+ size_t len)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ u32 nr_pages;
+ int ret;
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return 0;
+
+ if (abo->mem.pfns)
+ return -EEXIST;
+
+ nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+ abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
+ GFP_KERNEL);
+ if (!abo->mem.pfns)
+ return -ENOMEM;
+
+ ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ current->mm,
+ addr,
+ len,
+ &amdxdna_hmm_ops);
+ if (ret) {
+ XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+ kvfree(abo->mem.pfns);
+ }
+ abo->mem.userptr = addr;
+
+ return ret;
+}
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages;
+ int ret;
+
+ ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret)
+ goto hmm_unreg;
+
+ num_pages = gobj->size >> PAGE_SHIFT;
+ /* Try to insert the pages */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ if (ret)
+ XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo);
+ return ret;
+}
+
+static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+{
+ return drm_gem_shmem_vm_ops.fault(vmf);
+}
+
+static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+{
+ drm_gem_shmem_vm_ops.open(vma);
+}
+
+static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = vma->vm_private_data;
+
+ amdxdna_hmm_unregister(to_xdna_obj(gobj));
+ drm_gem_shmem_vm_ops.close(vma);
+}
+
+static const struct vm_operations_struct amdxdna_gem_vm_ops = {
+ .fault = amdxdna_gem_vm_fault,
+ .open = amdxdna_gem_vm_open,
+ .close = amdxdna_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
+ .free = amdxdna_gem_obj_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = amdxdna_gem_obj_mmap,
+ .vm_ops = &amdxdna_gem_vm_ops,
+};
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = kzalloc(sizeof(*abo), GFP_KERNEL);
+ if (!abo)
+ return ERR_PTR(-ENOMEM);
+
+ abo->pinned = false;
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+ mutex_init(&abo->lock);
+
+ abo->mem.userptr = AMDXDNA_INVALID_ADDR;
+ abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
+ abo->mem.size = size;
+
+ return abo;
+}
+
+/* For drm_driver->gem_create_object callback */
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_obj(dev, size);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;
+
+ return to_gobj(abo);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_alloc_shmem(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+
+ abo = to_xdna_obj(&shmem->base);
+ abo->client = client;
+ abo->type = AMDXDNA_BO_SHMEM;
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_dev_heap(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > xdna->dev_info->dev_mem_size) {
+ XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
+ args->size, xdna->dev_info->dev_mem_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&client->mm_lock);
+ if (client->dev_heap) {
+ XDNA_DBG(client->xdna, "dev heap is already created");
+ ret = -EBUSY;
+ goto mm_unlock;
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto mm_unlock;
+ }
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_DEV_HEAP;
+ abo->client = client;
+ abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
+ drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+
+ client->dev_heap = abo;
+ drm_gem_object_get(to_gobj(abo));
+ mutex_unlock(&client->mm_lock);
+
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+ struct amdxdna_gem_obj *abo, *heap;
+ int ret;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (args->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
+ args->size, heap->mem.size);
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto mm_unlock;
+ }
+ to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
+ abo->type = AMDXDNA_BO_DEV;
+ abo->client = client;
+ abo->dev_heap = heap;
+ ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ goto mm_unlock;
+ }
+
+ drm_gem_object_get(to_gobj(heap));
+ drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
+
+ mutex_unlock(&client->mm_lock);
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ struct iosys_map map;
+ int ret;
+
+ if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+ XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+ ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
+ return abo;
+
+release_obj:
+ drm_gem_shmem_free(shmem);
+ return ERR_PTR(ret);
+}
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_create_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->flags || args->vaddr || !args->size)
+ return -EINVAL;
+
+ XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
+ args->type, args->vaddr, args->size, args->flags);
+ switch (args->type) {
+ case AMDXDNA_BO_SHMEM:
+ abo = amdxdna_drm_alloc_shmem(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV_HEAP:
+ abo = amdxdna_drm_create_dev_heap(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV:
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ break;
+ case AMDXDNA_BO_CMD:
+ abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (IS_ERR(abo))
+ return PTR_ERR(abo);
+
+ /* ready to publish object to userspace */
+ ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
+ if (ret) {
+ XDNA_ERR(xdna, "Create handle failed");
+ goto put_obj;
+ }
+
+ XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
+ args->handle, args->type, abo->mem.userptr,
+ abo->mem.dev_addr, abo->mem.size);
+put_obj:
+ /* Drop the object reference; the handle holds it now. */
+ drm_gem_object_put(to_gobj(abo));
+ return ret;
+}
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ int ret;
+
+ switch (abo->type) {
+ case AMDXDNA_BO_SHMEM:
+ case AMDXDNA_BO_DEV_HEAP:
+ ret = drm_gem_shmem_pin(&abo->base);
+ break;
+ case AMDXDNA_BO_DEV:
+ ret = drm_gem_shmem_pin(&abo->dev_heap->base);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
+ return ret;
+}
+
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
+{
+ int ret;
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ ret = amdxdna_gem_pin_nolock(abo);
+ mutex_unlock(&abo->lock);
+
+ return ret;
+}
+
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
+{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ drm_gem_shmem_unpin(&abo->base);
+ mutex_unlock(&abo->lock);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
+ return NULL;
+ }
+
+ abo = to_xdna_obj(gobj);
+ if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
+ return abo;
+
+ drm_gem_object_put(gobj);
+ return NULL;
+}
+
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_drm_get_bo_info *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ if (args->ext || args->ext_flags || args->pad)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
+ return -ENOENT;
+ }
+
+ abo = to_xdna_obj(gobj);
+ args->vaddr = abo->mem.userptr;
+ args->xdna_addr = abo->mem.dev_addr;
+
+ if (abo->type != AMDXDNA_BO_DEV)
+ args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
+ else
+ args->map_offset = AMDXDNA_INVALID_ADDR;
+
+ XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
+ args->handle, args->map_offset, args->vaddr, args->xdna_addr);
+
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+/*
+ * The sync BO ioctl makes sure the CPU cache is in sync with memory.
+ * This is required because the NPU is not a cache-coherent device. CPU cache
+ * flushing/invalidation is expensive, so it is best to handle this outside
+ * of the command submission path. This ioctl allows explicit cache
+ * flushing/invalidation outside of the critical path.
+ */
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_sync_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -ENOENT;
+ }
+ abo = to_xdna_obj(gobj);
+
+ ret = amdxdna_gem_pin(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
+ goto put_obj;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
+ else
+ drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+
+ amdxdna_gem_unpin(abo);
+
+ XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
+ args->handle, args->offset, args->size);
+
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
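+
+/*
+ * A hypothetical userspace sketch of the sync flow above; the exact uapi
+ * struct layout and ioctl macro should be taken from drm/amdxdna_accel.h,
+ * so treat the names here as assumptions:
+ *
+ *	struct amdxdna_drm_sync_bo sync = { .handle = bo_handle };
+ *
+ *	// After the CPU writes the buffer and before the NPU reads it
+ *	// (or the reverse), since no flush happens on command submission.
+ *	ioctl(fd, DRM_IOCTL_AMDXDNA_SYNC_BO, &sync);
+ */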
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
new file mode 100644
index 000000000000..8ccc0375dd9d
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_GEM_H_
+#define _AMDXDNA_GEM_H_
+
+struct amdxdna_mem {
+ u64 userptr;
+ void *kva;
+ u64 dev_addr;
+ size_t size;
+ struct page **pages;
+ u32 nr_pages;
+ struct mmu_interval_notifier notifier;
+ unsigned long *pfns;
+ bool map_invalid;
+};
+
+struct amdxdna_gem_obj {
+ struct drm_gem_shmem_object base;
+ struct amdxdna_client *client;
+ u8 type;
+ bool pinned;
+ struct mutex lock; /* Protects: pinned */
+ struct amdxdna_mem mem;
+
+ /* The members below are only initialized when needed */
+ struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
+ struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
+ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
+ u32 assigned_hwctx;
+};
+
+#define to_gobj(obj) (&(obj)->base.base)
+
+static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
+{
+ return container_of(gobj, struct amdxdna_gem_obj, base.base);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type);
+static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
+{
+ drm_gem_object_put(to_gobj(abo));
+}
+
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap);
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo);
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_GEM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
new file mode 100644
index 000000000000..e5301fac1397
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_mailbox.h"
+
+#define MB_ERR(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_DBG(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_WARN_ONCE(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+
+#define MAGIC_VAL 0x1D000000U
+#define MAGIC_VAL_MASK 0xFF000000
+#define MAX_MSG_ID_ENTRIES 256
+#define MSG_RX_TIMER 200 /* milliseconds */
+#define MAILBOX_NAME "xdna_mailbox"
+
+enum channel_res_type {
+ CHAN_RES_X2I,
+ CHAN_RES_I2X,
+ CHAN_RES_NUM
+};
+
+struct mailbox {
+ struct device *dev;
+ struct xdna_mailbox_res res;
+};
+
+struct mailbox_channel {
+ struct mailbox *mb;
+ struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
+ int msix_irq;
+ u32 iohub_int_addr;
+ struct xarray chan_xa;
+ u32 next_msgid;
+ u32 x2i_tail;
+
+ /* Received msg related fields */
+ struct workqueue_struct *work_q;
+ struct work_struct rx_work;
+ u32 i2x_head;
+ bool bad_state;
+};
+
+#define MSG_BODY_SZ GENMASK(10, 0)
+#define MSG_PROTO_VER GENMASK(23, 16)
+struct xdna_msg_header {
+ __u32 total_size;
+ __u32 sz_ver;
+ __u32 id;
+ __u32 opcode;
+} __packed;
+
+static_assert(sizeof(struct xdna_msg_header) == 16);
+
+struct mailbox_pkg {
+ struct xdna_msg_header header;
+ __u32 payload[];
+};
+
+/* The protocol version. */
+#define MSG_PROTOCOL_VERSION 0x1
+/* The tombstone value. */
+#define TOMBSTONE 0xDEADFACE
+
+struct mailbox_msg {
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ size_t pkg_size; /* package size in bytes */
+ struct mailbox_pkg pkg;
+};
+
+static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ writel(data, ringbuf_addr);
+}
+
+static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ return readl(ringbuf_addr);
+}
+
+static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+ int ret, value;
+
+ /* Poll until the value is non-zero */
+ ret = readx_poll_timeout(readl, ringbuf_addr, value,
+ value, 1 /* us */, 100);
+ if (ret < 0)
+ return ret;
+
+ *val = value;
+ return 0;
+}
+
+static inline void
+mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
+ mb_chann->i2x_head = headptr_val;
+}
+
+static inline void
+mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
+ mb_chann->x2i_tail = tailptr_val;
+}
+
+static inline u32
+mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
+}
+
+static inline u32
+mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
+}
+
+static inline u32
+mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mb_chann->res[type].rb_size;
+}
+
+static inline int mailbox_validate_msgid(int msg_id)
+{
+ return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
+}
+
+static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ u32 msg_id;
+ int ret;
+
+ ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
+ XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
+ &mb_chann->next_msgid, GFP_NOWAIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Add MAGIC_VAL to the higher bits.
+ */
+ msg_id |= MAGIC_VAL;
+ return msg_id;
+}
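+
+/*
+ * Example with a hypothetical ID: an xarray index of 0x2a becomes message
+ * ID 0x1d00002a on the wire. mailbox_validate_msgid() checks the 0x1d
+ * magic in the top byte, and masking with ~MAGIC_VAL_MASK recovers index
+ * 0x2a for the xarray lookup on the response path.
+ */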
+
+static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
+{
+ msg_id &= ~MAGIC_VAL_MASK;
+ xa_erase_irq(&mb_chann->chan_xa, msg_id);
+}
+
+static void mailbox_release_msg(struct mailbox_channel *mb_chann,
+ struct mailbox_msg *mb_msg)
+{
+ MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
+ mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ kfree(mb_msg);
+}
+
+static int
+mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ void __iomem *write_addr;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ u32 tmp_tail;
+
+ head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
+ tail = mb_chann->x2i_tail;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
+ start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
+ tmp_tail = tail + mb_msg->pkg_size;
+
+ if (tail < head && tmp_tail >= head)
+ goto no_space;
+
+ if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
+ mb_msg->pkg_size >= head))
+ goto no_space;
+
+ if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ writel(TOMBSTONE, write_addr);
+
+ /* tombstone is set. Write from the start of the ringbuf */
+ tail = 0;
+ }
+
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
+ mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);
+
+ trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
+ mb_msg->pkg.header.opcode,
+ mb_msg->pkg.header.id);
+
+ return 0;
+
+no_space:
+ return -ENOSPC;
+}
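+
+/*
+ * Wrap-around example for the checks above, with illustrative sizes: in a
+ * 1024-byte ring with head = 64 and tail = 1000, a 32-byte package would
+ * cross ringbuf_size - sizeof(u32), so TOMBSTONE is written at offset 1000
+ * and the package is copied from offset 0 instead. Had head been 16, the
+ * wrapped package (pkg_size 32 >= head 16) would overrun unread data, and
+ * the function returns -ENOSPC.
+ */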
+
+static int
+mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
+ void *data)
+{
+ struct mailbox_msg *mb_msg;
+ int msg_id;
+ int ret;
+
+ msg_id = header->id;
+ if (!mailbox_validate_msgid(msg_id)) {
+ MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ msg_id &= ~MAGIC_VAL_MASK;
+ mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
+ if (!mb_msg) {
+ MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+
+ kfree(mb_msg);
+ return ret;
+}
+
+static int mailbox_get_msg(struct mailbox_channel *mb_chann)
+{
+ struct xdna_msg_header header;
+ void __iomem *read_addr;
+ u32 msg_size, rest;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ int ret;
+
+ if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
+ return -EINVAL;
+ head = mb_chann->i2x_head;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
+ start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;
+
+ if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
+ return -EINVAL;
+ }
+
+ /* ringbuf empty */
+ if (head == tail)
+ return -ENOENT;
+
+ if (head == ringbuf_size)
+ head = 0;
+
+ /* Peek size of the message or TOMBSTONE */
+ read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
+ header.total_size = readl(read_addr);
+ /* size is TOMBSTONE, set next read from 0 */
+ if (header.total_size == TOMBSTONE) {
+ if (head < tail) {
+ MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
+ head, tail);
+ return -EINVAL;
+ }
+ mailbox_set_headptr(mb_chann, 0);
+ return 0;
+ }
+
+ if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
+ return -EINVAL;
+ }
+ msg_size = sizeof(header) + header.total_size;
+
+ if (msg_size > ringbuf_size - head || msg_size > tail - head) {
+ MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
+ msg_size, tail, head);
+ return -EINVAL;
+ }
+
+ rest = sizeof(header) - sizeof(u32);
+ read_addr += sizeof(u32);
+ memcpy_fromio((u32 *)&header + 1, read_addr, rest);
+ read_addr += rest;
+
+ ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);
+
+ mailbox_set_headptr(mb_chann, head + msg_size);
+ /* After updating head, it can equal ringbuf_size. This is expected. */
+ trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
+ header.opcode, header.id);
+
+ return ret;
+}
+
+static irqreturn_t mailbox_irq_handler(int irq, void *p)
+{
+ struct mailbox_channel *mb_chann = p;
+
+ trace_mbox_irq_handle(MAILBOX_NAME, irq);
+ /* Schedule a rx_work to call the callback functions */
+ queue_work(mb_chann->work_q, &mb_chann->rx_work);
+ /* Clear IOHUB register */
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void mailbox_rx_worker(struct work_struct *rx_work)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state, work aborted");
+ return;
+ }
+
+ while (1) {
+ /*
+ * If the return is 0, keep consuming the next message until there
+ * are no more messages or an error happens.
+ */
+ ret = mailbox_get_msg(mb_chann);
+ if (ret == -ENOENT)
+ break;
+
+ /* Other error means device doesn't look good, disable irq. */
+ if (unlikely(ret)) {
+ MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
+ WRITE_ONCE(mb_chann->bad_state, true);
+ disable_irq(mb_chann->msix_irq);
+ break;
+ }
+ }
+}
+
+int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout)
+{
+ struct xdna_msg_header *header;
+ struct mailbox_msg *mb_msg;
+ size_t pkg_size;
+ int ret;
+
+ pkg_size = sizeof(*header) + msg->send_size;
+ if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
+ MB_ERR(mb_chann, "Message size larger than ringbuf size");
+ return -EINVAL;
+ }
+
+ if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
+ MB_ERR(mb_chann, "Message must be 4 bytes align");
+ return -EINVAL;
+ }
+
+ /* The first word in the payload can NOT be TOMBSTONE */
+ if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
+ MB_ERR(mb_chann, "Tombstone in data");
+ return -EINVAL;
+ }
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state");
+ return -EPIPE;
+ }
+
+ mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
+ if (!mb_msg)
+ return -ENOMEM;
+
+ mb_msg->handle = msg->handle;
+ mb_msg->notify_cb = msg->notify_cb;
+ mb_msg->pkg_size = pkg_size;
+
+ header = &mb_msg->pkg.header;
+ /*
+ * The hardware uses total_size and size to split huge messages.
+ * We do not support that here, thus the two values are the same.
+ */
+ header->total_size = msg->send_size;
+ header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
+ FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
+ header->opcode = msg->opcode;
+ memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);
+
+ ret = mailbox_acquire_msgid(mb_chann, mb_msg);
+ if (unlikely(ret < 0)) {
+ MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
+ goto msg_id_failed;
+ }
+ header->id = ret;
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+
+ ret = mailbox_send_msg(mb_chann, mb_msg);
+ if (ret) {
+ MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
+ goto release_id;
+ }
+
+ return 0;
+
+release_id:
+ mailbox_release_msgid(mb_chann, header->id);
+msg_id_failed:
+ kfree(mb_msg);
+ return ret;
+}
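
For reference, a hedged caller-side sketch of the constraints xdna_mailbox_send_msg() enforces: the payload must be 4-byte aligned and its first word must not be TOMBSTONE. The opcode value and callback below are placeholders, not symbols from this driver:

/* Hypothetical sender; example_notify_cb and the 0x42 opcode are
 * illustrative only.
 */
static int example_notify_cb(void *handle, const u32 *data, size_t size)
{
	/* Response handling elided */
	return 0;
}

static int example_send(struct mailbox_channel *chann)
{
	u32 payload[2] = { 0x1, 0x2 };	/* 8 bytes: aligned, first word != TOMBSTONE */
	struct xdna_mailbox_msg msg = {
		.opcode = 0x42,		/* hypothetical opcode */
		.notify_cb = example_notify_cb,
		.send_data = (u8 *)payload,
		.send_size = sizeof(payload),
	};

	return xdna_mailbox_send_msg(chann, &msg, 2000 /* ms */);
}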
+
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mb,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 iohub_int_addr,
+ int mb_irq)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
+ pr_err("Ring buf size must be power of 2");
+ return NULL;
+ }
+
+ mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
+ if (!mb_chann)
+ return NULL;
+
+ mb_chann->mb = mb;
+ mb_chann->msix_irq = mb_irq;
+ mb_chann->iohub_int_addr = iohub_int_addr;
+ memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
+ memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
+
+ xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
+ mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
+
+ INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
+ mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
+ if (!mb_chann->work_q) {
+ MB_ERR(mb_chann, "Create workqueue failed");
+ goto free_and_out;
+ }
+
+ /* Everything looks good. Time to enable the irq handler */
+ ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
+ if (ret) {
+ MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
+ goto destroy_wq;
+ }
+
+ mb_chann->bad_state = false;
+
+ MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
+ return mb_chann;
+
+destroy_wq:
+ destroy_workqueue(mb_chann->work_q);
+free_and_out:
+ kfree(mb_chann);
+ return NULL;
+}
+
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
+{
+ struct mailbox_msg *mb_msg;
+ unsigned long msg_id;
+
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+ free_irq(mb_chann->msix_irq, mb_chann);
+ destroy_workqueue(mb_chann->work_q);
+ /* The channel is stopped; clean up and release any outstanding messages */
+
+ xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
+ mailbox_release_msg(mb_chann, mb_msg);
+
+ xa_destroy(&mb_chann->chan_xa);
+
+ MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
+ kfree(mb_chann);
+ return 0;
+}
+
+void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
+{
+ /* Disable the irq and wait for any in-flight handler. This might sleep. */
+ disable_irq(mb_chann->msix_irq);
+
+ /* Cancel RX work and wait for it to finish */
+ cancel_work_sync(&mb_chann->rx_work);
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+}
+
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res)
+{
+ struct mailbox *mb;
+
+ mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+ mb->dev = ddev->dev;
+
+ /* mailbox and ring buf base and size information */
+ memcpy(&mb->res, res, sizeof(*res));
+
+ return mb;
+}
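
Taken together, the exported functions in this file form a small lifecycle: create the subsystem, create a channel, exchange messages, then stop and destroy the channel. A hedged bring-up sketch, where the resource pointers, interrupt register and irq are placeholders for whatever the device-specific PCI code mapped:

/* Hypothetical bring-up flow; res, x2i, i2x, intr_reg and irq come from
 * device-specific setup and are placeholders here.
 */
static struct mailbox_channel *
example_bringup(struct drm_device *ddev, const struct xdna_mailbox_res *res,
		const struct xdna_mailbox_chann_res *x2i,
		const struct xdna_mailbox_chann_res *i2x,
		u32 intr_reg, int irq)
{
	struct mailbox *mb;

	mb = xdnam_mailbox_create(ddev, res);
	if (!mb)
		return NULL;

	/* Teardown later is xdna_mailbox_stop_channel() followed by
	 * xdna_mailbox_destroy_channel().
	 */
	return xdna_mailbox_create_channel(mb, x2i, i2x, intr_reg, irq);
}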
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
new file mode 100644
index 000000000000..57954c303bdd
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MAILBOX_H_
+#define _AIE2_MAILBOX_H_
+
+struct mailbox;
+struct mailbox_channel;
+
+/*
+ * xdna_mailbox_msg - message struct
+ *
+ * @opcode: opcode for firmware
+ * @handle: handle used for the notify callback
+ * @notify_cb: callback function to notify the sender when there is a response
+ * @send_data: pointer to the data to send
+ * @send_size: size of the data to send
+ *
+ * The mailbox may split the send data into multiple firmware messages if the
+ * data is too big. This is transparent to the sender, which receives a
+ * single notification.
+ */
+struct xdna_mailbox_msg {
+ u32 opcode;
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ u8 *send_data;
+ size_t send_size;
+};
+
+/*
+ * xdna_mailbox_res - mailbox hardware resource
+ *
+ * @ringbuf_base: ring buffer base address
+ * @ringbuf_size: ring buffer size
+ * @mbox_base: mailbox base address
+ * @mbox_size: mailbox size
+ * @name: mailbox name
+ */
+struct xdna_mailbox_res {
+ void __iomem *ringbuf_base;
+ size_t ringbuf_size;
+ void __iomem *mbox_base;
+ size_t mbox_size;
+ const char *name;
+};
+
+/*
+ * xdna_mailbox_chann_res - per-channel mailbox resources
+ *
+ * @rb_start_addr: ring buffer start address
+ * @rb_size: ring buffer size
+ * @mb_head_ptr_reg: mailbox head pointer register
+ * @mb_tail_ptr_reg: mailbox tail pointer register
+ */
+struct xdna_mailbox_chann_res {
+ u32 rb_start_addr;
+ u32 rb_size;
+ u32 mb_head_ptr_reg;
+ u32 mb_tail_ptr_reg;
+};
+
+/*
+ * xdnam_mailbox_create() -- create and initialize the mailbox subsystem
+ *
+ * @ddev: device pointer
+ * @res: SRAM and mailbox resources
+ *
+ * Return: On success, a handle to the mailbox subsystem.
+ * Otherwise, NULL.
+ */
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res);
+
+/*
+ * xdna_mailbox_create_channel() -- Create a mailbox channel instance
+ *
+ * @mailbox: the handle returned from xdnam_mailbox_create()
+ * @x2i: host to firmware mailbox resources
+ * @i2x: firmware to host mailbox resources
+ * @xdna_mailbox_intr_reg: register address of the MSI-X interrupt
+ * @mb_irq: Linux IRQ number associated with the mailbox MSI-X interrupt vector
+ *
+ * Return: On success, a handle to the mailbox channel. Otherwise, NULL.
+ */
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mailbox,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 xdna_mailbox_intr_reg,
+ int mb_irq);
+
+/*
+ * xdna_mailbox_destroy_channel() -- destroy mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ *
+ * Return: 0 on success; otherwise an error code.
+ */
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_stop_channel() -- stop mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ */
+void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_send_msg() -- Send a message
+ *
+ * @mailbox_chann: Mailbox channel handle
+ * @msg: message struct for message information
+ * @tx_timeout: the timeout value for sending the message in ms.
+ *
+ * Return: 0 on success; otherwise an error code.
+ */
+int xdna_mailbox_send_msg(struct mailbox_channel *mailbox_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout);
+
+#endif /* _AIE2_MAILBOX_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
new file mode 100644
index 000000000000..5139a9c96a91
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/completion.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size)
+{
+ struct xdna_notify *cb_arg = handle;
+ int ret;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(cb_arg->size != size)) {
+ cb_arg->error = -EINVAL;
+ goto out;
+ }
+
+ print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
+ 16, 4, data, cb_arg->size, true);
+ memcpy(cb_arg->data, data, cb_arg->size);
+out:
+ ret = cb_arg->error;
+ complete(&cb_arg->comp);
+ return ret;
+}
+
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg)
+{
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ ret = xdna_mailbox_send_msg(chann, msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&hdl->comp,
+ msecs_to_jiffies(RX_TIMEOUT));
+ if (!ret) {
+ XDNA_ERR(xdna, "Wait for completion timeout");
+ return -ETIME;
+ }
+
+ return hdl->error;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
new file mode 100644
index 000000000000..23e1317b79fe
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_MAILBOX_HELPER_H
+#define _AMDXDNA_MAILBOX_HELPER_H
+
+#define TX_TIMEOUT 2000 /* milliseconds */
+#define RX_TIMEOUT 5000 /* milliseconds */
+
+struct amdxdna_dev;
+
+struct xdna_notify {
+ struct completion comp;
+ u32 *data;
+ size_t size;
+ int error;
+};
+
+#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+ struct name##_req req = { 0 }; \
+ struct name##_resp resp = { status }; \
+ struct xdna_notify hdl = { \
+ .error = 0, \
+ .data = (u32 *)&resp, \
+ .size = sizeof(resp), \
+ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ }; \
+ struct xdna_mailbox_msg msg = { \
+ .send_data = (u8 *)&req, \
+ .send_size = sizeof(req), \
+ .handle = &hdl, \
+ .opcode = op, \
+ .notify_cb = xdna_msg_cb, \
+ }
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size);
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg);
+
+#endif /* _AMDXDNA_MAILBOX_HELPER_H */
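
A hedged sketch of how these helpers compose: DECLARE_XDNA_MSG_COMMON() declares an on-stack request/response pair plus the completion-backed notify handle, and xdna_send_msg_wait() blocks until xdna_msg_cb() completes it. The example_req/example_resp layouts and EXAMPLE_OP opcode are hypothetical, not part of the firmware protocol:

/* Hypothetical message layout; real layouts come from the firmware
 * protocol headers.
 */
struct example_req { u32 arg; };
struct example_resp { u32 status; };
#define EXAMPLE_OP 0x10		/* hypothetical opcode */

static int example_query(struct amdxdna_dev *xdna, struct mailbox_channel *chann)
{
	DECLARE_XDNA_MSG_COMMON(example, EXAMPLE_OP, 0);

	req.arg = 1;
	/* Waits up to RX_TIMEOUT ms for the response; resp is filled in by xdna_msg_cb() */
	return xdna_send_msg_wait(xdna, chann, &msg);
}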
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
new file mode 100644
index 000000000000..f5b8497cf5ad
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
+/*
+ * Bind the driver based on the (vendor_id, device_id) pair, then use the
+ * (device_id, rev_id) pair as a key to select a device. Devices with the
+ * same device_id present a very similar interface to the host driver.
+ */
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static const struct amdxdna_device_id amdxdna_ids[] = {
+ { 0x1502, 0x0, &dev_npu1_info },
+ { 0x17f0, 0x0, &dev_npu2_info },
+ { 0x17f0, 0x10, &dev_npu4_info },
+ { 0x17f0, 0x11, &dev_npu5_info },
+ { 0x17f0, 0x20, &dev_npu6_info },
+ {0}
+};
+
+static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
+ return ret;
+ }
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
+
+ client->pid = pid_nr(rcu_access_pointer(filp->pid));
+ client->xdna = xdna;
+
+ client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
+ if (IS_ERR(client->sva)) {
+ ret = PTR_ERR(client->sva);
+ XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
+ goto failed;
+ }
+ client->pasid = iommu_sva_get_pasid(client->sva);
+ if (client->pasid == IOMMU_PASID_INVALID) {
+ XDNA_ERR(xdna, "SVA get pasid failed");
+ ret = -ENODEV;
+ goto unbind_sva;
+ }
+ mutex_init(&client->hwctx_lock);
+ init_srcu_struct(&client->hwctx_srcu);
+ xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
+ mutex_init(&client->mm_lock);
+
+ mutex_lock(&xdna->dev_lock);
+ list_add_tail(&client->node, &xdna->client_list);
+ mutex_unlock(&xdna->dev_lock);
+
+ filp->driver_priv = client;
+ client->filp = filp;
+
+ XDNA_DBG(xdna, "pid %d opened", client->pid);
+ return 0;
+
+unbind_sva:
+ iommu_sva_unbind_device(client->sva);
+failed:
+ kfree(client);
+put_rpm:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return ret;
+}
+
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+
+ XDNA_DBG(xdna, "closing pid %d", client->pid);
+
+ xa_destroy(&client->hwctx_xa);
+ cleanup_srcu_struct(&client->hwctx_srcu);
+ mutex_destroy(&client->hwctx_lock);
+ mutex_destroy(&client->mm_lock);
+ if (client->dev_heap)
+ drm_gem_object_put(to_gobj(client->dev_heap));
+
+ iommu_sva_unbind_device(client->sva);
+
+ XDNA_DBG(xdna, "pid %d closed", client->pid);
+ kfree(client);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+}
+
+static int amdxdna_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *filp = f->private_data;
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = client->xdna;
+ int idx;
+
+ XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return 0;
+
+ mutex_lock(&xdna->dev_lock);
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+ amdxdna_hwctx_remove_all(client);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_info *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_aie_info)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_aie_info(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_set_state *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->set_aie_state)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->set_aie_state(client, args);
+ mutex_unlock(&xdna->dev_lock);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
+ /* Context */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
+ /* BO */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
+ /* Execution */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
+ /* AIE hardware */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
+};
+
+static const struct file_operations amdxdna_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .flush = amdxdna_flush,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+const struct drm_driver amdxdna_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .fops = &amdxdna_fops,
+ .name = "amdxdna_accel_driver",
+ .desc = "AMD XDNA DRM implementation",
+ .open = amdxdna_drm_open,
+ .postclose = amdxdna_drm_close,
+ .ioctls = amdxdna_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
+
+ .gem_create_object = amdxdna_gem_create_object_cb,
+};
+
+static const struct amdxdna_dev_info *
+amdxdna_get_dev_info(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
+ if (pdev->device == amdxdna_ids[i].device &&
+ pdev->revision == amdxdna_ids[i].revision)
+ return amdxdna_ids[i].dev_info;
+ }
+ return NULL;
+}
+
+static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
+ if (IS_ERR(xdna))
+ return PTR_ERR(xdna);
+
+ xdna->dev_info = amdxdna_get_dev_info(pdev);
+ if (!xdna->dev_info)
+ return -ENODEV;
+
+ drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
+ init_rwsem(&xdna->notifier_lock);
+ INIT_LIST_HEAD(&xdna->client_list);
+ pci_set_drvdata(pdev, xdna);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xdna->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->init(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ if (ret) {
+ XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = amdxdna_sysfs_init(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
+ goto failed_dev_fini;
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+
+ ret = drm_dev_register(&xdna->ddev, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
+ pm_runtime_forbid(dev);
+ goto failed_sysfs_fini;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+failed_sysfs_fini:
+ amdxdna_sysfs_fini(xdna);
+failed_dev_fini:
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static void amdxdna_remove(struct pci_dev *pdev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct amdxdna_client *client;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+
+ drm_dev_unplug(&xdna->ddev);
+ amdxdna_sysfs_fini(xdna);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ while (client) {
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+
+ amdxdna_hwctx_remove_all(client);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ }
+
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+}
+
+static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->suspend)
+ xdna->dev_info->ops->suspend(xdna);
+
+ return 0;
+}
+
+static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->resume)
+ return xdna->dev_info->ops->resume(xdna);
+
+ return 0;
+}
+
+static int amdxdna_pmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+
+ mutex_lock(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_suspend(client);
+
+ amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_pmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+ int ret;
+
+ XDNA_INFO(xdna, "firmware resuming...");
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "resume NPU firmware failed");
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+ }
+
+ XDNA_INFO(xdna, "hardware context resuming...");
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_resume(client);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_rpmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
+ return ret;
+}
+
+static int amdxdna_rpmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
+ return ret;
+}
+
+static const struct dev_pm_ops amdxdna_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
+ RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+};
+
+static struct pci_driver amdxdna_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_ids,
+ .probe = amdxdna_probe,
+ .remove = amdxdna_remove,
+ .driver.pm = &amdxdna_pm_ops,
+};
+
+module_pci_driver(amdxdna_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_DESCRIPTION("amdxdna driver");
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
new file mode 100644
index 000000000000..37848a8d8031
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PCI_DRV_H_
+#define _AMDXDNA_PCI_DRV_H_
+
+#include <linux/xarray.h>
+
+#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
+#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
+#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
+
+#define XDNA_MBZ_DBG(xdna, ptr, sz) \
+ ({ \
+ int __i; \
+ int __ret = 0; \
+ u8 *__ptr = (u8 *)(ptr); \
+ for (__i = 0; __i < (sz); __i++) { \
+ if (__ptr[__i]) { \
+ XDNA_DBG(xdna, "MBZ check failed"); \
+ __ret = -EINVAL; \
+ break; \
+ } \
+ } \
+ __ret; \
+ })
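
As a usage sketch, XDNA_MBZ_DBG() is suited to validating must-be-zero regions of user-supplied ioctl arguments; the args struct and its pad field here are hypothetical:

/* Hypothetical ioctl argument check; struct example_args is illustrative. */
struct example_args {
	u32 value;
	u8 pad[12];	/* reserved, must be zero for forward compatibility */
};

static int example_validate(struct amdxdna_dev *xdna, struct example_args *args)
{
	/* Returns -EINVAL if any reserved byte is non-zero */
	return XDNA_MBZ_DBG(xdna, args->pad, sizeof(args->pad));
}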
+
+#define to_xdna_dev(drm_dev) \
+ ((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
+
+extern const struct drm_driver amdxdna_drm_drv;
+
+struct amdxdna_client;
+struct amdxdna_dev;
+struct amdxdna_drm_get_info;
+struct amdxdna_drm_set_state;
+struct amdxdna_gem_obj;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+/*
+ * struct amdxdna_dev_ops - Device hardware operation callbacks
+ */
+struct amdxdna_dev_ops {
+ int (*init)(struct amdxdna_dev *xdna);
+ void (*fini)(struct amdxdna_dev *xdna);
+ int (*resume)(struct amdxdna_dev *xdna);
+ void (*suspend)(struct amdxdna_dev *xdna);
+ int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
+ int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+ void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
+ int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+ int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
+ int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+};
+
+/*
+ * struct amdxdna_dev_info - Device hardware information
+ * Records static device information, such as the reg, mbox, PSP, and SMU BAR indexes.
+ */
+struct amdxdna_dev_info {
+ int reg_bar;
+ int mbox_bar;
+ int sram_bar;
+ int psp_bar;
+ int smu_bar;
+ int device_type;
+ int first_col;
+ u32 dev_mem_buf_shift;
+ u64 dev_mem_base;
+ size_t dev_mem_size;
+ char *vbnv;
+ const struct amdxdna_dev_priv *dev_priv;
+ const struct amdxdna_dev_ops *ops;
+};
+
+struct amdxdna_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 sub;
+ u32 build;
+};
+
+struct amdxdna_dev {
+ struct drm_device ddev;
+ struct amdxdna_dev_hdl *dev_handle;
+ const struct amdxdna_dev_info *dev_info;
+ void *xrs_hdl;
+
+ struct mutex dev_lock; /* per device lock */
+ struct list_head client_list;
+ struct amdxdna_fw_ver fw_ver;
+ struct rw_semaphore notifier_lock; /* for mmu notifier */
+};
+
+/*
+ * struct amdxdna_device_id - PCI device info
+ */
+struct amdxdna_device_id {
+ unsigned short device;
+ u8 revision;
+ const struct amdxdna_dev_info *dev_info;
+};
+
+/*
+ * struct amdxdna_client - amdxdna client
+ * A per-fd data structure for managing contexts and other user-process state.
+ */
+struct amdxdna_client {
+ struct list_head node;
+ pid_t pid;
+ struct mutex hwctx_lock; /* protect hwctx */
+ /* do NOT wait on this srcu while hwctx_lock is held */
+ struct srcu_struct hwctx_srcu;
+ struct xarray hwctx_xa;
+ u32 next_hwctxid;
+ struct amdxdna_dev *xdna;
+ struct drm_file *filp;
+
+ struct mutex mm_lock; /* protect memory related */
+ struct amdxdna_gem_obj *dev_heap;
+
+ struct iommu_sva *sva;
+ int pasid;
+};
+
+#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
+ xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
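
A brief usage sketch of the iterator above; the cursor variables follow xa_for_each() conventions, and callers are expected to hold the appropriate protection (hwctx_lock or hwctx_srcu):

/* Hypothetical walk over one client's hardware contexts. */
static void example_log_hwctxs(struct amdxdna_client *client)
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
		XDNA_DBG(client->xdna, "hwctx id %lu", hwctx_id);
}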
+
+/* Add device info below */
+extern const struct amdxdna_dev_info dev_npu1_info;
+extern const struct amdxdna_dev_info dev_npu2_info;
+extern const struct amdxdna_dev_info dev_npu4_info;
+extern const struct amdxdna_dev_info dev_npu5_info;
+extern const struct amdxdna_dev_info dev_npu6_info;
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PCI_DRV_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_sysfs.c b/drivers/accel/amdxdna/amdxdna_sysfs.c
new file mode 100644
index 000000000000..f27e4ee960a0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_sysfs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/types.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+static ssize_t vbnv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", xdna->dev_info->vbnv);
+}
+static DEVICE_ATTR_RO(vbnv);
+
+static ssize_t device_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", xdna->dev_info->device_type);
+}
+static DEVICE_ATTR_RO(device_type);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d.%d.%d.%d\n", xdna->fw_ver.major,
+ xdna->fw_ver.minor, xdna->fw_ver.sub,
+ xdna->fw_ver.build);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static struct attribute *amdxdna_attrs[] = {
+ &dev_attr_device_type.attr,
+ &dev_attr_vbnv.attr,
+ &dev_attr_fw_version.attr,
+ NULL,
+};
+
+static struct attribute_group amdxdna_attr_group = {
+ .attrs = amdxdna_attrs,
+};
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ ret = sysfs_create_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+ if (ret)
+ XDNA_ERR(xdna, "Create attr group failed");
+
+ return ret;
+}
+
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna)
+{
+ sysfs_remove_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+}
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
new file mode 100644
index 000000000000..e4f6dac7d00f
--- /dev/null
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* Address definition from NPU1 docs */
+#define MPNPU_PUB_SEC_INTR 0x3010090
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010094
+#define MPNPU_PUB_SCRATCH2 0x30100A0
+#define MPNPU_PUB_SCRATCH3 0x30100A4
+#define MPNPU_PUB_SCRATCH4 0x30100A8
+#define MPNPU_PUB_SCRATCH5 0x30100AC
+#define MPNPU_PUB_SCRATCH6 0x30100B0
+#define MPNPU_PUB_SCRATCH7 0x30100B4
+#define MPNPU_PUB_SCRATCH9 0x30100BC
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x30A0000
+#define MPNPU_SRAM_X2I_MAILBOX_1 0x30A2000
+#define MPNPU_SRAM_I2X_MAILBOX_15 0x30BF000
+
+#define MPNPU_APERTURE0_BASE 0x3000000
+#define MPNPU_APERTURE1_BASE 0x3080000
+#define MPNPU_APERTURE2_BASE 0x30C0000
+
+/* PCIe BAR Index for NPU1 */
+#define NPU1_REG_BAR_INDEX 0
+#define NPU1_MBOX_BAR_INDEX 4
+#define NPU1_PSP_BAR_INDEX 0
+#define NPU1_SMU_BAR_INDEX 0
+#define NPU1_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU1_REG_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_MBOX_BAR_BASE MPNPU_APERTURE2_BASE
+#define NPU1_PSP_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
+
+const struct rt_config npu1_default_rt_cfg[] = {
+ { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu1_dpm_clk_table[] = {
+ {400, 800},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {720, 1309},
+ {720, 1309},
+ {847, 1600},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu1_dev_priv = {
+ .fw_path = "amdnpu/1502_00/npu.sbin",
+ .protocol_major = 0x5,
+ .protocol_minor = 0x7,
+ .rt_config = npu1_default_rt_cfg,
+ .dpm_clk_tbl = npu1_dpm_clk_table,
+ .col_align = COL_ALIGN_NONE,
+ .mbox_dev_addr = NPU1_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU1_PSP, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU1_PSP, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU1_PSP, MPNPU_PUB_SEC_INTR),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU1_SMU, MPNPU_PUB_SCRATCH5),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU1_SMU, MPNPU_PUB_PWRMGMT_INTR),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ },
+ .hw_ops = {
+ .set_dpm = npu1_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu1_info = {
+ .reg_bar = NPU1_REG_BAR_INDEX,
+ .mbox_bar = NPU1_MBOX_BAR_INDEX,
+ .sram_bar = NPU1_SRAM_BAR_INDEX,
+ .psp_bar = NPU1_PSP_BAR_INDEX,
+ .smu_bar = NPU1_SMU_BAR_INDEX,
+ .first_col = 1,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu1",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu1_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
new file mode 100644
index 000000000000..a081cac75ee0
--- /dev/null
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU2 */
+#define NPU2_REG_BAR_INDEX 0
+#define NPU2_MBOX_BAR_INDEX 0
+#define NPU2_PSP_BAR_INDEX 4
+#define NPU2_SMU_BAR_INDEX 5
+#define NPU2_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU2_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu2_dev_priv = {
+ .fw_path = "amdnpu/17f0_00/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 0x6,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU2_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU2_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU2_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU2_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU2_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU2_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU2_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu2_info = {
+ .reg_bar = NPU2_REG_BAR_INDEX,
+ .mbox_bar = NPU2_MBOX_BAR_INDEX,
+ .sram_bar = NPU2_SRAM_BAR_INDEX,
+ .psp_bar = NPU2_PSP_BAR_INDEX,
+ .smu_bar = NPU2_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu2",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu2_dev_priv,
+ .ops = &aie2_ops, /* NPU2 can share NPU1's callback */
+};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
new file mode 100644
index 000000000000..9f2e33182ec6
--- /dev/null
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU4 */
+#define NPU4_REG_BAR_INDEX 0
+#define NPU4_MBOX_BAR_INDEX 0
+#define NPU4_PSP_BAR_INDEX 4
+#define NPU4_SMU_BAR_INDEX 5
+#define NPU4_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU4_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+const struct rt_config npu4_default_rt_cfg[] = {
+ { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu4_dpm_clk_table[] = {
+ {396, 792},
+ {600, 1056},
+ {792, 1152},
+ {975, 1267},
+ {975, 1267},
+ {1056, 1408},
+ {1152, 1584},
+ {1267, 1800},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu4_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU4_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU4_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU4_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU4_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU4_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU4_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU4_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu4_info = {
+ .reg_bar = NPU4_REG_BAR_INDEX,
+ .mbox_bar = NPU4_MBOX_BAR_INDEX,
+ .sram_bar = NPU4_SRAM_BAR_INDEX,
+ .psp_bar = NPU4_PSP_BAR_INDEX,
+ .smu_bar = NPU4_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu4",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu4_dev_priv,
+ .ops = &aie2_ops, /* NPU4 can share NPU1's callback */
+};
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
new file mode 100644
index 000000000000..5f1cf83461c4
--- /dev/null
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU5 */
+#define NPU5_REG_BAR_INDEX 0
+#define NPU5_MBOX_BAR_INDEX 0
+#define NPU5_PSP_BAR_INDEX 4
+#define NPU5_SMU_BAR_INDEX 5
+#define NPU5_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU5_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu5_dev_priv = {
+ .fw_path = "amdnpu/17f0_11/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU5_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU5_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU5_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU5_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU5_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU5_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU5_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu5_info = {
+ .reg_bar = NPU5_REG_BAR_INDEX,
+ .mbox_bar = NPU5_MBOX_BAR_INDEX,
+ .sram_bar = NPU5_SRAM_BAR_INDEX,
+ .psp_bar = NPU5_PSP_BAR_INDEX,
+ .smu_bar = NPU5_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu5",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu5_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
new file mode 100644
index 000000000000..94a7005685a7
--- /dev/null
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU6 */
+#define NPU6_REG_BAR_INDEX 0
+#define NPU6_MBOX_BAR_INDEX 0
+#define NPU6_PSP_BAR_INDEX 4
+#define NPU6_SMU_BAR_INDEX 5
+#define NPU6_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu6_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU6_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+
+};
+
+const struct amdxdna_dev_info dev_npu6_info = {
+ .reg_bar = NPU6_REG_BAR_INDEX,
+ .mbox_bar = NPU6_MBOX_BAR_INDEX,
+ .sram_bar = NPU6_SRAM_BAR_INDEX,
+ .psp_bar = NPU6_PSP_BAR_INDEX,
+ .smu_bar = NPU6_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu6",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu6_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index b83141f58319..9f212b17611a 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -199,7 +199,6 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
mutex_init(&ctx->ts_reg_lock);
dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
- get_task_comm(task_comm, current), ctx->asid);
+ current->comm, ctx->asid);
}
return 0;
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index e0cf3b4343bb..30277ae410d4 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
- msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+ secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 708dfd10f39c..596c52e8aa26 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -101,7 +101,6 @@ static const struct drm_driver hl_driver = {
.major = LINUX_VERSION_MAJOR,
.minor = LINUX_VERSION_PATCHLEVEL,
.patchlevel = LINUX_VERSION_SUBLEVEL,
- .date = "20190505",
.fops = &hl_fops,
.open = hl_device_open,
@@ -362,8 +361,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
- hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
- MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 1dd6e23172ca..8729a0c57d78 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
retcode = -EFAULT;
out_err:
- if (retcode) {
- char task_comm[TASK_COMM_LEN];
-
+ if (retcode)
dev_dbg_ratelimited(dev,
"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
- }
+ task_pid_nr(current), current->comm, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
@@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- char task_comm[TASK_COMM_LEN];
-
dev_dbg_ratelimited(hdev->dev_ctrl,
"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ task_pid_nr(current), current->comm, cmd, nr);
return -ENOTTY;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index ca2bf47ce248..38cf1c342c72 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
ret = ivpu_hw_sched_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
}
return 0;
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
err_diagnose_failure:
ivpu_hw_diagnose_failure(vdev);
ivpu_mmu_evtq_dump(vdev);
@@ -458,15 +462,7 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-#ifdef DRIVER_DATE
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-#else
- .date = UTS_RELEASE,
.major = 1,
-#endif
};
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 949f4233946c..5060c5dd40d1 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -78,8 +78,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
- pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -115,41 +115,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_jsm_state_dump(vdev);
- ivpu_dev_coredump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -309,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -325,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -342,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
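
The two helpers introduced above share one pattern: runtime PM is disabled for the whole reset, the hardware is only touched if it is not already suspended, and the runtime-PM status is re-declared to match the actual outcome of the resume. A condensed sketch of that shape (assumes the ivpu helpers and struct layout from this patch; illustrative, not a drop-in implementation):

#include <linux/pm_runtime.h>

#include "ivpu_drv.h"	/* struct ivpu_device, as in drivers/accel/ivpu */
#include "ivpu_pm.h"

static void example_reset(struct ivpu_device *vdev)
{
	/* Block autosuspend and take the reset lock. */
	ivpu_pm_reset_begin(vdev);

	/* Quiesce the hardware only if it is actually powered up. */
	if (!pm_runtime_status_suspended(vdev->drm.dev))
		ivpu_suspend(vdev);

	/* Resume, set the runtime-PM status to match, re-enable runtime PM. */
	ivpu_pm_reset_complete(vdev);
}

Note also the reordered resume path at the top of the file: the device must be put back into D0 with pci_set_power_state() before pci_restore_state() writes its saved config space, since config writes to a device in a low-power state may not take effect.
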
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
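
The validation fix above guards the offset + size sum with check_add_overflow(), so a wrapping addition can no longer slip past the total_size comparison. A minimal sketch of the same check (names are illustrative):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int example_validate_slice(u64 offset, u64 size, u64 total_size)
{
	u64 end;

	/* check_add_overflow() returns true if offset + size wrapped. */
	if (check_add_overflow(offset, size, &end) || end > total_size)
		return -EINVAL;

	return 0;
}

With plain addition, offset = U64_MAX and size = 2 would yield end = 1 and pass the bounds check; the overflow-aware form rejects it.
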
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 4ddf89308ff5..81819b9ef8d4 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -208,7 +208,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 6d772143d612..21d58aed0deb 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -772,8 +772,7 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
cancel_work_sync(&context->fw_work);
cancel_work_sync(&context->dump_work);
- if (context->mem_dump)
- vfree(context->mem_dump);
+ vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40208a0f5dfb..797070fc9a3f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index ca87a0939135..f7fb7205028d 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -251,6 +251,10 @@ static int __init extlog_init(void)
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
+ if (!extlog_l1_hdr) {
+ rc = -ENOMEM;
+ goto err_release_l1_hdr;
+ }
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
@@ -268,6 +272,10 @@ static int __init extlog_init(void)
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
+ if (!extlog_l1_addr) {
+ rc = -ENOMEM;
+ goto err_release_l1_dir;
+ }
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
@@ -279,6 +287,10 @@ static int __init extlog_init(void)
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
+ if (!elog_addr) {
+ rc = -ENOMEM;
+ goto err_release_elog;
+ }
rc = -ENOMEM;
/* allocate buffer to save elog record */
@@ -300,6 +312,8 @@ err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
+err_release_l1_hdr:
+ release_mem_region(l1_dirbase, l1_hdr_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
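
Each new NULL check above jumps to a label that releases exactly what was acquired before the failing acpi_os_map_iomem() call, extending the existing unwind ladder. A reduced sketch of the shape (plain ioremap() stands in for acpi_os_map_iomem(); resource names are placeholders):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static int __init example_map_two(u64 base, u64 hdr_size, u64 dir_size)
{
	void __iomem *hdr, *dir;
	int rc;

	hdr = ioremap(base, hdr_size);
	if (!hdr)
		return -ENOMEM;

	dir = ioremap(base + hdr_size, dir_size);
	if (!dir) {
		rc = -ENOMEM;
		goto err_unmap_hdr;	/* undo only what was set up */
	}

	return 0;	/* both mappings stay live for the driver */

err_unmap_hdr:
	iounmap(hdr);
	return rc;
}
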
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 4ec20fd56985..3fde4496f8a2 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -46,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the length of the EDID (positive value) on success, or a negative
+ * error code on failure.

+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
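
The _DDC rework above changes ownership: callers now receive a kmemdup()'d buffer they must kfree(), and the temporary ACPI object is always released inside the helper. The core of that pattern, reduced (illustrative names):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy @len bytes of EDID out of an ACPI-owned buffer; caller kfree()s *edid. */
static int example_copy_edid(const void *acpi_data, int len, void **edid)
{
	*edid = kmemdup(acpi_data, len, GFP_KERNEL);

	return *edid ? len : -ENOMEM;
}

This also lets acpi_video_get_edid() return the real EDID length directly instead of the requested probe length.
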
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 79bbfe00d241..b8543a34caea 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_status acpi_hw_enable_all_runtime_gpes(void);
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 07789f0b59bc..b72772494655 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
-static int ghes_panic_timeout __read_mostly = 30;
-
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -983,14 +981,16 @@ static void __ghes_panic(struct ghes *ghes,
struct acpi_hest_generic_status *estatus,
u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
+ const char *msg = GHES_PFX "Fatal hardware error";
+
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
- /* reboot to log the error! */
if (!panic_timeout)
- panic_timeout = ghes_panic_timeout;
- panic("Fatal hardware error!");
+ pr_emerg("%s but panic disabled\n", msg);
+
+ panic(msg);
}
static int ghes_proc(struct ghes *ghes)
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 3561553eff8b..70f8290b659d 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -163,7 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
- int cnt = 0;
+ u32 cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -188,13 +188,17 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
cnt++;
if (cnt != gtdt->platform_timer_count) {
+ cnt = min(cnt, gtdt->platform_timer_count);
+ pr_err(FW_BUG "limiting Platform Timer count to %u\n", cnt);
+ }
+
+ if (!cnt) {
acpi_gtdt_desc.platform_timer = NULL;
- pr_err(FW_BUG "invalid timer data.\n");
- return -EINVAL;
+ return 0;
}
if (platform_timer_count)
- *platform_timer_count = gtdt->platform_timer_count;
+ *platform_timer_count = cnt;
return 0;
}
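
Instead of aborting GTDT parsing when the subtable walk finds fewer timers than the header advertises, the count is now clamped and flagged as a firmware bug; only a count of zero disables the platform timers. The clamping step in isolation (sketch):

#include <linux/minmax.h>
#include <linux/printk.h>
#include <linux/types.h>

static u32 example_clamp_timer_count(u32 found, u32 advertised)
{
	if (found != advertised) {
		found = min(found, advertised);
		/* FW_BUG tags the message as a firmware defect, not a kernel one. */
		pr_err(FW_BUG "limiting Platform Timer count to %u\n", found);
	}

	return found;
}
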
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3d5342f8d7b3..6760330a8af5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,7 +717,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
}
list_del_init(&hook->list);
- pr_info("extension unregistered: %s\n", hook->name);
+ pr_info("hook unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
@@ -751,18 +751,18 @@ void battery_hook_register(struct acpi_battery_hook *hook)
if (hook->add_battery(battery->bat, hook)) {
/*
* If an add_battery() call returns non-zero,
- * the registration of the extension has failed,
+ * the registration of the hook has failed,
* and we will not add it to the list of loaded
* hooks.
*/
- pr_err("extension failed to load: %s", hook->name);
+ pr_err("hook failed to load: %s", hook->name);
battery_hook_unregister_unlocked(hook);
goto end;
}
power_supply_changed(battery->bat);
}
- pr_info("new extension: %s\n", hook->name);
+ pr_info("new hook: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
@@ -805,10 +805,10 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
- * The notification of the extensions has failed, to
- * prevent further errors we will unload the extension.
+ * The notification of the hook has failed; to
+ * prevent further errors we will unload the hook.
*/
- pr_err("error in extension, unloading: %s",
+ pr_err("error in hook, unloading: %s",
hook_node->name);
battery_hook_unregister_unlocked(hook_node);
}
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index d1d9c9289087..35ece8e9f15d 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -29,7 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
-static BIN_ATTR_SIMPLE_RO(image);
+static __ro_after_init BIN_ATTR_SIMPLE_RO(image);
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
@@ -40,14 +40,14 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
-static struct bin_attribute *bgrt_bin_attributes[] = {
+static const struct bin_attribute *const bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
static const struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
- .bin_attrs = bgrt_bin_attributes,
+ .bin_attrs_new = bgrt_bin_attributes,
};
int __init acpi_parse_bgrt(struct acpi_table_header *table)
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 624fce67ce43..952216c67d58 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -152,6 +152,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1064", 0},
{"INTC106B", 0},
{"INTC10A3", 0},
+ {"INTC10D7", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 3d3edd81b172..e8caf4106ff9 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -236,6 +236,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
+ {"INTC10D8", 0},
+ {"INTC10D9", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 014ada759954..aef7aca2161d 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -55,6 +55,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC10A3"},
{"INTC10A4"},
{"INTC10A5"},
+ {"INTC10D4"},
+ {"INTC10D5"},
+ {"INTC10D6"},
+ {"INTC10D7"},
+ {"INTC10D8"},
+ {"INTC10D9"},
{""},
};
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index db25a3898af7..488b51e2cb31 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -19,6 +19,7 @@
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
+ {"INTC10D6", }, /* Fan for Panther Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 3ea9cfcff46e..10016f52f4f4 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_unregister;
+ }
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
- goto err_end;
+ goto err_remove_link;
}
return 0;
+err_remove_link:
+ sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
+err_unregister:
+ thermal_cooling_device_unregister(cdev);
err_end:
if (fan->acpi4)
acpi_fan_delete_attributes(device);
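
The fan probe fix above completes the unwind chain: a failure creating either sysfs link now removes the earlier link and unregisters the cooling device instead of leaking it. The corrected shape, reduced to the two links (names follow the patch):

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>

static int example_create_links(struct device *dev,
				struct thermal_cooling_device *cdev)
{
	int result;

	result = sysfs_create_link(&dev->kobj, &cdev->device.kobj, "thermal_cooling");
	if (result)
		goto err_unregister;

	result = sysfs_create_link(&cdev->device.kobj, &dev->kobj, "device");
	if (result)
		goto err_remove_link;

	return 0;

err_remove_link:
	sysfs_remove_link(&dev->kobj, "thermal_cooling");
err_unregister:
	thermal_cooling_device_unregister(cdev);
	return result;
}
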
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index 92b658f92dc0..5b85989f96be 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
if (!fwnode_property_present(adev_fwnode, "rotation")) {
struct acpi_pld_info *pld;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_get_physical_device_location(handle, &pld)) {
swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
PROPERTY_ENTRY_U32("rotation",
pld->rotation * 45U);
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 80a3481c0470..bfbb08b1e6af 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -442,9 +442,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +455,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +485,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -546,9 +546,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index bec0dcd1f9c3..00ac0d7bb8c9 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -81,6 +81,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid - 1 fake-numa nodes into account in both
+ * pxm_to_node_map()/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already bound to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "%d max nid when expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d nodes in total\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fed446aace42..5ff343096ece 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- msleep(ms);
+ u64 usec = ms * USEC_PER_MSEC, delta_us = 50;
+
+ /*
+ * Use a hrtimer because the timer wheel timers are optimized for
+ * cancelation before they expire and this timer is not going to be
+ * canceled.
+ *
+ * Set the delta between the requested sleep time and the effective
+ * deadline to at least 50 us in case there is an opportunity for timer
+ * coalescing.
+ *
+ * Moreover, longer sleeps can be assumed to need somewhat less timer
+ * precision, so sacrifice some of it for making the timer a more likely
+ * candidate for coalescing by setting the delta to 1% of the sleep time
+ * if it is above 5 ms (this value is chosen so that the delta is a
+ * continuous function of the sleep time).
+ */
+ if (ms > 5)
+ delta_us = (USEC_PER_MSEC / 100) * ms;
+
+ usleep_range(usec, usec + delta_us);
}
void acpi_os_stall(u32 us)
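
The slack arithmetic above is worth tracing: below 5 ms the deadline gets a flat 50 us of slack, above it 1% of the sleep time, and the two formulas agree exactly at 5 ms (1% of 5 ms = 50 us), which is why the comment calls the delta a continuous function of the sleep time. A standalone (userspace) check of the numbers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000ULL

int main(void)
{
	uint64_t ms_values[] = { 1, 5, 6, 50, 100 };

	for (size_t i = 0; i < sizeof(ms_values) / sizeof(ms_values[0]); i++) {
		uint64_t ms = ms_values[i];
		uint64_t usec = ms * USEC_PER_MSEC;
		uint64_t delta_us = ms > 5 ? (USEC_PER_MSEC / 100) * ms : 50;

		/* e.g. a 100 ms sleep lands in [100000, 101000] us */
		printf("sleep %3llu ms -> usleep_range(%llu, %llu)\n",
		       (unsigned long long)ms,
		       (unsigned long long)usec,
		       (unsigned long long)(usec + delta_us));
	}

	return 0;
}
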
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d2f7fd7743a1..ef9444482db1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,34 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,99 +37,408 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _common_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _common_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return _common_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *arg)
+{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
+ bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
+ else
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
+
+ return 0;
+}
+
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int err;
+
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
+ }
+
+ /* no profile handler registered anymore */
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+ return _common_choices_show(data.aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Store and notify a class from legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
+
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+ /* no profile handler registered anymore */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, data.aggregate))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
- }
-
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
@@ -124,98 +451,266 @@ static struct attribute *platform_profile_attrs[] = {
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev;
+
+ dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
+ if (!dev)
+ return 0;
+
+ put_device(dev);
+
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycles profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+ /* never cycle into the custom profile, even if all drivers support it */
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(data.aggregate,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ *
+ * Return: 0
+ */
+int platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+ int id;
+ guard(mutex)(&profile_lock);
+
+ id = pprof->minor;
+ device_unregister(&pprof->dev);
+ ida_free(&platform_profile_ida, id);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
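
A driver consuming the reworked interface no longer fills in a struct platform_profile_handler itself; it supplies a struct platform_profile_ops (probe, profile_get and profile_set are mandatory per the sanity check above) and gets a class device back. A hypothetical consumer, with all names illustrative:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_profile.h>

static int demo_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int demo_profile_get(struct device *dev,
			    enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* would query EC/firmware */
	return 0;
}

static int demo_profile_set(struct device *dev,
			    enum platform_profile_option profile)
{
	return 0;				/* would program EC/firmware */
}

static const struct platform_profile_ops demo_ops = {
	.probe = demo_probe,
	.profile_get = demo_profile_get,
	.profile_set = demo_profile_set,
};

/* Called from the driver's probe path; devres ties the lifetime to @dev. */
static int demo_register(struct device *dev)
{
	struct device *ppdev;

	ppdev = devm_platform_profile_register(dev, "demo", NULL, &demo_ops);

	return PTR_ERR_OR_ZERO(ppdev);
}

Multiple such handlers can now coexist; the legacy /sys/firmware/acpi/platform_profile files aggregate their choices and report "custom" when registered handlers disagree.
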
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 747f83f7114d..e549914a636c 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
- if (!handler->handler_addr ||
- !handler->static_data_buffer_addr ||
- !handler->acpi_param_buffer_addr) {
+ if (!handler->handler_addr) {
buffer->prm_status = PRM_HANDLER_ERROR;
return AE_OK;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 698897b29de2..586cc7d1d8aa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -590,6 +590,8 @@ static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
return;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 80a52a4e66dd..436019d96027 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
@@ -1492,7 +1492,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
- return false;
+ return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -1656,6 +1656,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 821867de43be..b4cd14e7fa76 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -441,6 +441,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1504VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -557,6 +564,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
@@ -646,6 +659,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+ /*
+ * TongFang GM5HG0A: in case of the SKIKK Vanaheim relabel, the
+ * board-name is changed, so check OEM strings instead. Note that
+ * OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -671,11 +695,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
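
The reordering above is a short-circuit optimization: the integer comparisons now run before dmi_check_system(), so the DMI string-matching walk is only done for table entries whose IRQ parameters already match. In isolation (struct layout assumed to mirror the file-local irq_override_cmp):

#include <linux/dmi.h>
#include <linux/types.h>

struct example_irq_override_cmp {
	const struct dmi_system_id *system;
	u8 irq;
	u8 triggering;
	u8 polarity;
	u8 shareable;
	bool override;
};

static bool example_match(const struct example_irq_override_cmp *entry,
			  u32 gsi, u8 triggering, u8 polarity, u8 shareable)
{
	return entry->irq == gsi &&
	       entry->triggering == triggering &&
	       entry->polarity == polarity &&
	       entry->shareable == shareable &&
	       dmi_check_system(entry->system);	/* expensive check last */
}
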
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 74dcccdc6482..9f4efa8f75a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -723,10 +723,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
- acpi_status status;
- status = acpi_get_physical_device_location(adev->handle, &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(adev->handle, &pld))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -1769,6 +1767,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 687524b50085..a48ebbf768f9 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
@@ -372,7 +372,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
}
table_attr->attr.size = table_header->length;
- table_attr->attr.read = acpi_table_show;
+ table_attr->attr.read_new = acpi_table_show;
table_attr->attr.attr.name = table_attr->filename;
table_attr->attr.attr.mode = 0400;
@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
@@ -495,7 +495,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
if (!data_attr)
return -ENOMEM;
sysfs_attr_init(&data_attr->attr.attr);
- data_attr->attr.read = acpi_data_show;
+ data_attr->attr.read_new = acpi_data_show;
data_attr->attr.attr.mode = 0400;
return acpi_data_objs[i].fn(th, data_attr);
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 9e1b01c35070..2295abbecd14 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -218,7 +218,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -228,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6de542d99518..526563a0d188 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -494,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);
-acpi_status
+bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
@@ -502,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
-
if (ACPI_FAILURE(status))
- return status;
+ return false;
output = buffer.pointer;
@@ -523,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
out:
kfree(buffer.pointer);
- return status;
+ return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
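
With the bool return, callers drop the ACPI_SUCCESS()/ACPI_FAILURE() wrappers, as the scan.c and mipi-disco-img.c hunks above show. A minimal caller sketch:

#include <linux/acpi.h>

static void example_use_pld(acpi_handle handle)
{
	struct acpi_pld_info *pld;

	if (!acpi_get_physical_device_location(handle, &pld))
		return;

	/* ... consume pld->rotation, pld->panel, ... */

	ACPI_FREE(pld);
}
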
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index cb45ef5240da..068c1612660b 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -408,6 +408,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ef353ca13c35..76052006bd87 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1971,7 +1971,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -3017,8 +3017,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -3297,8 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3306,7 +3305,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3340,23 +3339,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3400,7 +3399,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3779,8 +3778,8 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -3801,13 +3800,13 @@ err_invalid_target_handle:
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -6374,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ proc->alloc.vm_start - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6928,6 +6927,11 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
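The binder_transaction() hunks above replace the loose (secctx, secctx_sz) pair with struct lsm_context, whose .context and .len fields travel together through security_secid_to_secctx() and security_release_secctx(). A minimal sketch of the new calling convention; use_context() is a hypothetical consumer:

    static int example_with_secctx(void)
    {
            struct lsm_context lsmctx = { };
            u32 secid;
            int ret;

            security_cred_getsecid(current_cred(), &secid);
            /* new API: negative errno on failure, length otherwise */
            ret = security_secid_to_secctx(secid, &lsmctx);
            if (ret < 0)
                    return ret;

            use_context(lsmctx.context, lsmctx.len);

            security_release_secctx(&lsmctx);
            return 0;
    }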
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a738e7745865..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,12 +1115,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
@@ -1058,39 +1137,50 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
void *cb_arg)
__must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
spin_unlock(&lru->lock);
if (vma) {
@@ -1101,17 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
@@ -1145,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
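Most of the binder_alloc.c rework hinges on publishing pages through the plain alloc->pages[] array with release/acquire ordering: a page becomes visible only once fully initialized, and the -EBUSY loser discards its page and re-reads the winner's. A stripped-down sketch of the publish/lookup protocol, assuming a bare struct page * array as in the diff:

    static struct page **pages;     /* stand-in for alloc->pages[] */

    static void publish_page(unsigned long index, struct page *page)
    {
            /* all stores initializing @page happen before this one */
            smp_store_release(&pages[index], page);
    }

    static struct page *lookup_page(unsigned long index)
    {
            /* pairs with publish_page(): non-NULL implies initialized */
            return smp_load_acquire(&pages[index]);
    }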
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c02c8ebcb466..feecd7414241 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -59,34 +59,43 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: index in @alloc->pages[] of the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped; each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,18 +105,18 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
@@ -153,9 +162,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return free_async_space;
}
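page_to_lru() above relies on every allocator page carrying its shrinker metadata in page->private, installed at allocation time and freed with the page. A self-contained round-trip sketch under that assumption (the example_* names are illustrative):

    struct example_mdata {
            struct list_head lru;
            unsigned long page_index;
    };

    static struct page *example_page_alloc(unsigned long index)
    {
            struct example_mdata *mdata;
            struct page *page;

            mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
            if (!mdata)
                    return NULL;

            page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!page) {
                    kfree(mdata);
                    return NULL;
            }

            mdata->page_index = index;
            INIT_LIST_HEAD(&mdata->lru);
            set_page_private(page, (unsigned long)mdata);
            return page;
    }

    static void example_page_free(struct page *page)
    {
            kfree((struct example_mdata *)page_private(page));
            __free_page(page);
    }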
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 81442fe20a69..c88735c54848 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,11 +104,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
+ alloc->pages[page_index] ?
"lru" : "free", page_index);
return false;
}
@@ -148,10 +148,10 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
* if binder shrinker ran during binder_alloc_free_buf
* calls above.
*/
- if (list_empty(&alloc->pages[i].lru)) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
pr_err_size_seq(sizes, seq);
pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ alloc->pages[i] ? "alloc" : "free", i);
binder_selftest_failures++;
}
}
@@ -168,9 +168,9 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
}
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
+ if (alloc->pages[i]) {
pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
+ list_empty(page_to_lru(alloc->pages[i])) ?
"alloc" : "lru", i);
binder_selftest_failures++;
}
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->mapped)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index f8d6be682f23..e4eb8357989c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -25,8 +25,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -582,4 +581,12 @@ struct binder_object {
};
};
+/**
+ * binder_add_device() - Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ *
+ * Not reentrant as the list is not protected by any locks
+ */
+void binder_add_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..16de1b9e72f7 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index ad1fa7abc323..94c6446604fc 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -207,6 +207,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
@@ -272,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8d27c567be1c..f813dbdc2346 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1987,7 +1987,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
/* legacy intx interrupts */
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
}
hpriv->irq = pci_irq_vector(pdev, 0);
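This is the first of several pci_intx() -> pcim_intx() conversions in this section; pcim_intx() is the device-managed variant, so the INTx enable is undone automatically when the driver detaches. A generic sketch of the fallback pattern, assuming a pcim-managed PCI device:

    /* prefer MSI; fall back to managed legacy INTx */
    static int example_enable_irq(struct pci_dev *pdev)
    {
            if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI) < 0)
                    pcim_intx(pdev, 1);     /* devres disables INTx on detach */
            return pci_irq_vector(pdev, 0);
    }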
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..c842e2de6ef9 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -328,7 +328,7 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 mask_port_map; /* mask out particular bits */
+ u32 mask_port_map; /* Mask of valid ports */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
@@ -379,6 +379,21 @@ struct ahci_host_priv {
int port);
};
+/*
+ * Return true if a port should be ignored because it is excluded from
+ * the host port map.
+ */
+static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
+ unsigned int portid)
+{
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
+}
+
extern int ahci_ignore_sss;
extern const struct attribute_group *ahci_shost_groups[];
@@ -396,8 +411,8 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
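A worked example of the new helper's bit test, assuming only the ahci_ignore_port() definition above (expected results in the comments):

    static void example_port_masking(void)
    {
            /* mask_port_map = 0x5 keeps ports 0 and 2 of a 3-port host */
            struct ahci_host_priv hp = { .nports = 3, .mask_port_map = 0x5 };

            ahci_ignore_port(&hp, 0);       /* false: bit 0 is set   */
            ahci_ignore_port(&hp, 1);       /* true:  bit 1 is clear */
            ahci_ignore_port(&hp, 2);       /* false: bit 2 is set   */
            ahci_ignore_port(&hp, 5);       /* true:  beyond nports  */
    }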
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index ef569eae4ce4..29be74fedcf0 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -288,6 +288,9 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 1ec35778903d..2d6a08c23d6a 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -206,6 +206,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
@@ -215,6 +218,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 6b9b4a1dfa15..4336c8a6e208 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -176,7 +176,6 @@ static int st_ahci_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -221,9 +220,8 @@ static int st_ahci_resume(struct device *dev)
return ahci_platform_resume_host(dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
@@ -234,7 +232,7 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &st_ahci_pm_ops,
+ .pm = pm_sleep_ptr(&st_ahci_pm_ops),
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
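The ahci_st.c hunks are the stock conversion from #ifdef CONFIG_PM_SLEEP guards to DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(): the callbacks stay visible to the compiler in every config and are discarded when CONFIG_PM_SLEEP is off. The same shape for a hypothetical foo driver:

    static int foo_suspend(struct device *dev) { return 0; }
    static int foo_resume(struct device *dev) { return 0; }

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    /* resolves to NULL when CONFIG_PM_SLEEP=n */
                    .pm = pm_sleep_ptr(&foo_pm_ops),
            },
    };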
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2f57ec00ab82..e70b6c089cf1 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -209,7 +209,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
-static struct pci_device_id ata_generic[] = {
+static const struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 093b940bc953..d441246fa357 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1725,7 +1725,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fdfa7b266218..e7ace4b10f15 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 7a8064520a35..91d44302eac9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -49,6 +49,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -70,6 +73,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
disable_phys:
while (--i >= 0) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -88,6 +94,9 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -432,6 +441,20 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
return 0;
}
+static u32 ahci_platform_find_max_port_id(struct device *dev)
+{
+ u32 max_port = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ u32 port;
+
+ if (!of_property_read_u32(child, "reg", &port))
+ max_port = max(max_port, port);
+ }
+
+ return max_port;
+}
+
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -458,6 +481,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
u32 mask_port_map = 0;
+ u32 max_port;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
@@ -549,15 +573,17 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
goto err_out;
}
+ /* find maximum port id for allocating structures */
+ max_port = ahci_platform_find_max_port_id(dev);
/*
- * If no sub-node was found, we still need to set nports to
- * one in order to be able to use the
+ * Set nports according to the maximum port id. Clamp at
+ * AHCI_MAX_PORTS; a warning for an invalid port id is
+ * generated later.
+ * When the DT has no sub-nodes, max_port is 0 and nports is 1,
+ * so that it is still possible to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (child_nodes)
- hpriv->nports = child_nodes;
- else
- hpriv->nports = 1;
+ hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1);
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..d956735e2a76 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2845,6 +2845,10 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->quirks |= ATA_QUIRK_NOLPM;
+ if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
if (ap->flags & ATA_FLAG_NO_LPM)
dev->quirks |= ATA_QUIRK_NOLPM;
@@ -3897,6 +3901,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
[__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
[__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
[__ATA_QUIRK_NO_FUA] = "nofua",
@@ -4142,13 +4147,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
ATA_QUIRK_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI, },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9c76fb1ad2ec..ba300cc0a3a3 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1313,7 +1313,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_device_configure - Default device_configure routine for libata
+ * ata_sas_sdev_configure - Default sdev_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
@@ -1323,14 +1323,14 @@ EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
* Zero.
*/
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap)
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_device_configure);
+EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2ce5befd2242..2796c0da8257 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1133,7 +1133,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
}
/**
- * ata_scsi_slave_alloc - Early setup of SCSI device
+ * ata_scsi_sdev_init - Early setup of SCSI device
* @sdev: SCSI device to examine
*
* This is called from scsi_alloc_sdev() when the scsi device
@@ -1143,7 +1143,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_alloc(struct scsi_device *sdev)
+int ata_scsi_sdev_init(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct device_link *link;
@@ -1166,10 +1166,10 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);
/**
- * ata_scsi_device_configure - Set SCSI device attributes
+ * ata_scsi_sdev_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
@@ -1181,8 +1181,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
@@ -1192,10 +1191,10 @@ int ata_scsi_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);
/**
- * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
@@ -1208,7 +1207,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
-void ata_scsi_slave_destroy(struct scsi_device *sdev)
+void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
@@ -1228,7 +1227,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
kfree(sdev->dma_drain_buf);
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 67f277e1c3bf..5a46c066abc3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
- unsigned int offset;
+ unsigned int offset, count;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -617,25 +617,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
+ /* don't overrun current sg */
+ count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
+
+ trace_ata_sff_pio_transfer_data(qc, offset, count);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
- if (offset + qc->sect_size > PAGE_SIZE) {
+ if (offset + count > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0,
- qc->sect_size - split_len);
+ ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
} else {
- ata_pio_xfer(qc, page, offset, qc->sect_size);
+ ata_pio_xfer(qc, page, offset, count);
}
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
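The ata_pio_sector() fix clamps each PIO chunk to the bytes left in the current sg entry, so a sector that straddles an sg boundary can no longer overrun it. A worked example with assumed sizes:

    /*
     * Assume sect_size = 512, cursg->length = 768, cursg_ofs = 512.
     * Old: always transferred 512 bytes -> 256 bytes past the entry.
     * New: count = min(768 - 512, 512) = 256; cursg_ofs reaches 768,
     * cursg advances, and the sector's remaining 256 bytes are copied
     * from the next sg entry on the following call.
     */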
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index aaef5924f636..308f86f9e2f0 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_reinit_one(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id atp867x_pci_tbl[] = {
+static const struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f2f36e55a1f4..fbf5f07ea357 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -812,8 +812,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int pata_macio_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -822,7 +822,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
int rc;
/* First call original */
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (rc)
return rc;
@@ -932,10 +932,10 @@ static const struct scsi_host_template pata_macio_sht = {
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
.max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE,
- .device_configure = pata_macio_device_configure,
+ .sdev_configure = pata_macio_sdev_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index dce24806a052..2d32125c16fd 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -935,9 +935,8 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+ hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
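The pata_octeon_cf.c hunk adopts hrtimer_setup(), which merges hrtimer_init() and the callback assignment into one call, so a timer can never be armed without a function. The same shape for a hypothetical poller (the foo_* names are assumed):

    struct foo_port {
            struct hrtimer poll_timer;
    };

    static enum hrtimer_restart foo_poll(struct hrtimer *t)
    {
            /* ... poll for not-busy, reschedule if needed ... */
            return HRTIMER_NORESTART;
    }

    static void foo_timer_init(struct foo_port *port)
    {
            /* one call replaces hrtimer_init() + .function assignment */
            hrtimer_setup(&port->poll_timer, foo_poll, CLOCK_MONOTONIC,
                          HRTIMER_MODE_REL);
    }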
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index ced906bf56be..beb53bd990be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -97,7 +97,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
-static struct pci_device_id ata_tosh[] = {
+static const struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 0a9689862f71..09792aac7f9d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->private_data = hpriv;
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index d040799bf9cb..530ee26b3012 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,8 +26,6 @@
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
- * @sata0_reset: SATA0 reset handler
- * @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
@@ -38,8 +35,6 @@ struct sata_gemini {
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
- struct reset_control *sata0_reset;
- struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
@@ -224,18 +219,6 @@ void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg,
- unsigned int bridge)
-{
- if (bridge == 0)
- reset_control_reset(sg->sata0_reset);
- else
- reset_control_reset(sg->sata1_reset);
- msleep(10);
- return gemini_sata_setup_bridge(sg, bridge);
-}
-EXPORT_SYMBOL(gemini_sata_reset_bridge);
-
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
@@ -265,21 +248,6 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
- if (IS_ERR(sg->sata0_reset)) {
- dev_err(dev, "no SATA0 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata0_reset);
- }
- sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
- if (IS_ERR(sg->sata1_reset)) {
- dev_err(dev, "no SATA1 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata1_reset);
- }
-
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index 6f6e691d6007..b6e4a5c86e01 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -17,6 +17,5 @@ bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
#endif
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b8f363370e1a..bcbf96867f89 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,8 +672,8 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true,
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..f36e2915ccf1 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,8 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -319,8 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -382,10 +382,10 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .device_configure = nv_adma_device_configure,
+ .sdev_configure = nv_adma_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -393,10 +393,10 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .device_configure = nv_swncq_device_configure,
+ .sdev_configure = nv_swncq_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
@@ -663,8 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev,
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -1871,8 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev,
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..87f4cde6a686 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,10 +378,9 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations sil24_ops = {
@@ -1317,7 +1316,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
}
pci_set_master(pdev);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index ef8724986de3..b8b6d9eff3b8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 60ea45926cd1..52894ff49dcb 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d39b87537168..a53a2dfc1e17 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index a802678a6f74..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7fb21768ca36..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 3ebe77566788..af0029d30dbe 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -293,13 +294,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
@@ -506,6 +509,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logic cpu number of the node.
* There are basically three kinds of return values:
@@ -565,6 +572,8 @@ static int __init parse_core(struct device_node *core, int package_id,
i++;
} while (1);
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
+
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
@@ -677,6 +686,17 @@ static int __init parse_socket(struct device_node *socket)
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+	/*
+	 * Reset max_smt_thread_num to 1 on failure: we must tell the
+	 * framework that SMT is not supported, but max_smt_thread_num
+	 * may already hold the SMT thread count of the cores that were
+	 * parsed successfully.
+	 */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 8cf04a557bdb..0042e4774b0c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -137,6 +137,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 657c93c38b0d..6b9e65a42cd2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 582b5a02a5c4..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -395,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -594,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -626,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
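With the @device_link parameter gone, class_compat_create_link() now only creates the class-side symlink; callers that still want a "device" link must create it themselves. A usage sketch, with a hypothetical compat class pointer:

	err = class_compat_create_link(my_compat_class, dev);
	if (err)
		return err;
	/* ... */
	class_compat_remove_link(my_compat_class, dev);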
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 94865c9d8adc..2fde698430df 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
@@ -3980,7 +3981,7 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4010,7 +4011,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4043,14 +4044,14 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from();
*/
int device_for_each_child_reverse_from(struct device *parent,
- struct device *from, const void *data,
- int (*fn)(struct device *, const void *))
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init_node(&parent->p->klist_children, &i,
@@ -4079,8 +4080,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
@@ -4089,62 +4090,17 @@ struct device *device_find_child(struct device *parent, void *data,
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
EXPORT_SYMBOL_GPL(device_find_child);
-/**
- * device_find_child_by_name - device iterator for locating a child device.
- * @parent: parent struct device
- * @name: name of the child device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
-{
- struct klist_iter i;
- struct device *child;
-
- if (!parent)
- return NULL;
-
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
- break;
- klist_iter_exit(&i);
- return child;
-}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
-
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
@@ -5244,15 +5200,21 @@ int device_match_name(struct device *dev, const void *name)
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5264,13 +5226,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
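With device_find_child() now taking a device_match_t, the removed device_find_child_by_name() and device_find_any_child() helpers collapse into generic matchers. A sketch of the replacements, assuming the existing device_match_name() and device_match_any() matchers ("foo" is a placeholder name):

	/* was: device_find_child_by_name(parent, "foo") */
	struct device *child = device_find_child(parent, "foo", device_match_name);

	/* was: device_find_any_child(parent) */
	struct device *any = device_find_child(parent, NULL, device_match_any);

	if (child) {
		/* ... */
		put_device(child);	/* the iterator took a reference */
	}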
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fdaa24bb641a..a7e511849875 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -599,6 +599,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -614,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -630,6 +632,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
NULL
};
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index c795edad1b96..64840e5d5fcc 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk)
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -132,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs = devcd_dev_bin_attrs,
+ .bin_attrs_new = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
@@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
+ * In the above diagram, it looks like disabled_store() would be racing with a concurrently
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * is called after kfree of devcd memory after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
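The __BIN_ATTR() conversion above is mechanical; __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0) expands to roughly the initializer the open-coded version spelled out (S_IRUSR | S_IWUSR == 0600):

	static const struct bin_attribute devcd_attr_data = {
		.attr = { .name = "data", .mode = 0600 },
		.size = 0,
		.read = devcd_data_read,
		.write = devcd_data_write,
	};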
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2152eec0c135..93e7779ef21e 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -750,25 +750,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
EXPORT_SYMBOL_GPL(__devm_add_action);
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * matching entry is found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no matching entry was found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
/**
* devm_release_action() - release previously added custom action
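A sketch of when the _nowarn variant is appropriate: an object with independent lifetime management (the Rust Devres abstraction cited above) may legitimately race its own teardown against devres teardown, so a missing entry is expected rather than a bug (callback name hypothetical):

	ret = devm_remove_action_nowarn(dev, my_cleanup, data);
	if (ret == -ENOENT) {
		/* already released through the devres path; nothing to do */
	}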
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..6dd1a8860f1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
- return err;
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -245,15 +259,12 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
@@ -310,6 +321,8 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct kstat stat;
+ struct path p;
int deleted = 0;
int err;
@@ -317,32 +330,28 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ p.mnt = parent.mnt;
+ p.dentry = dentry;
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
@@ -443,6 +452,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +490,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b4eb5b89c4ee..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
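bus_for_each_dev(), class_for_each_device(), device_for_each_child*() and driver_for_each_device() now share a single callback type. A sketch of a conforming callback, assuming device_iter_t is typedef'd as int (*)(struct device *dev, void *data):

	static int example_count(struct device *dev, void *data)
	{
		unsigned int *n = data;

		(*n)++;
		return 0;	/* non-zero stops the iteration */
	}

	/* usage: driver_for_each_device(drv, NULL, &count, example_count); */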
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..531e9d789ee0
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to have to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret = 0;
+
+ if (faux_ops && faux_ops->probe)
+ ret = faux_ops->probe(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device for
+ * the caller to do something with at the proper time given the device's
+ * lifecycle.
+ *
+ * Note that the callbacks specified in @faux_ops can be invoked before
+ * this function returns, so be prepared for everything to be properly
+ * initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev->groups = groups;
+ dev_set_name(dev, "%s", name);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device for
+ * the caller to do something with at the proper time given the device's
+ * lifecycle.
+ *
+ * Note that the callbacks specified in @faux_ops can be invoked before
+ * this function returns, so be prepared for everything to be properly
+ * initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+	struct device *dev;
+
+	if (!faux_dev)
+		return;
+
+	dev = &faux_dev->dev;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
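A minimal end-to-end sketch of the new API (all names hypothetical; @faux_ops may be NULL when no probe/remove callbacks are needed):

	static int my_probe(struct faux_device *fdev)
	{
		dev_info(&fdev->dev, "probed\n");
		return 0;
	}

	static const struct faux_device_ops my_ops = {
		.probe = my_probe,
	};

	static struct faux_device *my_fdev;

	static int __init my_init(void)
	{
		my_fdev = faux_device_create("my-faux", NULL, &my_ops);
		return my_fdev ? 0 : -ENODEV;	/* NULL, not ERR_PTR, on failure */
	}

	static void __exit my_exit(void)
	{
		faux_device_destroy(my_fdev);
	}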
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index ddb70e29eb42..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -25,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..d254ceb56d84 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,11 +356,11 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
+ .read_new = firmware_data_read,
+ .write_new = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
+ .bin_attrs_new = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 67858eeb92ed..348c5dbbfa68 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -512,7 +512,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -524,7 +524,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..5db06e825c94 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -13,13 +13,11 @@
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -496,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -512,11 +513,23 @@ struct dpm_watchdog {
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
+
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -642,13 +656,15 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.set_active flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume)
+ if (skip_resume) {
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ } else if (dev->power.set_active) {
pm_runtime_set_active(dev);
+ dev->power.set_active = false;
+ }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -914,7 +930,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -1264,8 +1280,14 @@ Skip:
dev->power.may_skip_resume))
dev->power.must_resume = true;
- if (dev->power.must_resume)
+ if (dev->power.must_resume) {
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
dpm_superior_set_must_resume(dev);
+ }
Complete:
complete_all(&dev->power.completion);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..425c43b2d478 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1764,8 +1764,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
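The hrtimer conversion above follows the general hrtimer_setup() pattern: initialization and callback assignment become one call. Schematically (callback name hypothetical):

	/* before */
	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	t.function = my_timer_fn;

	/* after */
	hrtimer_setup(&t, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);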
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f8163b559bf9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
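Probe-time usage sketch for the new helper (driver names hypothetical); no remove() counterpart is needed because devres clears the wake IRQ on detach:

	static int my_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return devm_pm_set_wake_irq(&pdev->dev, irq);
	}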
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..c1392743df9c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Return if property @propname is true or false in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return if property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
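A usage sketch for the split-out boolean read; the property name is just an example:

	if (device_property_read_bool(dev, "wakeup-source"))
		device_set_wakeup_capable(dev, true);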
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 23da7b31d715..2319c30283a6 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 188438186589..a9d17f316e55 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
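The allocator conversions in regcache-maple.c and regcache-rbtree.c above are behavior-preserving except on overflow: the *_array helpers return NULL when count * size would wrap, where the open-coded multiplication would silently truncate. Schematically:

	/* unchecked: n * size may overflow */
	entry = kmalloc(n * sizeof(*entry), flags);

	/* checked: NULL on multiplication overflow */
	entry = kmalloc_array(n, sizeof(*entry), flags);
	blk   = krealloc_array(blk, n, elem_size, flags);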
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d3659ba3cc11..b1f8508c3966 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -154,7 +154,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 0bcd81389a29..978613407ea3 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -906,6 +906,7 @@ err_alloc:
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
@@ -981,6 +982,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index c99eada83780..86644bbd0710 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = &slave->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
+{
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
+
+ *val = read;
+
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
+
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return 0;
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ if (mbq_size < 0)
+ return mbq_size;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
- *val = (read1 << 8) | read0;
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
- return 0;
+ return ret;
}
static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,35 +206,69 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
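A usage sketch for the extended MBQ init, assuming the devm_regmap_init_sdw_mbq_cfg() wrapper macro added alongside this change passes @mbq_config through (all driver-side names hypothetical):

	static int my_mbq_size(struct device *dev, unsigned int reg)
	{
		return 2;	/* this device's MBQ controls are all 16 bits */
	}

	static const struct regmap_sdw_mbq_cfg my_mbq_cfg = {
		.mbq_size   = my_mbq_size,
		.timeout_us = 10000,
		.retry_us   = 100,
	};

	regmap = devm_regmap_init_sdw_mbq_cfg(slave, &my_regmap_config, &my_mbq_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);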
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 5962ea1230a1..f2843f814675 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -769,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -3116,7 +3115,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
EXPORT_SYMBOL_GPL(regmap_fields_read);
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
- unsigned int *regs, void *val, size_t val_count)
+ const unsigned int *regs, void *val, size_t val_count)
{
u32 *u32 = val;
u16 *u16 = val;
@@ -3210,7 +3209,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
size_t val_count)
{
if (val_count == 0)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..b1726a3515f6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index cf160dd2c27b..b962da263eee 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
@@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
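The topology.c macros now copy the live topology mask into a locally allocated cpumask before formatting it. The point is that cpumap_print_*_to_buf() can be invoked several times for one sysfs read (chunked reads), and snapshotting first keeps every chunk consistent even if the mask changes between calls. A toy model of snapshot-then-format, assuming only that the source bitmap may change underneath the reader:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned long live_mask = 0xf0f0;	/* stands in for topology_##mask(dev->id) */
	unsigned long *snap = malloc(sizeof(*snap));

	if (!snap)
		return 1;				/* mirrors the -ENOMEM path */
	memcpy(snap, &live_mask, sizeof(*snap));	/* cpumask_copy() equivalent */
	printf("%lx\n", *snap);				/* cpumap_print_*_to_buf() stand-in */
	free(snap);
	return 0;
}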
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49ced65bef4c..9edd4468f755 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2028795ec61c..00b74a845328 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 3523dd82d7a0..4db7f6ce8ade 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -226,10 +226,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to ensure all are errored */
- blk_mq_freeze_queue(d->blkq);
+ unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
- blk_mq_unfreeze_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq, memflags);
}
if (d->gd)
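This aoedev hunk is the first of many in this section (ataflop, loop, nbd, rbd, sunvdc, swim3, virtio_blk) adapting to the new freeze API: blk_mq_freeze_queue() now returns a cookie that blk_mq_unfreeze_queue() must get back. The cookie appears to carry saved allocation-scope state, in the style of memalloc_noio_save()/memalloc_noio_restore(), so that allocations made while the queue is frozen cannot recurse into I/O; treat that rationale as an assumption here, modeled below with illustrative names:

#include <stdio.h>

#define PF_MEMALLOC_NOIO 0x1

static unsigned int task_flags;

static unsigned int noio_save(void)		/* entered by freeze */
{
	unsigned int old = task_flags;

	task_flags |= PF_MEMALLOC_NOIO;
	return old;
}

static void noio_restore(unsigned int old)	/* done by unfreeze */
{
	task_flags = old;
}

int main(void)
{
	unsigned int memflags = noio_save();	/* blk_mq_freeze_queue(q) */
	unsigned int nested = noio_save();	/* nesting preserves the outer scope */

	noio_restore(nested);			/* still NOIO here */
	noio_restore(memflags);			/* blk_mq_unfreeze_queue(q, memflags) */
	printf("flags restored: %u\n", task_flags);	/* 0 */
	return 0;
}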
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4ba98c6654be..a81ade622a01 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ unsigned int memflags;
int ret;
if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
q = unit[drive].disk[type]->queue;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -2088,7 +2089,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3affb538b989..abf0486f0d4f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8f6761c27c68..c05fe27a96b6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,7 +68,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -182,41 +181,44 @@ static bool lo_bdev_can_use_dio(struct loop_device *lo,
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+ if (S_ISBLK(inode->i_mode))
+ return lo_bdev_can_use_dio(lo, I_BDEV(inode));
+ if (inode->i_sb->s_bdev)
+ return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ return true;
+}
- if (lo->use_dio == use_dio)
- return;
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
+{
+ bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio)
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
+
+ /* flush dirty pages before starting to issue direct I/O */
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
+ vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -311,6 +313,13 @@ static void loop_clear_limits(struct loop_device *lo, int mode)
lim.discard_granularity = 0;
}
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+ * advertise if they support specific fallocate operations.
+ */
queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -520,12 +529,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -583,6 +586,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
{
struct file *file = fget(arg);
struct file *old_file;
+ unsigned int memflags;
int error;
bool partscan;
bool is_loop;
@@ -620,14 +624,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
/* and ... switch */
disk_force_media_change(lo->lo_disk);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
mapping_set_gfp_mask(file->f_mapping,
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
@@ -964,7 +968,6 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
@@ -977,12 +980,12 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
- struct queue_limits lim;
u32 granularity = 0, max_discard_sectors = 0;
if (S_ISBLK(inode->i_mode))
@@ -995,22 +998,20 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
- lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
- lim.features |= BLK_FEAT_ROTATIONAL;
- lim.max_hw_discard_sectors = max_discard_sectors;
- lim.max_write_zeroes_sectors = max_discard_sectors;
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
if (max_discard_sectors)
- lim.discard_granularity = granularity;
+ lim->discard_granularity = granularity;
else
- lim.discard_granularity = 0;
- return queue_limits_commit_update(lo->lo_queue, &lim);
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
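The loop_reconfigure_limits() to loop_update_limits() rewrite above splits computing the limits from committing them: the helper now only fills in a caller-provided struct, and each caller commits under its own rules (loop_configure() can commit without freezing because the device is not bound yet; loop_set_block_size() below commits inside freeze/unfreeze). A sketch of the split, with illustrative names rather than the real block-layer API:

#include <stdio.h>

struct limits { unsigned int logical, physical, io_min; };

/* Pure computation, no side effects: safe to call without any queue lock. */
static void update_limits(struct limits *lim, unsigned int bsize)
{
	lim->logical = lim->physical = lim->io_min = bsize;
}

int main(void)
{
	struct limits lim = { 0 };

	update_limits(&lim, 4096);
	/* ...the caller now commits: directly when the device isn't bound,
	 * or wrapped in freeze/unfreeze for a live queue. */
	printf("io_min=%u\n", lim.io_min);
	return 0;
}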
@@ -1019,6 +1020,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
{
struct file *file = fget(config->fd);
struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
@@ -1063,6 +1065,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1084,13 +1087,15 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- error = loop_reconfigure_limits(lo, config->block_size);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
if (error)
goto out_unlock;
@@ -1150,7 +1155,12 @@ static void __loop_clr_fd(struct loop_device *lo)
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- /* reset the block size to the default */
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
@@ -1244,9 +1254,9 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
+ unsigned int memflags;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
@@ -1263,21 +1273,18 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
- blk_mq_freeze_queue(lo->lo_queue);
-
- prev_lo_flags = lo->lo_flags;
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
+
+ lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
@@ -1285,17 +1292,13 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_size(lo, new_size);
}
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
- }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
@@ -1444,20 +1447,34 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
+ unsigned int memflags;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ return 0;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ struct queue_limits lim;
+ unsigned int memflags;
int err = 0;
if (lo->lo_state != Lo_bound)
@@ -1469,10 +1486,13 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
- blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
return err;
}
@@ -1854,7 +1874,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -2023,8 +2043,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 43701b7b10a7..95361099a2dc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3416,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b852050d8a96..7bdc7eb808ea 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Only part of the request header or data payload was sent successfully */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -327,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -368,7 +371,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -379,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
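nbd's private freeze-commit-unfreeze wrapper could be deleted because queue_limits_commit_update_frozen() bundles the three steps. A model of that wrapper shape, with stand-in names for the real block-layer calls:

#include <stdio.h>

struct queue { int frozen; };
struct limits { long nr_bytes; };

static unsigned int q_freeze(struct queue *q) { q->frozen++; return 0; }
static void q_unfreeze(struct queue *q, unsigned int memflags)
{
	(void)memflags;
	q->frozen--;
}
static int q_commit(struct queue *q, struct limits *lim)
{
	(void)q; (void)lim;
	return 0;
}

/* The _frozen variant: freeze, commit, unfreeze, return the commit result. */
static int q_commit_frozen(struct queue *q, struct limits *lim)
{
	unsigned int memflags = q_freeze(q);
	int error = q_commit(q, lim);

	q_unfreeze(q, memflags);
	return error;
}

int main(void)
{
	struct queue q = { 0 };
	struct limits lim = { 4096 };

	printf("ret=%d frozen=%d\n", q_commit_frozen(&q, &lim), q.frozen);
	return 0;
}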
@@ -466,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -615,6 +612,30 @@ static inline int was_interrupted(int result)
}
/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the request pending and finish it from work.
+ *
+ * We also have to return BLK_STS_OK to the block core, otherwise this
+ * same request may be re-dispatched with a different tag, while our
+ * header already went out with the old tag, which confuses reply handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
+
+/*
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
* Returns BLK_STS_IOERR if sending failed.
*/
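The rule nbd_sched_pending_work() enforces: once any bytes of a request have hit the socket, the request must keep its tag, so instead of requeueing (which could re-dispatch it under a new tag) the driver records the progress and returns BLK_STS_OK, leaving completion to deferred work. A toy model of that decision, with illustrative names:

#include <stdio.h>
#include <string.h>

enum status { STS_OK, STS_RESOURCE };

struct cmd { int partial; size_t sent; };

static enum status send_cmd(struct cmd *cmd, const char *buf, size_t len)
{
	size_t n = len / 2;	/* pretend the socket accepted only half */

	(void)buf;
	if (n < len) {
		cmd->partial = 1;	/* NBD_CMD_PARTIAL_SEND */
		cmd->sent = n;		/* resume point for the work item */
		return STS_OK;		/* never STS_RESOURCE: no re-dispatch */
	}
	return STS_OK;
}

int main(void)
{
	struct cmd c = { 0 };

	send_cmd(&c, "request", strlen("request"));
	printf("partial=%d sent=%zu\n", c.partial, c.sent);
	return 0;
}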
@@ -699,8 +720,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
@@ -737,14 +758,8 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -770,6 +785,14 @@ out:
return BLK_STS_OK;
requeue:
+ /*
+ * Can't requeue when dealing with a partial send; we must finish the
+ * request from the pending work function.
+ */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
/* retry on a different socket */
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
@@ -778,6 +801,44 @@ requeue:
return BLK_STS_OK;
}
+/* handle partial sending */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+ /* don't bother the timeout handler during a partial send */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+}
+
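The retry cadence in nbd_pending_cmd_work() is a capped exponential backoff against the request deadline: sleep 2ms, 4ms, 8ms, ... and fail the request with BLK_STS_IOERR once the next sleep would cross the deadline. The schedule, traced with a toy clock:

#include <stdio.h>

int main(void)
{
	unsigned long now = 0, deadline = 100;	/* milliseconds, toy clock */
	unsigned int wait_ms = 2;

	while (1) {
		if (now + wait_ms >= deadline) {
			printf("t=%lums: give up (BLK_STS_IOERR)\n", now);
			break;
		}
		now += wait_ms;			/* msleep(wait_ms) */
		printf("retried at t=%lums\n", now);
		wait_ms *= 2;
	}
	return 0;
}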
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
@@ -1173,6 +1234,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
+ unsigned int memflags;
int err;
/* Arg will be cast to int, check it to avoid overflow */
@@ -1186,7 +1248,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
- blk_mq_freeze_queue(nbd->disk->queue);
+ memflags = blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1224,14 +1286,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
return 0;
put_socket:
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
sockfd_put(sock);
return err;
}
@@ -1841,8 +1904,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -2180,6 +2242,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3c3d8d200abb..da1ecbf988b8 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -266,6 +266,10 @@ static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -468,6 +472,7 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -621,6 +626,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
&nullb_device_attr_fua,
+ &nullb_device_attr_rotational,
NULL,
};
@@ -706,7 +712,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full\n");
+ "zone_size,zone_append_max_sectors,zone_full,"
+ "rotational\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -793,6 +800,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -899,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -922,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -948,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -991,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -1011,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1070,7 +1078,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1418,8 +1426,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
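The hrtimer_setup() conversions here and in null_queue_rq() below fold the old two-step init (hrtimer_init(), then assign .function) into one call, so a timer can never be armed with its callback still unset. The shape of that API change, in a toy (struct timer and timer_setup are illustrative):

#include <stdio.h>

struct timer {
	void (*fn)(struct timer *);
	int clock, mode;
};

static void timer_setup(struct timer *t, void (*fn)(struct timer *),
			int clock, int mode)
{
	t->clock = clock;
	t->mode = mode;
	t->fn = fn;	/* callback installed together with the init */
}

static void tick(struct timer *t)
{
	(void)t;
	puts("tick");
}

int main(void)
{
	struct timer t;

	timer_setup(&t, tick, 0, 0);	/* cf. hrtimer_setup(&t, fn, clock, mode) */
	t.fn(&t);
	return 0;
}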
@@ -1541,8 +1548,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1596,8 +1603,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
@@ -1783,9 +1790,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1809,9 +1815,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
@@ -1938,6 +1943,9 @@ static int null_add_dev(struct nullb_device *dev)
lim.features |= BLK_FEAT_FUA;
}
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index a7bb32f73ec3..6f9fe6171087 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -107,6 +107,7 @@ struct nullb_device {
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ff45ed766469..dc9e4a14b885 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ac421dbeeb11..faafd7ff43d6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
@@ -7282,9 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
- blk_mq_freeze_queue(rbd_dev->disk->queue);
+ unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
blk_mark_disk_dead(rbd_dev->disk);
- blk_mq_unfreeze_queue(rbd_dev->disk->queue);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c34695d2eea7..82467ecde7ec 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08ce6d96d04c..2ee6e9bd4e28 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -167,7 +167,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index 9cca05dcf772..ddf3629d8894 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -32,25 +32,31 @@ module! {
license: "GPL v2",
}
+#[pin_data]
struct NullBlkModule {
- _disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>,
+ #[pin]
+ _disk: Mutex<GenDisk<NullBlkDevice>>,
}
-impl kernel::Module for NullBlkModule {
- fn init(_module: &'static ThisModule) -> Result<Self> {
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
pr_info!("Rust null_blk loaded\n");
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)?;
+ // Use an immediately-called closure as a stable `try` block
+ let disk = /* try */ (|| {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(4096 << 11)
+ .logical_block_size(4096)?
+ .physical_block_size(4096)?
+ .rotational(false)
+ .build(format_args!("rnullb{}", 0), tagset)
+ })();
- Ok(Self { _disk: disk })
+ try_pin_init!(Self {
+ _disk <- new_mutex!(disk?, "nullb:disk"),
+ })
}
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 2d38331ee667..282f81616a78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
@@ -918,12 +918,12 @@ struct vdc_check_port_data {
char *type;
};
-static int vdc_device_probed(struct device *dev, void *arg)
+static int vdc_device_probed(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vdc_check_port_data *port_data;
+ const struct vdc_check_port_data *port_data;
- port_data = (struct vdc_check_port_data *)arg;
+ port_data = (const struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
@@ -1113,6 +1113,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
+ unsigned int memflags;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,13 +1122,13 @@ static void vdc_queue_drain(struct vdc_port *port)
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index be4ac58afe41..eda33c5eb5e2 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 90be1017f7bf..3aedcb5add61 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
+ unsigned int memflags;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static int fd_eject(struct floppy_state *fs)
@@ -1208,8 +1209,7 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 934ab9332c80..ca9a67b5b537 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2213,7 +2213,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
@@ -2716,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+ * Parameters can only be changed before the device
+ * has been started
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3efe378f1386..91cde76a4b3e 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -1106,9 +1105,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
- i = queue_limits_commit_update(disk->queue, &lim);
- blk_mq_unfreeze_queue(disk->queue);
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
if (i)
return i;
return count;
@@ -1181,7 +1178,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1209,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
@@ -1481,7 +1480,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
@@ -1582,16 +1580,16 @@ static void virtblk_remove(struct virtio_device *vdev)
put_disk(vblk->disk);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtblk_freeze(struct virtio_device *vdev)
+static int virtblk_freeze_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
struct request_queue *q = vblk->disk->queue;
+ unsigned int memflags;
/* Ensure no requests in virtqueues before deleting vqs. */
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue_nowait(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
@@ -1605,7 +1603,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
return 0;
}
-static int virtblk_restore(struct virtio_device *vdev)
+static int virtblk_restore_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
@@ -1619,8 +1617,29 @@ static int virtblk_restore(struct virtio_device *vdev)
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
#endif
+static int virtblk_reset_prepare(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_reset_done(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1656,6 +1675,8 @@ static struct virtio_driver virtio_blk = {
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
+ .reset_prepare = virtblk_reset_prepare,
+ .reset_done = virtblk_reset_done,
};
static int __init virtio_blk_init(void)
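virtio_blk now routes four entry points through two shared helpers: the PM freeze/restore pair (still under CONFIG_PM_SLEEP) and the new reset_prepare/reset_done callbacks all call virtblk_freeze_priv()/virtblk_restore_priv(), keeping the quiesce logic in one place. The wiring, sketched with illustrative names:

#include <stdio.h>

struct vdev { int quiesced; };

static int freeze_priv(struct vdev *v)  { v->quiesced = 1; return 0; }
static int restore_priv(struct vdev *v) { v->quiesced = 0; return 0; }

/* Four thin entry points, one implementation pair. */
static int pm_freeze(struct vdev *v)     { return freeze_priv(v); }
static int pm_restore(struct vdev *v)    { return restore_priv(v); }
static int reset_prepare(struct vdev *v) { return freeze_priv(v); }
static int reset_done(struct vdev *v)    { return restore_priv(v); }

int main(void)
{
	struct vdev v = { 0 };

	reset_prepare(&v);
	reset_done(&v);
	printf("quiesced=%d\n", v.quiesced);
	(void)pm_freeze; (void)pm_restore;
	return 0;
}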
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 838064593f62..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
- ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
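secs_to_jiffies(10) is a purely cosmetic replacement for msecs_to_jiffies(10 * 1000). Simplified versions of the two helpers (the kernel's real ones handle rounding and overflow; this sketch assumes HZ divides 1000):

#include <stdio.h>

#define HZ 250
#define msecs_to_jiffies(m)	((m) * HZ / 1000)
#define secs_to_jiffies(s)	((s) * HZ)

int main(void)
{
	/* Both spellings evaluate to the same tick count (2500 here). */
	printf("%d %d\n", msecs_to_jiffies(10 * 1000), secs_to_jiffies(10));
	return 0;
}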
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 59ce113b882a..edcd08a9dcef 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4b7219be1bb8..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 45df5eeabc5e..9f5020b077c5 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -55,8 +55,8 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
@@ -112,17 +112,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -143,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
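zram_can_store_page() keeps the lock-free maximum tracking from update_used_max(): racing writers can only ratchet the recorded maximum upward, and a CAS loser retries with the value it lost to. The same loop with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long max_used;

static void update_max(long pages)
{
	long cur = atomic_load(&max_used);

	do {
		if (cur >= pages)
			return;	/* someone already recorded a bigger max */
	} while (!atomic_compare_exchange_weak(&max_used, &cur, pages));
}

int main(void)
{
	update_max(10);
	update_max(7);	/* no effect: the max only moves up */
	printf("%ld\n", atomic_load(&max_used));	/* 10 */
	return 0;
}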
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -277,18 +287,6 @@ static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
}
#endif
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
-{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
-
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
-}
-
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
@@ -833,13 +831,10 @@ static ssize_t writeback_store(struct device *dev,
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
+ if (zram_read_from_zspool(zram, page, index))
+ goto next;
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- release_pp_slot(zram, pps);
- continue;
- }
-
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
@@ -879,7 +874,7 @@ static ssize_t writeback_store(struct device *dev,
zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
+ zram_set_handle(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
@@ -889,6 +884,8 @@ static ssize_t writeback_store(struct device *dev,
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
+
+ cond_resched();
}
if (blk_idx)
@@ -1468,6 +1465,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
@@ -1504,7 +1502,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ free_block_bdev(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1532,56 +1530,73 @@ out:
zram_set_obj_size(zram, index, 0);
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
-
+ zstrm = zcomp_stream_get(zram->comps[prio]);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- copy_page(dst, src);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zram->comps[prio], zstrm,
- src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
zs_unmap_object(zram->mem_pool, handle);
+ zcomp_stream_put(zram->comps[prio]);
+
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1599,7 +1614,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
*/
zram_slot_unlock(zram, index);
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
parent);
}
@@ -1637,33 +1652,88 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ /*
+ * This function is called from preemptible context, so we don't need
+ * the optimistic handle allocation with pessimistic fallback that we
+ * use for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ src = kmap_local_page(page);
+ memcpy(dst, src, PAGE_SIZE);
+ kunmap_local(src);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ void *dst, *mem;
struct zcomp_strm *zstrm;
unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ bool same_filled;
+
+ /* First, free memory allocated to this slot (if any) */
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_slot_unlock(zram, index);
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
+ mem = kmap_local_page(page);
ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
- src, &comp_len);
- kunmap_local(src);
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1672,8 +1742,11 @@ compress_again:
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ return write_incompressible_page(zram, page, index);
+ }
+
/*
* handle allocation has 2 paths:
* a) fast path is executed with preemption disabled (for
@@ -1689,35 +1762,23 @@ compress_again:
*/
if (IS_ERR_VALUE(handle))
handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ goto compress_again;
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
-
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ if (!zram_can_store_page(zram)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
@@ -1725,41 +1786,19 @@ compress_again:
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
-
+ memcpy(dst, zstrm->buffer, comp_len);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
-
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_slot_lock(zram, index);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
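
The net effect of the refactor above: zram_write_page() now frees any old object up front and dispatches to one of three store paths. A condensed sketch of the resulting control flow (names as in the patch; locking and error handling omitted):

	zram_free_page(zram, index);            /* drop any previous object */

	if (page_same_filled(mem, &element))
		return write_same_filled_page(zram, element, index);

	zcomp_compress(comp, zstrm, mem, &comp_len);
	if (comp_len >= huge_class_size)
		return write_incompressible_page(zram, page, index);

	/* otherwise: zs_malloc(comp_len), copy zstrm->buffer, record handle */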
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 134be414e210..db78d7c01b9a 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -62,10 +62,7 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct zram_table_entry {
- union {
- unsigned long handle;
- unsigned long element;
- };
+ unsigned long handle;
unsigned int flags;
spinlock_t lock;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
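
Dropping the handle/element union works because a same-filled page never owns a zsmalloc object, so its fill pattern can live directly in the handle field. An illustrative summary of the slot states encoded by the helpers above:

	/*
	 * ZRAM_SAME set  -> handle holds the fill pattern, no zsmalloc object
	 * ZRAM_HUGE set  -> handle is a zsmalloc handle, object is PAGE_SIZE
	 * neither flag   -> handle is a zsmalloc handle, object is comp_len
	 */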
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7771edf54fb3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
+config BT_HCIBTUSB_AUTO_ISOC_ALT
+ bool "Automatically adjust alternate setting for Isoc endpoints"
+ depends on BT_HCIBTUSB
+ default y if CHROME_PLATFORMS
+ help
+ Say Y here to automatically adjust the alternate setting for
+ HCI_USER_CHANNEL whenever a SCO link is established.
+
+ When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
+ and configures isoc endpoint alternate setting automatically when
+ HCI_USER_CHANNEL is in use.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index a1153ada74d2..0a60660fc8ce 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -553,6 +553,9 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type)
+ return NULL;
+
strreplace(board_type, '/', '-');
return board_type;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d496cf2c3411..d2540b28bc7a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/unaligned.h>
@@ -506,13 +507,13 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
- version->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(version->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- version->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- version->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- version->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
@@ -927,16 +928,16 @@ int btintel_read_boot_params(struct hci_dev *hdev,
le16_to_cpu(params->dev_revid));
bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(params->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
params->min_fw_build_nn, params->min_fw_build_cw,
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 2b79952f3628..091ffe3e1495 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1320,6 +1320,10 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
if (opcode == 0xfc01)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
+ /* Firmware raises alive interrupt on HCI_OP_RESET */
+ if (opcode == HCI_OP_RESET)
+ data->gp0_received = false;
+
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
@@ -1357,7 +1361,6 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
if (opcode == HCI_OP_RESET) {
- data->gp0_received = false;
ret = wait_event_timeout(data->gp0_wait_q,
data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 18f34998a120..e26b07a9387d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/mmc/sdio_func.h>
@@ -88,7 +89,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
else
adapter->psmode = 0;
BT_DBG("PS Mode:%s",
- (adapter->psmode) ? "Enable" : "Disable");
+ str_enable_disable(adapter->psmode));
} else {
BT_DBG("PS Mode command failed");
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 224eafc27dbe..68846c5bd4f7 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1329,7 +1329,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
fwname = FIRMWARE_MT7668;
break;
case 0x7922:
- case 0x7961:
case 0x7925:
/* Reset the device to ensure it's in the initial state before
* downloading the firmware.
@@ -1337,7 +1336,8 @@ int btmtk_usb_setup(struct hci_dev *hdev)
if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
btmtk_usb_subsys_reset(hdev, dev_id);
-
+ fallthrough;
+ case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index a1dfcfe43d3a..bd5464bde174 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1249,7 +1249,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+static void btmtksdio_reset(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status;
@@ -1360,7 +1360,7 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
- hdev->cmd_timeout = btmtksdio_cmd_timeout;
+ hdev->reset = btmtksdio_reset;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1230045d78a5..aa5ec1d444a9 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1381,13 +1381,12 @@ static void btnxpuart_tx_work(struct work_struct *work)
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
- serdev_device_wait_until_sent(serdev, 0);
hdev->stat.byte_tx += len;
skb_pull(skb, len);
if (skb->len > 0) {
skb_queue_head(&nxpdev->txq, skb);
- break;
+ continue;
}
switch (hci_skb_pkt_type(skb)) {
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dfbbac92242a..cdf09d9a9ad2 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return false;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* nvm file name has an extension, replace with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -700,39 +746,43 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set the default values for variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
-}
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
- const char *stem, u8 rom_ver, u16 bid)
-{
- if (bid == 0x0)
- snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
- else if (bid & 0xff00)
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%x", stem, rom_ver, bid);
- else
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
int err;
@@ -761,44 +811,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- switch (soc_type) {
- case QCA_WCN3990:
- case QCA_WCN3991:
- case QCA_WCN3998:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN3988:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/apbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA2066:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA6390:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/htbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN6750:
- /* Choose mbn file by default.If mbn file is not found
- * then choose tlv file
- */
- config.type = ELF_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/msbtfw%02x.mbn", rom_ver);
- break;
- case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtbtfw%02x.tlv", rom_ver);
- break;
- default:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/rampatch_%08x.bin", soc_ver);
+ if (rampatch_name) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
+ } else {
+ switch (soc_type) {
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3988:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN6750:
+ /* Choose the mbn file by default. If the mbn file is not found,
+ * fall back to the tlv file.
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
+ break;
+ case QCA_WCN6855:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN7850:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hmtbtfw%02x.tlv", rom_ver);
+ break;
+ default:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
@@ -816,8 +870,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
case QCA_WCN3990:
@@ -836,8 +896,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -848,13 +909,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index bb5207d7a8c7..9d28c8800225 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -161,7 +161,7 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name);
+ const char *firmware_name, const char *rampatch_name);
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
@@ -176,7 +176,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name,
+ const char *rampatch_name)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 83025f457ca0..d3eba0d4a57d 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1351,12 +1351,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
- hci_set_hw_info(hdev,
+ if (btrtl_dev->ic_info) {
+ hci_set_hw_info(hdev,
"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
btrtl_dev->ic_info->lmp_subver,
btrtl_dev->ic_info->hci_rev,
btrtl_dev->ic_info->hci_ver,
btrtl_dev->ic_info->hci_bus);
+ }
btrtl_free(btrtl_dev);
return ret;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 279fe6c115fa..a0fc465458b2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -34,6 +34,7 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
+static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -377,6 +378,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -481,6 +484,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -600,6 +606,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3576), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
@@ -610,6 +618,8 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -674,6 +684,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -879,7 +891,6 @@ struct btusb_data {
int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
- unsigned cmd_timeout_cnt;
struct qca_dump_info qca_dump;
};
@@ -889,11 +900,6 @@ static void btusb_reset(struct hci_dev *hdev)
struct btusb_data *data;
int err;
- if (hdev->reset) {
- hdev->reset(hdev);
- return;
- }
-
data = hci_get_drvdata(hdev);
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -906,15 +912,12 @@ static void btusb_reset(struct hci_dev *hdev)
usb_queue_reset_device(data->intf);
}
-static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+static void btusb_intel_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
struct btintel_data *intel_data = hci_get_priv(hdev);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (intel_data->acpi_reset_method) {
if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
@@ -987,7 +990,7 @@ static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
}
}
-static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
+static void btusb_rtl_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
@@ -997,9 +1000,6 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -1034,19 +1034,16 @@ static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}
-static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+static void btusb_qca_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
- bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ bt_dev_info(hdev, "Ramdump in progress, defer reset");
return;
}
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (reset_gpio) {
bt_dev_err(hdev, "Reset qca device via bt_en gpio");
@@ -1089,6 +1086,42 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
+static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ struct hci_ev_sync_conn_complete *ev =
+ (void *) skb->data + sizeof(*hdr);
+ struct hci_dev *hdev = data->hdev;
+ unsigned int notify_air_mode;
+
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
+ return;
+
+ if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
+ return;
+
+ if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
+ return;
+
+ switch (ev->air_mode) {
+ case BT_CODEC_CVSD:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
+ break;
+
+ case BT_CODEC_TRANSPARENT:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
+ break;
+
+ default:
+ return;
+ }
+
+ bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
+ data->sco_num = 1;
+ data->air_mode = notify_air_mode;
+ schedule_work(&data->work);
+}
+
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1096,6 +1129,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
+ /* Configure the isoc altsetting for HCI_USER_CHANNEL when a SCO link is established */
+ if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
+ btusb_sco_connected(data, skb);
+
return data->recv_event(data->hdev, skb);
}
@@ -2106,7 +2143,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2580,7 +2618,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2638,8 +2677,15 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
+ /*
+ * The function usb_driver_claim_interface() is documented to need
+ * locks held if it's not called from a probe routine. The code here
+ * is called from the hci_power_on workqueue, so grab the lock.
+ */
+ device_lock(&btmtk_data->isopkt_intf->dev);
err = usb_driver_claim_interface(&btusb_driver,
btmtk_data->isopkt_intf, data);
+ device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
bt_dev_err(data->hdev, "Failed to claim iso interface");
@@ -3639,12 +3685,39 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
.llseek = default_llseek,
};
+static ssize_t isoc_alt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+}
+
+static ssize_t isoc_alt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+ int alt;
+ int ret;
+
+ if (kstrtoint(buf, 10, &alt))
+ return -EINVAL;
+
+ ret = btusb_switch_alt_setting(data->hdev, alt);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(isoc_alt);
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3841,7 +3914,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Transport specific configuration */
hdev->send = btusb_send_frame_intel;
- hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ hdev->reset = btusb_intel_reset;
if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
@@ -3861,7 +3934,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btmtk_reset_sync;
+ hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
@@ -3893,7 +3966,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
btusb_check_needs_reset_resume(intf);
}
@@ -3907,7 +3980,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -3926,7 +3999,7 @@ static int btusb_probe(struct usb_interface *intf,
btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
- hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->reset = btusb_rtl_reset;
hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
@@ -4008,6 +4081,10 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
+
+ err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
+ if (err)
+ goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4054,8 +4131,10 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc)
+ if (data->isoc) {
+ device_remove_file(&intf->dev, &dev_attr_isoc_alt);
usb_set_intfdata(data->isoc, NULL);
+ }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 37129e6cb0eb..0ac2168f1dc4 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -31,6 +31,7 @@
#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
@@ -228,7 +229,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
- const char *firmware_name;
+ const char *firmware_name[2];
};
static int qca_regulator_enable(struct qca_serdev *qcadev);
@@ -258,7 +259,18 @@ static const char *qca_get_firmware_name(struct hci_uart *hu)
if (hu->serdev) {
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
- return qsd->firmware_name;
+ return qsd->firmware_name[0];
+ } else {
+ return NULL;
+ }
+}
+
+static const char *qca_get_rampatch_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name[1];
} else {
return NULL;
}
@@ -332,8 +344,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
else
__serial_clock_off(hu->tty);
- BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
- vote ? "true" : "false");
+ BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
+ str_true_false(vote));
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
@@ -1638,7 +1650,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
-static void qca_cmd_timeout(struct hci_dev *hdev)
+static void qca_reset(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1855,6 +1867,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
+ const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
@@ -1963,12 +1976,12 @@ retry:
/* Setup patch / NVM configurations */
ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
- firmware_name);
+ firmware_name, rampatch_name);
if (!ret) {
clear_bit(QCA_IBS_DISABLED, &qca->flags);
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
- hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->reset = qca_reset;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
@@ -2202,7 +2215,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
+ hu->hdev->reset = NULL;
del_timer_sync(&qca->wake_retrans_timer);
del_timer_sync(&qca->tx_idle_timer);
@@ -2309,8 +2322,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- device_property_read_string(&serdev->dev, "firmware-name",
- &qcadev->firmware_name);
+ device_property_read_string_array(&serdev->dev, "firmware-name",
+ qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
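
With "firmware-name" now read as a string array, entry 0 keeps its old meaning (NVM override) and entry 1 optionally names the rampatch. A hypothetical device tree fragment (the binding details are an assumption, not part of this patch):

	/*
	 *   bluetooth {
	 *       firmware-name = "hpnv21.bin", "hpbtfw21.tlv";
	 *   };
	 *
	 * qca_get_firmware_name() returns entry [0] (NVM override),
	 * qca_get_rampatch_name() returns entry [1] (rampatch override).
	 */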
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..52053f7c6d9a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 2916d1333649..d1f3d327ddd1 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -320,90 +320,90 @@ const struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index e8c92972f9df..9dcc7184817d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -357,6 +357,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 07645ce2119a..7ffea0f98162 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -245,6 +245,58 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -742,6 +794,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -917,7 +972,7 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
return err;
}
- mhi_cntrl->regs = pcim_iomap_region(pdev, 1 << bar_num, pci_name(pdev));
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
if (IS_ERR(mhi_cntrl->regs)) {
err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
@@ -949,7 +1004,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
@@ -1040,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..1e57ebfb7622 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 5dea31769f9a..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 51745ed1bbab..b163e043c687 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3612,7 +3612,7 @@ static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
}
/* Place files in /proc/sys/dev/cdrom */
-static struct ctl_table cdrom_table[] = {
+static const struct ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 64b097e830d4..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 76eac3653b1c..7811aa734053 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -338,7 +338,10 @@ static void cdx_shutdown(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
if (cdx_drv && cdx_drv->shutdown)
cdx_drv->shutdown(cdx_dev);
}
@@ -470,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 48fe96ab4649..e110857824fc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -724,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-static struct ctl_table hpet_table[] = {
+static const struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 7174bfccc7b3..b95f6d0f17ed 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -152,8 +152,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 7296127181ec..ee2bdc7ed0da 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s%d", "ipmb-",
client->adapter->nr);
+ if (!ipmb_dev->miscdev.name)
+ return -ENOMEM;
+
ipmb_dev->miscdev.fops = &ipmb_fops;
ipmb_dev->miscdev.parent = &client->dev;
ret = misc_register(&ipmb_dev->miscdev);
@@ -355,11 +358,13 @@ static const struct i2c_device_id ipmb_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_ipmb_id[] = {
{ "IPMB0001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+#endif
static struct i2c_driver ipmb_driver = {
.driver = {
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 332082e02ea5..e6ba35b71f10 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -122,12 +122,9 @@ out:
static int ipmi_release(struct inode *inode, struct file *file)
{
struct ipmi_file_private *priv = file->private_data;
- int rv;
struct ipmi_recv_msg *msg, *next;
- rv = ipmi_destroy_user(priv->user);
- if (rv)
- return rv;
+ ipmi_destroy_user(priv->user);
list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
ipmi_free_recv_msg(msg);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e12b531f5c2f..1e5313748f8b 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1398,13 +1398,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
module_put(owner);
}
-int ipmi_destroy_user(struct ipmi_user *user)
+void ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
kref_put(&user->refcount, free_user);
-
- return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 941d2dcc8c9d..e63c316d8aaa 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static struct ctl_table ipmi_table[] = {
+static const struct ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
@@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void)
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
- int rv;
-
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
@@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void)
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
- rv = ipmi_destroy_user(ipmi_user);
- if (rv)
- pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index b83d55685b22..8c0ea637aba0 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -118,7 +118,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n",
&pdev->resource[0], io.regsize, io.regspacing, io.irq);
return ipmi_si_add_smi(&io);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 335eea80054e..f1875b2bebbc 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1064,7 +1064,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
static void ipmi_unregister_watchdog(int ipmi_intf)
{
- int rv;
struct ipmi_user *loc_user = watchdog_user;
if (!loc_user)
@@ -1089,9 +1088,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
mutex_lock(&ipmi_watchdog_mutex);
/* Disconnect from IPMI. */
- rv = ipmi_destroy_user(loc_user);
- if (rv)
- pr_warn("error unlinking from IPMI: %d\n", rv);
+ ipmi_destroy_user(loc_user);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index a14fafc583d4..310f17dd9511 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -292,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
ssif_bmc->nbytes_processed = 0;
ssif_bmc->remain_len = 0;
ssif_bmc->busy = false;
- memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
wake_up_all(&ssif_bmc->wait_queue);
}
@@ -744,9 +743,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
ssif_bmc->aborting = true;
}
} else if (ssif_bmc->state == SSIF_RES_SENDING) {
- if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
/* Invalidate response buffer to denote it is sent */
complete_response(ssif_bmc);
+ }
ssif_bmc->state = SSIF_READY;
}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 541edc26ec89..f7dd455dd0dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx);
#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
-static int misc_minor_alloc(void)
+static int misc_minor_alloc(int minor)
{
- int ret;
-
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
+ int ret = 0;
+
+ if (minor == MISC_DYNAMIC_MINOR) {
+ /* allocate free id */
+ ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+ if (ret >= 0) {
+ ret = DYNAMIC_MINORS - ret - 1;
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+ MINORMASK, GFP_KERNEL);
+ }
} else {
- ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
- MINORMASK, GFP_KERNEL);
+ /* specific minor, check if it is in dynamic or misc dynamic range */
+ if (minor < DYNAMIC_MINORS) {
+ minor = DYNAMIC_MINORS - minor - 1;
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else if (minor > MISC_DYNAMIC_MINOR) {
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else {
+ /* case of non-dynamic minors, no need to allocate id */
+ ret = 0;
+ }
}
return ret;
}
@@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc)
mutex_lock(&misc_mtx);
if (is_dynamic) {
- int i = misc_minor_alloc();
+ int i = misc_minor_alloc(misc->minor);
if (i < 0) {
err = -EBUSY;
@@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc)
misc->minor = i;
} else {
struct miscdevice *c;
+ int i;
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
@@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc)
goto out;
}
}
+
+ i = misc_minor_alloc(misc->minor);
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
@@ -243,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
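
The ida now backs both dynamic and fixed minors, with the dynamic range stored inverted. A sketch of the mapping implemented by misc_minor_alloc() above:

	/*
	 * Dynamic: ida id i  <->  minor DYNAMIC_MINORS - i - 1, so the first
	 * dynamic device gets minor 127, the next 126, and so on.
	 * Fixed minor m < DYNAMIC_MINORS reserves ida id DYNAMIC_MINORS - m - 1,
	 * preventing a later dynamic allocation from handing out the same minor.
	 * Fixed minors in [DYNAMIC_MINORS, MISC_DYNAMIC_MINOR] are not tracked.
	 * Fixed minors above MISC_DYNAMIC_MINOR reserve their own id directly.
	 */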
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23ee76bbb4aa..92cbd24a36d8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -278,7 +278,7 @@ static void crng_reseed(struct work_struct *work)
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
- * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
@@ -290,7 +290,7 @@ static void crng_reseed(struct work_struct *work)
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
- smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -743,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -1665,7 +1665,7 @@ static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
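Switching from devm_kmalloc() to kvmalloc() drops the automatic device-managed cleanup, so the patch re-attaches it with devm_add_action(). A hedged sketch of that pattern; the function and buffer names are illustrative, not from the TPM code:

#include <linux/device.h>
#include <linux/slab.h>		/* kvmalloc()/kvfree() */

static void my_buf_free(void *data)
{
	kvfree(data);
}

/* Hypothetical init helper showing the lifetime tie-up. */
static int my_dev_init(struct device *dev, size_t len)
{
	void *buf = kvmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* On success the action runs automatically at device teardown. */
	ret = devm_add_action(dev, my_buf_free, buf);
	if (ret) {
		kvfree(buf);	/* action was not registered; free by hand */
		return ret;
	}
	return 0;
}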
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index c62b208b42f1..18f92dd44d45 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -883,9 +883,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = kmap_atomic(buf->page);
+ src = kmap_local_page(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- kunmap_atomic(src);
+ kunmap_local(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
@@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
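kmap_atomic() conversions like the one above follow a fixed recipe: kmap_local_page() gives a thread-local mapping without disabling preemption, and kunmap_local() releases it in reverse order. A minimal sketch; the helper and its name are illustrative:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page(void *dst, struct page *page,
			   size_t offset, size_t len)
{
	char *src = kmap_local_page(page);	/* no preemption disable needed */

	memcpy(dst, src + offset, len);
	kunmap_local(src);			/* unmap, most recent first */
}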
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 65d422a588e1..9d178afc73bd 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
vco = vco_pre * f;
}
- delta = abs(target_rate - vco);
+ delta = abs(target_vco_rate - vco);
if (delta < best_delta) {
best_delta = delta;
best_r = r;
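The one-line fix above matters because the loop tracks the candidate closest to the VCO target, not the post-divider target rate. A standalone model of that best-delta scan, with made-up rates:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const long target_vco = 1800000000L;	/* illustrative VCO target */
	const long candidates[] = { 1750000000L, 1799000000L, 1850000000L };
	long best_delta = -1, best = 0;
	unsigned int i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		long delta = labs(target_vco - candidates[i]);

		if (best_delta < 0 || delta < best_delta) {
			best_delta = delta;
			best = candidates[i];
		}
	}
	printf("closest VCO: %ld (delta %ld)\n", best, best_delta);
	return 0;
}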
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 8e3684ba2c74..9128a06b860d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -24,4 +24,5 @@ obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA7D65) += sama7d65.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 15c46489ba85..7a544e429d34 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -20,7 +20,7 @@
#define PMC_MCR_CSS_SHIFT (16)
-#define MASTER_MAX_ID 4
+#define MASTER_MAX_ID 9
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index fda041102224..cefd9948e103 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,7 +23,7 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define PLL_MAX_ID 7
+#define PLL_MAX_ID 9
struct sam9x60_pll_core {
struct regmap *regmap;
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 5aa9c1f1c886..acf780a81589 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -151,6 +151,7 @@ static struct syscore_ops pmc_syscore_ops = {
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
+ { .compatible = "microchip,sama7d65-pmc", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
new file mode 100644
index 000000000000..a5d40df8b2f2
--- /dev/null
+++ b/drivers/clk/at91/sama7d65.c
@@ -0,0 +1,1375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7D65 PMC code.
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <ryan.wanner@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+#define PMC_INDEX_MAX 25
+
+/*
+ * PLL clocks identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_GPU: Graphics subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_USB: USB PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_GPU,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_LVDS,
+ PLL_ID_USB,
+ PLL_ID_MAX
+};
+
+/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+ PLL_COMPID_MAX
+};
+
+/*
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * CPU PLL output range.
+ * Notice: the upper limit has been set to 1000000002 because the hardware
+ * block cannot output exactly 1 GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/*
+ * Min: fCOREPLLCK = 600 MHz, PMC_PLL_CTRL0.DIVPMC = 255
+ * Max: fCOREPLLCK = 800 MHz, PMC_PLL_CTRL0.DIVPMC = 0
+ */
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 16406250, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 480000000, .max = 480000000 },
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+ .core_output = core_outputs,
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+ .core_output = core_outputs,
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+};
+
+/*
+ * SAMA7D65 PLL possible parents
+ * @SAMA7D65_PLL_PARENT_MAINCK: MAINCK is a PLL parent
+ * @SAMA7D65_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7D65_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7d65_pll_parent {
+ SAMA7D65_PLL_PARENT_MAINCK,
+ SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ SAMA7D65_PLL_PARENT_FRACCK
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @l: clock layout
+ * @c: clock characteristics
+ * @hw: pointer to clk_hw
+ * @t: clock type
+ * @f: clock flags
+ * @p: clock parent
+ * @eid: export index in sama7d65->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
+ */
+static struct sama7d65_pll {
+ const char *n;
+ const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
+ unsigned long f;
+ enum sama7d65_pll_parent p;
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+} sama7d65_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * Safe div=15 should be safe even for switching b/w 1GHz and
+ * 90MHz (frac pll might go up to 1.2GHz).
+ */
+ .safe_div = 15,
+ },
+ },
+
+ [PLL_ID_SYS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the systems like timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the systems like timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
+ },
+
+ [PLL_ID_DDR] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+ },
+
+ [PLL_ID_GPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "gpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "gpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
+ },
+
+ [PLL_ID_BAUD] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_BAUDPLL,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
+ },
+
+ [PLL_ID_ETH] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_ETHPLL,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "lvdspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "lvdspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_LVDSPLL,
+ },
+ },
+
+ [PLL_ID_USB] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "usbpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "usbpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ },
+ },
+};
+
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp}
+
+/*
+ * Master clock (MCK[0..9]) description
+ * @n: clock name
+ * @ep: extra parent names array (entries formed from PLL component
+ * identifiers; see enum pll_component_id)
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in parents array that specifies the changeable
+ * parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @eid: export index in sama7d65->chws[] array
+ * @c: true if clock is critical and cannot be disabled
+ */
+static struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 eid;
+ u8 c;
+} sama7d65_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
+ { .n = "mck1",
+ .id = 1,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK1,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK3,
+ .c = 1, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck5",
+ .id = 5,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK5,
+ .c = 1, },
+
+ { .n = "mck6",
+ .id = 6,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1,
+ .c = 1, },
+
+ { .n = "mck7",
+ .id = 7,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck8",
+ .id = 8,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck9",
+ .id = 9,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+};
+
+/*
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7d65_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7d65_prog_mux_table[] = { 0, 1, 2, 5, 7, 8, 9, 10, 12 };
+
+/*
+ * Peripheral clock parent hw identifier (used to index in sama7d65_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MCK5: pck parent hw identifier is MCK5
+ * @PCK_PARENT_HW_MCK6: pck parent hw identifier is MCK6
+ * @PCK_PARENT_HW_MCK7: pck parent hw identifier is MCK7
+ * @PCK_PARENT_HW_MCK8: pck parent hw identifier is MCK8
+ * @PCK_PARENT_HW_MCK9: pck parent hw identifier is MCK9
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7d65_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MCK5,
+ PCK_PARENT_HW_MCK6,
+ PCK_PARENT_HW_MCK7,
+ PCK_PARENT_HW_MCK8,
+ PCK_PARENT_HW_MCK9,
+ PCK_PARENT_HW_MAX
+};
+
+/*
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent hw id
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static struct {
+ const char *n;
+ enum sama7d65_pck_parent_hw_id p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7d65_periphck[] = {
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 10, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 17, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK7, .id = 18, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK5, .id = 20, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 21, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 22, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK7, .id = 24, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK6, .id = 26, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK8, .id = 27, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK9, .id = 29, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 30, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK7, .id = 33, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK7, .id = 34, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK7, .id = 35, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK7, .id = 36, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK7, .id = 37, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK8, .id = 38, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK8, .id = 39, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK8, .id = 40, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK8, .id = 41, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK9, .id = 42, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK9, .id = 43, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK9, .id = 44, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 46, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 47, },
+ { .n = "gmac0_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac1_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 50, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK5, .id = 53, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 54, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 55, .r = { .max = 200000000, }, },
+ { .n = "lcd_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK5, .id = 57, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK5, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK5, .id = 59, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK5, .id = 60, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK5, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK5, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK7, .id = 66, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK7, .id = 67, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK7, .id = 68, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK8, .id = 69, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK8, .id = 70, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK8, .id = 71, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK7, .id = 72, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK5, .id = 73, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK5, .id = 74, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 76, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK6, .id = 78, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK9, .id = 79, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK9, .id = 80, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK7, .id = 81, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK8, .id = 82, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK8, .id = 83, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK8, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK8, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK5, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK5, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK5, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK5, .id = 89, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK5, .id = 90, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK6, .id = 91, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK6, .id = 92, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK5, .id = 99, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK5, .id = 100, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK5, .id = 101, },
+ { .n = "dsi_clk", .p = PCK_PARENT_HW_MCK3, .id = 103, },
+ { .n = "lvdsc_clk", .p = PCK_PARENT_HW_MCK3, .id = 104, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents (entry formed by PLL components identifiers
+ * (see enum pll_component_id))
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7d65_gck[] = {
+ { .n = "adc_gclk",
+ .id = 25,
+ .r = { .max = 100000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 8, 9, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 29,
+ .r = { .max = 200000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 34,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 35,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 36,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 37,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 38,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 39,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 40,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 41,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 42,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 43,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 44,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 46,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 47,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 49,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 50,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 54,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 55,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "lcdc_gclk",
+ .id = 56,
+ .r = { .max = 90000000 },
+ .pp_count = 0,
+ .pp_chg_id = INT_MIN,
+ },
+
+ { .n = "mcan0_gclk",
+ .id = 58,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan1_gclk",
+ .id = 59,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan2_gclk",
+ .id = 60,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan3_gclk",
+ .id = 61,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan4_gclk",
+ .id = 62,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "pdmc0_gclk",
+ .id = 64,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9 },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 65,
+ .r = { .max = 80000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 66,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 67,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 68,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 69,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 70,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 71,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 73,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 74,
+ .r = { .max = 266000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 75,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 76,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 77,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10 },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "spdifrx_gclk",
+ .id = 79,
+ .r = { .max = 150000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "spdiftx_gclk",
+ .id = 80,
+ .r = { .max = 25000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 83,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 86,
+ .r = { .max = 67000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "DSI_gclk",
+ .id = 103,
+ .r = {.max = 27000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .pp_mux_table = {5},
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "I3CC_gclk",
+ .id = 105,
+ .r = {.max = 125000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 32768, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x773,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7d65_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7d65_pmc_setup(struct device_node *np)
+{
+ const char *main_xtal_name = "main_xtal";
+ struct pmc_data *sama7d65_pmc;
+ const char *parent_names[11];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
+ bool bypass;
+ int i, j;
+
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
+
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
+ return;
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7d65_pmc = pmc_data_allocate(PMC_INDEX_MAX,
+ nck(sama7d65_systemck),
+ nck(sama7d65_periphck),
+ nck(sama7d65_gck), 8);
+ if (!sama7d65_pmc)
+ return;
+
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7d65_mckx) + ARRAY_SIZE(sama7d65_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
+ goto err_free;
+
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7d65_plls[i][j].n)
+ continue;
+
+ switch (sama7d65_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ switch (sama7d65_plls[i][j].p) {
+ case SAMA7D65_PLL_PARENT_MAINCK:
+ parent_hw = sama7d65_pmc->chws[PMC_MAIN];
+ break;
+ case SAMA7D65_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, parent_hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, sama7d65_plls[i][0].hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f,
+ sama7d65_plls[i][j].safe_div);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_plls[i][j].hw = hw;
+ if (sama7d65_plls[i][j].eid)
+ sama7d65_pmc->chws[sama7d65_plls[i][j].eid] = hw;
+ }
+ }
+
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7d65_plls[PLL_ID_CPU][1].hw,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MCK] = hw;
+ sama7d65_mckx[PCK_PARENT_HW_MCK0].hw = hw;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7d65_mckx); i++) {
+ u8 num_parents = 3 + sama7d65_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7d65_mckx[i].ep_mux_table,
+ sama7d65_mckx[i].ep_count);
+ for (j = 0; j < sama7d65_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7d65_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7d65_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7d65_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7d65_mckx[i].n,
+ num_parents, NULL, parent_hws,
+ mux_table, &pmc_mckX_lock,
+ sama7d65_mckx[i].id,
+ sama7d65_mckx[i].c,
+ sama7d65_mckx[i].ep_chg_id);
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_mckx[i].hw = hw;
+ if (sama7d65_mckx[i].eid)
+ sama7d65_pmc->chws[sama7d65_mckx[i].eid] = hw;
+ }
+
+ parent_names[0] = "syspll_divpmcck";
+ parent_names[1] = "usbpll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7d65_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7d65_plls[PLL_ID_GPU][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7d65_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7d65_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7d65_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
+
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
+ 9, i,
+ &programmable_layout,
+ sama7d65_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7d65_systemck[i].n,
+ sama7d65_systemck[i].p, NULL,
+ sama7d65_systemck[i].id, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->shws[sama7d65_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_periphck[i].n,
+ NULL,
+ sama7d65_mckx[sama7d65_periphck[i].p].hw,
+ sama7d65_periphck[i].id,
+ &sama7d65_periphck[i].r,
+ sama7d65_periphck[i].chgp ? 0 :
+ INT_MIN, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->phws[sama7d65_periphck[i].id] = hw;
+ }
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_pmc->chws[PMC_MCK1];
+ for (i = 0; i < ARRAY_SIZE(sama7d65_gck); i++) {
+ u8 num_parents = 4 + sama7d65_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sama7d65_gck[i].pp_mux_table,
+ sama7d65_gck[i].pp_count);
+ for (j = 0; j < sama7d65_gck[i].pp_count; j++) {
+ u8 pll_id = sama7d65_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7d65_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[4], tmp_parent_hws,
+ sama7d65_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_gck[i].n, NULL,
+ parent_hws, mux_table,
+ num_parents,
+ sama7d65_gck[i].id,
+ &sama7d65_gck[i].r,
+ sama7d65_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->ghws[sama7d65_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7d65_pmc);
+ kfree(alloc_mem);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ kfree(sama7d65_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sama7d65_pmc, "microchip,sama7d65-pmc", sama7d65_pmc_setup);
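The frac/div split above means each PLL's rate comes from a fractional core (22-bit FRAC field, per pll_layout_frac) feeding one or two dividers. Assuming the usual sam9x60 relation FVCO = FREF x (MUL + 1 + FRAC / 2^22), a quick standalone check that a 24 MHz crystal with MUL = 49 lands on the 1.2 GHz top of the core_outputs range declared above; the formula is an assumption carried over from the sam9x60 PLL driver, not stated in this file:

#include <stdint.h>
#include <stdio.h>

static uint64_t frac_pll_rate(uint64_t fref, uint32_t mul, uint32_t frac)
{
	/* The driver rounds the fractional term to closest; floor is
	 * close enough for a sanity check. */
	return fref * (mul + 1) + ((fref * frac) >> 22);
}

int main(void)
{
	/* 24 MHz crystal, MUL = 49, FRAC = 0 -> 1.2 GHz core clock. */
	printf("%llu Hz\n",
	       (unsigned long long)frac_pll_rate(24000000, 49, 0));
	return 0;
}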
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 7741d8f3dbee..021d1b412af4 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/io.h>
+#include <dt-bindings/clock/at91.h>
+
#define SLOW_CLOCK_FREQ 32768
#define SLOWCK_SW_CYCLES 5
#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
@@ -470,7 +472,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
{
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw_onecell_data *clk_data;
- struct clk_hw *slow_rc, *slow_osc;
+ struct clk_hw *slow_rc, *slow_osc, *hw;
const char *xtal_name;
const struct clk_hw *parent_hws[2];
static struct clk_parent_data parent_data = {
@@ -506,19 +508,19 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
- slow_rc,
- 0, 32768);
- if (IS_ERR(clk_data->hws[0]))
+ hw = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck", slow_rc,
+ 0, 32768);
+ if (IS_ERR(hw))
goto clk_data_free;
+ clk_data->hws[SCKC_MD_SLCK] = hw;
parent_hws[0] = slow_rc;
parent_hws[1] = slow_osc;
- clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_hws, 2,
- &at91sam9x60_bits);
- if (IS_ERR(clk_data->hws[1]))
+ hw = at91_clk_register_sam9x5_slow(regbase, "td_slck", parent_hws,
+ 2, &at91sam9x60_bits);
+ if (IS_ERR(hw))
goto unregister_md_slck;
+ clk_data->hws[SCKC_TD_SLCK] = hw;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
@@ -527,9 +529,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
return;
unregister_td_slck:
- at91_clk_unregister_sam9x5_slow(clk_data->hws[1]);
+ at91_clk_unregister_sam9x5_slow(clk_data->hws[SCKC_TD_SLCK]);
unregister_md_slck:
- clk_hw_unregister(clk_data->hws[0]);
+ clk_hw_unregister(clk_data->hws[SCKC_MD_SLCK]);
clk_data_free:
kfree(clk_data);
unregister_slow_osc:
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index ec5749e301ba..2b0ea882f1e4 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/string_choices.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -502,7 +503,7 @@ static int clk_gate(struct ccu_data *ccu, const char *name,
return 0;
pr_err("%s: failed to %s gate for %s\n", __func__,
- enable ? "enable" : "disable", name);
+ str_enable_disable(enable), name);
return -EIO;
}
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index a18a8768feb4..0e1fe3759530 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -34,6 +34,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
[RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+ [RPI_FIRMWARE_DISP_CLK_ID] = "disp",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -56,6 +57,12 @@ struct raspberrypi_clk_data {
struct raspberrypi_clk *rpi;
};
+static inline
+const struct raspberrypi_clk_data *clk_hw_to_data(const struct clk_hw *hw)
+{
+ return container_of(hw, struct raspberrypi_clk_data, hw);
+}
+
struct raspberrypi_clk_variant {
bool export;
char *clkdev;
@@ -111,18 +118,31 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_ISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_DISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
};
@@ -153,7 +173,6 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
struct raspberrypi_firmware_prop msg = {
.id = cpu_to_le32(data->id),
.val = cpu_to_le32(*val),
- .disable_turbo = cpu_to_le32(1),
};
int ret;
@@ -168,8 +187,7 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -186,8 +204,7 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -203,8 +220,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 _rate = rate;
int ret;
@@ -221,8 +237,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk_variant *variant = data->variant;
/*
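clk_hw_to_data() above is the standard container_of() downcast, factored out so every clk_ops callback performs it identically and const-correctly. A generic sketch of the same helper shape; the struct and names are illustrative:

#include <linux/clk-provider.h>
#include <linux/container_of.h>

struct my_clk_data {
	struct clk_hw hw;
	unsigned int id;
};

static inline
const struct my_clk_data *clk_hw_to_my_data(const struct clk_hw *hw)
{
	/* Plain container_of(); constness is preserved by returning a
	 * const pointer to the enclosing structure. */
	return container_of(hw, struct my_clk_data, hw);
}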
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 495c0d607c7d..15bbdeb60b8e 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -75,6 +75,7 @@ struct en_rst_data {
};
struct en_clk_soc_data {
+ u32 num_clocks;
const struct clk_ops pcie_ops;
int (*hw_init)(struct platform_device *pdev,
struct clk_hw_onecell_data *clk_data);
@@ -90,6 +91,7 @@ static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 }
static const u32 bus7581_base[] = { 600000000, 540000000 };
static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
static const u32 crypto_base[] = { 540000000, 480000000 };
+static const u32 emmc7581_base[] = { 200000000, 150000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
@@ -280,6 +282,15 @@ static const struct en_clk_desc en7581_base_clks[] = {
.base_shift = 0,
.base_values = crypto_base,
.n_base_values = ARRAY_SIZE(crypto_base),
+ }, {
+ .id = EN7581_CLK_EMMC,
+ .name = "emmc",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 12,
+ .base_values = emmc7581_base,
+ .n_base_values = ARRAY_SIZE(emmc7581_base),
}
};
@@ -478,7 +489,6 @@ static int en7581_pci_enable(struct clk_hw *hw)
REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val | mask, np_base + REG_PCI_CONTROL);
- msleep(250);
return 0;
}
@@ -504,8 +514,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
u32 rate;
int i;
- clk_data->num = EN7523_NUM_CLOCKS;
-
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
@@ -587,8 +595,6 @@ static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_dat
hw = en7523_register_pcie_clk(dev, base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_reset_update(struct reset_controller_dev *rcdev,
@@ -702,13 +708,15 @@ static int en7523_clk_probe(struct platform_device *pdev)
struct clk_hw_onecell_data *clk_data;
int r;
+ soc_data = device_get_match_data(&pdev->dev);
+
clk_data = devm_kzalloc(&pdev->dev,
- struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ struct_size(clk_data, hws, soc_data->num_clocks),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- soc_data = device_get_match_data(&pdev->dev);
+ clk_data->num = soc_data->num_clocks;
r = soc_data->hw_init(pdev, clk_data);
if (r)
return r;
@@ -717,6 +725,7 @@ static int en7523_clk_probe(struct platform_device *pdev)
}
static const struct en_clk_soc_data en7523_data = {
+ .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
@@ -726,6 +735,8 @@ static const struct en_clk_soc_data en7523_data = {
};
static const struct en_clk_soc_data en7581_data = {
+ /* We increment num_clocks by 1 to account for the additional PCIe clock */
+ .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7581_pci_is_enabled,
.enable = en7581_pci_enable,
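The reordering above exists because the allocation size now comes from the match data, so device_get_match_data() must run before struct_size(). A hedged sketch of the resulting probe shape; struct and function names are illustrative:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_soc_data {
	u32 num_clocks;
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *soc = device_get_match_data(&pdev->dev);
	struct clk_hw_onecell_data *clk_data;

	if (!soc)
		return -ENODEV;

	/* Size follows the SoC, not a global worst-case constant. */
	clk_data = devm_kzalloc(&pdev->dev,
				struct_size(clk_data, hws, soc->num_clocks),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = soc->num_clocks;
	return 0;
}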
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index f888aed79b11..4bd8d6ecf6a2 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -586,9 +586,9 @@ static unsigned long calc_pll_rate(u64 rate, u32 config_word)
static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
{
- const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
- const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
- const char pclk_divisors[] = { 1, 2, 4, 8 };
+ static const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ static const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ static const char pclk_divisors[] = { 1, 2, 4, 8 };
struct clk_parent_data xtali = { .index = 0 };
unsigned int clk_f_div, clk_h_div, clk_p_div;
unsigned long clk_pll1_rate, clk_pll2_rate;
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c997e7491996..2bcf422f0b04 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -375,7 +375,7 @@ static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
- const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
+ static const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
@@ -637,7 +637,7 @@ static int lmk04832_register_vco(struct lmk04832 *lmk)
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
- const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
+ static const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 7082b4309c6f..27e632edd484 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -294,7 +294,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return -EINVAL;
for (p = data; p->name; p++)
- clks_num++;
+ clks_num = max(clks_num, p->id + 1);
clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
GFP_KERNEL);
@@ -309,6 +309,9 @@ static int loongson2_clk_probe(struct platform_device *pdev)
clp->clk_data.num = clks_num;
clp->dev = dev;
+ /* Avoid returning NULL for unused id */
+ memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
+
for (i = 0; i < clks_num; i++) {
p = &data[i];
switch (p->type) {
@@ -335,8 +338,8 @@ static int loongson2_clk_probe(struct platform_device *pdev)
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
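Both hunks above deal with a sparse clock-id space: the hw array must be sized by the highest id rather than the entry count, and the holes pre-filled with ERR_PTR(-ENOENT) so lookups on unused ids fail cleanly instead of returning NULL. A sketch with an illustrative table:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/minmax.h>
#include <linux/string.h>

struct my_clk_desc {
	const char *name;
	unsigned int id;
};

/* ids 1 and 3..6 are deliberately unused: the id space is sparse. */
static const struct my_clk_desc my_descs[] = {
	{ "core", 0 }, { "bus", 2 }, { "usb", 7 },
	{ /* sentinel */ }
};

static unsigned int my_clks_num(void)
{
	const struct my_clk_desc *p;
	unsigned int num = 0;

	for (p = my_descs; p->name; p++)
		num = max(num, p->id + 1);	/* 8 slots, not 3 */
	return num;
}

/*
 * After allocating clk_data with my_clks_num() slots:
 *	memset_p((void **)clk_data->hws, ERR_PTR(-ENOENT), my_clks_num());
 * so a DT reference to an unused id gets -ENOENT instead of NULL.
 */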
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 06245681dac7..fc0aeb4247f2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/reboot.h>
/*
@@ -116,9 +117,9 @@ static void __init nomadik_src_init(void)
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ str_disabled_enabled(val & SRC_XTALCR_SXTALDIS));
pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 07c13ebe327d..f476883bc93b 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -5,6 +5,7 @@
* Inspired by clk-asm9260.c .
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -34,11 +35,20 @@
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
+#define STM32F4_RCC_SSCGR 0x80
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
+#define STM32F4_RCC_PLLCFGR_N_MASK GENMASK(14, 6)
+
+#define STM32F4_RCC_SSCGR_SSCGEN BIT(31)
+#define STM32F4_RCC_SSCGR_SPREADSEL BIT(30)
+#define STM32F4_RCC_SSCGR_RESERVED_MASK GENMASK(29, 28)
+#define STM32F4_RCC_SSCGR_INCSTEP_MASK GENMASK(27, 13)
+#define STM32F4_RCC_SSCGR_MODPER_MASK GENMASK(12, 0)
+
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
@@ -364,6 +374,16 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
+enum stm32f4_pll_ssc_mod_type {
+ STM32F4_PLL_SSC_CENTER_SPREAD,
+ STM32F4_PLL_SSC_DOWN_SPREAD,
+};
+
+static const char * const stm32f4_ssc_mod_methods[] __initconst = {
+ [STM32F4_PLL_SSC_DOWN_SPREAD] = "down-spread",
+ [STM32F4_PLL_SSC_CENTER_SPREAD] = "center-spread",
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -509,6 +529,12 @@ static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
+struct stm32f4_pll_ssc {
+ unsigned int mod_freq;
+ unsigned int mod_depth;
+ enum stm32f4_pll_ssc_mod_type mod_type;
+};
+
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
@@ -516,6 +542,8 @@ struct stm32f4_pll {
u8 bit_rdy_idx;
u8 status;
u8 n_start;
+ bool ssc_enable;
+ struct stm32f4_pll_ssc ssc_conf;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
@@ -538,6 +566,7 @@ struct stm32f4_vco_data {
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
+ bool sscg;
};
static const struct stm32f4_vco_data vco_data[] = {
@@ -632,9 +661,11 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ unsigned long val;
unsigned long n;
- n = (readl(base + pll->offset) >> 6) & 0x1ff;
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
return parent_rate * n;
}
@@ -656,6 +687,32 @@ static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return *prate * n;
}
+static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int ndiv)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct stm32f4_pll_ssc *ssc = &pll->ssc_conf;
+ u32 modeper, incstep;
+ u32 sscgr;
+
+ sscgr = readl(base + STM32F4_RCC_SSCGR);
+ /* reserved field must be kept at reset value */
+ sscgr &= STM32F4_RCC_SSCGR_RESERVED_MASK;
+
+ modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * ssc->mod_freq);
+ incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * ssc->mod_depth * ndiv,
+ 5 * 10000 * modeper);
+ sscgr |= STM32F4_RCC_SSCGR_SSCGEN |
+ FIELD_PREP(STM32F4_RCC_SSCGR_INCSTEP_MASK, incstep) |
+ FIELD_PREP(STM32F4_RCC_SSCGR_MODPER_MASK, modeper);
+
+ if (ssc->mod_type)
+ sscgr |= STM32F4_RCC_SSCGR_SPREADSEL;
+
+ writel(sscgr, base + STM32F4_RCC_SSCGR);
+}
+
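For readers checking the MODPER/INCSTEP arithmetic above, here is a standalone sketch that mirrors the driver's formula with assumed example values (1 MHz VCO input, 10 kHz modulation frequency, 2% depth expressed as 200 permyriad, N = 336); illustration only, not driver code:

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		unsigned long parent_rate = 1000000;	/* vco_in in Hz (assumed) */
		unsigned long mod_freq = 10000;		/* st,ssc-modfreq-hz */
		unsigned long mod_depth = 200;		/* permyriad, i.e. 2% */
		unsigned long ndiv = 336;		/* PLL N divider */
		unsigned long modeper, incstep;

		modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * mod_freq);
		incstep = DIV_ROUND_CLOSEST(((1UL << 15) - 1) * mod_depth * ndiv,
					    5 * 10000 * modeper);
		printf("modeper=%lu incstep=%lu\n", modeper, incstep);	/* 25, 176 */
		return 0;
	}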
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -673,9 +730,13 @@ static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
n = rate / parent_rate;
- val = readl(base + pll->offset) & ~(0x1ff << 6);
+ val = readl(base + pll->offset) & ~STM32F4_RCC_PLLCFGR_N_MASK;
+ val |= FIELD_PREP(STM32F4_RCC_PLLCFGR_N_MASK, n);
+
+ writel(val, base + pll->offset);
- writel(val | ((n & 0x1ff) << 6), base + pll->offset);
+ if (pll->ssc_enable)
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
if (pll_state)
stm32f4_pll_enable(hw);
@@ -782,6 +843,84 @@ static struct clk_hw *clk_register_pll_div(const char *name,
return hw;
}
+static int __init stm32f4_pll_init_ssc(struct clk_hw *hw,
+ const struct stm32f4_pll_ssc *conf)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+ int pll_state;
+ unsigned long n, val;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("%s: failed to get clock parent\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ pll->ssc_enable = true;
+ memcpy(&pll->ssc_conf, conf, sizeof(pll->ssc_conf));
+
+ pll_state = stm32f4_pll_is_enabled(hw);
+
+ if (pll_state)
+ stm32f4_pll_disable(hw);
+
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
+
+ pr_debug("%s: pll: %s, parent: %s, parent-rate: %lu, n: %lu\n",
+ __func__, clk_hw_get_name(hw), clk_hw_get_name(parent),
+ parent_rate, n);
+
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
+
+ if (pll_state)
+ stm32f4_pll_enable(hw);
+
+ return 0;
+}
+
+static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
+ struct stm32f4_pll_ssc *conf)
+{
+	int ret;
+
+ if (!conf)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "st,ssc-modfreq-hz", &conf->mod_freq);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "st,ssc-moddepth-permyriad",
+ &conf->mod_depth);
+ if (ret) {
+ pr_err("%pOF: missing st,ssc-moddepth-permyriad\n", np);
+ return ret;
+ }
+
+ ret = fwnode_property_match_property_string(of_fwnode_handle(np),
+ "st,ssc-modmethod",
+ stm32f4_ssc_mod_methods,
+ ARRAY_SIZE(stm32f4_ssc_mod_methods));
+ if (ret < 0) {
+ pr_err("%pOF: failed to get st,ssc-modmethod\n", np);
+ return ret;
+ }
+
+ conf->mod_type = ret;
+
+	pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d, mod_method: %s [%d]\n",
+		 np, conf->mod_freq, conf->mod_depth,
+		 stm32f4_ssc_mod_methods[conf->mod_type], conf->mod_type);
+
+ return 0;
+}
+
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
@@ -1689,7 +1828,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
- struct clk_hw *pll_src_hw;
+ struct clk_hw *pll_src_hw, *pll_vco_hw;
+ struct stm32f4_pll_ssc ssc_conf;
base = of_iomap(np, 0);
if (!base) {
@@ -1748,8 +1888,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
- stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
- &stm32f4_clk_lock);
+ pll_vco_hw = stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
+ &stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
@@ -1894,6 +2034,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ if (!stm32f4_pll_ssc_parse_dt(np, &ssc_conf))
+ stm32f4_pll_init_ssc(pll_vco_hw, &ssc_conf);
+
return;
fail:
kfree(clks);
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 76d7ea1964c3..9fe27dace111 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -78,9 +78,6 @@
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
-#define VC3_PLL2_VCO_MIN 400000000UL
-#define VC3_PLL2_VCO_MAX 1200000000UL
-
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
@@ -147,9 +144,13 @@ struct vc3_pfd_data {
u8 mdiv2_bitmsk;
};
+struct vc3_vco {
+ unsigned long min;
+ unsigned long max;
+};
+
struct vc3_pll_data {
- unsigned long vco_min;
- unsigned long vco_max;
+ struct vc3_vco vco;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
@@ -166,12 +167,17 @@ struct vc3_div_data {
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
- const void *data;
+ void *data;
u32 div_int;
u32 div_frc;
};
+struct vc3_hw_cfg {
+ struct vc3_vco pll2_vco;
+ u32 se2_clk_sel_msk;
+};
+
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
@@ -386,10 +392,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco_min)
- rate = pll->vco_min;
- if (rate > pll->vco_max)
- rate = pll->vco_max;
+ if (rate < pll->vco.min)
+ rate = pll->vco.min;
+ if (rate > pll->vco.max)
+ rate = pll->vco.max;
vc3->div_int = rate / *parent_rate;
@@ -680,8 +686,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
- .vco_min = VC3_PLL1_VCO_MIN,
- .vco_max = VC3_PLL1_VCO_MAX
+ .vco = {
+ .min = VC3_PLL1_VCO_MIN,
+ .max = VC3_PLL1_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll1",
@@ -698,8 +706,6 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
- .vco_min = VC3_PLL2_VCO_MIN,
- .vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data) {
.name = "pll2",
@@ -716,8 +722,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
- .vco_min = VC3_PLL3_VCO_MIN,
- .vco_max = VC3_PLL3_VCO_MAX
+ .vco = {
+ .min = VC3_PLL3_VCO_MIN,
+ .max = VC3_PLL3_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll3",
@@ -901,7 +909,6 @@ static struct vc3_hw_data clk_mux[] = {
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data) {
.name = "se2_mux",
@@ -982,6 +989,7 @@ static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
+ const struct vc3_hw_cfg *data;
struct regmap *regmap;
const char *name;
int ret, i;
@@ -1029,9 +1037,16 @@ static int vc3_probe(struct i2c_client *client)
clk_pfd[i].hw.init->name);
}
+ data = i2c_get_match_data(client);
+
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
+ if (i == VC3_PLL2) {
+ struct vc3_pll_data *pll_data = clk_pll[i].data;
+
+ pll_data->vco = data->pll2_vco;
+ }
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1059,6 +1074,11 @@ static int vc3_probe(struct i2c_client *client)
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
+ if (i == VC3_SE2_MUX) {
+ struct vc3_clk_data *clk_data = clk_mux[i].data;
+
+ clk_data->bitmsk = data->se2_clk_sel_msk;
+ }
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1108,8 +1128,19 @@ static int vc3_probe(struct i2c_client *client)
return ret;
}
+static const struct vc3_hw_cfg vc3_5p = {
+ .pll2_vco = { .min = 400000000UL, .max = 1200000000UL },
+ .se2_clk_sel_msk = BIT(6),
+};
+
+static const struct vc3_hw_cfg vc3_5l = {
+ .pll2_vco = { .min = 30000000UL, .max = 130000000UL },
+ .se2_clk_sel_msk = BIT(0),
+};
+
static const struct of_device_id dev_ids[] = {
- { .compatible = "renesas,5p35023" },
+ { .compatible = "renesas,5p35023", .data = &vc3_5p },
+ { .compatible = "renesas,5l35023", .data = &vc3_5l },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 0c3d0cee98c8..96946a8e2854 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
@@ -520,8 +521,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
- data & pclk->param.reg_clk_mask ? "enabled" :
- "disabled");
+ str_enabled_disabled(data & pclk->param.reg_clk_mask));
} else {
return 1;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9b45fa005030..cf7720b9172f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -5385,8 +5385,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (of_property_present(clkspec.np, "clock-indices") && !found)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
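The added of_node_put() above plugs a node refcount leak on the early-return path: of_parse_phandle_with_args() takes a reference on clkspec.np that every exit must drop. A minimal sketch of the rule, using a hypothetical helper (illustration only):

	#include <linux/of.h>

	static const char *example_get_name(const struct device_node *np, int index)
	{
		struct of_phandle_args clkspec;
		const char *name = NULL;

		if (of_parse_phandle_with_args(np, "clocks", "#clock-cells",
					       index, &clkspec))
			return NULL;

		/* From here on, this function owns a reference on clkspec.np. */
		of_property_read_string_index(clkspec.np, "clock-output-names",
					      index, &name);
		of_node_put(clkspec.np);	/* dropped on every exit path */
		return name;
	}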
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 5bbbb3a66477..82727b1fc67a 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/platform_data/clk-davinci-pll.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -840,27 +839,6 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return 0;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
/* needed in early boot for clocksource/clockevent */
#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
@@ -890,8 +868,8 @@ typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct davinci_pll_platform_data *pdata;
davinci_pll_init pll_init = NULL;
+ struct regmap *cfgchip;
void __iomem *base;
pll_init = device_get_match_data(dev);
@@ -903,17 +881,13 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base, pdata->cfgchip);
+ return pll_init(dev, base, cfgchip);
}
static struct platform_driver davinci_pll_driver = {
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 516dbd170c8a..fb18f507f121 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -399,8 +399,9 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
"dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
- "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
- "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+ "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
+ "sys_pll3_out", "dummy", "dummy", "osc_24m",
+ "dummy", "osc_32k"};
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index 58a516dd385b..c5f358a75f30 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -15,7 +15,7 @@
#include "clk.h"
-#define IMX93_CLK_END 207
+#define IMX93_CLK_END 208
#define PLAT_IMX93 BIT(0)
#define PLAT_IMX91 BIT(1)
@@ -38,6 +38,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
static u32 share_count_pdm;
+static u32 share_count_spdif;
static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
@@ -70,8 +71,8 @@ static const struct imx93_clk_root {
{ IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
- { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_LPTMR2, "lptmr2_root", 0x0780, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_TPM2, "tpm2_root", 0x0880, TPM_SEL, },
@@ -177,10 +178,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
- { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
@@ -188,8 +189,8 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
{ IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
{ IMX93_CLK_GPIO4_GATE, "gpio4", "bus_wakeup_root", 0x8940, },
- { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, },
- { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, },
+ { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, 0, NULL, PLAT_IMX93},
+ { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, 0, NULL, PLAT_IMX93},
{ IMX93_CLK_LPIT1_GATE, "lpit1", "bus_aon_root", 0x8a00, },
{ IMX93_CLK_LPIT2_GATE, "lpit2", "bus_wakeup_root", 0x8a40, },
{ IMX93_CLK_LPTMR1_GATE, "lptmr1", "lptmr1_root", 0x8a80, },
@@ -238,10 +239,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
- { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
- { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
+ { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, 0, NULL, PLAT_IMX93 },
+ { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
- { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
+ { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
{ IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
@@ -252,12 +253,13 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_MQS1_GATE, "mqs1", "sai1_root", 0x9b00, },
{ IMX93_CLK_MQS2_GATE, "mqs2", "sai3_root", 0x9b40, },
{ IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
- { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_SPDIF_IPG, "spdif_ipg_clk", "bus_wakeup_root", 0x9c00, 0, &share_count_spdif},
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, 0, &share_count_spdif},
{ IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
{ IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, },
{ IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, },
- { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
- { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
/* Critical because clk accessed during CPU idle */
{ IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL},
{ IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index d63564dbb12c..f290981ea13b 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -56,7 +56,9 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
PLL_1416X_RATE(700000000U, 350, 3, 2),
PLL_1416X_RATE(640000000U, 320, 3, 2),
PLL_1416X_RATE(600000000U, 300, 3, 2),
+ PLL_1416X_RATE(416000000U, 208, 3, 2),
PLL_1416X_RATE(320000000U, 160, 3, 2),
+ PLL_1416X_RATE(208000000U, 208, 3, 3),
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 425c69cfb105..e103121cf58e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
+	GATE_DUMMY(CLK_AUD_I2S, "audio_i2s_dummy"),
+ GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
/* AUDIO1 */
GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
/* AUDIO3 */
+ GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
+ GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 5da3eabffd3e..f11c7a4fa37b 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 875594bc9dcb..c158e54c4652 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index bc68fa718878..474d87d62e83 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 94db86f8d0a4..5299d92f3aba 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c
index b93d08466198..3fd7b5e644f3 100644
--- a/drivers/clk/mmp/clk-pxa1908-apbc.c
+++ b/drivers/clk/mmp/clk-pxa1908-apbc.c
@@ -96,8 +96,8 @@ static int pxa1908_apbc_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c
index 08f3845cbb1b..f638d7e89b47 100644
--- a/drivers/clk/mmp/clk-pxa1908-apbcp.c
+++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c
@@ -48,8 +48,8 @@ static int pxa1908_apbcp_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c
index e3337bacaadd..90b4b2488574 100644
--- a/drivers/clk/mmp/clk-pxa1908-mpmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c
@@ -78,8 +78,8 @@ static int pxa1908_mpmu_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
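The three hunks above fix the same bug class: devm_kzalloc() reports failure with NULL, not with an ERR_PTR() value, so the old IS_ERR() check could never fire and a failed allocation would be dereferenced later. A sketch of the corrected pattern (hypothetical probe excerpt, for illustration):

	static int example_probe(struct platform_device *pdev)
	{
		struct pxa1908_clk_unit *pxa_unit;

		pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
		if (!pxa_unit)			/* NULL on failure, never ERR_PTR */
			return -ENOMEM;

		pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(pxa_unit->base))	/* ioremap helpers use ERR_PTR */
			return PTR_ERR(pxa_unit->base);

		return 0;
	}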
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
index edaa2433a472..eaf5d2c5e593 100644
--- a/drivers/clk/mmp/pwr-island.c
+++ b/drivers/clk/mmp/pwr-island.c
@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
pm_domain->flags = flags;
pm_domain->lock = lock;
- pm_genpd_init(&pm_domain->genpd, NULL, true);
pm_domain->genpd.name = name;
pm_domain->genpd.power_on = mmp_pm_domain_power_on;
pm_domain->genpd.power_off = mmp_pm_domain_power_off;
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
return &pm_domain->genpd;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ef89d686cbc4..69bbf62ba3cd 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -64,6 +64,15 @@ config CLK_X1E80100_TCSRCC
Support for the TCSR clock controller on X1E80100 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config CLK_X1P42100_GPUCC
+ tristate "X1P42100 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the graphics clock controller on X1P42100 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config CLK_QCM2290_GPUCC
tristate "QCM2290 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -190,6 +199,15 @@ config IPQ_APSS_6018
Say Y if you want to support CPU frequency scaling on
ipq based devices.
+config IPQ_CMN_PLL
+ tristate "IPQ CMN PLL Clock Controller"
+ help
+ Support for CMN PLL clock controller on IPQ platform. The
+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
+ the output clocks to the networking hardware and GCC blocks.
+ Say Y or M if you want to support CMN PLL clock on the IPQ
+ based devices.
+
config IPQ_GCC_4019
tristate "IPQ4019 Global Clock Controller"
help
@@ -495,6 +513,15 @@ config QCS_GCC_8300
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config QCS_GCC_615
+ tristate "QCS615 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCS615 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1022,6 +1049,17 @@ config SM_DISPCC_8550
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8750
+ tristate "SM8750 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_8750
+ select QCOM_GDSC
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8750 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_4450
tristate "SM4450 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1079,6 +1117,7 @@ config SM_GCC_7150
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM8150 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1130,6 +1169,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8750
+ tristate "SM8750 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_4450
tristate "SM4450 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,6 +1278,15 @@ config SM_GPUCC_8650
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_LPASSCC_6115
+ tristate "SM6115 Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_6115
+ help
+ Support for the LPASS clock controller on SM6115 devices.
+ Say Y if you want to toggle LPASS-adjacent resets within
+ this clock controller to reset the LPASS subsystem.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1246,6 +1303,14 @@ config SM_TCSRCC_8650
Support for the TCSR clock controller on SM8650 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config SM_TCSRCC_8750
+ tristate "SM8750 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UFS/USB/PCIe.
+
config SA_VIDEOCC_8775P
tristate "SA8775P Video Clock Controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index b09dbdc210eb..0db2f98bcb3e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -26,9 +26,11 @@ obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GPUCC) += gpucc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
+obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
@@ -131,6 +134,7 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
+obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -143,6 +147,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -154,8 +159,10 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
+obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index e8632db2c542..d6c1aea7e9e1 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -73,20 +73,19 @@ static const struct alpha_pll_config ipq5018_pll_config = {
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
.test_ctl_hi_val = 0x00400003,
};
+/* 1.080 GHz configuration */
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index 85e76c7712ad..b73524ae64b1 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -2212,6 +2212,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -2221,6 +2223,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2233,6 +2236,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2245,6 +2249,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2257,6 +2262,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2269,6 +2275,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index b8351f8c0b84..9a65d14acf71 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -58,6 +58,7 @@
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_TEST_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U2])
+#define PLL_TEST_CTL_U3(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U3])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -197,6 +198,37 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U1] = 0x34,
[PLL_OFF_TEST_CTL_U2] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_PONGO_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U2] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ [PLL_OFF_TEST_CTL_U2] = 0x38,
+ [PLL_OFF_TEST_CTL_U3] = 0x3c,
+ },
+ [CLK_ALPHA_PLL_TYPE_TAYCAN_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ },
[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
[PLL_OFF_OPMODE] = 0x04,
[PLL_OFF_STATUS] = 0x0c,
@@ -323,6 +355,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
#define LUCID_OLE_PLL_RINGOSC_CAL_L_VAL_SHIFT 24
+/* PONGO ELU PLL specific setting and offsets */
+#define PONGO_PLL_OUT_MASK GENMASK(1, 0)
+#define PONGO_PLL_L_VAL_MASK GENMASK(11, 0)
+#define PONGO_XO_PRESENT BIT(10)
+#define PONGO_CLOCK_SELECT BIT(12)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -352,7 +390,8 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 200; count > 0; count--) {
+	/* Pongo PLLs using a 32 kHz reference can take upwards of 1500 us to lock. */
+ for (count = 1500; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@@ -432,6 +471,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->pre_div_mask;
mask |= config->post_div_mask;
mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+ mask |= config->alpha_mode_mask;
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
@@ -2494,6 +2535,144 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+static int alpha_pll_pongo_elu_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+	/* Enable the PLL initially to perform a one-time calibration against the XO. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_XO_PRESENT, PONGO_XO_PRESENT);
+
+ /* Set regmap for wait_for_pll() */
+ pll->clkr.regmap = regmap;
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret) {
+ /* Reverse calibration - disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ return ret;
+ }
+
+ /* Disable PLL after one-time calibration. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Select internally generated clock. */
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_CLOCK_SELECT,
+ PONGO_CLOCK_SELECT);
+
+ return 0;
+}
+
+static int alpha_pll_pongo_elu_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Check if PLL is already enabled */
+ if (trion_pll_is_enabled(pll, regmap))
+ return 0;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+}
+
+static void alpha_pll_pongo_elu_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_pongo_elu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ l &= PONGO_PLL_L_VAL_MASK;
+
+ return alpha_pll_calc_rate(parent_rate, l, 0, pll_alpha_width(pll));
+}
+
+const struct clk_ops clk_alpha_pll_pongo_elu_ops = {
+ .prepare = alpha_pll_pongo_elu_prepare,
+ .enable = alpha_pll_pongo_elu_enable,
+ .disable = alpha_pll_pongo_elu_disable,
+ .recalc_rate = alpha_pll_pongo_elu_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_pongo_elu_ops);
+
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll,
+ struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), PONGO_PLL_OUT_MASK,
+ PONGO_PLL_OUT_MASK);
+
+ if (trion_pll_is_enabled(pll, regmap))
+ return;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &val))
+ return;
+ val &= PONGO_PLL_L_VAL_MASK;
+ if (val)
+ return;
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U2(pll), config->config_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val | PONGO_PLL_OUT_MASK);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U2(pll), config->test_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U3(pll), config->test_ctl_hi3_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+}
+EXPORT_SYMBOL_GPL(clk_pongo_elu_pll_configure);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index c6d1b8429f95..79aca8525262 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -27,6 +27,8 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
+ CLK_ALPHA_PLL_TYPE_PONGO_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
@@ -52,6 +54,7 @@ enum {
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
PLL_OFF_TEST_CTL_U2,
+ PLL_OFF_TEST_CTL_U3,
PLL_OFF_STATE,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
@@ -137,6 +140,7 @@ struct alpha_pll_config {
u32 test_ctl_hi_mask;
u32 test_ctl_hi1_val;
u32 test_ctl_hi2_val;
+ u32 test_ctl_hi3_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -185,13 +189,17 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
@@ -218,6 +226,11 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+#define clk_taycan_elu_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 88845baa7f84..987141c91fe0 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -597,6 +597,7 @@ struct frac_entry {
};
static const struct frac_entry pixel_table[] = {
+ { 1, 1 },
{ 1, 2 },
{ 1, 3 },
{ 3, 16 },
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 80f1f4fcd52a..4fbdf4880d03 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -189,6 +189,7 @@ struct clk_rcg2_gfx3d {
container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_gp_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_fm_ops;
extern const struct clk_ops clk_rcg2_mux_closest_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bf6406f5279a..8001fd9faf9d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -8,11 +8,13 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/gcd.h>
#include <linux/minmax.h>
#include <linux/slab.h>
@@ -32,6 +34,7 @@
#define CFG_REG 0x4
#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_DIV_LENGTH 8
#define CFG_SRC_SEL_SHIFT 8
#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT 12
@@ -148,12 +151,32 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
return update_config(rcg);
}
-/*
- * Calculate m/n:d rate
+/**
+ * convert_to_reg_val() - Convert divisor values to hardware values.
+ *
+ * @f: Frequency table with pure m/n/pre_div parameters.
+ */
+static void convert_to_reg_val(struct freq_tbl *f)
+{
+ f->pre_div *= 2;
+ f->pre_div -= 1;
+}
+
+/**
+ * calc_rate() - Calculate rate based on m/n:d values
+ *
+ * @rate: Parent rate.
+ * @m: Multiplier.
+ * @n: Divisor.
+ * @mode: Use zero to ignore m/n calculation.
+ * @hid_div: Pre-divisor register value. The pre-divisor relates to
+ *           hid_div as pre_div = (hid_div + 1) / 2.
+ *
+ * Return the calculated rate according to the formula:
*
* parent_rate m
* rate = ----------- x ---
- * hid_div n
+ * pre_div n
*/
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
@@ -393,16 +416,110 @@ static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
-static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
- u32 *_cfg)
+/**
+ * clk_rcg2_split_div() - Split a multiplier that fits neither in n nor in pre_div.
+ *
+ * @multiplier: Multiplier to split between n and pre_div.
+ * @pre_div: Pointer to pre divisor value.
+ * @n: Pointer to n divisor value.
+ * @pre_div_max: Pre divisor maximum value.
+ */
+static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
+ u16 *n, unsigned int pre_div_max)
+{
+ *n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
+ *pre_div = pre_div_max;
+}
+
+static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
+ unsigned int mnd_max, unsigned int pre_div_max)
+{
+ int i = 2;
+ unsigned int pre_div = 1;
+ unsigned long rates_gcd, scaled_parent_rate;
+ u16 m, n = 1, n_candidate = 1, n_max;
+
+ rates_gcd = gcd(parent_rate, rate);
+ m = div64_u64(rate, rates_gcd);
+ scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
+ while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
+ // we're exceeding divisor's range, trying lower scale.
+ if (m > 1) {
+		/* We are exceeding the divisor's range, try a lower scale. */
+ scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
+ } else {
+			/* Cannot lower the scale, just set the maximum divisor values. */
+ f->n = mnd_max + m;
+ f->pre_div = pre_div_max;
+ f->m = m;
+ return;
+ }
+ }
+
+ n_max = m + mnd_max;
+
+ while (scaled_parent_rate > 1) {
+ while (scaled_parent_rate % i == 0) {
+ n_candidate *= i;
+ if (n_candidate < n_max)
+ n = n_candidate;
+ else if (pre_div * i < pre_div_max)
+ pre_div *= i;
+ else
+ clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);
+
+ scaled_parent_rate /= i;
+ }
+ i++;
+ }
+
+ f->m = m;
+ f->n = n;
+ f->pre_div = pre_div > 1 ? pre_div : 0;
+}
+
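To see the gcd reduction above in action, take an assumed 19.2 MHz parent and a 1 MHz target with 8-bit M/N counters: the gcd is 200 kHz, so m = 5, and the scaled parent rate 96 = 2^5 * 3 fits entirely in n, leaving pre_div at 1. A standalone sketch (illustration only):

	#include <stdio.h>

	static unsigned long example_gcd(unsigned long a, unsigned long b)
	{
		while (b) {
			unsigned long t = a % b;
			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		unsigned long parent = 19200000, target = 1000000;
		unsigned long g = example_gcd(parent, target);	/* 200000 */
		unsigned long m = target / g;			/* 5 */
		unsigned long n = parent / g;			/* 96, fits in 8 bits */

		printf("m=%lu n=%lu -> %lu Hz\n", m, n, parent * m / n);
		return 0;
	}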
+static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct clk_hw *parent;
+ u64 parent_rate;
+
+ parent = clk_hw_get_parent(hw);
+ parent_rate = clk_get_rate(parent->clk);
+ if (!parent_rate)
+ return -EINVAL;
+
+ clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
{
- u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, src);
if (index < 0)
return index;
+ *_cfg &= ~CFG_SRC_SEL_MASK;
+ *_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ u32 cfg, mask, d_val, not2d_val, n_minus_m;
+ int ret;
+
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
@@ -431,9 +548,8 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
}
mask = BIT(rcg->hid_width) - 1;
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
if (rcg->hw_clk_ctrl)
@@ -445,6 +561,22 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
return 0;
}
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg;
@@ -465,6 +597,26 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
+static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
enum freq_policy policy)
{
@@ -518,6 +670,22 @@ static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg2_set_rate(hw, rate, CEIL);
}
+static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int ret;
+
+ clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ ret = clk_rcg2_configure_gp(rcg, f);
+
+ return ret;
+}
+
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -645,6 +813,18 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_gp_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_gp_rate,
+ .set_rate = clk_rcg2_set_gp_rate,
+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
+
const struct clk_ops clk_rcg2_floor_ops = {
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9da034f8f2ff..ccc112c21667 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -224,10 +225,10 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -237,17 +238,14 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_clk_lock);
}
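The conversion above relies on the scope-based guard(mutex) helper from <linux/cleanup.h>: the mutex is unlocked automatically when the guard goes out of scope, which is why the out: label and the explicit mutex_unlock() calls disappear. A minimal sketch of the pattern, with a hypothetical function:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	static int example_state;

	static int example_update(int v)
	{
		guard(mutex)(&example_lock);

		if (v < 0)
			return -EINVAL;	/* lock dropped automatically here */

		example_state = v;
		return 0;		/* ...and here */
	}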
static int clk_rpm_xo_prepare(struct clk_hw *hw)
@@ -324,12 +322,12 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long active_rate, sleep_rate;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- int ret = 0;
+ int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -341,19 +339,16 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
active_rate = max(this_rate, peer_rate);
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index eefc322ce367..c7675930fde1 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
@@ -206,7 +207,7 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
c->state = c->valid_state_mask;
WARN(1, "clk: %s failed to %s\n", c->res_name,
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
@@ -329,7 +330,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
{
struct clk_rpmh *c = to_clk_rpmh(hw);
- return c->aggr_state * c->unit;
+ return (unsigned long)c->aggr_state * c->unit;
}
static const struct clk_ops clk_rpmh_bcm_ops = {
@@ -368,6 +369,8 @@ DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);
+DEFINE_CLK_RPMH_VRM(rf_clk3, _a2, "rfclka3", 2);
+
DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1);
DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1);
DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
@@ -807,6 +810,45 @@ static const struct clk_rpmh_desc clk_rpmh_x1e80100 = {
.num_clks = ARRAY_SIZE(x1e80100_rpmh_clocks),
};
+static struct clk_hw *qcs615_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
+ .clks = qcs615_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(qcs615_rpmh_clocks),
+};
+
+static struct clk_hw *sm8750_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a2.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a2_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
+ .clks = sm8750_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -890,10 +932,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
{ .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
@@ -909,7 +953,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
{ .compatible = "qcom,sm8650-rpmh-clk", .data = &clk_rpmh_sm8650},
- { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
+ { .compatible = "qcom,sm8750-rpmh-clk", .data = &clk_rpmh_sm8750},
{ .compatible = "qcom,x1e80100-rpmh-clk", .data = &clk_rpmh_x1e80100},
{ }
};
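Two small cleanups above are worth unpacking. str_enable_disable() from <linux/string_choices.h> simply returns "enable" or "disable" for a bool, replacing the open-coded ternary. The cast in clk_rpmh_bcm_recalc_rate() widens the multiplication before it can wrap: with both operands 32-bit, a large aggregated vote times the unit size overflows before the product is returned. A sketch with made-up numbers, assuming a 64-bit kernel where unsigned long is 64-bit:

#include <linux/types.h>

static unsigned long example_bcm_rate(u32 aggr_state, u32 unit)
{
	/* e.g. aggr_state = 0x1000, unit = 0x100000: the 32-bit product
	 * 0x1000 * 0x100000 == 1ULL << 32 wraps to 0; widening one
	 * operand first keeps the full product. */
	return (unsigned long)aggr_state * unit;
}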
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 45c5255bcd11..29ef08a9d50b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -309,10 +310,10 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -322,17 +323,14 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_smd_clk_lock);
}
static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -345,10 +343,10 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long peer_rate = 0, peer_sleep_rate = 0;
int ret = 0;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -360,19 +358,16 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
active_rate = max(this_rate, peer_rate);
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_smd_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -700,6 +695,60 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
+static struct clk_smd_rpm *msm8937_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8937 = {
+ .clks = msm8937_clks,
+ .num_clks = ARRAY_SIZE(msm8937_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
+static struct clk_smd_rpm *msm8940_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8940 = {
+ .clks = msm8940_clks,
+ .num_clks = ARRAY_SIZE(msm8940_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -1216,6 +1265,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8917", .data = &rpm_clk_msm8917 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
+ { .compatible = "qcom,rpmcc-msm8937", .data = &rpm_clk_msm8937 },
+ { .compatible = "qcom,rpmcc-msm8940", .data = &rpm_clk_msm8940 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index f394031eb0e5..41a0a4f3b4fb 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -3,6 +3,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -140,30 +141,26 @@ static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor = div_to_div_factor(parent_rate / rate);
- unsigned long flags;
bool enabled;
int ret;
- spin_lock_irqsave(&clkdiv->lock, flags);
+ guard(spinlock_irqsave)(&clkdiv->lock);
+
enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
if (enabled) {
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
if (ret)
- goto unlock;
+ return ret;
}
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
DIV_CTL1_DIV_FACTOR_MASK, div_factor);
if (ret)
- goto unlock;
+ return ret;
if (enabled)
ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
div_factor);
-
-unlock:
- spin_unlock_irqrestore(&clkdiv->lock, flags);
-
return ret;
}
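The same <linux/cleanup.h> idiom applies here in its irqsave flavor: guard(spinlock_irqsave) takes the lock with local interrupts disabled and restores the saved flags at scope exit, which is why the explicit "unsigned long flags" variable disappears. A minimal sketch (not part of the patch), with a hypothetical lock:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static int example_claim(bool *busy)
{
	guard(spinlock_irqsave)(&example_lock);	/* IRQs off in this scope */

	if (*busy)
		return -EBUSY;		/* flags restored automatically */

	*busy = true;
	return 0;
}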
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 449ffea2295d..d7bb1399e102 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -40,8 +40,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
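A quick check of why the fractional fields can be dropped from this 768 MHz configuration, assuming the usual alpha-PLL relation rate = XO * (L + alpha / 2^alpha_width) with the 19.2 MHz TCXO reference:

/* rate = 19.2 MHz * (0x28 + 0) = 19.2 MHz * 40 = 768 MHz exactly,
 * so alpha == 0 and setting alpha_en_mask enabled nothing. */

The same reasoning applies to the identical dispcc-sm6115 change below.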
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 939887f82ecc..2b236d52b29f 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -48,8 +48,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 50facb36701a..2bc6b5f99f57 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
.cmd_rcgr = 0x1144,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
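The new dispcc-sm8750 driver below follows the standard qcom clock-controller layout: each parent_map array gives the mux register value for every P_* selector, and the clk_parent_data array at the same indices says where each parent comes from — a DT "clocks" entry via .index, or an in-driver clock via .hw. A stripped-down sketch of the pairing (names taken from the file below; this mirrors disp_cc_parent_map_10):

static const struct parent_map example_map[] = {
	{ P_BI_TCXO, 0 },		/* selector -> mux field value 0 */
	{ P_DISP_CC_PLL2_OUT_MAIN, 2 },	/* value 2 picks the internal PLL */
};

static const struct clk_parent_data example_data[] = {
	{ .index = DT_BI_TCXO },		/* resolved from the DT clocks list */
	{ .hw = &disp_cc_pll2.clkr.hw },	/* in-driver parent */
};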
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
new file mode 100644
index 000000000000..e9bca179998b
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -0,0 +1,1961 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8750-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DISP_CC_PLL2_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco pongo_elu_vco[] = {
+ { 38400000, 38400000, 0 },
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll2_config = {
+ .l = 0x493,
+ .alpha = 0x0,
+ .config_ctl_val = 0x60000f68,
+ .config_ctl_hi_val = 0x0001c808,
+ .config_ctl_hi1_val = 0x00000000,
+ .config_ctl_hi2_val = 0x040082f4,
+ .test_ctl_val = 0x00008000,
+ .test_ctl_hi_val = 0x0080c496,
+ .test_ctl_hi1_val = 0x40100180,
+ .test_ctl_hi2_val = 0x441001bc,
+ .test_ctl_hi3_val = 0x002003d8,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00e50302,
+};
+
+static struct clk_alpha_pll disp_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = pongo_elu_vco,
+ .num_vco = ARRAY_SIZE(pongo_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_PONGO_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_SLEEP_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_pongo_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .hw = &disp_cc_pll2.clkr.hw },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_11[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_11[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(207000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ /*
+ * TODO: Downstream does not manage this clock directly, but
+ * places votes via a new hardware block called "cesta".
+ * It is not yet clear whether that approach should be taken
+ * instead of manual control.
+ */
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_osc_clk_src[] = {
+ F(38400000, P_DISP_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_10,
+ .freq_tbl = ftbl_disp_cc_osc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_11,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_11,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
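+/*
+ * GDSCs are the MDSS power domains. POLL_CFG_GDSCR polls the CFG register
+ * for the power state, HW_CTRL hands on/off control to hardware, and
+ * RETAIN_FF_ENABLE keeps register contents across power collapse.
+ */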
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
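+/* Array indices are the DISP_CC_* ids from the dt-bindings clock header */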
+static struct clk_regmap *disp_cc_sm8750_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_PLL2] = &disp_cc_pll2.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
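+/* Each entry is the offset of a block control reset (BCR) register */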
+static const struct qcom_reset_map disp_cc_sm8750_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8750_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
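+/* max_register bounds regmap accesses; fast_io selects spinlock locking */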
+static const struct regmap_config disp_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8750_desc = {
+ .config = &disp_cc_sm8750_regmap_config,
+ .clks = disp_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8750_clocks),
+ .resets = disp_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8750_resets),
+ .gdscs = disp_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8750_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8750_match_table);
+
+static int disp_cc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8750_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
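+	/* Program the display PLLs before any of their child clocks are registered */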
+ clk_taycan_elu_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_taycan_elu_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ clk_pongo_elu_pll_configure(&disp_cc_pll2, regmap, &disp_cc_pll2_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe07c); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc00c); /* DISP_CC_MDSS_RSCC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc008); /* DISP_CC_MDSS_RSCC_VSYNC_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8750_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8750_driver = {
+ .probe = disp_cc_sm8750_probe,
+ .driver = {
+ .name = "disp_cc-sm8750",
+ .of_match_table = disp_cc_sm8750_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm8750_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
index 88a7d5b2e751..d5b218b76e29 100644
--- a/drivers/clk/qcom/gcc-ipq5424.c
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -12,6 +13,7 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
#include "clk-alpha-pll.h"
@@ -325,6 +327,24 @@ static struct clk_rcg2 gcc_xo_clk_src = {
},
};
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_fixed_factor gcc_xo_div4_clk_src = {
.mult = 1,
.div = 4,
@@ -1097,24 +1117,6 @@ static struct clk_branch gcc_adss_pwm_clk = {
},
};
-static struct clk_branch gcc_apss_dbg_clk = {
- .halt_reg = 0x2402c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2402c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_apss_dbg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_cnoc_pcie0_1lane_s_clk = {
.halt_reg = 0x31088,
.halt_check = BRANCH_HALT,
@@ -2785,7 +2787,6 @@ static struct clk_branch gcc_pcie3_rchng_clk = {
static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
[GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr,
- [GCC_APSS_DBG_CLK] = &gcc_apss_dbg_clk.clkr,
[GCC_CNOC_PCIE0_1LANE_S_CLK] = &gcc_cnoc_pcie0_1lane_s_clk.clkr,
[GCC_CNOC_PCIE1_1LANE_S_CLK] = &gcc_cnoc_pcie1_1lane_s_clk.clkr,
[GCC_CNOC_PCIE2_2LANE_S_CLK] = &gcc_cnoc_pcie2_2lane_s_clk.clkr,
@@ -2920,6 +2921,7 @@ static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GCC_QPIC_CLK_SRC] = &gcc_qpic_clk_src.clkr,
[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
[GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GPLL0] = &gpll0.clkr,
@@ -3230,6 +3232,20 @@ static const struct qcom_reset_map gcc_ipq5424_resets[] = {
[GCC_QUSB2_1_PHY_BCR] = { 0x3C030, 0 },
};
+#define IPQ_APPS_ID 5424 /* any unique value identifying this icc provider */
+
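+/*
+ * Map each interconnect master/slave node pair to the branch clock gating
+ * that path, so the common icc-clk provider can scale it.
+ */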
+static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
+ { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE0, SLAVE_CNOC_PCIE0, GCC_CNOC_PCIE0_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE1, SLAVE_CNOC_PCIE1, GCC_CNOC_PCIE1_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE2, SLAVE_CNOC_PCIE2, GCC_CNOC_PCIE2_2LANE_S_CLK },
+ { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+};
+
static const struct of_device_id gcc_ipq5424_match_table[] = {
{ .compatible = "qcom,ipq5424-gcc" },
{ }
@@ -3260,6 +3276,8 @@ static const struct qcom_cc_desc gcc_ipq5424_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq5424_resets),
.clk_hws = gcc_ipq5424_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
+ .icc_hws = icc_ipq5424_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
};
static int gcc_ipq5424_probe(struct platform_device *pdev)
@@ -3272,6 +3290,7 @@ static struct platform_driver gcc_ipq5424_driver = {
.driver = {
.name = "qcom,gcc-ipq5424",
.of_match_table = gcc_ipq5424_match_table,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index ab0f7fc665a9..d861191b0c85 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -4194,10 +4194,9 @@ static const struct alpha_pll_config ubi32_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+/* 1200 MHz configuration */
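+/* With L = 0x32 (50) and the 24 MHz XO this is integer mode, so the alpha (fractional) fields are no longer needed */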
static const struct alpha_pll_config nss_crypto_pll_config = {
.l = 0x32,
- .alpha = 0x0,
- .alpha_hi = 0x0,
.config_ctl_val = 0x4001055b,
.main_output_mask = BIT(0),
.pre_div_val = 0x0,
@@ -4206,7 +4205,6 @@ static const struct alpha_pll_config nss_crypto_pll_config = {
.post_div_mask = GENMASK(11, 8),
.vco_mask = GENMASK(21, 20),
.vco_val = 0x0,
- .alpha_en_mask = BIT(24),
};
static struct clk_hw *gcc_ipq6018_hws[] = {
diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
index 6e6068b168e6..07f1b78d737a 100644
--- a/drivers/clk/qcom/gcc-mdm9607.c
+++ b/drivers/clk/qcom/gcc-mdm9607.c
@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
};
static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
- .cmd_rcgr = 0x6044,
+ .cmd_rcgr = 0x7044,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
new file mode 100644
index 000000000000..9695446bc2a3
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -0,0 +1,3034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
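+/* Order of the parent clocks in the device tree "clocks" property */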
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2_DIV,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL3_OUT_MAIN_DIV,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
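+/* The GPLLs are enabled through per-PLL vote bits in the shared register at 0x52000 */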
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL0, used in place of the PLL's normal postdiv output */
+static struct clk_fixed_factor gpll0_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL3, used in place of the PLL's normal postdiv output */
+static struct clk_fixed_factor gpll3_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll3_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x13000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x1b000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x1b000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
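+/*
+ * Each parent_map entry pairs a logical parent (P_*) with the src_sel mux
+ * value in the RCG CFG register; the matching clk_parent_data array lists
+ * the parents in the same order.
+ */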
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN_DIV, 4 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3_out_aux2_div.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
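+/*
+ * Frequency table entries are F(rate, parent, pre_div, m, n); the F() macro
+ * stores pre_div as 2 * div - 1, which is how half-integer dividers such as
+ * 2.5 further down are expressed.
+ */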
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
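+/* clk_rcg2_shared_ops parks the RCG on the XO parent while the clock is disabled */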
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_AUX2_DIV, 5, 0, 0),
+ F(133250000, P_GPLL3_OUT_MAIN_DIV, 4, 0, 0),
+ F(266500000, P_GPLL3_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2_DIV, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2_DIV, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2_DIV, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2_DIV, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2_DIV, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2_DIV, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2_DIV, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2_DIV, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
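+/*
+ * The QUP RCG init data is kept in separate named structs, presumably so the
+ * probe path can retarget their ops at runtime (e.g. for DFS), as other QUP
+ * wrappers do.
+ */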
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
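+/* clk_rcg2_floor_ops rounds requested rates down so the card is never overclocked */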
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(202000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7707c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_sec_master_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_sec_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_sec_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb2_sec_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb2_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_vsensor_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
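+/* Branches with hwcg_reg/hwcg_bit also support hardware-controlled clock gating */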
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_sec_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_east_clk = {
+ .halt_reg = 0x6a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_west_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
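+/*
+ * Voted branches are enabled via a bit in the shared vote register (0x52004
+ * here); BRANCH_HALT_VOTED does not poll the halt bit on disable, since
+ * another voter may keep the clock running.
+ */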
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_sec_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
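+/* The pipe clock comes from the PCIe PHY, so its halt bit is not polled (BRANCH_HALT_SKIP) */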
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x8c030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx3_usb2_clkref_clk = {
+ .halt_reg = 0x8c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx3_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_prim_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_pipe_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
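+/* These clocks have no regmap state and are registered as bare clk_hws. */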
+static struct clk_hw *gcc_qcs615_hws[] = {
+ [GPLL0_OUT_AUX2_DIV] = &gpll0_out_aux2_div.hw,
+ [GPLL3_OUT_AUX2_DIV] = &gpll3_out_aux2_div.hw,
+};
+
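+/*
+ * GDSCs (globally distributed switch controllers) are the on-die power
+ * domains; the *_wait_val fields program the GDSCR timing for the
+ * EN_REST, EN_FEW and CLK_DIS stages of the power switch sequence.
+ */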
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb20_sec_gdsc = {
+ .gdscr = 0xa6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb20_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
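+/*
+ * The HLOS1 vote GDSCs gate SMMU TBUs and are shared with other masters;
+ * VOTABLE means disable merely drops this processor's vote instead of
+ * polling for the domain to actually power down.
+ */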
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_qcs615_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_SEC_AXI_CLK] = &gcc_aggre_usb2_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY_EAST_CLK] = &gcc_ahb2phy_east_clk.clkr,
+ [GCC_AHB2PHY_WEST_CLK] = &gcc_ahb2phy_west_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB2_SEC_AXI_CLK] = &gcc_cfg_noc_usb2_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_RX3_USB2_CLKREF_CLK] = &gcc_rx3_usb2_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_SEC_MASTER_CLK] = &gcc_usb20_sec_master_clk.clkr,
+ [GCC_USB20_SEC_MASTER_CLK_SRC] = &gcc_usb20_sec_master_clk_src.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK] = &gcc_usb20_sec_mock_utmi_clk.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb20_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB20_SEC_SLEEP_CLK] = &gcc_usb20_sec_sleep_clk.clkr,
+ [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr,
+ [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK] = &gcc_usb2_sec_phy_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK_SRC] = &gcc_usb2_sec_phy_aux_clk_src.clkr,
+ [GCC_USB2_SEC_PHY_COM_AUX_CLK] = &gcc_usb2_sec_phy_com_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_PIPE_CLK] = &gcc_usb2_sec_phy_pipe_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+};
+
+static struct gdsc *gcc_qcs615_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB20_SEC_GDSC] = &usb20_sec_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+};
+
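+/*
+ * Each entry maps a reset index to a Block Control Reset (BCR) register;
+ * with no per-entry bit specified, the reset controller asserts BIT(0)
+ * of the BCR by default.
+ */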
+static const struct qcom_reset_map gcc_qcs615_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xd000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xd004 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB20_SEC_BCR] = { 0xa6000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_SDCC1_BCR] = { 0x12000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+};
+
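+/*
+ * The QUP serial-engine RCGs support hardware Dynamic Frequency Switching;
+ * qcom_cc_register_rcg_dfs() reads the per-level frequency configuration
+ * back from the DFS registers, so no static freq_tbl is needed here.
+ */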
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
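+/* fast_io selects spinlock-based regmap locking, appropriate for MMIO. */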
+static const struct regmap_config gcc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa609c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs615_desc = {
+ .config = &gcc_qcs615_regmap_config,
+ .clk_hws = gcc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs615_hws),
+ .clks = gcc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs615_clocks),
+ .resets = gcc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs615_resets),
+ .gdscs = gcc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs615_gdscs),
+};
+
+static const struct of_device_id gcc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs615_match_table);
+
+static int gcc_qcs615_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ /*
+ * Disable the GPLL0 active input to MM blocks and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x0b084, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x9b000, BIT(0), BIT(0));
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xb008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb044); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb048); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb040); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x48004); /* GCC_CPUSS_GNOC_CLK */
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs615_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs615_driver = {
+ .probe = gcc_qcs615_probe,
+ .driver = {
+ .name = "gcc-qcs615",
+ .of_match_table = gcc_qcs615_match_table,
+ },
+};
+
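+/*
+ * Register at subsys_initcall time so the clock controller is available
+ * before most device_initcall-time consumers probe, limiting deferrals.
+ */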
+static int __init gcc_qcs615_init(void)
+{
+ return platform_driver_register(&gcc_qcs615_driver);
+}
+subsys_initcall(gcc_qcs615_init);
+
+static void __exit gcc_qcs615_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs615_driver);
+}
+module_exit(gcc_qcs615_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index dc3aa7014c3e..6d0f9cede5cf 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -284,11 +284,6 @@ static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -302,7 +297,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -316,7 +311,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -330,7 +325,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -454,7 +449,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -470,7 +465,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -486,7 +481,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -502,7 +497,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -518,7 +513,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -534,7 +529,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -550,7 +545,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -566,7 +561,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -582,7 +577,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -598,7 +593,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -614,7 +609,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -630,7 +625,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -646,7 +641,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -662,7 +657,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -678,7 +673,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -694,7 +689,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a811fad2aa27..74346dc02606 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .hw = &gpll0_out_odd.clkr.hw },
};
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.cmd_rcgr = 0x3a0b0,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.cmd_rcgr = 0x1a034,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 5abaeddd6afc..862a9bf73bcb 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fd9d6544bdd5..9dd5c48f33be 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
new file mode 100644
index 000000000000..b36d70976095
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -0,0 +1,3274 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
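+/*
+ * Software parent identifiers; the parent_map tables below translate each
+ * of these into the hardware mux field value that selects it.
+ */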
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
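+/*
+ * The GPLLs share one enable register (0x52020, presumably the APCS PLL
+ * vote register) with one bit per PLL; the "fixed" Taycan ELU ops leave
+ * the bootloader-configured rate alone and only gate the output.
+ */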
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_elu_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
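+/*
+ * Each parent_map must stay index-aligned with its clk_parent_data array:
+ * entry N of both tables describes the same parent.
+ */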
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
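+/*
+ * The PCIe pipe clock uses the regmap PHY mux, which parks the clock on
+ * the TCXO reference whenever the PHY pipe source is powered down.
+ */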
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b080,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770ec,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
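+/*
+ * Frequency tables list F(rate, source, pre-divider, M, N) entries. RCGs
+ * with clk_rcg2_shared_ops park on the XO source while disabled and
+ * restore the configured rate on re-enable.
+ */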
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b084,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
+ .cmd_rcgr = 0x17008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
+ .cmd_rcgr = 0x17040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
+ .cmd_rcgr = 0x1705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
+ .cmd_rcgr = 0x17078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
+ .cmd_rcgr = 0x17094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
+ .cmd_rcgr = 0x170b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
+ .cmd_rcgr = 0x170cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
+ .cmd_rcgr = 0x170e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
+ .cmd_rcgr = 0x17104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* TODO: Check this frequency table. */
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x188c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x182a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x183dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18518,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18654,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18790,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src[] = {
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
+ .cmd_rcgr = 0x1e9f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e28c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e504,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e640,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(128000000, P_GCC_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e77c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e8b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
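+/*
+ * SDCC RCGs use floor ops so a requested card clock is rounded down
+ * rather than exceeding the supported rate.
+ */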
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1401c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x39030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
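+/*
+ * Read-only post-dividers (clk_regmap_div_ro_ops): the divider setting is
+ * never changed by the driver; rate requests propagate to the parent RCG
+ * via CLK_SET_RATE_PARENT.
+ */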
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x39060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
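+/*
+ * Branch (gate) clocks. BRANCH_HALT_VOTED entries are toggled through the
+ * shared voting registers (0x52000/0x52008/0x52010) while the halt status
+ * is still polled at each branch's own halt_reg.
+ */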
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71150,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71150,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9f008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9f018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_core_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s0_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s1_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s2_clk = {
+ .halt_reg = 0x1703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s3_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s4_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s5_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s6_clk = {
+ .halt_reg = 0x170ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s7_clk = {
+ .halt_reg = 0x170c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s8_clk = {
+ .halt_reg = 0x170e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s9_clk = {
+ .halt_reg = 0x17100,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x2315c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x188bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x18290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x183cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18508,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18644,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18780,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x232a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_2_clk = {
+ .halt_reg = 0x1e9ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_3_clk = {
+ .halt_reg = 0x1e9f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e630,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e76c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e8a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_2_ahb_clk = {
+ .halt_reg = 0x1e9e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_3_ahb_clk = {
+ .halt_reg = 0x1e9e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x23298,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23298,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2329c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2329c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7707c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7707c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770d8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3902c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3902c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3906c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3906c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(2),
+ .pd = {
+ .name = "gcc_pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_phy_gdsc = {
+ .gdscr = 0x50018,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sm8750_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_I2C_CORE_CLK] = &gcc_qupv3_i2c_core_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK] = &gcc_qupv3_i2c_s0_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK_SRC] = &gcc_qupv3_i2c_s0_clk_src.clkr,
+ [GCC_QUPV3_I2C_S1_CLK] = &gcc_qupv3_i2c_s1_clk.clkr,
+ [GCC_QUPV3_I2C_S1_CLK_SRC] = &gcc_qupv3_i2c_s1_clk_src.clkr,
+ [GCC_QUPV3_I2C_S2_CLK] = &gcc_qupv3_i2c_s2_clk.clkr,
+ [GCC_QUPV3_I2C_S2_CLK_SRC] = &gcc_qupv3_i2c_s2_clk_src.clkr,
+ [GCC_QUPV3_I2C_S3_CLK] = &gcc_qupv3_i2c_s3_clk.clkr,
+ [GCC_QUPV3_I2C_S3_CLK_SRC] = &gcc_qupv3_i2c_s3_clk_src.clkr,
+ [GCC_QUPV3_I2C_S4_CLK] = &gcc_qupv3_i2c_s4_clk.clkr,
+ [GCC_QUPV3_I2C_S4_CLK_SRC] = &gcc_qupv3_i2c_s4_clk_src.clkr,
+ [GCC_QUPV3_I2C_S5_CLK] = &gcc_qupv3_i2c_s5_clk.clkr,
+ [GCC_QUPV3_I2C_S5_CLK_SRC] = &gcc_qupv3_i2c_s5_clk_src.clkr,
+ [GCC_QUPV3_I2C_S6_CLK] = &gcc_qupv3_i2c_s6_clk.clkr,
+ [GCC_QUPV3_I2C_S6_CLK_SRC] = &gcc_qupv3_i2c_s6_clk_src.clkr,
+ [GCC_QUPV3_I2C_S7_CLK] = &gcc_qupv3_i2c_s7_clk.clkr,
+ [GCC_QUPV3_I2C_S7_CLK_SRC] = &gcc_qupv3_i2c_s7_clk_src.clkr,
+ [GCC_QUPV3_I2C_S8_CLK] = &gcc_qupv3_i2c_s8_clk.clkr,
+ [GCC_QUPV3_I2C_S8_CLK_SRC] = &gcc_qupv3_i2c_s8_clk_src.clkr,
+ [GCC_QUPV3_I2C_S9_CLK] = &gcc_qupv3_i2c_s9_clk.clkr,
+ [GCC_QUPV3_I2C_S9_CLK_SRC] = &gcc_qupv3_i2c_s9_clk_src.clkr,
+ [GCC_QUPV3_I2C_S_AHB_CLK] = &gcc_qupv3_i2c_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC] = &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_2_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_2_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_3_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8750_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_0_PHY_GDSC] = &gcc_pcie_0_phy_gdsc,
+ [GCC_UFS_MEM_PHY_GDSC] = &gcc_ufs_mem_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB3_PHY_GDSC] = &gcc_usb3_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8750_resets[] = {
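+	/*
+	 * Entries with a second field specify the CLK_ARES bit inside a
+	 * branch register rather than a block BCR.
+	 */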
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9f000 },
+ [GCC_EVA_AXI0_CLK_ARES] = { 0x9f008, 2 },
+ [GCC_EVA_AXI0C_CLK_ARES] = { 0x9f018, 2 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_I2C_BCR] = { 0x17000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32028, 2 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8750_desc = {
+ .config = &gcc_sm8750_regmap_config,
+ .clks = gcc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8750_clocks),
+ .resets = gcc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8750_resets),
+ .gdscs = gcc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8750_gdscs),
+};
+
+static const struct of_device_id gcc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8750_match_table);
+
+static int gcc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8750_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep clocks always enabled:
+ * gcc_cam_bist_mclk_ahb_clk
+ * gcc_camera_ahb_clk
+ * gcc_camera_xo_clk
+ * gcc_disp_ahb_clk
+ * gcc_eva_ahb_clk
+ * gcc_eva_xo_clk
+ * gcc_gpu_cfg_ahb_clk
+ * gcc_video_ahb_clk
+ * gcc_video_xo_clk
+ * gcc_pcie_rscc_cfg_ahb_clk
+ * gcc_pcie_rscc_xo_clk
+ */
+ qcom_branch_set_clk_en(regmap, 0xa0004);
+ qcom_branch_set_clk_en(regmap, 0x26004);
+ qcom_branch_set_clk_en(regmap, 0x26034);
+ qcom_branch_set_clk_en(regmap, 0x27004);
+ qcom_branch_set_clk_en(regmap, 0x9f004);
+ qcom_branch_set_clk_en(regmap, 0x9f01c);
+ qcom_branch_set_clk_en(regmap, 0x71004);
+ qcom_branch_set_clk_en(regmap, 0x32004);
+ qcom_branch_set_clk_en(regmap, 0x32038);
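+	/*
+	 * Bits 20/21 of the 0x52010 voting register presumably enable the
+	 * gcc_pcie_rscc_cfg_ahb/xo clocks named in the list above.
+	 */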
+ regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
+ regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
+
+ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8750_driver = {
+ .probe = gcc_sm8750_probe,
+ .driver = {
+ .name = "gcc-sm8750",
+ .of_match_table = gcc_sm8750_match_table,
+ },
+};
+
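+/*
+ * Register at subsys_initcall time so the GCC clocks are available before
+ * dependent device drivers probe (the usual pattern for Qualcomm GCC
+ * drivers).
+ */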
+static int __init gcc_sm8750_init(void)
+{
+ return platform_driver_register(&gcc_sm8750_driver);
+}
+subsys_initcall(gcc_sm8750_init);
+
+static void __exit gcc_sm8750_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8750_driver);
+}
+module_exit(gcc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 8ea25aa25dff..7288af845434 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
.pd = {
.name = "gcc_usb20_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gpucc-x1p42100.c b/drivers/clk/qcom/gpucc-x1p42100.c
new file mode 100644
index 000000000000..dba783339613
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-x1p42100.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 560.0 MHz Configuration */
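+/*
+ * i.e. 19.2 MHz * (l + alpha / 2^16) = 19.2 MHz * (0x1d + 0x2aaa / 0x10000),
+ * or ~560 MHz, assuming the customary 19.2 MHz TCXO reference and a 16-bit
+ * alpha width for Lucid OLE PLLs; gpu_cc_pll1 below follows the same
+ * formula for 440 MHz.
+ */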
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0x2aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x9480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9480,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x947c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x947c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gpu_cc_x1p42100_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_x1p42100_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_x1p42100_resets[] = {
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_x1p42100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc gpu_cc_x1p42100_desc = {
+ .config = &gpu_cc_x1p42100_regmap_config,
+ .clks = gpu_cc_x1p42100_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_x1p42100_clocks),
+ .resets = gpu_cc_x1p42100_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_x1p42100_resets),
+ .gdscs = gpu_cc_x1p42100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_x1p42100_gdscs),
+};
+
+static const struct of_device_id gpu_cc_x1p42100_match_table[] = {
+ { .compatible = "qcom,x1p42100-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_x1p42100_match_table);
+
+static int gpu_cc_x1p42100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_x1p42100_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_x1p42100_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver gpu_cc_x1p42100_driver = {
+ .probe = gpu_cc_x1p42100_probe,
+ .driver = {
+ .name = "gpucc-x1p42100",
+ .of_match_table = gpu_cc_x1p42100_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_x1p42100_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC X1P42100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
new file mode 100644
index 000000000000..432d4c4b7aa6
--- /dev/null
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * The CMN PLL block expects its reference clock from the on-board Wi-Fi
+ * block and supplies fixed-rate clocks as outputs to the networking
+ * hardware blocks and to GCC. The networking-related blocks include the
+ * PPE (packet processing engine), the externally connected PHY or switch
+ * devices, and the PCS.
+ *
+ * On the IPQ9574 SoC, three 50 MHz clocks and one 25 MHz clock are output
+ * from the CMN PLL to the Ethernet PHY (or switch), and one 353 MHz clock
+ * to the PPE. The remaining fixed-rate output clocks are supplied to GCC
+ * (24 MHz as XO and 32 kHz as sleep clock) and to the PCS at 31.25 MHz.
+ *
+ * +---------+
+ * | GCC |
+ * +--+---+--+
+ * AHB CLK| |SYS CLK
+ * V V
+ * +-------+---+------+
+ * | +-------------> eth0-50mhz
+ * REF CLK | IPQ9574 |
+ * -------->+ +-------------> eth1-50mhz
+ * | CMN PLL block |
+ * | +-------------> eth2-50mhz
+ * | |
+ * +----+----+----+---+-------------> eth-25mhz
+ * | | |
+ * V V V
+ * GCC PCS NSS/PPE
+ */
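+
+/*
+ * A minimal consumer sketch (illustrative only; the clock name matches an
+ * output registered below, but the consumer device and its clock-names
+ * binding are assumed):
+ *
+ *	struct clk *clk = devm_clk_get_enabled(dev, "eth0-50mhz");
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ */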
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+
+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
+
+#define CMN_PLL_LOCKED 0x64
+#define CMN_PLL_CLKS_LOCKED BIT(8)
+
+#define CMN_PLL_POWER_ON_AND_RESET 0x780
+#define CMN_ANA_EN_SW_RSTN BIT(6)
+
+#define CMN_PLL_REFCLK_CONFIG 0x784
+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
+
+#define CMN_PLL_CTRL 0x78c
+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
+
+#define CMN_PLL_DIVIDER_CTRL 0x794
+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
+
+/**
+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
+ * @id: Clock specifier to be supplied
+ * @name: Clock name to be registered
+ * @rate: Clock rate
+ */
+struct cmn_pll_fixed_output_clk {
+ unsigned int id;
+ const char *name;
+ unsigned long rate;
+};
+
+/**
+ * struct clk_cmn_pll - CMN PLL hardware specific data
+ * @regmap: hardware regmap.
+ * @hw: handle between common and hardware-specific interfaces
+ */
+struct clk_cmn_pll {
+ struct regmap *regmap;
+ struct clk_hw hw;
+};
+
+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
+ .id = _id, \
+ .name = _name, \
+ .rate = _rate, \
+}
+
+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
+
+static const struct regmap_config ipq_cmn_pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fc,
+ .fast_io = true,
+};
+
+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+};
+
+/*
+ * The CMN PLL has a single parent clock, which may run at one of several
+ * supported rates. Each supported parent clock rate maps to a specific
+ * reference index value in the hardware.
+ */
+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
+{
+ int index = -EINVAL;
+
+ switch (parent_rate) {
+ case 25000000:
+ index = 3;
+ break;
+ case 31250000:
+ index = 4;
+ break;
+ case 40000000:
+ index = 6;
+ break;
+ case 48000000:
+ case 96000000:
+ /*
+		 * Parent clock rates of 48 MHz and 96 MHz share the same
+		 * reference clock index; 96 MHz additionally requires the
+		 * source clock divider to be programmed as 2.
+ */
+ index = 7;
+ break;
+ case 50000000:
+ index = 8;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ u32 val, factor;
+
+ /*
+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
+ * by HW according to the parent clock rate.
+ */
+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
+
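+	/*
+	 * e.g. a 48 MHz reference with a factor of 125 yields
+	 * 48 MHz * 2 * 125 = 12 GHz (illustrative factor value; the
+	 * hardware programs the actual factor).
+	 */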
+ return parent_rate * 2 * factor;
+}
+
+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret;
+
+ /* Validate the rate of the single parent clock. */
+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This function initializes the CMN PLL to enable the fixed-rate output
+ * clocks. The PLL is expected to be configured only once.
+ */
+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ int ret, index;
+ u32 val;
+
+ /*
+	 * Configure the reference input clock selection according to the
+	 * given parent clock. The output clock rates are always fixed.
+ */
+ index = ipq_cmn_pll_find_freq_index(parent_rate);
+ if (index < 0)
+ return index;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_INDEX,
+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
+ if (ret)
+ return ret;
+
+ /*
+	 * When the parent clock rate is 96 MHz, update the source clock
+	 * rate selection and program the source clock divider as 2.
+ */
+ if (parent_rate == 96000000) {
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
+ CMN_PLL_REFCLK_SRC_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PLL locked detect. */
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
+ CMN_PLL_CTRL_LOCK_DETECT_EN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset the CMN PLL block to ensure the updated configurations
+ * take effect.
+ */
+ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1200);
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+	/* Wait for the CMN PLL output clocks to become stable (locked). */
+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
+ (val & CMN_PLL_CLKS_LOCKED),
+ 100, 100 * USEC_PER_MSEC);
+}
+
+static const struct clk_ops clk_cmn_pll_ops = {
+ .recalc_rate = clk_cmn_pll_recalc_rate,
+ .determine_rate = clk_cmn_pll_determine_rate,
+ .set_rate = clk_cmn_pll_set_rate,
+};
+
+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
+{
+ struct clk_parent_data pdata = { .index = 0 };
+ struct device *dev = &pdev->dev;
+ struct clk_init_data init = {};
+ struct clk_cmn_pll *cmn_pll;
+ struct regmap *regmap;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
+ if (!cmn_pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "cmn_pll";
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_cmn_pll_ops;
+
+ cmn_pll->hw.init = &init;
+ cmn_pll->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &cmn_pll->hw;
+}
+
+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
+{
+ const struct cmn_pll_fixed_output_clk *fixed_clk;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *cmn_pll_hw;
+ unsigned int num_clks;
+ struct clk_hw *hw;
+ int ret, i;
+
+ fixed_clk = ipq9574_output_clks;
+ num_clks = ARRAY_SIZE(ipq9574_output_clks);
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ /*
+ * Register the CMN PLL clock, which is the parent clock of
+ * the fixed rate output clocks.
+ */
+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
+ if (IS_ERR(cmn_pll_hw))
+ return PTR_ERR(cmn_pll_hw);
+
+ /* Register the fixed rate output clocks. */
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
+ cmn_pll_hw, 0,
+ fixed_clk[i].rate);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_fixed_clk;
+ }
+
+ hw_data->hws[fixed_clk[i].id] = hw;
+ }
+
+ /*
+	 * Provide the CMN PLL clock itself. Its rate is configured to
+	 * 12 GHz by the DT property assigned-clock-rates-u64.
+ */
+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
+ hw_data->num = num_clks + 1;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+ if (ret)
+ goto unregister_fixed_clk;
+
+ platform_set_drvdata(pdev, hw_data);
+
+ return 0;
+
+unregister_fixed_clk:
+ while (i > 0)
+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
+
+ return ret;
+}
+
+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ /*
+	 * To access the CMN PLL registers, the GCC AHB & SYS clocks
+	 * of the CMN PLL block must be enabled.
+ */
+ ret = pm_clk_add(dev, "ahb");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
+
+ ret = pm_clk_add(dev, "sys");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ /* Register CMN PLL clock and fixed rate output clocks. */
+ ret = ipq_cmn_pll_register_clks(pdev);
+ pm_runtime_put(dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+				     "Failed to register CMN PLL clocks\n");
+
+ return 0;
+}
+
+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+	 * The clock at index CMN_PLL_CLK was registered with devm and is
+	 * unregistered automatically by device-managed cleanup.
+ */
+ for (i = 0; i < hw_data->num; i++) {
+ if (i != CMN_PLL_CLK)
+ clk_hw_unregister(hw_data->hws[i]);
+ }
+}
+
+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
+ { .compatible = "qcom,ipq9574-cmn-pll", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
+
+static struct platform_driver ipq_cmn_pll_clk_driver = {
+ .probe = ipq_cmn_pll_clk_probe,
+ .remove = ipq_cmn_pll_clk_remove,
+ .driver = {
+ .name = "ipq_cmn_pll",
+ .of_match_table = ipq_cmn_pll_clk_ids,
+ .pm = &ipq_cmn_pll_pm_ops,
+ },
+};
+module_platform_driver(ipq_cmn_pll_clk_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
new file mode 100644
index 000000000000..8ffdab71b948
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, 2023 Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-lpasscc.h>
+
+#include "common.h"
+#include "reset.h"
+
+static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-audio-csr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpass_audiocc_sm6115_reset_desc = {
+ .config = &lpass_audiocc_sm6115_regmap_config,
+ .resets = lpass_audiocc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpass_audiocc_sm6115_resets),
+};
+
+static const struct qcom_reset_map lpasscc_sm6115_resets[] = {
+ [LPASS_SWR_TX_CONFIG_CGCR] = { .reg = 0x100, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpasscc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-tcsr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpasscc_sm6115_reset_desc = {
+ .config = &lpasscc_sm6115_regmap_config,
+ .resets = lpasscc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpasscc_sm6115_resets),
+};
+
+static const struct of_device_id lpasscc_sm6115_match_table[] = {
+ {
+ .compatible = "qcom,sm6115-lpassaudiocc",
+ .data = &lpass_audiocc_sm6115_reset_desc,
+ }, {
+ .compatible = "qcom,sm6115-lpasscc",
+ .data = &lpasscc_sm6115_reset_desc,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpasscc_sm6115_match_table);
+
+static int lpasscc_sm6115_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc = of_device_get_match_data(&pdev->dev);
+
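+	/*
+	 * Both match-data descriptors carry only resets;
+	 * qcom_cc_probe_by_index() maps the first register region and
+	 * registers them.
+	 */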
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static struct platform_driver lpasscc_sm6115_driver = {
+ .probe = lpasscc_sm6115_probe,
+ .driver = {
+ .name = "lpasscc-sm6115",
+ .of_match_table = lpasscc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(lpasscc_sm6115_driver);
+
+MODULE_DESCRIPTION("QTI LPASSCC SM6115 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 3f41249c5ae4..20d1c43f35d9 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -37,6 +37,7 @@ enum {
P_DSI2_PLL_DSICLK,
P_DSI1_PLL_BYTECLK,
P_DSI2_PLL_BYTECLK,
+ P_LVDS_PLL,
};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
@@ -143,6 +144,20 @@ static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
{ .fw_name = "dsi1pll", .name = "dsi1pll" },
};
+static const struct parent_map mmcc_pxo_dsi2_dsi1_lvds_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_LVDS_PLL, 2 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1_lvds[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "lvdspll", .name = "mpd4_lvds_pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
{ P_PXO, 0 },
{ P_DSI1_PLL_BYTECLK, 1 },
@@ -2439,26 +2454,42 @@ static struct clk_rcg dsi2_pixel_src = {
},
.s = {
.src_sel_shift = 0,
- .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ .parent_map = mmcc_pxo_dsi2_dsi1_lvds_map,
},
.clkr = {
.enable_reg = 0x0094,
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_data = mmcc_pxo_dsi2_dsi1,
- .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
+ .parent_data = mmcc_pxo_dsi2_dsi1_lvds,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1_lvds),
.ops = &clk_rcg_pixel_ops,
},
},
};
+static struct clk_branch dsi2_pixel_lvds_src = {
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_pixel_lvds_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch dsi2_pixel_clk = {
.halt_reg = 0x01d0,
.halt_bit = 19,
.clkr = {
.enable_reg = 0x0094,
- .enable_mask = BIT(0),
+ .enable_mask = 0,
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
.parent_hws = (const struct clk_hw*[]){
@@ -2471,6 +2502,24 @@ static struct clk_branch dsi2_pixel_clk = {
},
};
+static struct clk_branch lvds_clk = {
+ .halt_reg = 0x024c,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0264,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_lvds_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_lvds_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gfx2d0_ahb_clk = {
.hwcg_reg = 0x0038,
.hwcg_bit = 28,
@@ -2799,6 +2848,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
[CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
[PLL2] = &pll2.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_msm8960_resets[] = {
@@ -2983,6 +3034,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[VCAP_CLK] = &vcap_clk.clkr,
[VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
[PLL15] = &pll15.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_apq8064_resets[] = {
diff --git a/drivers/clk/qcom/tcsrcc-sm8750.c b/drivers/clk/qcom/tcsrcc-sm8750.c
new file mode 100644
index 000000000000..242e320986ef
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8750.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x1000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x2000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x3000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8750_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8750_desc = {
+ .config = &tcsr_cc_sm8750_regmap_config,
+ .clks = tcsr_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8750_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8750_match_table);
+
+static int tcsr_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_sm8750_desc);
+}
+
+static struct platform_driver tcsr_cc_sm8750_driver = {
+ .probe = tcsr_cc_sm8750_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8750",
+ .of_match_table = tcsr_cc_sm8750_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8750_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8750_driver);
+}
+subsys_initcall(tcsr_cc_sm8750_init);
+
+static void __exit tcsr_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8750_driver);
+}
+module_exit(tcsr_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI TCSR_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
index 97b8ca0f9181..19d433034884 100644
--- a/drivers/clk/ralink/clk-mtmips.c
+++ b/drivers/clk/ralink/clk-mtmips.c
@@ -266,7 +266,6 @@ err_clk_unreg:
}
static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
- CLK_FIXED("xtal", NULL, 40000000),
CLK_FIXED("periph", "xtal", 40000000)
};
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index ff01f5f0ed20..5a4bc3f94d49 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,6 +40,7 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G047 if ARCH_R9A09G047
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -194,6 +195,10 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G047
+ bool "RZ/G3E clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
@@ -234,7 +239,7 @@ config CLK_RZG2L
select RESET_CONTROLLER
config CLK_RZV2H
- bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ bool "RZ/{G3E,V2H(P)} family clock support" if COMPILE_TEST
select RESET_CONTROLLER
config CLK_RENESAS_VBATTB
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 82efaa835ac7..2d6e746939c4 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 55c8dd032fc3..d45571096b96 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -238,6 +238,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("vspx1", 1029, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx1", 1101, R8A779G0_CLK_S0D1_VIO),
DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 9067e407cbc6..607fa815b6c1 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -177,6 +177,9 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
+ DEF_MOD("dis0", 411, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink0", 415, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd0", 508, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif1", 515, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif2", 516, R8A779H0_CLK_SASYNCPERD1),
@@ -225,6 +228,7 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("vin15", 811, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin16", 812, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin17", 813, R8A779H0_CLK_S0D4_VIO),
+ DEF_MOD("vspd0", 830, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
DEF_MOD("cmt0", 910, R8A779H0_CLK_R),
DEF_MOD("cmt1", 911, R8A779H0_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index c1348e2d450c..dcda19318b2a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -20,15 +20,24 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
#define R9A06G032_SYSCTRL_USB 0x00
-#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
+#define R9A06G032_SYSCTRL_USB_H2MODE BIT(1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+#define R9A06G032_SYSCTRL_RSTEN 0x120
+#define R9A06G032_SYSCTRL_RSTEN_MRESET_EN BIT(0)
+#define R9A06G032_SYSCTRL_RSTCTRL 0x198
+/* These work for both reset registers */
+#define R9A06G032_SYSCTRL_SWRST BIT(6)
+#define R9A06G032_SYSCTRL_WDA7RST_1 BIT(2)
+#define R9A06G032_SYSCTRL_WDA7RST_0 BIT(1)
+
/**
* struct regbit - describe one bit in a register
* @reg: offset of register relative to base address,
@@ -1270,6 +1279,12 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static int r9a06g032_restart_handler(struct sys_off_data *data)
+{
+ writel(R9A06G032_SYSCTRL_SWRST, sysctrl_priv->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ return NOTIFY_DONE;
+}
+
static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
{
struct device_node *usbf_np;
@@ -1324,6 +1339,18 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
r9a06g032_init_h2mode(clocks);
+ /* Clear potentially pending resets */
+ writel(R9A06G032_SYSCTRL_WDA7RST_0 | R9A06G032_SYSCTRL_WDA7RST_1,
+ clocks->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ /* Allow software reset */
+ writel(R9A06G032_SYSCTRL_SWRST | R9A06G032_SYSCTRL_RSTEN_MRESET_EN,
+ clocks->reg + R9A06G032_SYSCTRL_RSTEN);
+
+ error = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ r9a06g032_restart_handler, NULL);
+ if (error)
+ dev_warn(dev, "couldn't register restart handler (%d)\n", error);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
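The probe-time writes above first acknowledge watchdog resets left pending from an earlier boot stage, then set the enable bits so that writing SWRST to RSTCTRL actually resets the machine once the restart handler fires. A self-contained sketch of that sys-off registration, with hypothetical register offsets standing in for the SYSCTRL layout:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#define EX_RSTEN	0x120	/* hypothetical offsets for illustration */
#define EX_RSTCTRL	0x198
#define EX_SWRST	BIT(6)

static void __iomem *ex_base;

static int ex_restart_handler(struct sys_off_data *data)
{
	writel(EX_SWRST, ex_base + EX_RSTCTRL);
	return NOTIFY_DONE;
}

static int ex_setup_restart(struct device *dev)
{
	/* Allow SWRST to take effect before registering the handler. */
	writel(EX_SWRST, ex_base + EX_RSTEN);
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_HIGH,
					     ex_restart_handler, NULL);
}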
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index b2ae8cdc4723..0e7e3bf05b52 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -187,6 +187,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3),
DEF_FIXED("HP", R9A08G045_CLK_HP, CLK_PLL6, 1, 2),
+ DEF_FIXED("TSU", R9A08G045_CLK_TSU, CLK_PLL2_DIV2, 1, 8),
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
@@ -209,6 +210,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7),
DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
@@ -224,7 +233,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5),
DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
};
@@ -238,6 +254,10 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A08G045_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A08G045_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A08G045_SSI3_RST_M2_REG, 0x870, 3),
DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
@@ -249,9 +269,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_I2C2_MRST, 0x880, 2),
DEF_RST(R9A08G045_I2C3_MRST, 0x880, 3),
DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A08G045_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A08G045_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A08G045_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A08G045_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A08G045_SCIF5_RST_SYSTEM_N, 0x884, 5),
DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0),
DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1),
DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -286,6 +313,14 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0),
DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi0", R9A08G045_PD_SSI0,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(10)), 0),
+ DEF_PD("ssi1", R9A08G045_PD_SSI1,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi2", R9A08G045_PD_SSI2,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(12)), 0),
+ DEF_PD("ssi3", R9A08G045_PD_SSI3,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(13)), 0),
DEF_PD("usb0", R9A08G045_PD_USB0,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0),
DEF_PD("usb1", R9A08G045_PD_USB1,
@@ -306,6 +341,18 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0),
DEF_PD("scif0", R9A08G045_PD_SCIF0,
DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0),
+ DEF_PD("scif1", R9A08G045_PD_SCIF1,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(2)), 0),
+ DEF_PD("scif2", R9A08G045_PD_SCIF2,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(3)), 0),
+ DEF_PD("scif3", R9A08G045_PD_SCIF3,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(4)), 0),
+ DEF_PD("scif4", R9A08G045_PD_SCIF4,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(5)), 0),
+ DEF_PD("scif5", R9A08G045_PD_SCIF5,
+ DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
+ DEF_PD("adc", R9A08G045_PD_ADC,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
DEF_PD("vbat", R9A08G045_PD_VBAT,
DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
GENPD_FLAG_ALWAYS_ON),
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
new file mode 100644
index 000000000000..536d922bed70
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3E CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G047_CA55_0_CORECLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G047_CA55_0_CORECLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G047_CA55_0_CORECLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+};
+
+static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+};
+
+const struct rzv2h_cpg_info r9a09g047_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g047_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g047_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g047_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g047_mod_clks),
+ .num_hw_mod_clks = 28 * 16,
+
+ /* Resets */
+ .resets = r9a09g047_resets,
+ .num_resets = ARRAY_SIZE(r9a09g047_resets),
+
+ .num_mstop_bits = 208,
+};
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 7c4507fd34e6..3705e18f66ad 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -28,6 +28,7 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
/* Internal Core Clocks */
CLK_PLLCM33_DIV16,
@@ -35,7 +36,13 @@ enum clk_ids {
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_CRU2,
+ CLK_PLLVDO_CRU3,
/* Module Clocks */
MOD_CLK_BASE,
@@ -49,6 +56,12 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +82,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
@@ -78,7 +92,14 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -94,49 +115,117 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5),
- DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
- DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
- DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
- DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
- DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
- DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
- DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
- DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
- DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
- DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
- DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
- DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
- DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
- DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
- DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
- DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
- DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
- DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
- DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
- DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
- DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
- DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
- DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
- DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
- DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
- DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
- DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
- DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
- DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
- DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
- DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
- DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
- DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
- DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
- DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
- DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
- DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
- DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_2_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 8, 6, 24,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD_NO_PM("cru_2_vclk", CLK_PLLVDO_CRU2, 13, 9, 6, 25,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_2_pclk", CLK_PLLDTY_DIV16, 13, 10, 6, 26,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_3_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 11, 6, 27,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD_NO_PM("cru_3_vclk", CLK_PLLVDO_CRU3, 13, 12, 6, 28,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
+ BUS_MSTOP(9, BIT(7))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
@@ -162,6 +251,18 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(12, 11, 5, 28), /* CRU_2_PRESETN */
+ DEF_RST(12, 12, 5, 29), /* CRU_2_ARESETN */
+ DEF_RST(12, 13, 5, 30), /* CRU_2_S_RESETN */
+ DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
+ DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
+ DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
@@ -179,4 +280,6 @@ const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
/* Resets */
.resets = r9a09g057_resets,
.num_resets = ARRAY_SIZE(r9a09g057_resets),
+
+ .num_mstop_bits = 192,
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 79e7a90c3b1b..bf85501709f0 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -979,7 +979,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
const struct cpg_mssr_info *info)
{
- struct device_node *soc = of_find_node_by_path("/soc");
+ struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
struct device_node *node;
uint32_t args[MAX_PHANDLE_ARGS];
unsigned int *ids = NULL;
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index b524a9d33610..a4c1e92e1fd7 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -40,6 +41,9 @@
#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
+#define CPG_BUS_1_MSTOP (0xd00)
+#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
+
#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
@@ -64,6 +68,7 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @mstop_count: Array of MSTOP usage counters, one per MSTOP bit
* @rcdev: Reset controller entity
*/
struct rzv2h_cpg_priv {
@@ -78,6 +83,8 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ atomic_t *mstop_count;
+
struct reset_controller_dev rcdev;
};
@@ -97,7 +104,9 @@ struct pll_clk {
* struct mod_clock - Module clock
*
* @priv: CPG private data
+ * @mstop_data: packed MSTOP register index and bit mask for this clock
* @hw: handle between common and hardware-specific interfaces
+ * @no_pm: flag to indicate PM is not supported
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
@@ -105,7 +114,9 @@ struct pll_clk {
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
+ unsigned int mstop_data;
struct clk_hw hw;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
@@ -431,8 +442,71 @@ fail:
core->name, PTR_ERR(clk));
}
+static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]))
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]) ||
+ atomic_dec_and_test(&mstop[i]))
+ val |= BIT(i) << 16 | BIT(i);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ } else {
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+ }
+
+ return readl(priv->base + offset) & bitmask;
+}
+
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
+ bool enabled = rzv2h_mod_clock_is_enabled(hw);
struct mod_clock *clock = to_mod_clock(hw);
unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
struct rzv2h_cpg_priv *priv = clock->priv;
@@ -444,11 +518,20 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
enable ? "ON" : "OFF");
+ if (enabled == enable)
+ return 0;
+
value = bitmask << 16;
- if (enable)
+ if (enable) {
value |= bitmask;
-
- writel(value, priv->base + reg);
+ writel(value, priv->base + reg);
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else {
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
+ writel(value, priv->base + reg);
+ }
if (!enable || clock->mon_index < 0)
return 0;
@@ -474,24 +557,6 @@ static void rzv2h_mod_clock_disable(struct clk_hw *hw)
rzv2h_mod_clock_endisable(hw, false);
}
-static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
-{
- struct mod_clock *clock = to_mod_clock(hw);
- struct rzv2h_cpg_priv *priv = clock->priv;
- u32 bitmask;
- u32 offset;
-
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
- bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
- }
-
- return readl(priv->base + offset) & bitmask;
-}
-
static const struct clk_ops rzv2h_mod_clock_ops = {
.enable = rzv2h_mod_clock_enable,
.disable = rzv2h_mod_clock_disable,
@@ -541,8 +606,10 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->on_bit = mod->on_bit;
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
+ clock->no_pm = mod->no_pm;
clock->priv = priv;
clock->hw.init = &init;
+ clock->mstop_data = mod->mstop_data;
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
@@ -552,6 +619,41 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
priv->clks[id] = clock->hw.clk;
+ /*
+ * Keep the module clock and its MSTOP bits in sync when the clock was
+ * already turned ON by the bootloader: take the MSTOP references for
+ * any non-critical module clock found running at registration time.
+ */
+ if (clock->mstop_data != BUS_MSTOP_NONE &&
+ !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ /*
+ * Critical clocks are turned ON immediately upon registration, and the
+ * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
+ * However, if the critical clocks were already turned ON by the initial
+ * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
+ */
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (atomic_read(&mstop[i]))
+ continue;
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ }
+
return;
fail:
@@ -668,17 +770,51 @@ struct rzv2h_cpg_pd {
struct generic_pm_domain genpd;
};
+static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzv2h_cpg_priv *priv = pd->priv;
+ unsigned int id = clkspec->args[1];
+ struct mod_clock *clock;
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
+ return false;
+
+ clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));
+
+ return !clock->no_pm;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
+ struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
+ of_node_put(clkspec.np);
+ continue;
+ }
+
if (once) {
once = false;
error = pm_clk_create(dev);
@@ -700,7 +836,6 @@ static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device
error);
goto fail_put;
}
- i++;
}
return 0;
@@ -786,6 +921,11 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
+ priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
+ sizeof(*priv->mstop_count), GFP_KERNEL);
+ if (!priv->mstop_count)
+ return -ENOMEM;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
@@ -833,6 +973,12 @@ static const struct of_device_id rzv2h_cpg_match[] = {
.data = &r9a09g057_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G047
+ {
+ .compatible = "renesas,r9a09g047-cpg",
+ .data = &r9a09g047_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
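The two MSTOP helpers above combine a register idiom with a counting rule: the upper 16 bits of each CPG_BUS_m_MSTOP write act as a write-enable mask, so writing BIT(i) << 16 alone releases stop bit i while BIT(i) << 16 | BIT(i) reasserts it, and the per-bit atomic counters make sure a shared stop bit (the four sdhi_0 clocks all use BUS_MSTOP(8, BIT(2)), for instance) is only released for the first user and stopped again after the last. Below is a standalone userspace model of that logic; it mirrors the kernel flow with plain counters and a simulated register, so treat it as an illustration rather than driver code:

#include <stdint.h>
#include <stdio.h>

#define NBITS 16

static unsigned int count[NBITS];    /* models priv->mstop_count */
static uint16_t mstop_reg = 0xffff;  /* assume all stop bits set initially */

/* Model the write-enable idiom: bits 31:16 select, bits 15:0 set/clear. */
static void mstop_write(uint32_t val)
{
	uint16_t wen = val >> 16;

	mstop_reg = (mstop_reg & ~wen) | (val & wen);
}

static void mstop_enable(uint16_t mask)
{
	uint32_t val = 0;

	for (int i = 0; i < NBITS; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (count[i]++ == 0)	/* first user: release the stop bit */
			val |= 1u << (i + 16);
	}
	if (val)
		mstop_write(val);
}

static void mstop_disable(uint16_t mask)
{
	uint32_t val = 0;

	for (int i = 0; i < NBITS; i++) {
		if (!(mask & (1u << i)))
			continue;
		/* Like the driver, also reassert if the count is already 0. */
		if (count[i] == 0 || --count[i] == 0)
			val |= (1u << (i + 16)) | (1u << i);
	}
	if (val)
		mstop_write(val);
}

int main(void)
{
	mstop_enable(1u << 4);		/* e.g. cru_0_aclk */
	mstop_enable(1u << 4);		/* cru_0_vclk shares the same bit */
	printf("reg=%04x\n", (unsigned)mstop_reg); /* 0xffef: released */
	mstop_disable(1u << 4);
	printf("reg=%04x\n", (unsigned)mstop_reg); /* 0xffef: one user left */
	mstop_disable(1u << 4);
	printf("reg=%04x\n", (unsigned)mstop_reg); /* 0xffff: stopped again */
	return 0;
}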
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 819029c81904..fd8eb985c75b 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -8,6 +8,8 @@
#ifndef __RENESAS_RZV2H_CPG_H__
#define __RENESAS_RZV2H_CPG_H__
+#include <linux/bitfield.h>
+
/**
* struct ddiv - Structure for dynamic switching divider
*
@@ -33,12 +35,24 @@ struct ddiv {
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV3 (0x40C)
+#define CPG_CDDIV4 (0x410)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4)
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
+#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
+#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
+#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+
+#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
+#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
+#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
+ FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
+#define BUS_MSTOP_NONE GENMASK(31, 0)
/**
* Definitions of CPG Core Clocks
@@ -98,8 +112,10 @@ enum clk_types {
* struct rzv2h_mod_clk - Module Clocks definitions
*
* @name: handle between common and hardware-specific interfaces
+ * @mstop_data: packed MSTOP register index and bit mask
* @parent: id of parent clock
* @critical: flag to indicate the clock is critical
+ * @no_pm: flag to indicate PM is not supported
* @on_index: control register index
* @on_bit: ON bit
* @mon_index: monitor register index
@@ -107,30 +123,37 @@ enum clk_types {
*/
struct rzv2h_mod_clk {
const char *name;
+ u32 mstop_data;
u16 parent;
bool critical;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
u8 mon_bit;
};
-#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, _onbit, _monindex, _monbit) \
{ \
.name = (_name), \
+ .mstop_data = (_mstop), \
.parent = (_parent), \
.critical = (_critical), \
+ .no_pm = (_no_pm), \
.on_index = (_onindex), \
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
}
-#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit)
-#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit)
/**
* struct rzv2h_reset - Reset definitions
@@ -172,6 +195,9 @@ struct rzv2h_reset {
*
* @resets: Array of Module Reset definitions
* @num_resets: Number of entries in resets[]
+ *
+ * @num_mstop_bits: Maximum number of MSTOP bits supported, equivalent to the
+ * number of CPG_BUS_m_MSTOP registers multiplied by 16.
*/
struct rzv2h_cpg_info {
/* Core Clocks */
@@ -188,8 +214,11 @@ struct rzv2h_cpg_info {
/* Resets */
const struct rzv2h_reset *resets;
unsigned int num_resets;
+
+ unsigned int num_mstop_bits;
};
+extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
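BUS_MSTOP() above packs the 1-based index of the CPG_BUS_m_MSTOP register into bits 31:16 and the 16-bit stop-bit mask into bits 15:0; BUS_MSTOP_NONE is all ones, which no valid encoding produces. A compile-time restatement of the packing under local names (the EX_* macros are illustrative copies, not the header's):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>

#define EX_MSTOP_IDX_MASK	GENMASK(31, 16)
#define EX_MSTOP_BITS_MASK	GENMASK(15, 0)
#define EX_MSTOP(idx, mask)	(FIELD_PREP_CONST(EX_MSTOP_IDX_MASK, (idx)) | \
				 FIELD_PREP_CONST(EX_MSTOP_BITS_MASK, (mask)))

/* BUS_MSTOP(9, BIT(4)), as used by the cru_0 clocks, packs to 0x00090010: */
static_assert(EX_MSTOP(9, BIT(4)) == 0x00090010);
/* and unpacks to register CPG_BUS_9_MSTOP (0xd00 + (9 - 1) * 4), bit 4: */
static_assert((EX_MSTOP(9, BIT(4)) >> 16) == 9);
static_assert((EX_MSTOP(9, BIT(4)) & 0xffff) == BIT(4));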
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index af2ade54a7ef..3fe7616f0ebe 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -13,6 +13,7 @@ clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
clk-rockchip-y += clk-muxgrf.o
clk-rockchip-y += clk-ddr.o
+clk-rockchip-y += gate-link.o
clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 0ffaf639f807..4031733def4e 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -12,28 +12,6 @@
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include "clk.h"
-/*
- * Recent Rockchip SoCs have a new hardware block called Native Interface
- * Unit (NIU), which gates clocks to devices behind them. These effectively
- * need two parent clocks.
- *
- * Downstream enables the linked clock via runtime PM whenever the gate is
- * enabled. This implementation uses separate clock nodes for each of the
- * linked gate clocks, which leaks parts of the clock tree into DT.
- *
- * The GATE_LINK macro instead takes the second parent via 'linkname', but
- * ignores the information. Once the clock framework is ready to handle it, the
- * information should be passed on here. But since these clocks are required to
- * access multiple relevant IP blocks, such as PCIe or USB, we mark all linked
- * clocks critical until a better solution is available. This will waste some
- * power, but avoids leaking implementation details into DT or hanging the
- * system.
- */
-#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
- GATE(_id, cname, pname, f, o, b, gf)
-#define RK3588_LINKED_CLK CLK_IS_CRITICAL
-
-
#define RK3588_GRF_SOC_STATUS0 0x600
#define RK3588_PHYREF_ALT_GATE 0xc38
@@ -266,6 +244,8 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
}, \
}
+static struct rockchip_clk_provider *early_ctx;
+
static struct rockchip_cpuclk_rate_table rk3588_cpub0clk_rates[] __initdata = {
RK3588_CPUB01CLK_RATE(2496000000, 1),
RK3588_CPUB01CLK_RATE(2400000000, 1),
@@ -694,7 +674,7 @@ static struct rockchip_pll_clock rk3588_pll_clks[] __initdata = {
RK3588_MODE_CON0, 10, 15, 0, rk3588_pll_rates),
};
-static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
+static struct rockchip_clk_branch rk3588_early_clk_branches[] __initdata = {
/*
* CRU Clock-Architecture
*/
@@ -792,10 +772,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(MCLK_GMAC0_OUT, "mclk_gmac0_out", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 3, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(15), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 4, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(16), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 5, GFLAGS),
COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
@@ -1456,7 +1436,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(77), 0, 2, MFLAGS,
RK3588_CLKGATE_CON(31), 0, GFLAGS),
- COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(77), 7, 1, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(31), 1, GFLAGS),
GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
@@ -1685,13 +1665,13 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(42), 9, GFLAGS),
/* vdpu */
- COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, 0,
RK3588_CLKSEL_CON(98), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(44), 0, GFLAGS),
COMPOSITE_NODIV(ACLK_VDPU_LOW_ROOT, "aclk_vdpu_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(98), 7, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(98), 9, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 2, GFLAGS),
COMPOSITE(ACLK_JPEG_DECODER_ROOT, "aclk_jpeg_decoder_root", gpll_cpll_aupll_spll_p, 0,
@@ -1742,9 +1722,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_RKVENC0_ROOT, "aclk_rkvenc0_root", gpll_cpll_npll_p, 0,
RK3588_CLKSEL_CON(102), 7, 2, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(47), 1, GFLAGS),
- GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 4, GFLAGS),
- GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 5, GFLAGS),
COMPOSITE(CLK_RKVENC0_CORE, "clk_rkvenc0_core", gpll_cpll_aupll_npll_p, 0,
RK3588_CLKSEL_CON(102), 14, 2, MFLAGS, 9, 5, DFLAGS,
@@ -1754,10 +1734,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(48), 6, GFLAGS),
/* vi */
- COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, 0,
RK3588_CLKSEL_CON(106), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(49), 0, GFLAGS),
- COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(106), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(49), 1, GFLAGS),
COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
@@ -1927,10 +1907,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_dmyaupll_npll_spll_p, 0,
RK3588_CLKSEL_CON(110), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(52), 0, GFLAGS),
- COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(110), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(110), 10, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 2, GFLAGS),
COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
@@ -2428,10 +2408,12 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(68), 5, GFLAGS),
GATE(ACLK_AV1, "aclk_av1", "aclk_av1_pre", 0,
RK3588_CLKGATE_CON(68), 2, GFLAGS),
+};
+static struct rockchip_clk_branch rk3588_clk_branches[] = {
GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", ACLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", HCLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, 0, RK3588_CLKGATE_CON(31), 2, GFLAGS),
GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
@@ -2443,9 +2425,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", ACLK_VOP_LOW_ROOT, 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
- GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(55), 5, GFLAGS),
+ GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, 0, RK3588_CLKGATE_CON(55), 5, GFLAGS),
GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
- GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(59), 9, GFLAGS),
+ GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 9, GFLAGS),
GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", HCLK_NVM, 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
@@ -2453,26 +2435,31 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(PCLK_VO1GRF, "pclk_vo1grf", "pclk_vo1_root", HCLK_VO1, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(59), 12, GFLAGS),
};
-static void __init rk3588_clk_init(struct device_node *np)
+static void __init rk3588_clk_early_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
- unsigned long clk_nr_clks;
+ unsigned long clk_nr_clks, max_clk_id1, max_clk_id2;
void __iomem *reg_base;
- clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches)) + 1;
+ max_clk_id1 = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
+ max_clk_id2 = rockchip_clk_find_max_clk_id(rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+ clk_nr_clks = max(max_clk_id1, max_clk_id2) + 1;
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ ctx = rockchip_clk_init_early(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
return;
}
+ early_ctx = ctx;
rockchip_clk_register_plls(ctx, rk3588_pll_clks,
ARRAY_SIZE(rk3588_pll_clks),
@@ -2491,14 +2478,55 @@ static void __init rk3588_clk_init(struct device_node *np)
&rk3588_cpub1clk_data, rk3588_cpub1clk_rates,
ARRAY_SIZE(rk3588_cpub1clk_rates));
- rockchip_clk_register_branches(ctx, rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches));
+ rockchip_clk_register_branches(ctx, rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE_DRIVER(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_early_init);
+
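+/*
+ * Note: CLK_OF_DECLARE_DRIVER (instead of CLK_OF_DECLARE) keeps the CRU
+ * node eligible for the platform driver below, whose probe registers the
+ * GATE_LINK clocks once runtime PM is available.
+ */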
+static int clk_rk3588_probe(struct platform_device *pdev)
+{
+ struct rockchip_clk_provider *ctx = early_ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ rockchip_clk_register_late_branches(dev, ctx, rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
- rk3588_rst_init(np, reg_base);
+ rockchip_clk_finalize(ctx);
+ rk3588_rst_init(np, ctx->reg_base);
rockchip_register_restart_notifier(ctx, RK3588_GLB_SRST_FST, NULL);
+ /*
+ * Re-add clock provider, so that the newly added clocks are also
+ * re-parented and get their defaults configured.
+ */
+ of_clk_del_provider(np);
rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
}
-CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
+
+static const struct of_device_id clk_rk3588_match_table[] = {
+ {
+ .compatible = "rockchip,rk3588-cru",
+ },
+ { }
+};
+
+static struct platform_driver clk_rk3588_driver = {
+ .probe = clk_rk3588_probe,
+ .driver = {
+ .name = "clk-rk3588",
+ .of_match_table = clk_rk3588_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_clk_rk3588_drv_register(void)
+{
+ return platform_driver_register(&clk_rk3588_driver);
+}
+core_initcall(rockchip_clk_rk3588_drv_register);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 88629a9abc9c..cbf93ea119a9 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
@@ -197,12 +198,6 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}
-static void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
- struct clk *clk, unsigned int id)
-{
- ctx->clk_data.clks[id] = clk;
-}
-
static struct clk *rockchip_clk_register_frac_branch(
struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
@@ -292,7 +287,7 @@ static struct clk *rockchip_clk_register_frac_branch(
return mux_clk;
}
- rockchip_clk_add_lookup(ctx, mux_clk, child->id);
+ rockchip_clk_set_lookup(ctx, mux_clk, child->id);
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
@@ -359,14 +354,17 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
return hw->clk;
}
-struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
- void __iomem *base,
- unsigned long nr_clks)
+static struct rockchip_clk_provider *rockchip_clk_init_base(
+ struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, bool has_late_clocks)
{
struct rockchip_clk_provider *ctx;
struct clk **clk_table;
+ struct clk *default_clk_val;
int i;
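+	/*
+	 * Clock IDs that are only filled in by the late GATE_LINK
+	 * registration start out as -EPROBE_DEFER so that consumers retry;
+	 * rockchip_clk_finalize() downgrades any leftovers to -ENOENT.
+	 */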
+ default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);
+
ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -376,7 +374,7 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
goto err_free;
for (i = 0; i < nr_clks; ++i)
- clk_table[i] = ERR_PTR(-ENOENT);
+ clk_table[i] = default_clk_val;
ctx->reg_base = base;
ctx->clk_data.clks = clk_table;
@@ -393,8 +391,33 @@ err_free:
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, false);
+}
EXPORT_SYMBOL_GPL(rockchip_clk_init);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, true);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_init_early);
+
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->clk_data.clk_num; ++i)
+ if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
+ ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_finalize);
+
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx)
{
@@ -424,7 +447,7 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
@@ -446,6 +469,29 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
}
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+static struct platform_device *rockchip_clk_register_gate_link(
+ struct device *parent_dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ struct rockchip_gate_link_platdata gate_link_pdata = {
+ .ctx = ctx,
+ .clkbr = clkbr,
+ };
+
+ struct platform_device_info pdevinfo = {
+ .parent = parent_dev,
+ .name = "rockchip-gate-link-clk",
+ .id = clkbr->id,
+ .fwnode = dev_fwnode(parent_dev),
+ .of_node_reused = true,
+ .data = &gate_link_pdata,
+ .size_data = sizeof(gate_link_pdata),
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
@@ -571,6 +617,9 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->div_width, list->div_flags,
ctx->reg_base, &ctx->lock);
break;
+ case branch_linked_gate:
+ /* must be registered late, fall-through for error message */
+ break;
}
/* none of the cases above matched */
@@ -586,11 +635,36 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ struct platform_device *pdev = NULL;
+
+ switch (list->branch_type) {
+ case branch_linked_gate:
+ pdev = rockchip_clk_register_gate_link(dev, ctx, list);
+ break;
+ default:
+ dev_err(dev, "unknown clock type %d\n", list->branch_type);
+ break;
+ }
+
+ if (!pdev)
+ dev_err(dev, "failed to register device for clock %s\n", list->name);
+ }
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);
+
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
unsigned int lookup_id,
const char *name, const char *const *parent_names,
@@ -610,7 +684,7 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
return;
}
- rockchip_clk_add_lookup(ctx, clk, lookup_id);
+ rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index f1957e1c1178..9b37d44b9e5d 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -570,6 +570,7 @@ enum rockchip_clk_branch_type {
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_linked_gate,
branch_mmc,
branch_inverter,
branch_factor,
@@ -597,6 +598,7 @@ struct rockchip_clk_branch {
int gate_offset;
u8 gate_shift;
u8 gate_flags;
+ unsigned int linked_clk_id;
struct rockchip_clk_branch *child;
};
@@ -895,6 +897,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_linked_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .linked_clk_id = linkedclk, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ }
+
#define MMC(_id, cname, pname, offset, shift) \
{ \
.id = _id, \
@@ -1022,8 +1038,28 @@ struct rockchip_clk_branch {
#define SGRF_GATE(_id, cname, pname) \
FACTOR(_id, cname, pname, 0, 1, 1)
+static inline struct clk *rockchip_clk_get_lookup(struct rockchip_clk_provider *ctx,
+ unsigned int id)
+{
+ return ctx->clk_data.clks[id];
+}
+
+static inline void rockchip_clk_set_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id)
+{
+ ctx->clk_data.clks[id] = clk;
+}
+
+struct rockchip_gate_link_platdata {
+ struct rockchip_clk_provider *ctx;
+ struct rockchip_clk_branch *clkbr;
+};
+
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks);
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
@@ -1031,6 +1067,10 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
struct rockchip_pll_clock *pll_list,
unsigned int nr_pll, int grf_lock_offset);
diff --git a/drivers/clk/rockchip/gate-link.c b/drivers/clk/rockchip/gate-link.c
new file mode 100644
index 000000000000..cd0f7a2d30ab
--- /dev/null
+++ b/drivers/clk/rockchip/gate-link.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include "clk.h"
+
+static int rk_clk_gate_link_register(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ unsigned long flags = clkbr->flags | CLK_SET_RATE_PARENT;
+ struct clk *clk;
+
+ clk = clk_register_gate(dev, clkbr->name, clkbr->parent_names[0],
+ flags, ctx->reg_base + clkbr->gate_offset,
+ clkbr->gate_shift, clkbr->gate_flags,
+ &ctx->lock);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rockchip_clk_set_lookup(ctx, clk, clkbr->id);
+ return 0;
+}
+
+static int rk_clk_gate_link_probe(struct platform_device *pdev)
+{
+ struct rockchip_gate_link_platdata *pdata;
+ struct device *dev = &pdev->dev;
+ struct clk *linked_clk;
+ int ret;
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+		return dev_err_probe(dev, -ENODEV, "missing platform data\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
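+	/*
+	 * The gate is registered on this runtime-PM-enabled device, so the
+	 * clk core resumes the device (and pm_clk enables the linked clock)
+	 * around gate operations and suspends it again when idle.
+	 */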
+ linked_clk = rockchip_clk_get_lookup(pdata->ctx, pdata->clkbr->linked_clk_id);
+ ret = pm_clk_add_clk(dev, linked_clk);
+ if (ret)
+ return ret;
+
+ ret = rk_clk_gate_link_register(dev, pdata->ctx, pdata->clkbr);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ pm_clk_remove_clk(dev, linked_clk);
+ return ret;
+}
+
+static const struct dev_pm_ops rk_clk_gate_link_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver rk_clk_gate_link_driver = {
+ .probe = rk_clk_gate_link_probe,
+ .driver = {
+ .name = "rockchip-gate-link-clk",
+ .pm = &rk_clk_gate_link_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rk_clk_gate_link_drv_register(void)
+{
+ return platform_driver_register(&rk_clk_gate_link_driver);
+}
+core_initcall(rk_clk_gate_link_drv_register);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 7a88331a658d..90e5b114872c 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
new file mode 100644
index 000000000000..8e2a2e8eccee
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Common Clock Framework support for Exynos990.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos990.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+#include "clk-pll.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x1a330000) */
+#define PLL_LOCKTIME_PLL_G3D 0x0000
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_CON0_PLL_G3D 0x0100
+#define PLL_CON3_PLL_G3D 0x010c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DNS_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_ALT 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_IPP_BUS 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_ITP_BUS 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUS1_SSS 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x181c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK4 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK5 0x1828
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x182c
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_CPUCL2_BUSP 0x1840
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1844
+#define CLK_CON_DIV_CLKCMU_CSIS_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x184c
+#define CLK_CON_DIV_CLKCMU_DNC_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_DNC_BUSM 0x1854
+#define CLK_CON_DIV_CLKCMU_DNS_BUS 0x1858
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x185c
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1860
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_HPM 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_BUS 0x1870
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1874
+#define CLK_CON_DIV_CLKCMU_HSI0_USB31DRD 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_BUS 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_HSI2_PCIE 0x1898
+#define CLK_CON_DIV_CLKCMU_IPP_BUS 0x189c
+#define CLK_CON_DIV_CLKCMU_ITP_BUS 0x18a0
+#define CLK_CON_DIV_CLKCMU_MCSC_BUS 0x18a4
+#define CLK_CON_DIV_CLKCMU_MCSC_GDC 0x18a8
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x18ac
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x18b0
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x18b4
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x18b8
+#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x18bc
+#define CLK_CON_DIV_CLKCMU_OTP 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18d4
+#define CLK_CON_DIV_CLKCMU_SSP_BUS 0x18d8
+#define CLK_CON_DIV_CLKCMU_TNR_BUS 0x18dc
+#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1900
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x1904
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x1908
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x190c
+#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x1910
+#define CLK_CON_DIV_PLL_SHARED4_DIV3 0x1914
+#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x1918
+#define CLK_CON_GAT_CLKCMU_G3D_BUS 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_DNS_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_IPP_BUS 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_ITP_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_BUS 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_TNR_BUS 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_VRA_BUS 0x20e8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_G3D,
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON3_PLL_G3D,
+ PLL_CON3_PLL_MMC,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_BUS0_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_SSS,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_CSIS_BUS,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_DNC_BUS,
+ CLK_CON_DIV_CLKCMU_DNC_BUSM,
+ CLK_CON_DIV_CLKCMU_DNS_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_HSI0_BUS,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_DIV_CLKCMU_HSI1_BUS,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_HSI2_BUS,
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE,
+ CLK_CON_DIV_CLKCMU_IPP_BUS,
+ CLK_CON_DIV_CLKCMU_ITP_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_GDC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_NPU_BUS,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SSP_BUS,
+ CLK_CON_DIV_CLKCMU_TNR_BUS,
+ CLK_CON_DIV_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV3,
+ CLK_CON_DIV_PLL_SHARED4_DIV4,
+ CLK_CON_GAT_CLKCMU_G3D_BUS,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM,
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_0717x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_0732x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_0718x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
+};
+
+/* Parent clock list for CMU_TOP muxes */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_bus0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_sss_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk4_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk5_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cmu_boost_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "fout_shared3_pll", "oscclk" };
+PNAME(mout_cmu_cpucl0_dbg_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "oscclk" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl2_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_csis_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+PNAME(mout_cmu_csis_ois_mcu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dnc_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dnc_busm_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4" };
+PNAME(mout_cmu_dns_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dpu_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dpu_alt_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g2d_g2d_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_mscl_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4",
+ "oscclk" };
+PNAME(mout_cmu_hpm_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usb31drd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usbdp_debug_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "fout_mmc_pll", "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_mmc_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_hsi1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_ufs_card_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi1_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi2_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_ipp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_itp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_gdc_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cmu_boost_cpu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mfc0_mfc0_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mfc0_wfd_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_npu_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_ssp_bus_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_tnr_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON{0,3}_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_PLL_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
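+ *
+ * For example, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS becomes CLK_MOUT_CMU_HSI0_BUS
+ * and "mout_cmu_hsi0_bus".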
+ *
+ * For gates remove _UID _BLK _IPCLKPORT, _I and _RSTNSYNC
+ */
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON3_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON3_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON3_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON3_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
+ PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
+ mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
+ mout_cmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus",
+ mout_cmu_bus0_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus",
+ mout_cmu_bus1_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_SSS, "mout_cmu_bus1_sss",
+ mout_cmu_bus1_sss_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0",
+ mout_cmu_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1",
+ mout_cmu_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2",
+ mout_cmu_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3",
+ mout_cmu_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK4, "mout_cmu_cis_clk4",
+ mout_cmu_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK5, "mout_cmu_cis_clk5",
+ mout_cmu_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_CMU_BOOST, "mout_cmu_cmu_boost",
+ mout_cmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_cmu_core_bus",
+ mout_cmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_BUS, "mout_cmu_cpucl0_dbg_bus",
+ mout_cmu_cpucl0_dbg_bus_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL2_BUSP, "mout_cmu_cpucl2_busp",
+ mout_cmu_cpucl2_busp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_BUS, "mout_cmu_csis_bus",
+ mout_cmu_csis_bus_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_OIS_MCU, "mout_cmu_csis_ois_mcu",
+ mout_cmu_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_DNC_BUS, "mout_cmu_dnc_bus",
+ mout_cmu_dnc_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DNC_BUSM, "mout_cmu_dnc_busm",
+ mout_cmu_dnc_busm_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM, 0, 2),
+ MUX(CLK_MOUT_CMU_DNS_BUS, "mout_cmu_dns_bus",
+ mout_cmu_dns_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DPU, "mout_cmu_dpu",
+ mout_cmu_dpu_p, CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 1),
+ MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
+ mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
+ mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
+ mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
+ mout_cmu_hsi0_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI0_DPGTC, "mout_cmu_hsi0_dpgtc",
+ mout_cmu_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USB31DRD, "mout_cmu_hsi0_usb31drd",
+ mout_cmu_hsi0_usb31drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
+ mout_cmu_hsi0_usbdp_debug_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
+ mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
+ mout_cmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_PCIE, "mout_cmu_hsi1_pcie",
+ mout_cmu_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_CARD, "mout_cmu_hsi1_ufs_card",
+ mout_cmu_hsi1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
+ mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
+ mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
+ mout_cmu_hsi2_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_IPP_BUS, "mout_cmu_ipp_bus",
+ mout_cmu_ipp_bus_p, CLK_CON_MUX_MUX_CLKCMU_IPP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_ITP_BUS, "mout_cmu_itp_bus",
+ mout_cmu_itp_bus_p, CLK_CON_MUX_MUX_CLKCMU_ITP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_BUS, "mout_cmu_mcsc_bus",
+ mout_cmu_mcsc_bus_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_GDC, "mout_cmu_mcsc_gdc",
+ mout_cmu_mcsc_gdc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC, 0, 3),
+ MUX(CLK_MOUT_CMU_CMU_BOOST_CPU, "mout_cmu_cmu_boost_cpu",
+ mout_cmu_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_MFC0, "mout_cmu_mfc0_mfc0",
+ mout_cmu_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_WFD, "mout_cmu_mfc0_wfd",
+ mout_cmu_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_BUSP, "mout_cmu_mif_busp",
+ mout_cmu_mif_busp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_NPU_BUS, "mout_cmu_npu_bus",
+ mout_cmu_npu_bus_p, CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_IP, "mout_cmu_peric0_ip",
+ mout_cmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_IP, "mout_cmu_peric1_ip",
+ mout_cmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus",
+ mout_cmu_peris_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SSP_BUS, "mout_cmu_ssp_bus",
+ mout_cmu_ssp_bus_p, CLK_CON_MUX_MUX_CLKCMU_SSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_TNR_BUS, "mout_cmu_tnr_bus",
+ mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
+ mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+	/* SHARED0 region */
+ DIV(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV3, "dout_cmu_shared0_div3", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4", "dout_cmu_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+
+	/* SHARED1 region */
+ DIV(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV3, "dout_cmu_shared1_div3", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4", "dout_cmu_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* SHARED2 region */
+ DIV(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2", "mout_pll_shared2",
+ CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+
+	/* SHARED4 region */
+ DIV(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV3, "dout_cmu_shared4_div3", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV4, "dout_cmu_shared4_div4", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
+ CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+ CLK_CON_DIV_CLKCMU_BUS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_SSS, "dout_cmu_bus1_sss", "gout_cmu_bus1_sss",
+ CLK_CON_DIV_CLKCMU_BUS1_SSS, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK4, "dout_cmu_cis_clk4", "gout_cmu_cis_clk4",
+ CLK_CON_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK5, "dout_cmu_cis_clk5", "gout_cmu_cis_clk5",
+ CLK_CON_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost", "mout_cmu_cmu_boost",
+ CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ "gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL2_BUSP, "dout_cmu_cpucl2_busp",
+ "gout_cmu_cpucl2_busp", CLK_CON_DIV_CLKCMU_CPUCL2_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL2_SWITCH, "dout_cmu_cpucl2_switch",
+ "gout_cmu_cpucl2_switch", CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CSIS_BUS, "dout_cmu_csis_bus", "gout_cmu_csis_bus",
+ CLK_CON_DIV_CLKCMU_CSIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "gout_cmu_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUS, "dout_cmu_dnc_bus", "gout_cmu_dnc_bus",
+ CLK_CON_DIV_CLKCMU_DNC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUSM, "dout_cmu_dnc_busm", "gout_cmu_dnc_busm",
+ CLK_CON_DIV_CLKCMU_DNC_BUSM, 0, 4),
+ DIV(CLK_DOUT_CMU_DNS_BUS, "dout_cmu_dns_bus", "gout_cmu_dns_bus",
+ CLK_CON_DIV_CLKCMU_DNS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_MSCL, "dout_cmu_g2d_mscl", "gout_cmu_g2d_mscl",
+ CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_HSI0_BUS, "dout_cmu_hsi0_bus", "gout_cmu_hsi0_bus",
+ CLK_CON_DIV_CLKCMU_HSI0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc", "gout_cmu_hsi0_dpgtc",
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+ "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+ CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
+ "gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
+ "gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_EMBD, "dout_cmu_hsi1_ufs_embd",
+ "gout_cmu_hsi1_ufs_embd", CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+ CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+ CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+ CLK_CON_DIV_CLKCMU_ITP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_BUS, "dout_cmu_mcsc_bus", "gout_cmu_mcsc_bus",
+ CLK_CON_DIV_CLKCMU_MCSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_GDC, "dout_cmu_mcsc_gdc", "gout_cmu_mcsc_gdc",
+ CLK_CON_DIV_CLKCMU_MCSC_GDC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0", "gout_cmu_mfc0_mfc0",
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd", "gout_cmu_mfc0_wfd",
+ CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_cmu_mif_busp", "gout_cmu_mif_busp",
+ CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_NPU_BUS, "dout_cmu_npu_bus", "gout_cmu_npu_bus",
+ CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus", "gout_cmu_peric0_bus",
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP, "dout_cmu_peric0_ip", "gout_cmu_peric0_ip",
+ CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus", "gout_cmu_peric1_bus",
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP, "dout_cmu_peric1_ip", "gout_cmu_peric1_ip",
+ CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_BUS, "dout_cmu_ssp_bus", "gout_cmu_ssp_bus",
+ CLK_CON_DIV_CLKCMU_SSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TNR_BUS, "dout_cmu_tnr_bus", "gout_cmu_tnr_bus",
+ CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
+ CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_AUD_CPU, "gout_cmu_aud_cpu", "mout_cmu_aud_cpu",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS0_BUS, "gout_cmu_bus0_bus", "mout_cmu_bus0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_SSS, "gout_cmu_bus1_sss", "mout_cmu_bus1_sss",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK4, "gout_cmu_cis_clk4", "mout_cmu_cis_clk4",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK5, "gout_cmu_cis_clk5", "mout_cmu_cis_clk5",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_DBG_BUS, "gout_cmu_cpucl0_dbg_bus",
+ "mout_cmu_cpucl0_dbg_bus", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_BUSP, "gout_cmu_cpucl2_busp",
+ "mout_cmu_cpucl2_busp", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_SWITCH, "gout_cmu_cpucl2_switch",
+ "mout_cmu_cpucl2_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CSIS_BUS, "gout_cmu_csis_bus", "mout_cmu_csis_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CSIS_OIS_MCU, "gout_cmu_csis_ois_mcu",
+ "mout_cmu_csis_ois_mcu", CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUS, "gout_cmu_dnc_bus", "mout_cmu_dnc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUSM, "gout_cmu_dnc_busm", "mout_cmu_dnc_busm",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNS_BUS, "gout_cmu_dns_bus", "mout_cmu_dns_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU, "gout_cmu_dpu", "mout_cmu_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_alt",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_MSCL, "gout_cmu_g2d_mscl", "mout_cmu_g2d_mscl",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_BUS, "gout_cmu_hsi0_bus",
+ "mout_cmu_hsi0_bus", CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_DPGTC, "gout_cmu_hsi0_dpgtc",
+ "mout_cmu_hsi0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USB31DRD, "gout_cmu_hsi0_usb31drd",
+ "mout_cmu_hsi0_usb31drd", CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USBDP_DEBUG, "gout_cmu_hsi0_usbdp_debug",
+ "mout_cmu_hsi0_usbdp_debug", CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_BUS, "gout_cmu_hsi1_bus", "mout_cmu_hsi1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_MMC_CARD, "gout_cmu_hsi1_mmc_card",
+ "mout_cmu_hsi1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_PCIE, "gout_cmu_hsi1_pcie",
+ "mout_cmu_hsi1_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_CARD, "gout_cmu_hsi1_ufs_card",
+ "mout_cmu_hsi1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_EMBD, "gout_cmu_hsi1_ufs_embd",
+ "mout_cmu_hsi1_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_BUS, "gout_cmu_hsi2_bus", "mout_cmu_hsi2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_PCIE, "gout_cmu_hsi2_pcie",
+ "mout_cmu_hsi2_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IPP_BUS, "gout_cmu_ipp_bus", "mout_cmu_ipp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ITP_BUS, "gout_cmu_itp_bus", "mout_cmu_itp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_BUS, "gout_cmu_mcsc_bus", "mout_cmu_mcsc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_GDC, "gout_cmu_mcsc_gdc", "mout_cmu_mcsc_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_MFC0, "gout_cmu_mfc0_mfc0",
+ "mout_cmu_mfc0_mfc0", CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_WFD, "gout_cmu_mfc0_wfd", "mout_cmu_mfc0_wfd",
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BUSP, "gout_cmu_mif_busp", "mout_cmu_mif_busp",
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_NPU_BUS, "gout_cmu_npu_bus", "mout_cmu_npu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_IP, "gout_cmu_peric0_ip",
+ "mout_cmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_IP, "gout_cmu_peric1_ip",
+ "mout_cmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_SSP_BUS, "gout_cmu_ssp_bus", "mout_cmu_ssp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TNR_BUS, "gout_cmu_tnr_bus", "mout_cmu_tnr_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VRA_BUS, "gout_cmu_vra_bus", "mout_cmu_vra_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS, 21, 0, 0),
+};
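
All GATE() entries above use bit 21 as the enable bit, the usual position in Exynos CLK_CON_GAT registers. CLK_IGNORE_UNUSED marks gates that the late clk_disable_unused() pass must leave alone because the downstream block (a bus or CPU switch) has no Linux consumer yet. A minimal sketch of the semantics, not taken from this patch:

/* A gate flagged CLK_IGNORE_UNUSED survives clk_disable_unused():
 *
 *   GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
 *        CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0);
 *
 * keeps bit 21 set after boot even with no registered consumer, so the
 * CORE bus stays clocked.
 */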
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos990_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos990_cmu_top, "samsung,exynos990-cmu-top",
+ exynos990_cmu_top_init);
+
+/* ---- CMU_HSI0 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER 0x0630
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0610
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x2038
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
+PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", "dout_cmu_hsi0_usb31drd" };
+PNAME(mout_hsi0_usbdp_debug_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usbdp_debug" };
+PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_BUS_USER, "mout_hsi0_bus_user",
+ mout_hsi0_bus_user_p, PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD_USER, "mout_hsi0_usb31drd_user",
+ mout_hsi0_usb31drd_user_p, PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USBDP_DEBUG_USER, "mout_hsi0_usbdp_debug_user",
+ mout_hsi0_usbdp_debug_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_DPGTC_USER, "mout_hsi0_dpgtc_user",
+ mout_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK,
+ "gout_hsi0_dp_link_dp_gtc_clk", "mout_hsi0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_PCLK,
+ "gout_hsi0_dp_link_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK,
+ "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK,
+ "gout_hsi0_lhm_axi_p_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK,
+ "gout_hsi0_ppmu_hsi0_bus1_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK,
+ "gout_hsi0_ppmu_hsi0_bus1_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK,
+ "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2,
+ "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK,
+ "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL,
+ "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY,
+ "gout_hsi0_usb31drd_bus_clk_early",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40,
+ "gout_hsi0_usb31drd_usb31drd_ref_clk_40",
+ "mout_hsi0_usb31drd_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL,
+ "gout_hsi0_usb31drd_usbdpphy_ref_soc_pll",
+ "mout_hsi0_usbdp_debug_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB,
+ "gout_hsi0_usb31drd_ipclkport_i_usbdpphy_scl_apb_pclk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK,
+ "gout_hsi0_usb31drd_usbpcs_apb_clk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK,
+ "gout_hsi0_vgen_lite_ipclkport_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CMU_HSI0_PCLK,
+ "gout_hsi0_cmu_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_XIU_D_HSI0_ACLK,
+ "gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .gate_clks = hsi0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ----- platform_driver ----- */
+
+static int __init exynos990_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos990_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos990-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ },
+ { },
+};
+
+static struct platform_driver exynos990_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos990-cmu",
+ .of_match_table = exynos990_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos990_cmu_probe,
+};
+
+static int __init exynos990_cmu_init(void)
+{
+ return platform_driver_register(&exynos990_cmu_driver);
+}
+
+core_initcall(exynos990_cmu_init);
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 86b39edba122..08b867ae3ed9 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
EARLY_WAKEUP_DPU_DEST,
EARLY_WAKEUP_CSIS_DEST,
EARLY_WAKEUP_SW_TRIG_APM,
- EARLY_WAKEUP_SW_TRIG_APM_SET,
- EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
EARLY_WAKEUP_SW_TRIG_CLUSTER0,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
EARLY_WAKEUP_SW_TRIG_DPU,
- EARLY_WAKEUP_SW_TRIG_DPU_SET,
- EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
EARLY_WAKEUP_SW_TRIG_CSIS,
- EARLY_WAKEUP_SW_TRIG_CSIS_SET,
- EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index be6b51694919..023a25af73c4 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
*/
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR (270)
+#define PLL142XX_LOCK_FACTOR (150)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
@@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ if (pll->type == pll_142xx)
+ writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
+ pll->lock_reg);
+ else
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
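
The lock-time write above scales with PDIV; a quick worked example under the lock factors defined in this hunk:

/* For a pll_142xx with rate->pdiv == 3:
 *   lock count = 3 * PLL142XX_LOCK_FACTOR = 3 * 150 = 450 cycles
 * while the same PDIV on a pll_35xx would program 3 * 270 = 810.
 */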
@@ -430,7 +435,10 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
-/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+/*
+ * PLL1418x, PLL0717x and PLL0718x are similar
+ * to PLL0822x, except that MDIV is one bit smaller
+ */
#define PLL1418X_MDIV_MASK (0x1FF)
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
@@ -441,10 +449,14 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- if (pll->type != pll_1418x)
+
+ if (pll->type != pll_1418x &&
+ pll->type != pll_0717x &&
+ pll->type != pll_0718x)
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
else
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
+
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
@@ -1377,6 +1389,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_0516x:
case pll_0517x:
case pll_0518x:
+ case pll_0717x:
+ case pll_0718x:
+ case pll_0732x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
if (!pll->rate_table)
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 858ab367eb65..6ddc54d173a0 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -45,6 +45,9 @@ enum samsung_pll_type {
pll_531x,
pll_1051x,
pll_1052x,
+ pll_0717x,
+ pll_0718x,
+ pll_0732x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index b028f25c658a..62eed964c3d0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -35,7 +35,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index 1fcf4e62f347..7de23f6749aa 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -84,17 +84,6 @@ static const struct jh71x0_clk_data jh7100_audclk_data[] = {
JH7100_AUDCLK_AUDIO_12288),
};
-static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7100_AUDCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7100_audclk_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -106,6 +95,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7100_AUDCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -146,7 +136,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return ret;
}
- return devm_of_clk_add_hw_provider(priv->dev, jh7100_audclk_get, priv);
+ return devm_of_clk_add_hw_provider(priv->dev, jh71x0_clk_get, priv);
}
static const struct of_device_id jh7100_audclk_match[] = {
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
index 418efdad719b..6f67587f4335 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-aon.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -54,17 +54,6 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
};
-static struct clk_hw *jh7110_aonclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_AONCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_aoncrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -78,6 +67,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_AONCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -127,7 +117,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_aonclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index 8c4c3a958a9f..f3fa069db193 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -75,17 +75,6 @@ static inline int jh7110_isp_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rsts);
}
-static struct clk_hw *jh7110_ispclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_ISPCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_ispcrg_suspend(struct device *dev)
{
@@ -126,6 +115,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_ISPCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -186,7 +176,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_ispclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-stg.c b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
index dafcb7190592..2a5ad0e07d1d 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-stg.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
@@ -75,17 +75,6 @@ static const struct jh71x0_clk_data jh7110_stgclk_data[] = {
JH71X0_GATE(JH7110_STGCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7110_STGCLK_STG_AXIAHB),
};
-static struct clk_hw *jh7110_stgclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_STGCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_stgcrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -98,6 +87,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_STGCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -145,7 +135,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_stgclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 17325f17696f..e9d8168d02b8 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -323,17 +323,6 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH7110_SYSCLK_OSC),
};
-static struct clk_hw *jh7110_sysclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_SYSCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static void jh7110_reset_unregister_adev(void *_adev)
{
struct auxiliary_device *adev = _adev;
@@ -425,6 +414,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_SYSCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -526,7 +516,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_sysclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 04eeed199087..bad20d5d794a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -80,17 +80,6 @@ static int jh7110_vout_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rst);
}
-static struct clk_hw *jh7110_voutclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_VOUTCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_voutcrg_suspend(struct device *dev)
{
@@ -131,6 +120,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_VOUTCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -193,7 +183,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_voutclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.c b/drivers/clk/starfive/clk-starfive-jh71x0.c
index aebc99264a0b..80e9157347eb 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.c
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.c
@@ -325,3 +325,15 @@ const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
return &jh71x0_clk_inv_ops;
}
EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
+
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < priv->num_reg)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(jh71x0_clk_get);
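
With the per-driver getters folded into jh71x0_clk_get(), each probe must set priv->num_reg before registering the provider; the __counted_by() annotation added below depends on it as well. A minimal usage sketch mirroring the probe changes in this series:

/* Sketch only: the bound must be set before the provider is registered,
 * or jh71x0_clk_get() cannot validate clkspec->args[0]. */
priv->num_reg = JH7110_SYSCLK_END;
ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);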
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index e3f441393e48..9d5dec1d5cd1 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -117,9 +117,11 @@ struct jh71x0_clk_priv {
struct clk *original_clk;
struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
- struct jh71x0_clk reg[];
+ unsigned int num_reg;
+ struct jh71x0_clk reg[] __counted_by(num_reg);
};
const struct clk_ops *starfive_jh71x0_clk_ops(u32 max);
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data);
#endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 7133377d4163..1f81c7ac41af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
0, 4, /* M */
@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
0, 4, /* M */
@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 3a7d61c81667..ba1ad267f123 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
CLK_SET_RATE_PARENT);
/*
- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
- * the mux from reparenting.
+ * Experiments showed that RGB output requires pll-video0-2x, while DSI
+ * requires pll-mipi. Neither works with the wrong clock; the screen
+ * simply stays blank. sun50i-a64.dtsi assigns pll-mipi as the TCON0
+ * parent by default.
*/
-#define SUN50I_A64_TCON0_CLK_REG 0x118
-
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- /* Set PLL MIPI as parent for TCON0 */
- val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
- val &= ~GENMASK(26, 24);
- writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
-
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index a8c11c0b4e06..dfba88a5ad0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -21,7 +21,6 @@
/* PLL_VIDEO0 exported for HDMI PHY */
-#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -32,7 +31,6 @@
#define CLK_PLL_PERIPH1_2X 14
#define CLK_PLL_VIDEO1 15
#define CLK_PLL_GPU 16
-#define CLK_PLL_MIPI 17
#define CLK_PLL_HSIC 18
#define CLK_PLL_DE 19
#define CLK_PLL_DDR1 20
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 1086669b91da..190816c35da9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1107,11 +1107,24 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h616_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 4, /* PLL_PERI0@600MHz, as recommended by manual */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- int i;
+ int ret, i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1166,7 +1179,18 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during CPU PLL rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h616_cpu_nb);
+
+ /* Re-lock the CPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+
+ return 0;
}
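
The two notifiers registered above cooperate during a CPU PLL rate change; the sequence, as a hedged reading of the ccu notifier helpers:

/*
 * 1. ccu_mux_notifier: before the rate change, the CPU mux is reparented
 *    to bypass_index 4 (PLL_PERI0 @ 600 MHz) so the cores keep a stable
 *    clock while the PLL is unstable.
 * 2. The CPU PLL is reprogrammed to the new rate.
 * 3. ccu_pll_notifier: with LOCK_ENABLE (BIT(29)) set, wait for the lock
 *    bit (BIT(28)); the mux switches back after delay_us.
 */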
static const struct of_device_id sun50i_h616_ccu_ids[] = {
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 1015fab95251..4c9555fc6184 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -657,7 +657,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- 0),
+ CLK_IGNORE_UNUSED),
},
};
@@ -794,13 +794,13 @@ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_
0x134, BIT(7), 0);
static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), 0);
+ 0x140, BIT(9), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
0x150, BIT(9), 0);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), 0);
+ 0x150, BIT(10), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), 0);
+ 0x150, BIT(11), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
@@ -896,7 +896,6 @@ static struct ccu_common *th1520_div_clks[] = {
&vo_axi_clk.common,
&vp_apb_clk.common,
&vp_axi_clk.common,
- &cpu2vp_clk.common,
&venc_clk.common,
&dpu0_clk.common,
&dpu1_clk.common,
@@ -916,6 +915,7 @@ static struct ccu_common *th1520_gate_clks[] = {
&bmu_clk.common,
&cpu2aon_x2h_clk.common,
&cpu2peri_x2h_clk.common,
+ &cpu2vp_clk.common,
&perisys_apb1_hclk.common,
&perisys_apb2_hclk.common,
&perisys_apb3_hclk.common,
@@ -1048,7 +1048,8 @@ static int th1520_clk_probe(struct platform_device *pdev)
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
cg->common.hw.init->parent_data,
- 0, base + cg->common.cfg0,
+ cg->common.hw.init->flags,
+ base + cg->common.cfg0,
ffs(cg->enable) - 1, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f2117fef7c7d..9c75dcc9a534 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -449,10 +449,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
- if (!io)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*io));
+ io = memblock_alloc_or_panic(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 216d85d6aac6..f684fc306ecc 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -180,7 +180,7 @@ static void of_mux_clk_setup(struct device_node *node)
pr_err("mux-clock %pOFn must have parents\n", node);
return;
}
- parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
goto cleanup;
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index b2233d3ff9a9..bbf7714480e7 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -52,6 +52,8 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_MULT_FRAC_MASK GENMASK(25, 16)
+#define WZRD_CLKFBOUT_O_MASK GENMASK(7, 0)
#define WZRD_CLKFBOUT_L_SHIFT 0
#define WZRD_CLKFBOUT_H_SHIFT 8
#define WZRD_CLKFBOUT_L_MASK GENMASK(7, 0)
@@ -87,14 +89,14 @@
#define DIV_O 0x01
#define DIV_ALL 0x03
-#define WZRD_M_MIN 2
-#define WZRD_M_MAX 128
-#define WZRD_D_MIN 1
-#define WZRD_D_MAX 106
-#define WZRD_VCO_MIN 800000000
-#define WZRD_VCO_MAX 1600000000
-#define WZRD_O_MIN 1
-#define WZRD_O_MAX 128
+#define WZRD_M_MIN 2ULL
+#define WZRD_M_MAX 128ULL
+#define WZRD_D_MIN 1ULL
+#define WZRD_D_MAX 106ULL
+#define WZRD_VCO_MIN 800000000ULL
+#define WZRD_VCO_MAX 1600000000ULL
+#define WZRD_O_MIN 2ULL
+#define WZRD_O_MAX 128ULL
#define VER_WZRD_M_MIN 4
#define VER_WZRD_M_MAX 432
#define VER_WZRD_D_MIN 1
@@ -153,8 +155,10 @@ struct clk_wzrd {
* @flags: clk_wzrd divider flags
* @table: array of value/divider pairs, last entry should have div = 0
* @m: value of the multiplier
+ * @m_frac: fractional value of the multiplier
* @d: value of the common divider
* @o: value of the leaf divider
+ * @o_frac: value of the fractional leaf divider
* @lock: register lock
*/
struct clk_wzrd_divider {
@@ -166,8 +170,10 @@ struct clk_wzrd_divider {
u8 flags;
const struct clk_div_table *table;
u32 m;
+ u32 m_frac;
u32 d;
u32 o;
+ u32 o_frac;
spinlock_t *lock; /* divider lock */
};
@@ -372,38 +378,40 @@ static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
- u32 m, d, o;
- u32 mmin, mmax, dmin, dmax, omin, omax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
+ u64 m, d, o;
+ u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;
- mmin = WZRD_M_MIN;
- mmax = WZRD_M_MAX;
+ mmin = WZRD_M_MIN << 3;
+ mmax = WZRD_M_MAX << 3;
dmin = WZRD_D_MIN;
dmax = WZRD_D_MAX;
- omin = WZRD_O_MIN;
- omax = WZRD_O_MAX;
- vcomin = WZRD_VCO_MIN;
- vcomax = WZRD_VCO_MAX;
+ omin = WZRD_O_MIN << 3;
+ omax = WZRD_O_MAX << 3;
+ vcomin = WZRD_VCO_MIN << 3;
+ vcomax = WZRD_VCO_MAX << 3;
for (m = mmin; m <= mmax; m++) {
- for (d = dmin; d <= dmax; d++) {
- vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
+ mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
+ for (d = mdmin; d <= mdmax; d++) {
+ vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = freq > rate ? freq - rate : rate - freq;
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m >> 3;
+ divider->m_frac = (m - (divider->m << 3)) * 125;
+ divider->d = d;
+ divider->o = o >> 3;
+ divider->o_frac = (o - (divider->o << 3)) * 125;
}
}
}
- return -EBUSY;
+ return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}
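
The rewritten search runs in 1/8-step fixed point: m and o are scanned with three fractional bits (hence the << 3 on the bounds), and each 1/8 step is stored as thousandths by multiplying the remainder by 125. A worked example under those conventions:

/* Suppose the loop lands on m = 33 (i.e. 33/8 = 4.125) and o = 17:
 *   divider->m      = 33 >> 3         = 4
 *   divider->m_frac = (33 - 32) * 125 = 125   -> multiplier 4.125
 *   divider->o      = 17 >> 3         = 2
 *   divider->o_frac = (17 - 16) * 125 = 125   -> output divide 2.125
 * The mdmin/mdmax clamp prunes d values whose VCO frequency would fall
 * outside [WZRD_VCO_MIN, WZRD_VCO_MAX] (also scaled by << 3).
 */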
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
@@ -496,33 +504,22 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long vco_freq, rate_div, clockout0_div;
void __iomem *div_addr;
- u32 reg, pre, f;
+ u32 reg;
int err;
err = clk_wzrd_get_divisors(hw, rate, parent_rate);
if (err)
return err;
- vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
- rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
-
- clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);
-
- pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
- f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
- f &= WZRD_CLKOUT_FRAC_MASK;
-
- reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
- FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
+ reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
+ FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
- /* Set divisor and clear phase offset */
reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
+ FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
- writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
return clk_wzrd_reconfig(divider, div_addr);
@@ -564,18 +561,19 @@ static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u32 m, d, o, div, reg, f;
+ u32 m, d, o, reg, f, mf;
+ u64 mul;
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
+ mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
- div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
- return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
- divider->flags, divider->width);
+ mul = m * 1000 + mf;
+ return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}
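
Rate recalculation now folds the fractional fields into a single division, with both multiplier and output divider expressed in thousandths. For instance (values chosen for illustration):

/* parent_rate = 100 MHz, m = 4, mf = 125, d = 1, o = 2, f = 0:
 *   mul  = 4 * 1000 + 125 = 4125
 *   rate = 100000000 * 4125 / (1 * (2 * 1000 + 0))
 *        = 412500000000 / 2000 = 206250000 Hz (206.25 MHz)
 */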
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
@@ -648,6 +646,25 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 m, d, o;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, rate, *prate);
+ if (err)
+ return err;
+
+ m = divider->m;
+ d = divider->d;
+ o = divider->o;
+
+ rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
+ return rate;
+}
+
+static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
@@ -678,7 +695,7 @@ static const struct clk_ops clk_wzrd_ver_divider_ops = {
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .round_rate = clk_wzrd_ver_round_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index b39dee7b93af..f00019b078a7 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -23,7 +23,7 @@
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a3fe98cd3838..82815428f8f9 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+ enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int jcore_pit_local_teardown(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+
+ pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
+
+ disable_percpu_irq(pit->ced.irq);
return 0;
}
@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
+ irq_set_percpu_devid(pit_irq);
err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
"jcore_pit", jcore_pit_percpu);
if (err) {
@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
- jcore_pit_local_init, NULL);
+ jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}
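
The teardown callback completes the hotplug pairing: the per-CPU timer IRQ enabled in jcore_pit_local_init() is now disabled when a CPU goes down, and irq_set_percpu_devid() must precede request_percpu_irq() so the enable/disable calls act per CPU. In outline:

/* CPU up:   jcore_pit_local_init()     -> enable_percpu_irq(pit->ced.irq, ...)
 * CPU down: jcore_pit_local_teardown() -> disable_percpu_irq(pit->ced.irq)
 * Both run on the hotplugged CPU itself (CPUHP_AP_..._STARTING section).
 */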
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 92a83a9bb2e1..d64b07ec48e5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -232,7 +232,7 @@ config CPUFREQ_VIRT
If in doubt, say N.
config CPUFREQ_DT_PLATDEV
- tristate "Generic DT based cpufreq platdev driver"
+ bool "Generic DT based cpufreq platdev driver"
depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f7e13e60c80..9e46960f6a86 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -15,6 +15,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_AIROHA_SOC_CPUFREQ
+ tristate "Airoha EN7581 SoC CPUFreq support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
+ select PM_OPP
+ default ARCH_AIROHA
+ help
+ This adds the CPUFreq driver for Airoha EN7581 SoCs.
+
config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 97c2d4f15d76..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -340,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on
+ hardware-provided feedback (such as architected counters).
+ Since a more precise frequency is now available via the
+ cpuinfo_avg_freq attribute, enabling this option keeps
+ scaling_cur_freq reporting the counter-based frequency for
+ compatibility reasons.
+
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d35a28dd9463..890fff99f37d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c9ebacf5c88e..463b69a2dff5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -623,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is a non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -652,6 +659,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -664,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -675,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -827,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in the
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
@@ -891,11 +909,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost) {
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
- policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
- }
-
return result;
err_unreg:
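
The max_freq computation pairs the boost ratio (scaled by SCHED_CAPACITY_SHIFT, i.e. << 10) with the CPPC nominal frequency when the firmware advertises one. A worked example with illustrative perf values:

/* highest_perf = 300, nominal_perf = 200, nominal_freq advertised:
 *   max_boost_ratio  = (300 << 10) / 200 = 1536
 *   cpuinfo.max_freq = nominal_freq * 1536 >> 10 = 1.5 * nominal_freq
 * If CPPC predates revision 3 and nominal_freq is 0, freq_table[0]
 * (the highest P-state) stands in for the nominal frequency.
 */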
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
new file mode 100644
index 000000000000..4fe39eadd163
--- /dev/null
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+struct airoha_cpufreq_priv {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
+ struct platform_device *cpufreq_dt;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+/* NOP function to disable OPP from setting clock */
+static int airoha_cpufreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp,
+ void *data, bool scaling_down)
+{
+ return 0;
+}
+
+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
+static const char * const airoha_cpufreq_pd_names[] = { "perf" };
+
+static int airoha_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = airoha_cpufreq_pd_names,
+ .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names),
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
+ };
+ struct dev_pm_opp_config config = {
+ .clk_names = airoha_cpufreq_clk_names,
+ .config_clks = airoha_cpufreq_config_clks_nop,
+ };
+ struct platform_device *cpufreq_dt;
+ struct airoha_cpufreq_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev;
+ int ret;
+
+ /* All CPUs share the same OPP table */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set OPP table conf with NOP config_clks */
+ priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (priv->opp_token < 0)
+ return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
+
+ /* Attach PM for OPP */
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &priv->pd_list);
+ if (ret)
+ goto clear_opp_config;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
+ goto detach_pm;
+ }
+
+ priv->cpufreq_dt = cpufreq_dt;
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+detach_pm:
+ dev_pm_domain_detach_list(priv->pd_list);
+clear_opp_config:
+ dev_pm_opp_clear_config(priv->opp_token);
+
+ return ret;
+}
+
+static void airoha_cpufreq_remove(struct platform_device *pdev)
+{
+ struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt);
+
+ dev_pm_domain_detach_list(priv->pd_list);
+
+ dev_pm_opp_clear_config(priv->opp_token);
+}
+
+static struct platform_driver airoha_cpufreq_driver = {
+ .probe = airoha_cpufreq_probe,
+ .remove = airoha_cpufreq_remove,
+ .driver = {
+ .name = "airoha-cpufreq",
+ },
+};
+
+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,en7581" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
+
+static int __init airoha_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(airoha_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&airoha_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (ret)
+ platform_driver_unregister(&airoha_cpufreq_driver);
+
+ return ret;
+}
+module_init(airoha_cpufreq_init);
+
+static void __exit airoha_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&airoha_cpufreq_driver);
+}
+module_exit(airoha_cpufreq_exit);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
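
The driver deliberately hands the OPP core a no-op clock callback: the "cpu" clock stays listed so OPP rate lookups still work, but the actual scaling goes through the "perf" power domain attached with PD_FLAG_REQUIRED_OPP. A hedged sketch of the resulting set-rate path (the call site is illustrative, not part of this patch):

/* With config_clks == airoha_cpufreq_config_clks_nop:
 *   dev_pm_opp_set_rate(cpu_dev, freq);   // issued by cpufreq-dt
 *     -> skips clk_set_rate() on the "cpu" clock
 *     -> propagates the required OPP / performance state to the "perf"
 *        genpd, which performs the real frequency change.
 */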
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..8d692415d905 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -32,7 +32,6 @@ TRACE_EVENT(amd_pstate_perf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,7 +43,6 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
@@ -57,7 +55,6 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,11 +67,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
@@ -83,11 +79,55 @@ TRACE_EVENT(amd_pstate_perf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ unsigned int highest_perf,
+ unsigned int epp,
+ unsigned int min_perf,
+ unsigned int max_perf,
+ bool boost
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(unsigned int, highest_perf)
+ __field(unsigned int, epp)
+ __field(unsigned int, min_perf)
+ __field(unsigned int, max_perf)
+ __field(bool, boost)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ ),
+
+ TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ (unsigned int)__entry->cpu_id,
+ (unsigned int)__entry->min_perf,
+ (unsigned int)__entry->max_perf,
+ (unsigned int)__entry->highest_perf,
+ (unsigned int)__entry->epp,
+ (bool)__entry->boost
+ )
+);
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
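For reference, the new amd_pstate_epp_perf event is emitted behind its static-key guard; a minimal call site, mirroring the ones added to amd-pstate.c later in this patch:

	/* Emit only when the tracepoint is live; the arguments mirror the
	 * amd-pstate.c call sites added below. */
	if (trace_amd_pstate_epp_perf_enabled())
		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
					  epp, min_perf, max_perf,
					  policy->boost_enabled);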
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index a261d7300951..3a0a380c3590 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -207,7 +207,6 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
- u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -215,14 +214,13 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- nominal_freq_khz = cpudata->nominal_freq*1000;
- if (!((cpudata->max_freq >= nominal_freq_khz) &&
- (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, nominal_freq_khz,
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -236,13 +234,13 @@ static void amd_pstate_ut_check_freq(u32 index)
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == nominal_freq_khz))
+ (policy->max == cpudata->nominal_freq))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- nominal_freq_khz);
+ cpudata->nominal_freq);
goto skip_test;
}
} else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 66e5dfc711c0..313550fa62d4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -88,6 +89,11 @@ static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
+
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -180,120 +186,145 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata)
{
- u64 epp;
+ u64 value;
int ret;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
- }
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- s16 epp;
- int index = -EINVAL;
+ return static_call(amd_pstate_get_epp)(cpudata);
+}
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
+static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+{
+ u64 epp;
+ int ret;
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return index;
+ return (s16)(epp & 0xff);
}
-static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (value == prev)
+ return 0;
+
+ if (fast_switch) {
+ wrmsrl(MSR_AMD_CPPC_REQ, value);
+ return 0;
+ } else {
+ int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+ if (ret)
+ return ret;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+ u32 max_perf, u32 epp,
+ bool fast_switch)
{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, epp, fast_switch);
}
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
+ u64 value, prev;
int ret;
- struct cppc_perf_ctrls perf_ctrls;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
- } else {
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
+ if (value == prev)
+ return 0;
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
- return ret;
- }
- cpudata->epp_cached = epp;
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
+ /* update both cached values so that msr_update_perf() can skip redundant writes */
+ WRITE_ONCE(cpudata->epp_cached, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
- int pref_index)
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
+
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ return static_call(amd_pstate_set_epp)(cpudata, epp);
+}
+
+static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
- int epp = -EINVAL;
int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (epp == cpudata->epp_cached)
+ return 0;
+
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
+ int pref_index)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int epp;
if (!pref_index)
epp = cpudata->epp_default;
-
- if (epp == -EINVAL)
+ else
epp = epp_values[pref_index];
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -301,9 +332,15 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
return -EBUSY;
}
- ret = amd_pstate_set_epp(cpudata, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ policy->boost_enabled);
+ }
- return ret;
+ return amd_pstate_set_epp(cpudata, epp);
}
static inline int msr_cppc_enable(bool enable)
@@ -442,17 +479,23 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void shmem_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
struct cppc_perf_ctrls perf_ctrls;
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(cpudata, epp);
+
+ if (ret)
+ return ret;
+ }
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ return cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -493,14 +536,8 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
{
unsigned long max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- u64 value = prev;
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
max_freq = READ_ONCE(cpudata->max_limit_freq);
@@ -511,34 +548,18 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
max_perf = min_t(unsigned long, nominal_perf, max_perf);
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- goto cpufreq_policy_put;
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
-cpufreq_policy_put:
cpufreq_cpu_put(policy);
}
@@ -570,7 +591,7 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
+ u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
max_perf = READ_ONCE(cpudata->highest_perf);
@@ -578,12 +599,8 @@ static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * max_perf, max_freq);
min_limit_perf = div_u64(policy->min * max_perf, max_freq);
- lowest_perf = READ_ONCE(cpudata->lowest_perf);
- if (min_limit_perf < lowest_perf)
- min_limit_perf = lowest_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -682,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
- max_perf = cap_perf;
+ max_perf = cpudata->max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
@@ -704,8 +721,8 @@ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
if (on)
policy->cpuinfo.max_freq = max_freq;
- else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
- policy->cpuinfo.max_freq = nominal_freq * 1000;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -727,12 +744,10 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state);
- WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
@@ -752,9 +767,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- /* at least one CPU supports CPB, even if others fail later on to set up */
- current_pstate_driver->boost_enabled = true;
-
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
@@ -802,31 +814,35 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
- sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
schedule_work(&sched_prefcore_work);
}
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
+ guard(mutex)(&amd_pstate_driver_lock);
- mutex_lock(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -836,14 +852,11 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
cpufreq_update_policy(cpu);
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -895,9 +908,8 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
u32 min_freq, max_freq;
- u32 nominal_perf, nominal_freq;
+ u32 highest_perf, nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
- u32 boost_ratio, lowest_nonlinear_ratio;
struct cppc_perf_caps cppc_perf;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -905,29 +917,25 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
return ret;
if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq * 1000;
+ min_freq = quirks->lowest_freq;
else
- min_freq = cppc_perf.lowest_freq * 1000;
+ min_freq = cppc_perf.lowest_freq;
if (quirks && quirks->nominal_freq)
- nominal_freq = quirks->nominal_freq ;
+ nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
- max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
- lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
-
- WRITE_ONCE(cpudata->min_freq, min_freq);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->max_freq, max_freq);
+ lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
+ WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
+ WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
/**
* These values need to be initialized correctly, otherwise the driver will fail to load
@@ -937,13 +945,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
*/
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
- min_freq, max_freq, nominal_freq * 1000);
+ min_freq, max_freq, nominal_freq);
return -EINVAL;
}
- if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
- lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ lowest_nonlinear_freq, min_freq, nominal_freq);
return -EINVAL;
}
@@ -1160,7 +1168,6 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
- struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
@@ -1172,11 +1179,11 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
+
+ ret = amd_pstate_set_energy_pref_index(policy, ret);
- return ret ?: count;
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
@@ -1185,9 +1192,22 @@ static ssize_t show_energy_performance_preference(
struct amd_cpudata *cpudata = policy->driver_data;
int preference;
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ switch (cpudata->epp_cached) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
@@ -1236,6 +1256,9 @@ static int amd_pstate_register_driver(int mode)
return ret;
}
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
+
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
amd_pstate_driver_cleanup();
@@ -1340,13 +1363,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1355,9 +1375,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1451,7 +1470,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
@@ -1477,8 +1495,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
-
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -1489,10 +1505,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
@@ -1505,6 +1524,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ if (ret)
+ return ret;
current_pstate_driver->adjust_perf = NULL;
@@ -1530,51 +1552,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
- u64 value;
- s16 epp;
+ u32 epp;
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
amd_pstate_update_min_max_limit(policy);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = min(cpudata->nominal_perf, max_perf);
-
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
-
- cpudata->epp_policy = cpudata->policy;
+ epp = 0;
+ else
+ epp = READ_ONCE(cpudata->epp_cached);
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- return epp;
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+ cpudata->min_limit_perf,
+ cpudata->max_limit_perf,
+ policy->boost_enabled);
}
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- epp = 0;
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1603,87 +1598,63 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 max_perf;
int ret;
ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ max_perf, policy->boost_enabled);
}
+
+ return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
+ ret = amd_pstate_epp_reenable(policy);
+ if (ret)
+ return ret;
+ cpudata->suspended = false;
return 0;
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
-
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.min_perf = min_perf;
- perf_ctrls.max_perf = min_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- }
- mutex_unlock(&amd_pstate_limits_lock);
-}
-
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+ int min_perf;
if (cpudata->suspended)
return 0;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
- return 0;
+ guard(mutex)(&amd_pstate_limits_lock);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ min_perf, min_perf, policy->boost_enabled);
+ }
+
+ return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
@@ -1711,12 +1682,10 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
/* enable amd pstate from suspend state */
- amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ amd_pstate_epp_reenable(policy);
cpudata->suspended = false;
}
@@ -1869,6 +1838,8 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
static_call_update(amd_pstate_init_perf, shmem_init_perf);
static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
if (amd_pstate_prefcore) {
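The bitfield conversion in this file replaces the old AMD_CPPC_*_PERF() shift macros with GENMASK/FIELD_PREP. A minimal sketch of how the four request fields pack into MSR_AMD_CPPC_REQ with the masks defined in this patch:

	/* max in bits 7:0, min in 15:8, desired in 23:16, EPP in 31:24 */
	u64 value = 0;

	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	/* and extraction is the inverse: */
	epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);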
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cd573bc6b6db..9747e3be6cee 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -57,7 +57,6 @@ struct amd_aperf_mperf {
* @hw_prefcore: check whether HW supports preferred core feature.
* Only when hw_prefcore and early prefcore param are true,
* AMD P-State driver supports preferred core feature.
- * @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
* @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
@@ -94,13 +93,11 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_policy;
s16 epp_cached;
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
s16 epp_default;
- bool boost_state;
};
/*
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 4dcacab9b4bf..269b18c62d04 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -22,11 +22,14 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define APPLE_DVFS_CMD 0x20
-#define APPLE_DVFS_CMD_BUSY BIT(31)
-#define APPLE_DVFS_CMD_SET BIT(25)
-#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
-#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22)
+#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22
+#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD_PS1_SHIFT 0
/* Same timebase as CPU counter (24MHz) */
#define APPLE_DVFS_LAST_CHG_TIME 0x38
@@ -35,6 +38,9 @@
* Apple ran out of bits and had to shift this in T8112...
*/
#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3
+#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0)
#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
@@ -52,12 +58,15 @@
#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
-#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+#define APPLE_DVFS_TRANSITION_TIMEOUT 400
struct apple_soc_cpufreq_info {
+ bool has_ps2;
u64 max_pstate;
u64 cur_pstate_mask;
u64 cur_pstate_shift;
+ u64 ps1_mask;
+ u64 ps1_shift;
};
struct apple_cpu_priv {
@@ -68,25 +77,47 @@ struct apple_cpu_priv {
static struct cpufreq_driver apple_soc_cpufreq_driver;
+static const struct apple_soc_cpufreq_info soc_s5l8960x_info = {
+ .has_ps2 = false,
+ .max_pstate = 7,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X,
+ .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT,
+};
+
static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .has_ps2 = true,
.max_pstate = 15,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .has_ps2 = false,
.max_pstate = 31,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_default_info = {
+ .has_ps2 = false,
.max_pstate = 15,
.cur_pstate_mask = 0, /* fallback */
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
+ .compatible = "apple,s5l8960x-cluster-cpufreq",
+ .data = &soc_s5l8960x_info,
+ },
+ {
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
},
@@ -109,7 +140,7 @@ static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
unsigned int pstate;
if (priv->info->cur_pstate_mask) {
- u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
} else {
@@ -148,9 +179,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
return -EIO;
}
- reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg &= ~priv->info->ps1_mask;
+ reg |= pstate << priv->info->ps1_shift;
+ if (priv->info->has_ps2) {
+ reg &= ~APPLE_DVFS_CMD_PS2;
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ }
reg |= APPLE_DVFS_CMD_SET;
writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
@@ -275,7 +309,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bd8f75accfa0..8f512448382f 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -225,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
@@ -611,7 +611,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
- policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+ caps->highest_perf : caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -619,7 +620,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2a3e8bd317c9..2aa00769cf09 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,en7581", },
+
{ .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h6", },
{ .compatible = "allwinner,sun50i-h616", },
@@ -235,5 +237,3 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1a4cae54a01b..0ce79fed8e55 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -602,12 +603,12 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
@@ -728,18 +729,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
+
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
@@ -784,6 +793,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -945,6 +967,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -1072,6 +1095,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
+
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;
@@ -1409,10 +1438,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
- policy->boost_enabled = true;
-
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1475,6 +1500,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1538,7 +1567,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sugov_eas_rebuild_sd() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -1569,6 +1598,19 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ if (cpufreq_driver->set_boost &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
+ policy->boost_enabled = cpufreq_boost_enabled();
+ ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ if (ret) {
+ /* If the set_boost fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ policy->boost_enabled ? "enable" : "disable");
+ policy->boost_enabled = !policy->boost_enabled;
+ }
+ }
+
pr_debug("initialization complete\n");
return 0;
@@ -2812,7 +2854,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
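The weak-function change above turns arch_freq_get_on_cpu() into an errno-carrying contract: a positive value is an averaged frequency in kHz, while -EOPNOTSUPP means the architecture has no such counters (which also hides cpuinfo_avg_freq). A hypothetical override honoring that contract; both helpers are placeholders, not real kernel APIs:

	int arch_freq_get_on_cpu(int cpu)
	{
		unsigned int khz;

		if (!my_arch_has_avg_freq_counters(cpu))	/* placeholder */
			return -EOPNOTSUPP;

		khz = my_arch_read_avg_freq_khz(cpu);		/* placeholder */
		return khz ? (int)khz : -EAGAIN;
	}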
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b8e2396a708a..f06b9bc99945 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -28,6 +28,7 @@
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/units.h>
#include <asm/cpu.h>
#include <asm/div64.h>
@@ -302,11 +303,11 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
-static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
+static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
@@ -414,18 +415,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
- int ret;
-
- ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
- * If the nominal frequency and the nominal performance are not
- * zero and the ratio between them is not 100, return the hybrid
- * scaling factor.
+ * Compute the perf-to-frequency scaling factor for the given CPU if
+ * possible, unless it would be 0.
*/
- if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
- cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return hybrid_scaling_factor;
+ if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
+ cppc_perf.nominal_perf && cppc_perf.nominal_freq)
+ return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
+ cppc_perf.nominal_perf);
return core_get_scaling();
}
@@ -2202,33 +2200,31 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
- u8 cpu_type = 0;
+ if (hybrid_scaling_factor) {
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u8 cpu_type = c->topo.intel_type;
+
+ /*
+ * Return the hybrid scaling factor for P-cores and use the
+ * default core scaling for E-cores.
+ */
+ if (cpu_type == INTEL_CPU_TYPE_CORE)
+ return hybrid_scaling_factor;
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* P-cores have a smaller perf level-to-freqency scaling factor. */
- if (cpu_type == 0x40)
- return hybrid_scaling_factor;
+ if (cpu_type == INTEL_CPU_TYPE_ATOM)
+ return core_get_scaling();
+ }
- /* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ /* Use core scaling on non-hybrid systems. */
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return core_get_scaling();
/*
- * If reached here, this system is either non-hybrid (like Tiger
- * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
- * no E cores (in which case CPUID for hybrid support is 0).
- *
- * The CPPC nominal_frequency field is 0 for non-hybrid systems,
- * so the default core scaling will be used for them.
+ * The system is hybrid, but the hybrid scaling factor is not known or
+ * the CPU type is not one of the above, so use CPPC to compute the
+ * scaling factor for this CPU.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
@@ -2709,7 +2705,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
}
cpu->epp_powersave = -EINVAL;
- cpu->epp_policy = 0;
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
intel_pstate_get_cpu_pstates(cpu);
@@ -3665,8 +3661,12 @@ static const struct x86_cpu_id intel_epp_default[] = {
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
- X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
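The rewritten intel_pstate_cppc_get_scaling() derives the perf-to-frequency factor from CPPC data instead of returning a per-model constant. With illustrative numbers (not taken from the patch), nominal_freq = 2000 MHz and nominal_perf = 25 give:

	scaling = div_u64(2000 * KHZ_PER_MHZ, 25);	/* = 80000 kHz per perf unit */

which matches the magnitude of HYBRID_SCALING_FACTOR_MTL above.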
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8de759247771..ae79d909943b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>
@@ -281,7 +282,7 @@ next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
- (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+ str_enabled_disabled(powernv_pstate_info.wof_enabled));
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 98129565acb8..b2e7e89feaac 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}
/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
unsigned int index;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return qcom_cpufreq_get_freq(cpu);
+ return qcom_cpufreq_get_freq(policy);
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If the h/w throttled frequency is higher than what cpufreq has requested,
* then stop polling and switch back to the interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return data->throttle_irq;
data->cancel_throttle = false;
- data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
+ data->policy = policy;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -622,11 +623,24 @@ static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned lon
{
struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
- return qcom_lmh_get_throttle_freq(data);
+ return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
+}
+
+/*
+ * Since we cannot determine the closest rate to the target rate, just
+ * return the actual rate at which the clock is running. This is needed to
+ * make the clk_set_rate() API work properly.
+ */
+static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
+
+ return 0;
}
static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
.recalc_rate = qcom_cpufreq_hw_recalc_rate,
+ .determine_rate = qcom_cpufreq_hw_determine_rate,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
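Under the new clk_ops, any rate request on this clock is coerced to whatever the CPU domain is actually running at. A sketch of the consumer-visible effect, assuming cpu_clk is a handle to this clock:

	unsigned long cur = clk_get_rate(cpu_clk);	/* recalc_rate path */

	clk_set_rate(cpu_clk, cur * 2);	/* determine_rate rewrites req->rate */
	/* the clock still reports (roughly) the measured rate afterwards */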
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c6bdfc308e99..9cef71528076 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
unsigned int vddarm_max;
};
+#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
+#endif
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct s3c64xx_dvfs *dvfs;
- unsigned int old_freq, new_freq;
+ unsigned int new_freq = s3c64xx_freq_table[index].frequency;
int ret;
+#ifdef CONFIG_REGULATOR
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq;
+
old_freq = clk_get_rate(policy->clk) / 1000;
- new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
-#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 07d6f9a9b7c8..b8fe758aeb01 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
@@ -26,6 +27,8 @@ struct scmi_data {
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
+ struct notifier_block limit_notify_nb;
+ struct freq_qos_request limits_freq_req;
};
static struct scmi_protocol_handle *ph;
@@ -174,6 +177,22 @@ static struct freq_attr *scmi_cpufreq_hw_attr[] = {
NULL,
};
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+ struct scmi_perf_limits_report *limit_notify = data;
+ unsigned int limit_freq_khz;
+ int ret;
+
+ limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+ ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+ if (ret < 0)
+ pr_warn("failed to update freq constraint: %d\n", ret);
+
+ return NOTIFY_OK;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -181,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -294,6 +314,23 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
}
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
+ }
+
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
+
return 0;
out_free_table:
@@ -313,7 +350,13 @@ out_free_priv:
static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
+ sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ freq_qos_remove_request(&priv->limits_freq_req);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
@@ -372,6 +415,8 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
if (!handle)
return -ENODEV;
+ scmi_cpufreq_driver.driver_data = sdev;
+
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
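Taken together, the limit-notification support added above is a three-step freq-QoS lifecycle; condensed from the hunks in this file:

	/* init: start unconstrained */
	freq_qos_add_request(&policy->constraints, &priv->limits_freq_req,
			     FREQ_QOS_MAX, FREQ_QOS_MAX_DEFAULT_VALUE);

	/* SCMI notifier callback: firmware changed the cap */
	freq_qos_update_request(&priv->limits_freq_req,
				limit_notify->range_max_freq / HZ_PER_KHZ);

	/* exit: unregister the notifier, then drop the request */
	freq_qos_remove_request(&priv->limits_freq_req);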
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 8a0cd5312a59..15899dd77c08 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -323,7 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index b50f9d13e6d2..de50a2f3b124 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -171,7 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
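Both sparc conversions are the standard kzalloc(n * size) to kcalloc(n, size) hardening: kcalloc fails cleanly if the multiplication would overflow, instead of silently allocating a wrapped, too-small buffer. Roughly what it guards against, open-coded:

	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;		/* n * size would wrap */
	return kzalloc(bytes, GFP_KERNEL);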
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d103342b7cfc..1de9e92c5b0f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,6 +3,9 @@
# Makefile for cpuidle.
#
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 146f97068022..5fb5228f6bf1 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -72,6 +72,7 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
*/
if (use_osi) {
pd->power_off = psci_pd_power_off;
+ pd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..8fe5e1b47ef9 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -49,47 +51,50 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
+ *
+ * The governor also counts "intercepts" with the measured idle duration below
+ * the tick period length and uses this information when deciding whether or not
+ * to stop the scheduler tick.
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
+ *
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
*
- * 3. By default, select the candidate state.
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
@@ -101,6 +106,12 @@
#include "gov.h"
/*
+ * Idle state exit latency threshold used for deciding whether or not to check
+ * the time till the closest expected timer event.
+ */
+#define LATENCY_THRESHOLD_NS (RESIDENCY_THRESHOLD_NS / 2)
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -119,18 +130,20 @@ struct teo_bin {
/**
* struct teo_cpu - CPU data used by the TEO cpuidle governor.
- * @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @tick_hits: Number of "hits" after TICK_NSEC.
+ * @tick_intercepts: "Intercepts" before TICK_NSEC.
+ * @short_idles: Wakeups after short idle periods.
+ * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
*/
struct teo_cpu {
- s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- unsigned int tick_hits;
+ unsigned int tick_intercepts;
+ unsigned int short_idles;
+ bool artificial_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -147,23 +160,17 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
s64 target_residency_ns;
u64 measured_ns;
- if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+
+ if (cpu_data->artificial_wakeup) {
/*
- * One of the safety nets has triggered or the wakeup was close
- * enough to the closest timer event expected at the idle state
- * selection time to be discarded.
+ * If one of the safety nets has triggered, assume that this
+ * might have been a long sleep.
*/
measured_ns = U64_MAX;
} else {
u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- /*
- * The computations below are to determine whether or not the
- * (saved) time till the next timer event and the measured idle
- * duration fall into the same "bin", so use last_residency_ns
- * for that instead of time_span_ns which includes the cpuidle
- * overhead.
- */
measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
@@ -171,14 +178,16 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_ns >= lat_ns)
+ if (measured_ns >= lat_ns) {
measured_ns -= lat_ns / 2;
- else
+ if (measured_ns < RESIDENCY_THRESHOLD_NS)
+ cpu_data->short_idles += PULSE;
+ } else {
measured_ns /= 2;
+ cpu_data->short_idles += PULSE;
+ }
}
- cpu_data->total = 0;
-
/*
* Decay the "hits" and "intercepts" metrics for all of the bins and
* find the bins that the sleep length and the measured idle duration
@@ -190,8 +199,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
bin->hits -= bin->hits >> DECAY_SHIFT;
bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
- cpu_data->total += bin->hits + bin->intercepts;
-
target_residency_ns = drv->states[i].target_residency_ns;
if (target_residency_ns <= cpu_data->sleep_length_ns) {
@@ -201,38 +208,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- /*
- * If the deepest state's target residency is below the tick length,
- * make a record of it to help teo_select() decide whether or not
- * to stop the tick. This effectively adds an extra hits-only bin
- * beyond the last state-related one.
- */
- if (target_residency_ns < TICK_NSEC) {
- cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;
-
- cpu_data->total += cpu_data->tick_hits;
-
- if (TICK_NSEC <= cpu_data->sleep_length_ns) {
- idx_timer = drv->state_count;
- if (TICK_NSEC <= measured_ns) {
- cpu_data->tick_hits += PULSE;
- goto end;
- }
- }
- }
-
+ cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration)
+ if (idx_timer == idx_duration) {
cpu_data->state_bins[idx_timer].hits += PULSE;
- else
+ } else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
+ if (TICK_NSEC <= measured_ns)
+ cpu_data->tick_intercepts += PULSE;
+ }
-end:
+ cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
cpu_data->total += PULSE;
}
@@ -280,14 +271,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
- unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -296,10 +285,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
dev->last_state_idx = -1;
}
- cpu_data->time_span_ns = local_clock();
/*
- * Set the expected sleep length to infinity in case of an early
- * return.
+ * Set the sleep length to infinity in case the invocation of
+ * tick_nohz_get_sleep_length() below is skipped, in which case it won't
+ * be known whether or not the subsequent wakeup is caused by a timer.
+ * It is generally fine to count the wakeup as an intercept then, except
+ * when the CPU is mostly woken up by timers, in which case opportunities
+ * to ask for a deeper idle state when no imminent timers are scheduled
+ * may be missed.
*/
cpu_data->sleep_length_ns = KTIME_MAX;
@@ -355,17 +348,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
- tick_intercept_sum = intercept_sum +
- cpu_data->state_bins[drv->state_count-1].intercepts;
-
/*
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states a shallower idle state is likely to be a
+ * all of the deeper states, a shallower idle state is likely to be a
* better choice.
*/
- prev_intercept_idx = idx;
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
@@ -391,41 +380,38 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* first enabled state that is deep enough.
*/
if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable)
+ !dev->states_usage[i].disable) {
idx = i;
- else
- idx = first_suitable_idx;
-
+ break;
+ }
+ idx = first_suitable_idx;
break;
}
if (dev->states_usage[i].disable)
continue;
- if (!teo_state_ok(i, drv)) {
+ if (teo_state_ok(i, drv)) {
/*
- * The current state is too shallow, but if an
- * alternative candidate state has been found,
- * it may still turn out to be a better choice.
+ * The current state is deep enough, but still
+ * there may be a better one.
*/
- if (first_suitable_idx != idx)
- continue;
-
- break;
+ first_suitable_idx = i;
+ continue;
}
- first_suitable_idx = i;
+ /*
+ * The current state is too shallow, so if no suitable
+ * states other than the initial candidate have been
+ * found, give up (the remaining states to check are
+ * shallower still), but otherwise the first suitable
+ * state other than the initial candidate may turn out
+ * to be preferable.
+ */
+ if (first_suitable_idx == idx)
+ break;
}
}
- if (!idx && prev_intercept_idx) {
- /*
- * We have to query the sleep length here otherwise we don't
- * know after wakeup if our guess was correct.
- */
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- cpu_data->sleep_length_ns = duration_ns;
- goto out_tick;
- }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -435,24 +421,39 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * Skip the timers check if state 0 is the current candidate one,
- * because an immediate non-timer wakeup is expected in that case.
- */
- if (!idx)
- goto out_tick;
-
- /*
- * If state 0 is a polling one, check if the target residency of
- * the current candidate state is low enough and skip the timers
- * check in that case too.
+ * If either the candidate state is state 0 or its target residency is
+ * low enough, there is basically nothing more to do, but if the sleep
+ * length is not updated, the subsequent wakeup will be counted as an
+ * "intercept" which may be problematic in the cases when timer wakeups
+ * are dominant. Namely, it may effectively prevent deeper idle states
+ * from being selected at one point even if no imminent timers are
+ * scheduled.
+ *
+ * However, frequent timers in the RESIDENCY_THRESHOLD_NS range on one
+ * CPU are unlikely (user space has a default 50 us slack value for
+ * hrtimers and there are relatively few timers with a lower deadline
+ * value in the kernel), and even if they did happen, the potential
+ * benefit from using a deep idle state in that case would be
+ * questionable anyway for latency reasons. Thus if the measured idle
+ * duration falls into that range in the majority of cases, assume
+ * non-timer wakeups to be dominant and skip updating the sleep length
+ * to reduce latency.
+ *
+ * Also, if the latency constraint is sufficiently low, it will force
+ * shallow idle states regardless of the wakeup type, so the sleep
+ * length need not be known in that case.
*/
- if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
+ if ((!idx || drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS) &&
+ (2 * cpu_data->short_idles >= cpu_data->total ||
+ latency_req < LATENCY_THRESHOLD_NS))
goto out_tick;
duration_ns = tick_nohz_get_sleep_length(&delta_tick);
cpu_data->sleep_length_ns = duration_ns;
+ if (!idx)
+ goto out_tick;
+
/*
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
@@ -469,7 +470,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* total wakeup events, do not stop the tick.
*/
if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
+ cpu_data->tick_intercepts > cpu_data->total / 2 + cpu_data->total / 8)
duration_ns = TICK_NSEC / 2;
end:
@@ -506,17 +507,16 @@ static void teo_reflect(struct cpuidle_device *dev, int state)
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
dev->last_state_idx = state;
- /*
- * If the wakeup was not "natural", but triggered by one of the safety
- * nets, assume that the CPU might have been idle for the entire sleep
- * length time.
- */
if (dev->poll_time_limit ||
(tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ /*
+ * The wakeup was not "genuine", but triggered by one of the
+ * safety nets.
+ */
dev->poll_time_limit = false;
- cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ cpu_data->artificial_wakeup = true;
} else {
- cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ cpu_data->artificial_wakeup = false;
}
}
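
The arithmetic behind the "hits"/"intercepts" bookkeeping rewritten above is an exponentially decaying counter: each update first sheds 1/2^DECAY_SHIFT of every metric, then adds PULSE to the bin the wakeup landed in. A self-contained sketch of that update (bin count and constants are illustrative; the real values come from teo.c and gov.h):

#include <stdio.h>

#define NBINS       4
#define PULSE       1024
#define DECAY_SHIFT 3	/* shed 1/8 of each metric per update */

struct bin { unsigned int hits, intercepts; };
static struct bin bins[NBINS];
static unsigned int total;

/* idx_timer: bin of the sleep length; idx_duration: bin of the
 * measured idle duration (never deeper than idx_timer). */
static void update(int idx_timer, int idx_duration)
{
	for (int i = 0; i < NBINS; i++) {
		bins[i].hits -= bins[i].hits >> DECAY_SHIFT;
		bins[i].intercepts -= bins[i].intercepts >> DECAY_SHIFT;
	}

	if (idx_timer == idx_duration)
		bins[idx_timer].hits += PULSE;		/* woke up on time */
	else
		bins[idx_duration].intercepts += PULSE;	/* woken up early */

	total -= total >> DECAY_SHIFT;
	total += PULSE;
}

int main(void)
{
	for (int i = 0; i < 16; i++)
		update(3, i % 2 ? 3 : 1);	/* alternate hit/intercept */
	printf("hits[3]=%u intercepts[1]=%u total=%u\n",
	       bins[3].hits, bins[1].intercepts, total);
	return 0;
}
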
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0a9cdd31cbd9..19ab145f912e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,23 +200,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..fef18ffdb128 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
static const struct file_operations dbc_fops = {
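
The ioctl rework above leans on the kernel's scope-based cleanup support from <linux/cleanup.h>: guard(mutex)(&dbc_dev->ioctl_mutex) acquires the mutex and schedules the unlock for scope exit, which is what lets every failure path become a bare return. Underneath, this is the compiler's cleanup attribute; a hedged userspace approximation of the same pattern (macro and helper names are illustrative):

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Rough analogue of guard(mutex)(&lock): the unlock runs
 * automatically on every path out of the enclosing scope. */
#define SCOPED_LOCK(m) \
	pthread_mutex_t *scoped_lock_ __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_ioctl(int cmd)
{
	SCOPED_LOCK(&ioctl_lock);

	if (cmd < 0)
		return -1;	/* early return: unlock still happens */
	printf("cmd %d handled\n", cmd);
	return 0;
}
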
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..2e87ca0e292a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -249,7 +249,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
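
The put_cred(revert_creds(old_cred)) change tracks the reworked credential API in which override_creds() no longer takes its own reference on the override creds; revert_creds() hands the override creds back so the caller can drop the reference it has held all along. A sketch of the full pattern under that assumption (kernel context, shown for orientation):

#include <linux/cred.h>
#include <linux/errno.h>

static int do_as_override(void)
{
	struct cred *new;
	const struct cred *old;

	new = prepare_creds();	/* caller holds the only reference */
	if (!new)
		return -ENOMEM;
	/* ... adjust 'new' as needed ... */

	old = override_creds(new);	/* no internal get_cred() anymore */
	/* ... privileged work ... */
	put_cred(revert_creds(old));	/* returns 'new'; drop our ref */

	return 0;
}
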
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 7eb3e4668286..3467f6db4f50 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/ccp.h>
+#include "sev-dev.h"
#include "ccp-dev.h"
#include "sp-dev.h"
@@ -253,8 +254,12 @@ unlock:
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
+ static bool initialized;
int ret;
+ if (initialized)
+ return 0;
+
ret = sp_pci_init();
if (ret)
return ret;
@@ -263,6 +268,8 @@ static int __init sp_mod_init(void)
psp_pci_init();
#endif
+ initialized = true;
+
return 0;
#endif
@@ -279,6 +286,13 @@ static int __init sp_mod_init(void)
return -ENODEV;
}
+#if IS_BUILTIN(CONFIG_KVM_AMD) && IS_ENABLED(CONFIG_KVM_AMD_SEV)
+int __init sev_module_init(void)
+{
+ return sp_mod_init();
+}
+#endif
+
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
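
The static initialized flag together with the sev_module_init() wrapper lets a built-in KVM/SEV consumer force the platform security processor to be probed early without the regular initcall redoing the work afterwards. A minimal sketch of this idempotent-init pattern (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool initialized;

static int mod_init(void)
{
	if (initialized)
		return 0;	/* already probed by the other caller */

	printf("probing once\n");	/* one-time setup */
	initialized = true;
	return 0;
}

/* Analogous to sev_module_init(): the early consumer reuses the
 * same entry point instead of duplicating the probe path. */
int consumer_init(void)
{
	return mod_init();
}
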
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 96fde9437b4b..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1209,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1396,6 +1395,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1428,6 +1438,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
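
hpre_dev_is_abnormal() above, and the matching sec_dev_is_abnormal() later in the series, plug into the new .dev_is_abnormal hook so the shared QM core can ask each accelerator whether its device-specific shutdown bits are set instead of decoding them itself. A condensed sketch of that callback-table shape (types and names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct err_ini {
	/* driver-specific view of the device error registers */
	bool (*dev_is_abnormal)(void *dev);
};

struct qm {
	void *dev;
	uint32_t qm_err_status;
	uint32_t qm_shutdown_mask;
	const struct err_ini *err_ini;
};

/* Mirrors qm_check_dev_error(): generic QM bits first, then defer
 * to the driver for device-specific fatal states. */
static bool check_dev_error(struct qm *qm)
{
	if (qm->qm_err_status & qm->qm_shutdown_mask)
		return true;

	if (qm->err_ini->dev_is_abnormal)
		return qm->err_ini->dev_is_abnormal(qm->dev);

	return false;
}
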
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 19c1b5d3c954..d3f5d108b898 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -276,7 +282,7 @@ enum qm_alg_type {
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -333,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -396,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -501,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -654,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -688,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -855,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -1052,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1540,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1562,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1630,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1662,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+ dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1692,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1700,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which VF responses timed out. */
for (i = 1; i <= vfs_num; i++) {
@@ -1711,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1741,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1842,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1864,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2156,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2475,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2843,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3607,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3617,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -4109,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4122,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4137,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4298,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4527,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error. If it occurs, we need to
* wait for the soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4675,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4709,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4732,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4749,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
* Whether message is got successfully,
* VF needs to ack PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4795,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
* Get the msg from source by sending mailbox. Whether message is got
* successfully, destination needs to ack source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4822,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -5167,6 +5277,20 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
return qm_pre_store_caps(qm);
}
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
+}
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5186,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5205,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5247,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5461,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5598,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
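
The new set_ifc_begin/get_ifc callbacks standardize the 64-bit inter-function message layout spelled out in the hw_ops comment above: the command occupies the low 32 bits and the payload data the high 32 bits. A self-contained sketch of the packing (the command value below is illustrative):

#include <stdint.h>
#include <stdio.h>

#define IFC_CMD_MASK   0xffffffffull	/* GENMASK(31, 0) */
#define IFC_DATA_SHIFT 32

/* Low 32 bits carry the command, high 32 bits the payload. */
static uint64_t ifc_pack(uint32_t cmd, uint32_t data)
{
	return (uint64_t)cmd | (uint64_t)data << IFC_DATA_SHIFT;
}

static void ifc_unpack(uint64_t msg, uint32_t *cmd, uint32_t *data)
{
	*cmd = msg & IFC_CMD_MASK;
	if (data)
		*data = msg >> IFC_DATA_SHIFT;
}

int main(void)
{
	uint32_t cmd, qos;
	uint64_t msg = ifc_pack(0x5 /* illustrative cmd */, 42);

	ifc_unpack(msg, &cmd, &qos);
	printf("cmd=0x%x qos=%u\n", cmd, qos);	/* cmd=0x5 qos=42 */
	return 0;
}
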
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 356188bee6fb..4b9970230822 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ bool fallback;
};
/* SEC request of Crypto */
@@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ae9ebbb4103d..66bc07da9eb6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ dev_err(dev, "AUTH key length error!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1202,27 +1195,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
* the counter must be set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
+ cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ /* Hardware does not handle cases where authsize is less than 4 bytes */
+ if (unlikely(sz < MIN_MAC_LEN)) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
+ req->cryptlen <= authsize))) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
}
@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
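
The rest of the soft path is not shown in this hunk. For orientation, a complete fallback invocation under the standard AEAD API looks roughly like this, a sketch rather than the patch itself, reusing the a_req/subreq/encrypt names from the surrounding code:

	aead_request_set_callback(subreq, a_req->base.flags,
				  a_req->base.complete, a_req->base.data);
	aead_request_set_crypt(subreq, a_req->src, a_req->dst,
			       a_req->cryptlen, a_req->iv);
	aead_request_set_ad(subreq, a_req->assoclen);

	ret = encrypt ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
	aead_request_free(subreq);
	return ret;
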
@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
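
Note that the fallback flag now lives in the request rather than the shared context: many requests can be in flight on one tfm, and a ctx-wide ctx->a_ctx.fallback set by one malformed request could have redirected an unrelated, valid one to the soft path. The per-request shape, condensed from the hunk above:

	req->aead_req.fallback = false;			/* reset on every submission */
	ret = sec_aead_param_check(ctx, req);		/* may set the flag */
	if (unlikely(ret))
		return req->aead_req.fallback ?
		       sec_aead_soft_crypto(ctx, a_req, encrypt) : -EINVAL;
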
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 8ec5333bb5aa..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1180,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* Reserve room for a newline separator between algorithm names */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear any pending DAE hw error source */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable reporting of the same errors until the device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
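
Both pollers in this file lean on readl_relaxed_poll_timeout() from <linux/iopoll.h>: it re-reads the register into val every delay_us microseconds until cond holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. A sketch on a made-up register; the EXAMPLE_* names are hypothetical, the interval/timeout macros are the DAE ones above:

	u32 sts;
	int ret;

	ret = readl_relaxed_poll_timeout(base + EXAMPLE_READY_REG, sts,
					 sts & EXAMPLE_READY_BIT,
					 DAE_REG_RD_INTVRL_US,	/* 10 us between reads */
					 DAE_REG_RD_TMOUT_US);	/* 1 s budget */
	if (ret)
		dev_err(dev, "ready bit never set\n");
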
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 2fecf346c3c9..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 9239b251c2d7..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
u32 err_status;
+ /* Get the latest device hardware error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
@@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
 /* Disable reporting of the same errors until the device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
}
- hisi_zip_clear_hw_err_status(qm, err_status);
}
- return ACC_ERR_RECOVERED;
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
}
static void hisi_zip_err_info_init(struct hisi_qm *qm)
@@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
.get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1264,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1301,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
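
The hunk above folds the repeated "hisi_qm_uninit() + return" pairs into a single unwind label, the conventional kernel error-handling shape. A generic sketch of that shape; the step/undo helper names are hypothetical:

	ret = step_a(qm);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = step_b(qm);
	if (ret)
		goto err_undo_a;	/* all later failures funnel through here */

	return 0;

	err_undo_a:
		undo_a(qm);
		return ret;
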
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 9e557649e5d0..c3776b0de51d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 449c6d3ab2db..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
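
All three additions plug the same leak: of_parse_phandle_with_fixed_args() takes a reference on the node it returns in args.np, and the caller owns that reference. The pattern, mirroring the driver's queue-rx case and assuming <linux/of.h>; recv_qid stands in for the driver's variable:

	struct of_phandle_args args;

	if (of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, &args))
		return -ENODEV;
	recv_qid = args.args[0];
	of_node_put(args.np);	/* balance the reference taken by the parse */
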
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index c8241f5a0a26..f20ae7e35a0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -473,22 +473,6 @@ unlock_and_exit:
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
-static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
-{
- char alpha;
- u8 index;
- int ret;
-
- ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
- if (ret != 1)
- return -EINVAL;
-
- index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
- *rp_id = index;
-
- return 0;
-}
-
static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
unsigned int new_rp_num,
unsigned int rp_regs_index)
@@ -611,18 +595,11 @@ static int tl_rp_data_show(struct seq_file *s, void *unused)
{
struct adf_accel_dev *accel_dev = s->private;
u8 rp_regs_index;
- u8 max_rp;
- int ret;
if (!accel_dev)
return -EINVAL;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
- ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- return ret;
- }
+ rp_regs_index = debugfs_get_aux_num(s->file);
return tl_print_rp_data(accel_dev, s, rp_regs_index);
}
@@ -635,7 +612,6 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
struct adf_telemetry *telemetry;
unsigned int new_rp_num;
u8 rp_regs_index;
- u8 max_rp;
int ret;
accel_dev = seq_f->private;
@@ -643,15 +619,10 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
return -EINVAL;
telemetry = accel_dev->telemetry;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
mutex_lock(&telemetry->wr_lock);
- ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- goto unlock_and_exit;
- }
+ rp_regs_index = debugfs_get_aux_num(file);
ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
if (ret)
@@ -689,7 +660,8 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
for (i = 0; i < max_rp; i++) {
snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
ADF_TL_DBG_RP_ALPHA_INDEX(i));
- debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
+ &tl_rp_data_fops);
}
}
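
debugfs_create_file_aux_num() stores a small integer alongside the private data when the file is created, and debugfs_get_aux_num() reads it back inside the handlers, which is why the name-parsing helper could be deleted outright. In miniature, a sketch reusing the names from the hunks above:

	/* creation: attach the ring-pair index i to each file */
	debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
				    &tl_rp_data_fops);

	/* handler: recover the index without re-parsing the file name */
	u8 rp_regs_index = debugfs_get_aux_num(file);
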
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index 14c302d2db79..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at lest
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_info("queue_alloc() of %d fails\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
- iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* DES: ECB CBC and CFB are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* 3DES: ECB CBC and CFB are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* AES: ECB CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_encrypt_chaining,
- },
- },
-
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
- .hash_init = (u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
- .hash_init = (u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
- .hash_init = (u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
- .hash_init = (u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
- mutex_unlock(&spu_lock);
- return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching them with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
- sprintf(p->irq_name, "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
- int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
- id = mdesc_get_property(mdesc, tgt, "id", NULL);
- if (table[*id] != NULL) {
- dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
- dev->dev.of_node);
- return -EINVAL;
- }
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
-		pr_err("%s: no 'ino' property\n", __func__);
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
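For orientation, here is a minimal configure/unconfigure sequence obeying the constraints documented above; it mirrors spu_queue_register_workfn() and spu_queue_destroy() in the driver being removed. This is a sketch only: demo_cwq_setup and queue_base are hypothetical names, and error handling is abbreviated.

	static int demo_cwq_setup(void *queue_base)	/* hypothetical */
	{
		unsigned long qhandle, hv_ret;

		/* Configure a 64-entry CWQ; the size must be a power of two
		 * in [2, 64] and the real address 64-byte aligned. */
		hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(queue_base),
					 CWQ_NUM_ENTRIES, &qhandle);
		if (hv_ret)
			return -EINVAL;	/* non-zero RET0 is an HV error status */

		/* ... fill entries, advance the tail ... */

		/* Unconfigure: entry count of zero, queue handle in ARG1. */
		sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
		return 0;
	}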
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
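The gethead/gettail/settail calls above pair in the obvious way. A hedged sketch of polling queue occupancy, assuming qhandle came from a prior sun4v_ncs_qconf() call:

	unsigned long head, tail, hv_ret;

	hv_ret = sun4v_ncs_gethead(qhandle, &head);
	if (!hv_ret)
		hv_ret = sun4v_ncs_gettail(qhandle, &tail);
	/* head == tail means the unit has drained the queue; new work is
	 * submitted by filling entries and advancing the tail offset with
	 * sun4v_ncs_settail(). */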
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e27b84616743..551dd32a8db0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..41d67780fd45 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 498cbd585ed1..a099460d5f21 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -40,8 +39,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -152,8 +149,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e228a31fe28d..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+			for (j = i - 1; j >= 0; j--)
+				qce_ops[j]->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
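The registration path above relies on devm_add_action_or_reset(): on success the callback is queued to run at driver detach, and if registering the action itself fails the callback runs immediately and the error is returned, so the probe path needs no manual unwind. A minimal sketch of the pattern, with my_setup()/my_teardown() as hypothetical names rather than anything in this driver:

	static void my_teardown(void *data)
	{
		/* release whatever my_setup() acquired */
	}

	static int my_setup(struct device *dev, void *state)
	{
		/* ... acquire resources, stash them in state ... */

		/* Runs my_teardown(state) immediately on failure, or
		 * automatically when the device is unbound on success. */
		return devm_add_action_or_reset(dev, my_teardown, state);
	}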
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
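scoped_guard(), used in the rewritten queue handler above, comes from <linux/cleanup.h>: the mutex is held only for the braced statement block and is released automatically on every exit path, including the early return inside the block. A self-contained sketch of the idiom, where demo_lock, demo_state and demo_set are illustrative only:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static int demo_state;

	static int demo_set(int v)
	{
		scoped_guard(mutex, &demo_lock) {
			if (demo_state == v)
				return 0;	/* lock dropped on this return too */
			demo_state = v;
		}
		/* demo_lock is no longer held here */
		return 1;
	}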
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9d130592cc0a..d734c9a56786 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..0b5cdd5676b1 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 28edd5822486..50e6a45b30ba 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -703,7 +703,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int commit_reap(struct device *dev, const void *data)
+static int commit_reap(struct device *dev, void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 9d58ab9d33c5..013b869b66cb 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -252,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
}
/* require dvsec ranges to be covered by a locked platform window */
-static int dvsec_range_allowed(struct device *dev, void *arg)
+static int dvsec_range_allowed(struct device *dev, const void *arg)
{
- struct range *dev_range = arg;
+ const struct range *dev_range = arg;
struct cxl_decoder *cxld;
if (!is_root_decoder(dev))
@@ -291,11 +291,11 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct device *dev = cxlds->dev;
int hdm_count, rc, i, ranges = 0;
int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index b3378d3f6acb..8853415c106a 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -51,17 +51,6 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL");
-bool is_cxl_nvdimm_bridge(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_bridge_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, "CXL");
-
-static int match_nvdimm_bridge(struct device *dev, void *data)
-{
- return is_cxl_nvdimm_bridge(dev);
-}
-
/**
* cxl_find_nvdimm_bridge() - find a bridge device relative to a port
* @port: any descendant port of an nvdimm-bridge associated
@@ -75,7 +64,9 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
if (!cxl_root)
return NULL;
- dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
+ dev = device_find_child(&cxl_root->port.dev,
+ &cxl_nvdimm_bridge_type,
+ device_match_type);
if (!dev)
return NULL;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index b98b1ccffd1c..e8d11a988fd9 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -778,7 +778,7 @@ out:
return rc;
}
-static int check_commit_order(struct device *dev, const void *data)
+static int check_commit_order(struct device *dev, void *data)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
@@ -792,7 +792,7 @@ static int check_commit_order(struct device *dev, const void *data)
return 0;
}
-static int match_free_decoder(struct device *dev, void *data)
+static int match_free_decoder(struct device *dev, const void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
@@ -824,9 +824,9 @@ static int match_free_decoder(struct device *dev, void *data)
return 1;
}
-static int match_auto_decoder(struct device *dev, void *data)
+static int match_auto_decoder(struct device *dev, const void *data)
{
- struct cxl_region_params *p = data;
+ const struct cxl_region_params *p = data;
struct cxl_decoder *cxld;
struct range *r;
@@ -1733,10 +1733,12 @@ static struct cxl_port *next_port(struct cxl_port *port)
return port->parent_dport->port;
}
-static int match_switch_decoder_by_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev,
+ const void *data)
{
struct cxl_switch_decoder *cxlsd;
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
+
if (!is_switch_decoder(dev))
return 0;
@@ -3187,9 +3189,10 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev, void *data)
+static int match_root_decoder_by_range(struct device *dev,
+ const void *data)
{
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
struct cxl_root_decoder *cxlrd;
if (!is_root_decoder(dev))
@@ -3200,11 +3203,11 @@ static int match_root_decoder_by_range(struct device *dev, void *data)
return range_contains(r1, r2);
}
-static int match_region_by_range(struct device *dev, void *data)
+static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
struct cxl_region *cxlr;
- struct range *r = data;
+ const struct range *r = data;
int rc = 0;
if (!is_cxl_region(dev))
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 59cb35b40c7e..117c2e94c761 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -289,21 +289,17 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
return true;
}
-/**
- * cxl_find_regblock_instance() - Locate a register block by type / index
- * @pdev: The CXL PCI device to enumerate.
- * @type: Register Block Indicator id
- * @map: Enumeration output, clobbered on error
- * @index: Index into which particular instance of a regblock wanted in the
- * order found in register locator DVSEC.
- *
- * Return: 0 if register block enumerated, negative error code otherwise
+/*
+ * __cxl_find_regblock_instance() - Locate a register block or count instances by type / index
+ * Use CXL_INSTANCES_COUNT for @index if counting instances.
*
- * A CXL DVSEC may point to one or more register blocks, search for them
- * by @type and @index.
+ * __cxl_find_regblock_instance() may return:
+ * 0 - if register block enumerated.
+ * >= 0 - the number of matching instances, when counting.
+ * < 0 - error code otherwise.
*/
-int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index)
+static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index)
{
u32 regloc_size, regblocks;
int instance = 0;
@@ -342,8 +338,30 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
}
map->resource = CXL_RESOURCE_NONE;
+ if (index == CXL_INSTANCES_COUNT)
+ return instance;
+
return -ENODEV;
}
+
+/**
+ * cxl_find_regblock_instance() - Locate a register block by type / index
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ * @map: Enumeration output, clobbered on error
+ * @index: Which instance of the regblock is wanted, in the order
+ *	   found in the register locator DVSEC.
+ *
+ * Return: 0 if register block enumerated, negative error code otherwise
+ *
+ * A CXL DVSEC may point to one or more register blocks, search for them
+ * by @type and @index.
+ */
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, unsigned int index)
+{
+ return __cxl_find_regblock_instance(pdev, type, map, index);
+}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
/**
@@ -360,7 +378,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
- return cxl_find_regblock_instance(pdev, type, map, 0);
+ return __cxl_find_regblock_instance(pdev, type, map, 0);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
@@ -371,19 +389,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
*
* Some regblocks may be repeated. Count how many instances.
*
- * Return: count of matching regblocks.
+ * Return: non-negative count of matching regblocks, negative error code otherwise.
*/
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
{
struct cxl_register_map map;
- int rc, count = 0;
- while (1) {
- rc = cxl_find_regblock_instance(pdev, type, &map, count);
- if (rc)
- return count;
- count++;
- }
+ return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
}
EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL");
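The two exported entry points compose naturally: count the blocks of a given type first, then enumerate each instance by index. A hedged usage sketch, taking CXL_REGLOC_RBI_COMPONENT as an example block type, assuming pdev is the CXL PCI device, and abbreviating error handling:

	struct cxl_register_map map;
	int i, count;

	count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_COMPONENT);
	for (i = 0; i < count; i++) {
		if (cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_COMPONENT,
					       &map, i))
			break;
		/* map now describes instance i of the component regblock */
	}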
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 8389a94adb1a..cea706b683b5 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -166,11 +166,13 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
+#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
+ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
)
/*
@@ -197,7 +199,8 @@ TRACE_EVENT(cxl_overflow,
__field(u16, hdr_related_handle) \
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
- __field(u8, hdr_maint_op_class)
+ __field(u8, hdr_maint_op_class) \
+ __field(u8, hdr_maint_op_sub_class)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -209,17 +212,19 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
- __entry->hdr_maint_op_class = (hdr).maint_op_class
+ __entry->hdr_maint_op_class = (hdr).maint_op_class; \
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
+ __entry->hdr_maint_op_sub_class, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -264,8 +269,30 @@ TRACE_EVENT(cxl_generic_event,
)
/*
+ * Component ID Format
+ * CXL 3.1 section 8.2.9.2.1; Table 8-44
+ */
+#define CXL_PLDM_COMPONENT_ID_ENTITY_VALID BIT(0)
+#define CXL_PLDM_COMPONENT_ID_RES_VALID BIT(1)
+
+#define show_comp_id_pldm_flags(flags) __print_flags(flags, " | ", \
+ { CXL_PLDM_COMPONENT_ID_ENTITY_VALID, "PLDM Entity ID" }, \
+ { CXL_PLDM_COMPONENT_ID_RES_VALID, "Resource ID" } \
+)
+
+#define show_pldm_entity_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_ENTITY_VALID) ? \
+ __print_hex(&comp_id[1], 6) : "0x00" : "0x00"
+
+#define show_pldm_resource_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_RES_VALID) ? \
+ __print_hex(&comp_id[7], 4) : "0x00" : "0x00"
+
+/*
* General Media Event Record - GMER
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
*/
#define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1)
@@ -279,10 +306,18 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
-#define show_gmer_mem_event_type(type) __print_symbolic(type, \
- { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x03
+#define CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x04
+#define CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_gmer_mem_event_type(type) __print_symbolic(type, \
+ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_GMER_TRANS_UNKNOWN 0x00
@@ -292,6 +327,8 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_ECS 0x07
+#define CXL_GMER_TRANS_MEDIA_INITIALIZATION 0x08
#define show_trans_type(type) __print_symbolic(type, \
{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
@@ -299,18 +336,57 @@ TRACE_EVENT(cxl_generic_event,
{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
- { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_ECS, "Internal Media Error Check Scrub" }, \
+ { CXL_GMER_TRANS_MEDIA_INITIALIZATION, "Media Initialization" } \
)
#define CXL_GMER_VALID_CHANNEL BIT(0)
#define CXL_GMER_VALID_RANK BIT(1)
#define CXL_GMER_VALID_DEVICE BIT(2)
#define CXL_GMER_VALID_COMPONENT BIT(3)
+#define CXL_GMER_VALID_COMPONENT_ID_FORMAT BIT(4)
#define show_valid_flags(flags) __print_flags(flags, "|", \
{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
{ CXL_GMER_VALID_RANK, "RANK" }, \
{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
- { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
+ { CXL_GMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_GMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+
+#define CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA BIT(0)
+#define CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED BIT(1)
+#define show_cme_threshold_ev_flags(flags) __print_flags(flags, "|", \
+ { \
+ CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA, \
+ "Corrected Memory Errors in Multiple Media Components" \
+ }, { \
+ CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED, \
+ "Exceeded Programmable Threshold" \
+ } \
+)
+
+#define CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR 0x01
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR 0x02
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR 0x03
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR 0x04
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR 0x05
+#define show_mem_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR, "Internal Datapath Error" }, \
+ { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR, \
+ "Media Link Command Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR, \
+ "Media Link Control Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR, \
+ "Media Link Data Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR, "Media Link CRC Error" \
+ } \
)
TRACE_EVENT(cxl_general_media,
@@ -336,6 +412,9 @@ TRACE_EVENT(cxl_general_media,
__field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __field(u32, cme_count)
+ __field(u8, sub_type)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -350,6 +429,7 @@ TRACE_EVENT(cxl_general_media,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->channel = rec->media_hdr.channel;
__entry->rank = rec->media_hdr.rank;
@@ -365,27 +445,40 @@ TRACE_EVENT(cxl_general_media,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
- "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "descriptor='%s' type='%s' sub_type='%s' " \
+ "transaction_type='%s' channel=%u rank=%u " \
+ "device=%x validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_gmer_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
- __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
show_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
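
cme_count arrives from the device as a 3-byte little-endian field, which is why the fast-assign path above goes through get_unaligned_le24() instead of a plain load. A standalone sketch of the same conversion:

#include <stdint.h>
#include <stdio.h>

/* Rough user-space equivalent of get_unaligned_le24(). */
static uint32_t le24_to_cpu(const uint8_t *p)
{
	return p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void)
{
	uint8_t raw[3] = { 0x10, 0x00, 0x01 }; /* 0x010010 */
	printf("cme_count = %u\n", le24_to_cpu(raw)); /* 65552 */
	return 0;
}
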
/*
* DRAM Event Record - DER
*
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
*/
/*
* DRAM Event Record defines many fields the same as the General Media Event
@@ -395,11 +488,17 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
-#define show_dram_mem_event_type(type) __print_symbolic(type, \
- { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
- { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x04
+#define CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_dram_mem_event_type(type) __print_symbolic(type, \
+ { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_DER_VALID_CHANNEL BIT(0)
@@ -410,15 +509,21 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_VALID_ROW BIT(5)
#define CXL_DER_VALID_COLUMN BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK BIT(7)
-#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
- { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
- { CXL_DER_VALID_RANK, "RANK" }, \
- { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
- { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
- { CXL_DER_VALID_BANK, "BANK" }, \
- { CXL_DER_VALID_ROW, "ROW" }, \
- { CXL_DER_VALID_COLUMN, "COLUMN" }, \
- { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
+#define CXL_DER_VALID_COMPONENT BIT(8)
+#define CXL_DER_VALID_COMPONENT_ID_FORMAT BIT(9)
+#define CXL_DER_VALID_SUB_CHANNEL BIT(10)
+#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_DER_VALID_RANK, "RANK" }, \
+ { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_DER_VALID_BANK, "BANK" }, \
+ { CXL_DER_VALID_ROW, "ROW" }, \
+ { CXL_DER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" }, \
+ { CXL_DER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_DER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" }, \
+ { CXL_DER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
)
TRACE_EVENT(cxl_dram,
@@ -447,6 +552,12 @@ TRACE_EVENT(cxl_dram,
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ /* Following are out of order to pack trace record */
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u32, cvme_count)
+ __field(u8, sub_type)
+ __field(u8, sub_channel)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -460,6 +571,7 @@ TRACE_EVENT(cxl_dram,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
__entry->channel = rec->media_hdr.channel;
@@ -479,30 +591,47 @@ TRACE_EVENT(cxl_dram,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ __entry->sub_channel = rec->sub_channel;
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
),
- CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
"validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_dram_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
show_dram_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
+ __entry->cvme_count
)
);
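
The PLDM component-ID helpers used above (show_pldm_entity_id() and show_pldm_resource_id(), defined earlier in this header) only interpret comp_id when both the COMPONENT and COMPONENT PLDM FORMAT validity bits are set. A hypothetical gate expressing that precondition (bit positions follow the CXL_DER_VALID_* defines above; this is not the actual macro body):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pldm_id_valid(uint16_t flags, uint16_t comp_bit, uint16_t fmt_bit)
{
	return (flags & comp_bit) && (flags & fmt_bit);
}

int main(void)
{
	uint16_t flags = (1u << 8) | (1u << 9); /* COMPONENT + PLDM FORMAT */

	printf("decode PLDM ids: %s\n",
	       pldm_id_valid(flags, 1u << 8, 1u << 9) ? "yes" : "no");
	return 0;
}
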
/*
* Memory Module Event Record - MMER
*
- * CXL res 3.0 section 8.2.9.2.1.3; Table 8-45
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
*/
#define CXL_MMER_HEALTH_STATUS_CHANGE 0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE 0x01
@@ -510,27 +639,35 @@ TRACE_EVENT(cxl_dram,
#define CXL_MMER_TEMP_CHANGE 0x03
#define CXL_MMER_DATA_PATH_ERROR 0x04
#define CXL_MMER_LSA_ERROR 0x05
+#define CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR 0x06
+#define CXL_MMER_MEMORY_MEDIA_FRU_ERROR 0x07
+#define CXL_MMER_POWER_MANAGEMENT_FAULT 0x08
#define show_dev_evt_type(type) __print_symbolic(type, \
{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
- { CXL_MMER_LSA_ERROR, "LSA Error" } \
+ { CXL_MMER_LSA_ERROR, "LSA Error" }, \
+ { CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR, "Unrecoverable Internal Sideband Bus Error" }, \
+ { CXL_MMER_MEMORY_MEDIA_FRU_ERROR, "Memory Media FRU Error" }, \
+ { CXL_MMER_POWER_MANAGEMENT_FAULT, "Power Management Fault" } \
)
/*
* Device Health Information - DHI
*
- * CXL res 3.0 section 8.2.9.8.3.1; Table 8-100
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
*/
#define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2)
+#define CXL_DHI_HS_MEM_CAPACITY_DEGRADED BIT(3)
#define show_health_status_flags(flags) __print_flags(flags, "|", \
{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
+ { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" }, \
+ { CXL_DHI_HS_MEM_CAPACITY_DEGRADED, "MEM_CAPACITY_DEGRADED" } \
)
#define CXL_DHI_MS_NORMAL 0x00
@@ -584,6 +721,26 @@ TRACE_EVENT(cxl_dram,
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5)
+#define CXL_MMER_VALID_COMPONENT BIT(0)
+#define CXL_MMER_VALID_COMPONENT_ID_FORMAT BIT(1)
+#define show_mem_module_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_MMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+#define CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA 0x01
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA 0x02
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU 0x03
+#define show_dev_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA, "Invalid Config Data" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA, "Unsupported Config Data" }, \
+ { \
+ CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU, \
+ "Unsupported Memory Media FRU" \
+ } \
+)
+
TRACE_EVENT(cxl_memory_module,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
@@ -606,6 +763,9 @@ TRACE_EVENT(cxl_memory_module,
__field(u32, cor_per_err_cnt)
__field(s16, device_temp)
__field(u8, add_status)
+ __field(u8, event_sub_type)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u16, validity_flags)
),
TP_fast_assign(
@@ -614,6 +774,7 @@ TRACE_EVENT(cxl_memory_module,
/* Memory Module Event */
__entry->event_type = rec->event_type;
+ __entry->event_sub_type = rec->event_sub_type;
/* Device Health Info */
__entry->health_status = rec->info.health_status;
@@ -624,13 +785,20 @@ TRACE_EVENT(cxl_memory_module,
__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
__entry->add_status = rec->info.add_status;
+ __entry->validity_flags = get_unaligned_le16(rec->validity_flags);
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
),
- CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
- "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
+ CXL_EVT_TP_printk("event_type='%s' event_sub_type='%s' health_status='%s' " \
+ "media_status='%s' as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
- "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
+ "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u " \
+ "validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
show_dev_evt_type(__entry->event_type),
+ show_dev_event_sub_type(__entry->event_sub_type),
show_health_status_flags(__entry->health_status),
show_media_status(__entry->media_status),
show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
@@ -639,7 +807,14 @@ TRACE_EVENT(cxl_memory_module,
show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
__entry->life_used, __entry->device_temp,
__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
- __entry->cor_per_err_cnt
+ __entry->cor_per_err_cnt,
+ show_mem_module_valid_flags(__entry->validity_flags),
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
)
);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f6015f24ad38..bbbaa0d0a670 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -302,10 +302,11 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
+#define CXL_INSTANCES_COUNT -1
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index);
+ struct cxl_register_map *map, unsigned int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map);
int cxl_setup_regs(struct cxl_register_map *map);
@@ -821,7 +822,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+struct cxl_dev_state;
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -864,7 +866,6 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
-bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 6d94ff4a4f1a..a96e54c6259e 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -907,7 +907,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- int i, rc, pmu_count;
+ int rc, pmu_count;
+ unsigned int i;
bool irq_avail;
/*
@@ -1009,6 +1010,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+ if (pmu_count < 0)
+ return pmu_count;
+
for (i = 0; i < pmu_count; i++) {
struct cxl_pmu_regs pmu_regs;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 4c83f6a22e58..d2bfd1ff5492 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
if (rc < 0)
return rc;
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3ebac2496679..70219099c604 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -244,13 +244,9 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
-
- if (!edev) {
- of_node_put(node);
- return ERR_PTR(-ENODEV);
- }
-
of_node_put(node);
+ if (!edev)
+ return ERR_PTR(-ENODEV);
return edev;
}
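
The devfreq hunk above is a pure control-flow simplification: of_node_put() must run on every exit, so hoisting it above the error check removes the duplicated cleanup. The shape of the pattern, with hypothetical names:

#include <stdio.h>

struct node { int refs; };

static void node_put(struct node *n) { n->refs--; }

static struct node *lookup(struct node *n, int found)
{
	node_put(n);		/* common exit work, done exactly once */
	if (!found)
		return NULL;	/* error path no longer repeats the put */
	return n;
}

int main(void)
{
	struct node n = { .refs = 1 };

	printf("found: %s, refs: %d\n", lookup(&n, 1) ? "yes" : "no", n.refs);
	return 0;
}
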
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index e2a1e4463b6f..0470d7c175f4 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -642,8 +642,7 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
if (ret)
return ret;
- hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dfi->timer.function = rockchip_dfi_timer;
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
switch (dfi->ddr_type) {
case ROCKCHIP_DDRTYPE_LPDDR2:
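
hrtimer_setup() folds the old two-step hrtimer_init()-plus-callback assignment into a single call, so the timer can never be started with the function pointer still unset. A minimal kernel-style sketch, assuming the signature used in the hunk above:

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_init(struct hrtimer *t)
{
	/* Old style:
	 *   hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   t->function = demo_timer_fn;
	 */
	hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
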
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 7d06c476d8e9..b9ea7ad2e51b 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -236,8 +236,7 @@ err_regulator:
return ret;
}
-static int exynos_bus_parse_of(struct device_node *np,
- struct exynos_bus *bus)
+static int exynos_bus_parse_of(struct exynos_bus *bus)
{
struct device *dev = bus->dev;
struct dev_pm_opp *opp;
@@ -408,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
/* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
+ ret = exynos_bus_parse_of(bus);
if (ret < 0)
goto err_reg;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e994d6e0779e..8afea2e23360 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -162,8 +162,8 @@ config DMA_SA11X0
config DMA_SUN4I
tristate "Allwinner A10 DMA SoCs support"
- depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
- default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV
+ default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -740,8 +740,6 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
-source "drivers/dma/ptdma/Kconfig"
-
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5b2a52f4f2ee..19ba465011a6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
index 7d1f51d69675..00d874872a8f 100644
--- a/drivers/dma/amd/Kconfig
+++ b/drivers/dma/amd/Kconfig
@@ -1,4 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+ tristate "AMD AE4DMA Engine"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+ depends on AMD_PTDMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD AE4DMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
+
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
config AMD_QDMA
tristate "AMD Queue-based DMA"
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
index 37212be9364f..11278c06374d 100644
--- a/drivers/dma/amd/Makefile
+++ b/drivers/dma/amd/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/ae4dma/Makefile b/drivers/dma/amd/ae4dma/Makefile
new file mode 100644
index 000000000000..e918f85a80ec
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
diff --git a/drivers/dma/amd/ae4dma/ae4dma-dev.c b/drivers/dma/amd/ae4dma/ae4dma-dev.c
new file mode 100644
index 000000000000..8de3bef41b58
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-dev.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
+static void ae4_pending_work(struct work_struct *work)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct pt_cmd *cmd;
+ u32 cridx;
+
+ for (;;) {
+ wait_event_interruptible(ae4cmd_q->q_w,
+ ((atomic64_read(&ae4cmd_q->done_cnt)) <
+ atomic64_read(&ae4cmd_q->intr_cnt)));
+
+ atomic64_inc(&ae4cmd_q->done_cnt);
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+ while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+ cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+
+ ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+ ae4cmd_q->q_cmd_count--;
+ ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+ complete_all(&ae4cmd_q->cmp);
+ }
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+}
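
ae4_pending_work() retires descriptors by chasing the hardware read index: dridx advances one slot at a time, wrapping at CMD_Q_LEN, until it catches up with cridx. The wraparound in isolation (queue length is illustrative):

#include <stdio.h>

#define CMD_Q_LEN 32

int main(void)
{
	unsigned int dridx = 30, cridx = 2; /* retires slots 30, 31, 0, 1 */

	while (dridx != cridx) {
		printf("retire slot %u\n", dridx);
		dridx = (dridx + 1) % CMD_Q_LEN;
	}
	return 0;
}
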
+
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+ struct ae4_cmd_queue *ae4cmd_q = data;
+ struct pt_cmd_queue *cmd_q;
+ struct pt_device *pt;
+ u32 status;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ pt = cmd_q->pt;
+
+ pt->total_interrupts++;
+ atomic64_inc(&ae4cmd_q->intr_cnt);
+
+ status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+ if (status & BIT(0)) {
+ status &= GENMASK(31, 1);
+ writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+ }
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ int i;
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ if (!ae4cmd_q->pws)
+ break;
+
+ cancel_delayed_work_sync(&ae4cmd_q->p_work);
+ destroy_workqueue(ae4cmd_q->pws);
+ }
+}
+
+int ae4_core_init(struct ae4_device *ae4)
+{
+ struct pt_device *pt = &ae4->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q;
+ int i, ret = 0;
+
+ writel(max_hw_q, pt->io_regs);
+
+ for (i = 0; i < max_hw_q; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ ae4cmd_q->id = ae4->cmd_q_count;
+ ae4->cmd_q_count++;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ cmd_q->pt = pt;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+ dev_name(pt->dev), ae4cmd_q);
+ if (ret)
+ return ret;
+
+ cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+ cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ /* Update the device registers with queue information. */
+ writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+ writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+ INIT_LIST_HEAD(&ae4cmd_q->cmd);
+ init_waitqueue_head(&ae4cmd_q->q_w);
+
+ ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+ if (!ae4cmd_q->pws) {
+ ae4_destroy_work(ae4);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+ queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+ init_completion(&ae4cmd_q->cmp);
+ }
+
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ ae4_destroy_work(ae4);
+ else
+ ptdma_debugfs_setup(pt);
+
+ return ret;
+}
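
ae4_core_init() derives each queue's control block as io_regs + (i + 1) * AE4_Q_SZ, i.e. one AE4_Q_SZ-sized register window per queue after a global block at offset 0. The offsets it produces:

#include <stdio.h>

#define AE4_Q_SZ 0x20

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("queue %d control block at offset 0x%x\n",
		       i, (i + 1) * AE4_Q_SZ);
	/* prints 0x20, 0x40, 0x60 */
	return 0;
}
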
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
new file mode 100644
index 000000000000..aad0dc4294a3
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+ int i, v, ret;
+
+ pdev = to_pci_dev(dev);
+
+ for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+ ae4_msix->msix_entry[v].entry = v;
+
+ ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+ if (ret != v) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ret;
+
+ } else {
+ ae4_msix->msix_count = ret;
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector;
+ }
+
+ return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+ pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+ ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int bar_mask;
+ int ret = 0;
+
+ ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+ if (!ae4)
+ return -ENOMEM;
+
+ ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+ if (!ae4->ae4_msix)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto ae4_error;
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+ if (ret)
+ goto ae4_error;
+
+ pt = &ae4->pt;
+ pt->dev = dev;
+ pt->ver = AE4_DMA_VERSION;
+
+ pt->io_regs = pcim_iomap_table(pdev)[0];
+ if (!pt->io_regs) {
+ ret = -ENOMEM;
+ goto ae4_error;
+ }
+
+ ret = ae4_get_irqs(ae4);
+ if (ret < 0)
+ goto ae4_error;
+
+ pci_set_master(pdev);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+ dev_set_drvdata(dev, ae4);
+
+ ret = ae4_core_init(ae4);
+ if (ret)
+ goto ae4_error;
+
+ return 0;
+
+ae4_error:
+ ae4_deinit(ae4);
+
+ return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+ struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+ ae4_destroy_work(ae4);
+ ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x14C8), },
+ { PCI_VDEVICE(AMD, 0x14DC), },
+ { PCI_VDEVICE(AMD, 0x149B), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+ .name = "ae4dma",
+ .id_table = ae4_pci_table,
+ .probe = ae4_pci_probe,
+ .remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
new file mode 100644
index 000000000000..265c5d436008
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES 16
+
+#define AE4_DESC_COMPLETED 0x03
+
+#define AE4_MAX_IDX_OFF 0x08
+#define AE4_RD_IDX_OFF 0x0c
+#define AE4_WR_IDX_OFF 0x10
+#define AE4_INTR_STS_OFF 0x14
+#define AE4_Q_BASE_L_OFF 0x18
+#define AE4_Q_BASE_H_OFF 0x1c
+#define AE4_Q_SZ 0x20
+
+#define AE4_DMA_VERSION 4
+#define CMD_AE4_DESC_DW0_VAL 2
+
+struct ae4_msix {
+ int msix_count;
+ struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+ struct ae4_device *ae4;
+ struct pt_cmd_queue cmd_q;
+ struct list_head cmd;
+ /* protect command operations */
+ struct mutex cmd_lock;
+ struct delayed_work p_work;
+ struct workqueue_struct *pws;
+ struct completion cmp;
+ wait_queue_head_t q_w;
+ atomic64_t intr_cnt;
+ atomic64_t done_cnt;
+ u64 q_cmd_count;
+ u32 dridx;
+ u32 tail_wi;
+ u32 id;
+};
+
+union dwou {
+ u32 dw0;
+ struct dword0 {
+ u8 byte0;
+ u8 byte1;
+ u16 timestamp;
+ } dws;
+};
+
+struct dword1 {
+ u8 status;
+ u8 err_code;
+ u16 desc_id;
+};
+
+struct ae4dma_desc {
+ union dwou dwouv;
+ struct dword1 dw1;
+ u32 length;
+ u32 rsvd;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dst_hi;
+ u32 dst_lo;
+};
+
+struct ae4_device {
+ struct pt_device pt;
+ struct ae4_msix *ae4_msix;
+ struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+ unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+ unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/amd/ptdma/Makefile
index ce5410268a9a..ce5410268a9a 100644
--- a/drivers/dma/ptdma/Makefile
+++ b/drivers/dma/amd/ptdma/Makefile
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/amd/ptdma/ptdma-debugfs.c
index c8307d3044a3..c7c90bbf6fd8 100644
--- a/drivers/dma/ptdma/ptdma-debugfs.c
+++ b/drivers/dma/amd/ptdma/ptdma-debugfs.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
/* DebugFS helpers */
#define RI_VERSION_NUM 0x0000003F
@@ -23,11 +24,19 @@
static int pt_debugfs_info_show(struct seq_file *s, void *p)
{
struct pt_device *pt = s->private;
+ struct ae4_device *ae4;
unsigned int regval;
seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
- seq_printf(s, " # Queues: %d\n", 1);
- seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
+ seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
+ } else {
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+ }
regval = ioread32(pt->io_regs + CMD_PT_VERSION);
@@ -55,6 +64,7 @@ static int pt_debugfs_stats_show(struct seq_file *s, void *p)
static int pt_debugfs_queue_show(struct seq_file *s, void *p)
{
struct pt_cmd_queue *cmd_q = s->private;
+ struct pt_device *pt;
unsigned int regval;
if (!cmd_q)
@@ -62,18 +72,24 @@ static int pt_debugfs_queue_show(struct seq_file *s, void *p)
seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
- regval = ioread32(cmd_q->reg_control + 0x000C);
-
- seq_puts(s, " Enabled Interrupts:");
- if (regval & INT_EMPTY_QUEUE)
- seq_puts(s, " EMPTY");
- if (regval & INT_QUEUE_STOPPED)
- seq_puts(s, " STOPPED");
- if (regval & INT_ERROR)
- seq_puts(s, " ERROR");
- if (regval & INT_COMPLETION)
- seq_puts(s, " COMPLETION");
- seq_puts(s, "\n");
+ pt = cmd_q->pt;
+ if (pt->ver == AE4_DMA_VERSION) {
+ regval = readl(cmd_q->reg_control + 0x4);
+ seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+ } else {
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+ }
return 0;
}
@@ -84,8 +100,12 @@ DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
void ptdma_debugfs_setup(struct pt_device *pt)
{
- struct pt_cmd_queue *cmd_q;
struct dentry *debugfs_q_instance;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+ char name[30];
+ int i;
if (!debugfs_initialized())
return;
@@ -96,11 +116,28 @@ void ptdma_debugfs_setup(struct pt_device *pt)
debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
&pt_debugfs_stats_fops);
- cmd_q = &pt->cmd_q;
-
- debugfs_q_instance =
- debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
- debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
- &pt_debugfs_queue_fops);
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+ debugfs_q_instance =
+ debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+ } else {
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+ cmd_q = &pt->cmd_q;
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
}
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/amd/ptdma/ptdma-dev.c
index a2bf13ff18b6..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/amd/ptdma/ptdma-dev.c
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index f79240734807..35c84ec9608b 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -9,9 +9,58 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include "ptdma.h"
-#include "../dmaengine.h"
-#include "../virt-dma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+ "",
+ "ERR 01: INVALID HEADER DW0",
+ "ERR 02: INVALID STATUS",
+ "ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+ "ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+ "ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+ "ERR 06: INVALID ALIGNMENT",
+ "ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+	/* ERR 01 - 07 represent Invalid AE4 errors */
+	if (e <= 7)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+	/* ERR 08 - 15 represent Invalid Descriptor errors */
+	else if (e > 7 && e <= 15)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+	/* ERR 16 - 31 represent Firmware errors */
+	else if (e > 15 && e <= 31)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+	/* ERR 32 - 63 represent Fatal errors */
+	else if (e > 31 && e <= 63)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+	/* ERR 64 - 255 represent PTE errors */
+	else if (e > 63 && e <= 255)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+	else
+		dev_info(d->dev, "Unknown AE4DMA error\n");
+}
+
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct ae4dma_desc desc;
+ u8 status;
+
+ memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+ status = desc.dw1.status;
+ if (status && status != AE4_DESC_COMPLETED) {
+ cmd_q->cmd_error = desc.dw1.err_code;
+ if (cmd_q->cmd_error)
+ ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+ }
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
@@ -45,7 +94,71 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
kmem_cache_free(pt->dma_desc_cache, desc);
}
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ cmd_q = &ae4cmd_q->cmd_q;
+ } else {
+ cmd_q = &pt->cmd_q;
+ }
+
+ return cmd_q;
+}
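
pt_get_cmd_queue() leans on container_of() to step from the embedded pt_device back out to the ae4_device that wraps it. A self-contained user-space sketch of that pointer arithmetic (types simplified):

#include <stddef.h>
#include <stdio.h>

struct pt { int ver; };
struct ae4 { int cmd_q_count; struct pt pt; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct ae4 dev = { .cmd_q_count = 2 };
	struct pt *p = &dev.pt;

	printf("queues: %d\n", container_of(p, struct ae4, pt)->cmd_q_count);
	return 0;
}
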
+
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+ if (soc) {
+ desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+ desc->dwouv.dw0 &= ~DWORD0_SOC;
+ }
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+ ae4cmd_q->q_cmd_count++;
+ ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+ writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ struct ae4dma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+ desc.dw1.status = 0;
+ desc.dw1.err_code = 0;
+ desc.dw1.desc_id = 0;
+
+ desc.length = pt_engine->src_len;
+
+ desc.src_lo = upper_32_bits(pt_engine->src_dma);
+ desc.src_hi = lower_32_bits(pt_engine->src_dma);
+ desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+ desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+ return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
{
struct pt_passthru_engine *pt_engine;
struct pt_device *pt;
@@ -56,13 +169,18 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)
pt_cmd = &desc->pt_cmd;
pt = pt_cmd->pt;
- cmd_q = &pt->cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
pt_engine = &pt_cmd->passthru;
pt->tdata.cmd = pt_cmd;
/* Execute the command */
- pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+ else
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
return 0;
}
@@ -151,7 +269,7 @@ static void pt_cmd_callback(void *data, int err)
if (!desc)
break;
- ret = pt_dma_start_desc(desc);
+ ret = pt_dma_start_desc(desc, chan);
if (!ret)
break;
@@ -186,7 +304,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
struct pt_dma_desc *desc;
+ struct ae4_device *ae4;
struct pt_cmd *pt_cmd;
desc = pt_alloc_dma_desc(chan, flags);
@@ -194,7 +315,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
return NULL;
pt_cmd = &desc->pt_cmd;
- pt_cmd->pt = chan->pt;
+ pt_cmd->pt = pt;
pt_engine = &pt_cmd->passthru;
pt_cmd->engine = PT_ENGINE_PASSTHRU;
pt_engine->src_dma = src;
@@ -205,6 +326,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
desc->len = len;
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+
return desc;
}
@@ -258,24 +387,43 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0);
}
+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ int i;
+
+ for (i = 0; i < CMD_Q_LEN; i++)
+ ae4_check_status_error(ae4cmd_q, i);
+}
+
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pt_device *pt = to_pt_chan(c)->pt;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_dma_chan *chan = to_pt_chan(c);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_check_status_trans_ae4(pt, cmd_q);
+ else
+ pt_check_status_trans(pt, cmd_q);
- pt_check_status_trans(pt, cmd_q);
return dma_cookie_status(c, cookie, txstate);
}
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_stop_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_stop_queue(cmd_q);
spin_unlock_irqrestore(&chan->vc.lock, flags);
return 0;
@@ -285,10 +433,13 @@ static int pt_resume(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc = NULL;
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_start_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_start_queue(cmd_q);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -302,11 +453,17 @@ static int pt_resume(struct dma_chan *dma_chan)
static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
- struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head);
- iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_stop_queue(cmd_q);
+ else
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -319,14 +476,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
int pt_dmaengine_register(struct pt_device *pt)
{
- struct pt_dma_chan *chan;
struct dma_device *dma_dev = &pt->dma_dev;
- char *cmd_cache_name;
+ struct ae4_cmd_queue *ae4cmd_q = NULL;
+ struct ae4_device *ae4 = NULL;
+ struct pt_dma_chan *chan;
char *desc_cache_name;
- int ret;
+ char *cmd_cache_name;
+ int ret, i;
+
+ if (pt->ver == AE4_DMA_VERSION)
+ ae4 = container_of(pt, struct ae4_device, pt);
+
+ if (ae4)
+ pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+ sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+ else
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
- pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
- GFP_KERNEL);
if (!pt->pt_dma_chan)
return -ENOMEM;
@@ -368,9 +535,6 @@ int pt_dmaengine_register(struct pt_device *pt)
INIT_LIST_HEAD(&dma_dev->channels);
- chan = pt->pt_dma_chan;
- chan->pt = pt;
-
/* Set base and prep routines */
dma_dev->device_free_chan_resources = pt_free_chan_resources;
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -382,8 +546,21 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_terminate_all = pt_terminate_all;
dma_dev->device_synchronize = pt_synchronize;
- chan->vc.desc_free = pt_do_cleanup;
- vchan_init(&chan->vc, dma_dev);
+ if (ae4) {
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ chan = pt->pt_dma_chan + i;
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ chan->id = ae4cmd_q->id;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+ } else {
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
ret = dma_async_device_register(dma_dev);
if (ret)
@@ -399,6 +576,7 @@ err_cache:
return ret;
}
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);
void pt_dmaengine_unregister(struct pt_device *pt)
{
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/amd/ptdma/ptdma-pci.c
index 22739ff0c3c5..22739ff0c3c5 100644
--- a/drivers/dma/ptdma/ptdma-pci.c
+++ b/drivers/dma/amd/ptdma/ptdma-pci.c
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 39bc37268235..0a7939105e51 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -22,7 +22,7 @@
#include <linux/wait.h>
#include <linux/dmapool.h>
-#include "../virt-dma.h"
+#include "../../virt-dma.h"
#define MAX_PT_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -184,6 +184,7 @@ struct pt_dma_desc {
struct pt_dma_chan {
struct virt_dma_chan vc;
struct pt_device *pt;
+ u32 id;
};
struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
unsigned long total_interrupts;
struct pt_tasklet_data tdata;
+ int ver;
};
/*
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
index 66f00ad67351..8fb2d5e1df20 100644
--- a/drivers/dma/amd/qdma/qdma.c
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -283,16 +283,20 @@ static int qdma_check_queue_status(struct qdma_device *qdev,
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
- enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
- QDMA_CTXT_DESC_HW_H2C,
- QDMA_CTXT_DESC_CR_H2C,
- QDMA_CTXT_PFTCH, };
- enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
- QDMA_CTXT_DESC_HW_C2H,
- QDMA_CTXT_DESC_CR_C2H,
- QDMA_CTXT_PFTCH, };
+ static const enum qdma_ctxt_type h2c_types[] = {
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH,
+ };
+ static const enum qdma_ctxt_type c2h_types[] = {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH,
+ };
struct qdma_device *qdev = queue->qdev;
- enum qdma_ctxt_type *type;
+ const enum qdma_ctxt_type *type;
int ret, num, i;
if (queue->dir == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 7ba52dee40a9..20b10c15c696 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -875,6 +875,27 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
return chan;
}
+static int bcm2835_dma_suspend_late(struct device *dev)
+{
+ struct bcm2835_dmadev *od = dev_get_drvdata(dev);
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ void __iomem *chan_base = c->chan_base;
+
+ /* Check if DMA channel is busy */
+ if (readl(chan_base + BCM2835_DMA_ADDR))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops bcm2835_dma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+};
+
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
@@ -1033,6 +1054,7 @@ static struct platform_driver bcm2835_dma_driver = {
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ .pm = pm_ptr(&bcm2835_dma_pm_ops),
},
};
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b7f15ab96855..443b2430466c 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -480,8 +480,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
+ u32 burst = 0;
u16 csr = 0;
- u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -496,16 +496,30 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
- if (fsl_chan->is_multi_fifo) {
- /* set mloff to support multiple fifo */
- burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_maxburst : cfg->dst_maxburst;
- nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
- /* enable DMLOE/SMLOE */
- if (cfg->direction == DMA_MEM_TO_DEV) {
+	/*
+	 * If either multi_fifo or a port window size is set, use the minor
+	 * loop offset: bits 29-10 carry the address offset, while bits 9-0
+	 * tell the DMA how much data to read from addr.
+	 * Without either of those, use a major loop reading nbytes (29 bits)
+	 * from addr.
+	 */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->dst_maxburst * 4;
+ if (cfg->dst_port_window_size)
+ burst = cfg->dst_port_window_size * cfg->dst_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
- } else {
+ }
+ } else {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->src_maxburst * 4;
+ if (cfg->src_port_window_size)
+ burst = cfg->src_port_window_size * cfg->src_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
}
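
A worked example of the burst math above, with illustrative values: a port window of 4 accesses at 4 bytes each gives a 16-byte burst, and the minor-loop offset rewinds the address by that amount (-16) after each minor loop:

#include <stdio.h>

int main(void)
{
	unsigned int window = 4, width = 4;	/* port_window_size, addr_width */
	unsigned int burst = window * width;

	printf("burst=%u mloff=%d\n", burst, -(int)burst); /* burst=16 mloff=-16 */
	return 0;
}
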
@@ -623,11 +637,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ if (fsl_chan->cfg.dst_port_window_size)
+ doff = fsl_chan->cfg.dst_addr_width;
} else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ if (fsl_chan->cfg.src_port_window_size)
+ soff = fsl_chan->cfg.src_addr_width;
} else {
/* DMA_DEV_TO_DEV */
src_addr = fsl_chan->cfg.src_addr;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index fe8f103d4a63..10a5565ddfd7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -68,6 +68,8 @@
#define EDMA_V3_CH_CSR_EEI BIT(2)
#define EDMA_V3_CH_CSR_DONE BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+#define EDMA_V3_CH_ES_ERR BIT(31)
+#define EDMA_V3_MP_ES_VLD BIT(31)
enum fsl_edma_pm_state {
RUNNING = 0,
@@ -241,6 +243,7 @@ struct fsl_edma_engine {
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
int txirq;
+ int txirq_16_31;
int errirq;
bool big_endian;
struct edma_regs regs;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 1a613236b3e4..f989b6c9c0a9 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -3,10 +3,11 @@
* drivers/dma/fsl-edma.c
*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2024 NXP
*
* Driver for the Freescale eDMA engine with flexible channel multiplexing
* capability for DMA request sources. The eDMA block can be found on some
- * Vybrid and Layerscape SoCs.
+ * Vybrid, Layerscape and S32G SoCs.
*/
#include <dt-bindings/dma/fsl-edma.h>
@@ -72,6 +73,60 @@ static irqreturn_t fsl_edma2_tx_handler(int irq, void *devi_id)
return fsl_edma_tx_handler(irq, fsl_chan->edma);
}
+static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
+ u8 start, u8 end)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct fsl_edma_chan *chan;
+ int i;
+
+ end = min(end, fsl_edma->n_chans);
+
+ for (i = start; i < end; i++) {
+ chan = &fsl_edma->chans[i];
+
+ fsl_edma3_tx_handler(irq, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
+}
+
+static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
+}
+
+static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct edma_regs *regs = &fsl_edma->regs;
+ unsigned int err, ch, ch_es;
+ struct fsl_edma_chan *chan;
+
+ err = edma_readl(fsl_edma, regs->es);
+ if (!(err & EDMA_V3_MP_ES_VLD))
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ chan = &fsl_edma->chans[ch];
+
+ ch_es = edma_readl_chreg(chan, ch_es);
+ if (!(ch_es & EDMA_V3_CH_ES_ERR))
+ continue;
+
+ edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
+ fsl_edma_disable_request(chan);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+
+ return IRQ_HANDLED;
+}
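
The error handler clears each latched channel error by writing the error bit back to the channel status register, the usual write-1-to-clear handshake. What that store achieves, in isolation:

#include <stdint.h>
#include <stdio.h>

#define CH_ES_ERR (1u << 31)	/* mirrors EDMA_V3_CH_ES_ERR */

int main(void)
{
	uint32_t ch_es = CH_ES_ERR | 0x5;	/* error latched by hardware */

	ch_es &= ~CH_ES_ERR;			/* effect of the W1C write */
	printf("ch_es after clear: 0x%x\n", ch_es);
	return 0;
}
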
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -274,6 +329,49 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return 0;
}
+static int fsl_edma3_or_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
+ if (fsl_edma->txirq < 0)
+ return fsl_edma->txirq;
+
+ fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
+ if (fsl_edma->txirq_16_31 < 0)
+ return fsl_edma->txirq_16_31;
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
+ if (fsl_edma->errirq < 0)
+ return fsl_edma->errirq;
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx0_15 IRQ.\n");
+
+ if (fsl_edma->n_chans > 16) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
+ fsl_edma3_tx_16_31_handler, 0,
+ "eDMA tx16_31", fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx16_31 IRQ.\n");
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma3_or_err_handler, 0, "eDMA err",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA err IRQ.\n");
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -404,6 +502,14 @@ static struct fsl_edma_drvdata imx95_data5 = {
.setup_irq = fsl_edma3_irq_init,
};
+static const struct fsl_edma_drvdata s32g2_data = {
+ .dmamuxs = DMAMUX_NR,
+ .chreg_space_sz = EDMA_TCD,
+ .chreg_off = 0x4000,
+ .flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
+ .setup_irq = fsl_edma3_or_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
@@ -413,6 +519,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
+ { .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -545,10 +652,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- /* eDMAv3 mux register move to TCD area if ch_mux exist */
- if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
- break;
-
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -677,7 +780,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
ret = of_dma_controller_register(np,
- drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 57f1bf2ab20b..ff94ee892339 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
* global to avoid conflict file names.
*/
static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);
/*
* ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
struct idxd_device *idxd = wq->idxd;
int rc;
- mutex_lock(&ida_lock);
ida_free(&file_ida, ctx->id);
- mutex_unlock(&ida_lock);
/* Wait for in-flight operations to complete. */
if (wq_shared(wq)) {
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
}
idxd_cdev = wq->idxd_cdev;
- mutex_lock(&ida_lock);
ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
- mutex_unlock(&ida_lock);
if (ctx->id < 0) {
dev_warn(dev, "ida alloc failure\n");
goto failed_ida;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d84e21daa991..214b8039439f 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -374,6 +374,17 @@ struct idxd_device {
struct dentry *dbgfs_evl_file;
bool user_submission_safe;
+
+ struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+ struct idxd_device saved_idxd;
+ struct idxd_evl saved_evl;
+ struct idxd_engine **saved_engines;
+ struct idxd_wq **saved_wqs;
+ struct idxd_group **saved_groups;
+ unsigned long *saved_wq_enable_map;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +736,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
&desc->txd, &status);
}
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +751,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 140f8d772bee..b946f78f85e1 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -78,6 +78,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA on DMR platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA PTL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -723,67 +725,464 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_disable_sva(idxd->pdev);
}
-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
{
- struct device *dev = &pdev->dev;
- struct idxd_device *idxd;
- struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev)
+ err = device_driver_attach(drv, dev);
+
+ put_device(dev);
+
+ return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver == drv)
+ device_release_driver(dev);
+
+ put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count) \
+ do { \
+ int i; \
+ \
+ for (i = 0; i < (count); i++) \
+ kfree(saved_configs[i]); \
+ } while (0)
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+ struct idxd_engine **saved_engines,
+ struct idxd_wq **saved_wqs,
+ struct idxd_device *idxd)
+{
+ if (saved_groups)
+ idxd_free_saved_configs(saved_groups, idxd->max_groups);
+ if (saved_engines)
+ idxd_free_saved_configs(saved_engines, idxd->max_engines);
+ if (saved_wqs)
+ idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+ if (idxd->evl) {
+ memcpy(&idxd_saved->saved_evl, idxd->evl,
+ sizeof(struct idxd_evl));
+ }
+
+ struct idxd_group **saved_groups __free(kfree) =
+ kcalloc_node(idxd->max_groups,
+ sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group __free(kfree) =
+ kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+ dev_to_node(dev));
+
+ if (!saved_group) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+ saved_groups[i] = no_free_ptr(saved_group);
+ }
+
+ struct idxd_engine **saved_engines __free(kfree) =
+ kcalloc_node(idxd->max_engines,
+ sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_engines) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine __free(kfree) =
+ kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_engine) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+ saved_engines[i] = no_free_ptr(saved_engine);
+ }
+
+ unsigned long *saved_wq_enable_map __free(bitmap) =
+ bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_wq_enable_map) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+ struct idxd_wq **saved_wqs __free(kfree) =
+ kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_wqs) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq __free(kfree) =
+ kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+ dev_to_node(dev));
+ struct idxd_wq *wq;
+
+ if (!saved_wq) {
+ /* Free saved groups, engines, and wqs */
+ idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ if (!test_bit(i, saved_wq_enable_map))
+ continue;
+
+ wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
+ memcpy(saved_wq, wq, sizeof(*saved_wq));
+ saved_wqs[i] = no_free_ptr(saved_wq);
+ mutex_unlock(&wq->wq_lock);
+ }
+
+ /* Save configurations */
+ idxd_saved->saved_groups = no_free_ptr(saved_groups);
+ idxd_saved->saved_engines = no_free_ptr(saved_engines);
+ idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+ idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+ return 0;
+}
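The save path above leans on the kernel's scope-based cleanup helpers from <linux/cleanup.h>; a minimal sketch of that pattern, using generic names that are not part of this patch:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int build_table(size_t n, int **out)
	{
		/* freed automatically on every return, unless ownership moves */
		int *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* ... fill buf; any early return here still frees it ... */

		*out = no_free_ptr(buf);	/* transfer ownership, skip the kfree */
		return 0;
	}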
+
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+ int i;
+
+ idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+ if (saved_evl)
+ idxd->evl->size = saved_evl->size;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group, *group;
+
+ saved_group = idxd_saved->saved_groups[i];
+ group = idxd->groups[i];
+
+ group->rdbufs_allowed = saved_group->rdbufs_allowed;
+ group->rdbufs_reserved = saved_group->rdbufs_reserved;
+ group->tc_a = saved_group->tc_a;
+ group->tc_b = saved_group->tc_b;
+ group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+ kfree(saved_group);
+ }
+ kfree(idxd_saved->saved_groups);
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine, *engine;
+
+ saved_engine = idxd_saved->saved_engines[i];
+ engine = idxd->engines[i];
+
+ engine->group = saved_engine->group;
+
+ kfree(saved_engine);
+ }
+ kfree(idxd_saved->saved_engines);
+
+ bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+ idxd->max_wqs);
+ bitmap_free(idxd_saved->saved_wq_enable_map);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq, *wq;
+ size_t len;
+
+ if (!test_bit(i, idxd->wq_enable_map))
+ continue;
+
+ saved_wq = idxd_saved->saved_wqs[i];
+ wq = idxd->wqs[i];
+
+ mutex_lock(&wq->wq_lock);
+
+ wq->group = saved_wq->group;
+ wq->flags = saved_wq->flags;
+ wq->threshold = saved_wq->threshold;
+ wq->size = saved_wq->size;
+ wq->priority = saved_wq->priority;
+ wq->type = saved_wq->type;
+ len = strlen(saved_wq->name) + 1;
+ strscpy(wq->name, saved_wq->name, len);
+ wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+ wq->max_batch_size = saved_wq->max_batch_size;
+ wq->enqcmds_retries = saved_wq->enqcmds_retries;
+ wq->descs = saved_wq->descs;
+ wq->idxd_chan = saved_wq->idxd_chan;
+ len = strlen(saved_wq->driver_name) + 1;
+ strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+ mutex_unlock(&wq->wq_lock);
+
+ kfree(saved_wq);
+ }
+
+ kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct device *dev = &idxd->pdev->dev;
+ const char *idxd_name;
int rc;
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
+ idxd_name = dev_name(idxd_confdev(idxd));
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, data);
- if (!idxd) {
- rc = -ENOMEM;
- goto err_idxd_alloc;
+ struct idxd_saved_states *idxd_saved __free(kfree) =
+ kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!idxd_saved) {
+ dev_err(dev, "HALT: no memory\n");
+
+ return;
}
- dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base) {
- rc = -ENOMEM;
- goto err_iomap;
+ /* Save IDXD configurations. */
+ rc = idxd_device_config_save(idxd, idxd_saved);
+ if (rc < 0) {
+ dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+ return;
+ }
+
+ idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+ /* Save PCI device state. */
+ pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ const char *idxd_name;
+ struct device *dev;
+ int rc, i;
+
+ if (!idxd->idxd_saved)
+ return;
+
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
+
+ /* Restore PCI device state. */
+ pci_restore_state(idxd->pdev);
+
+ /* Unbind idxd device from driver. */
+ idxd_unbind(&idxd_drv.drv, idxd_name);
+
+ /*
+ * Probe the PCI device without allocating or changing the idxd
+ * software data, which is kept the same as before the FLR.
+ */
+ idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+ /* Restore IDXD configurations. */
+ idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+ /* Re-configure IDXD device if allowed. */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ dev_err(dev, "HALT: %s config fails\n", idxd_name);
+ goto out;
+ }
+ }
+
+ /* Bind IDXD device to driver. */
+ rc = idxd_bind(&idxd_drv.drv, idxd_name);
+ if (rc < 0) {
+ dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+ goto out;
}
- dev_dbg(dev, "Set DMA masks\n");
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ /* Bind enabled wq in the IDXD device to driver. */
+ for (i = 0; i < idxd->max_wqs; i++) {
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ char wq_name[32];
+
+ wq->state = IDXD_WQ_DISABLED;
+ sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+ /*
+ * Bind to user driver depending on wq type.
+ *
+ * Currently only user type WQs are supported; kernel type
+ * WQs will be supported in the future.
+ */
+ if (wq->type == IDXD_WQT_USER)
+ rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+ else
+ rc = -EINVAL;
+ if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
+ dev_err(dev,
+ "HALT: unable to re-enable wq %s\n",
+ dev_name(wq_confdev(wq)));
+ }
+ }
+ }
+out:
+ kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+ .reset_prepare = idxd_reset_prepare,
+ .reset_done = idxd_reset_done,
+};
+
+/*
+ * Probe the idxd PCI device.
+ * If idxd is not given, allocate idxd and set up its data.
+ *
+ * If idxd is given, it was already allocated and set up. Just configure
+ * the device without re-allocating or re-configuring the idxd data.
+ * This is useful for recovering from an FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ bool alloc_idxd = !idxd;
+ struct idxd_driver_data *data;
+ struct device *dev;
+ int rc;
+
+ pdev = idxd ? idxd->pdev : pdev;
+ dev = &pdev->dev;
+ data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+ rc = pci_enable_device(pdev);
if (rc)
- goto err;
+ return rc;
+
+ if (alloc_idxd) {
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
+
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ goto err;
+ }
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
- idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
- rc = idxd_probe(idxd);
- if (rc) {
- dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- goto err;
- }
+ if (alloc_idxd) {
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ goto err;
+ }
+
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
+ rc = idxd_register_devices(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ goto err_dev_register;
+ }
- if (data->load_device_defaults) {
- rc = data->load_device_defaults(idxd);
+ rc = idxd_device_init_debugfs(idxd);
if (rc)
- dev_warn(dev, "IDXD loading device defaults failed\n");
+ dev_warn(dev, "IDXD debugfs failed to setup\n");
}
- rc = idxd_register_devices(idxd);
- if (rc) {
- dev_err(dev, "IDXD sysfs setup failed\n");
- goto err_dev_register;
- }
+ if (!alloc_idxd) {
+ /* Release interrupts in the IDXD device. */
+ idxd_cleanup_interrupts(idxd);
- rc = idxd_device_init_debugfs(idxd);
- if (rc)
- dev_warn(dev, "IDXD debugfs failed to setup\n");
+ /* Re-enable interrupts in the IDXD device. */
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD interrupts failed to setup\n");
+ }
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
- idxd->user_submission_safe = data->user_submission_safe;
+ if (data)
+ idxd->user_submission_safe = data->user_submission_safe;
return 0;
@@ -798,6 +1197,11 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return idxd_pci_probe_alloc(NULL, pdev, id);
+}
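For reference, the two invocation modes of the shared helper as wired up in this patch — the PCI core path allocates everything, while the FLR recovery path in idxd_reset_done() reuses the existing software state:

	/* normal probe: allocate idxd, map BARs, register devices */
	rc = idxd_pci_probe_alloc(NULL, pdev, id);

	/* FLR recovery: keep existing idxd data, only re-init the hardware */
	rc = idxd_pci_probe_alloc(idxd, NULL, NULL);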
+
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -864,6 +1268,7 @@ static struct pci_driver idxd_pci_driver = {
.probe = idxd_pci_probe,
.remove = idxd_remove,
.shutdown = idxd_shutdown,
+ .err_handler = &idxd_error_handler,
};
static int __init idxd_init_module(void)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index fc049c9c9892..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -383,15 +383,65 @@ static void process_evl_entries(struct idxd_device *idxd)
mutex_unlock(&evl->lock);
}
+static void idxd_device_flr(struct work_struct *work)
+{
+ struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+ int rc;
+
+ /*
+ * IDXD device requires a Function Level Reset (FLR).
+ * pci_reset_function() will reset the device with FLR.
+ */
+ rc = pci_reset_function(idxd->pdev);
+ if (rc)
+ dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
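A sketch of the FLR round trip this work item kicks off, assuming the PCI core invokes the error handlers registered further down in this patch:

	/*
	 * pci_reset_function(pdev)
	 *   -> idxd_reset_prepare()  save device configs + PCI config space
	 *   -> PCI core performs the Function Level Reset
	 *   -> idxd_reset_done()     restore state, re-probe, re-bind wqs
	 */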
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ idxd->state = IDXD_DEV_HALTED;
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ /*
+ * If we need a software reset, we will throw the work
+ * on a system workqueue in order to allow interrupts
+ * for the device command completions.
+ */
+ INIT_WORK(&idxd->work, idxd_device_reinit);
+ queue_work(idxd->wq, &idxd->work);
+ } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+ idxd_mask_error_interrupts(idxd);
+ dev_dbg(&idxd->pdev->dev,
+ "idxd halted, doing FLR. After FLR, configs are restored\n");
+ INIT_WORK(&idxd->work, idxd_device_flr);
+ queue_work(idxd->wq, &idxd->work);
+ } else {
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
+ idxd_device_clear_state(idxd);
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need system reset");
+
+ return -ENXIO;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = ie_to_idxd(irq_entry);
struct device *dev = &idxd->pdev->dev;
- union gensts_reg gensts;
u32 val = 0;
int i;
- bool err = false;
u32 cause;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_HALT_STATE)
- goto halt;
+ return idxd_halt(idxd);
if (cause & IDXD_INTC_ERR) {
spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
for (i = 0; i < 4; i++)
dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
i, idxd->sw_err.bits[i]);
- err = true;
}
if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- if (!err)
- goto out;
-
-halt:
- gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
- if (gensts.state == IDXD_DEVICE_STATE_HALT) {
- idxd->state = IDXD_DEV_HALTED;
- if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
- /*
- * If we need a software reset, we will throw the work
- * on a system workqueue in order to allow interrupts
- * for the device command completions.
- */
- INIT_WORK(&idxd->work, idxd_device_reinit);
- queue_work(idxd->wq, &idxd->work);
- } else {
- idxd->state = IDXD_DEV_HALTED;
- idxd_wqs_quiesce(idxd);
- idxd_wqs_unmap_portal(idxd);
- idxd_device_clear_state(idxd);
- dev_err(&idxd->pdev->dev,
- "idxd halted, need %s.\n",
- gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
- "FLR" : "system reset");
- }
- }
-
-out:
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c426511f2104..006ba206ab1b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index f706eae0e76b..6af493f6ba77 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1979,13 +1979,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(group_confdev(group));
}
}
-
-int idxd_register_bus_type(void)
-{
- return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
- bus_unregister(&dsa_bus_type);
-}
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 17f6b6367113..c9aba2304de7 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,6 +10,8 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
+#include <asm/cpuid.h>
+
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
@@ -58,11 +60,11 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
{
/* CPUID level 9 returns DCA configuration */
/* Bit 0 indicates DCA enabled by the BIOS */
- unsigned long cpuid_level_9;
+ u32 eax;
int res;
- cpuid_level_9 = cpuid_eax(9);
- res = test_bit(0, &cpuid_level_9);
+ eax = cpuid_eax(CPUID_LEAF_DCA);
+ res = eax & BIT(0);
if (!res)
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 40b76b40bc30..fa6e4646fdc2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1369,10 +1369,9 @@ static int mv_xor_probe(struct platform_device *pdev)
return 0;
if (pdev->dev.of_node) {
- struct device_node *np;
int i = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
@@ -1388,7 +1387,6 @@ static int mv_xor_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENODEV;
- of_node_put(np);
goto err_channel_add;
}
@@ -1397,7 +1395,6 @@ static int mv_xor_probe(struct platform_device *pdev)
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
- of_node_put(np);
goto err_channel_add;
}
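The _scoped iterator releases the child reference on every exit path, which is what lets the explicit of_node_put() calls above disappear. A minimal sketch, where some_setup() is a hypothetical helper:

	for_each_child_of_node_scoped(pdev->dev.of_node, np) {
		if (!of_device_is_available(np))
			continue;		/* 'np' reference dropped here */
		if (some_setup(np) < 0)
			return -ENODEV;		/* and here, automatically */
	}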
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
deleted file mode 100644
index b430edd709f9..000000000000
--- a/drivers/dma/ptdma/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config AMD_PTDMA
- tristate "AMD PassThru DMA Engine"
- depends on X86_64 && PCI
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the AMD PTDMA controller. This controller
- provides DMA capabilities to perform high bandwidth memory to
- memory and IO copy operations. It performs DMA transfer through
- queue-based descriptor management. This DMA controller is intended
- to be used with AMD Non-Transparent Bridge devices and not for
- general purpose peripheral DMA.
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 52a7c8f2498f..b1f0001cc99c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -18,6 +18,7 @@
#include "../virt-dma.h"
#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_IMMEDIATE_DMA 0x11
#define TRE_TYPE_GO 0x20
#define TRE_TYPE_CONFIG0 0x22
@@ -64,6 +65,7 @@
/* DMA TRE */
#define TRE_DMA_LEN GENMASK(23, 0)
+#define TRE_DMA_IMMEDIATE_LEN GENMASK(3, 0)
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
@@ -1711,6 +1713,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
dma_addr_t address;
struct gpi_tre *tre;
unsigned int i;
+ int len;
/* first create config tre if applicable */
if (direction == DMA_MEM_TO_DEV && spi->set_config) {
@@ -1763,14 +1766,30 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre_idx++;
address = sg_dma_address(sgl);
- tre->dword[0] = lower_32_bits(address);
- tre->dword[1] = upper_32_bits(address);
+ len = sg_dma_len(sgl);
- tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+ /* Use immediate DMA for write transfers with data lengths up to 8 bytes */
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ /*
+ * For immediate DMA the data length may be less than 8 bytes,
+ * so initialize both dwords with 0.
+ */
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
- tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
- if (direction == DMA_MEM_TO_DEV)
- tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+
+ tre->dword[3] |= u32_encode_bits(direction == DMA_MEM_TO_DEV,
+ TRE_FLAGS_IEOT);
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
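A worked example of the immediate-DMA packing above, assuming a hypothetical 6-byte payload on a little-endian CPU; the bytes land directly in the TRE instead of being referenced by address:

	/* u8 buf[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }; len = 6 */
	tre->dword[0] = 0;
	tre->dword[1] = 0;
	memcpy(&tre->dword[0], buf, 6);	/* dword[0] = 0x44332211, dword[1] = 0x00006655 */
	tre->dword[2] = u32_encode_bits(6, TRE_DMA_IMMEDIATE_LEN);
	tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE) |
			u32_encode_bits(1, TRE_FLAGS_IEOT);	/* MEM_TO_DEV */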
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2679c1f09faf..0c45ce8c74aa 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -2023,6 +2023,10 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-gen4-dmac",
.data = &rcar_gen4_dmac_data,
}, {
+ /*
+ * Backward compatibility for kernels v5.12 - v5.19,
+ * which didn't combine this with "renesas,rcar-gen4-dmac"
+ */
.compatible = "renesas,dmac-r8a779a0",
.data = &rcar_gen4_dmac_data,
},
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index f37cdf6f2179..24796aaaddfa 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -13,7 +13,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -31,12 +33,21 @@
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)
+#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 24)
+#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 8)
+
+#define SUN4I_MAX_BURST 8
+#define SUNIV_MAX_BURST 4
+
/** Normal DMA register values **/
/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_NDMA_DRQ_TYPE_SDRAM 0x11
+#define SUNIV_NDMA_DRQ_TYPE_LIMIT (0x17 + 1)
+
/** Normal DMA register layout **/
/* Dedicated DMA source/destination address mode values */
@@ -50,6 +61,9 @@
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)
+#define SUNIV_NDMA_CFG_CONT_MODE BIT(29)
+#define SUNIV_NDMA_CFG_WAIT_STATE(n) ((n) << 26)
+
/** Dedicated DMA register values **/
/* Dedicated DMA source/destination address mode values */
@@ -62,6 +76,9 @@
#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_DDMA_DRQ_TYPE_SDRAM 0x1
+#define SUNIV_DDMA_DRQ_TYPE_LIMIT (0x9 + 1)
+
/** Dedicated DMA register layout **/
/* Dedicated DMA configuration register layout */
@@ -115,6 +132,11 @@
#define SUN4I_DMA_NR_MAX_VCHANS \
(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+#define SUNIV_NDMA_NR_MAX_CHANNELS 4
+#define SUNIV_DDMA_NR_MAX_CHANNELS 4
+#define SUNIV_NDMA_NR_MAX_VCHANS (24 * 2 - 1)
+#define SUNIV_DDMA_NR_MAX_VCHANS 10
+
/* This set of SUN4I_DDMA timing parameters were found experimentally while
* working with the SPI driver and seem to make it behave correctly */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
@@ -132,6 +154,33 @@
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun4i_dma_config {
+ u32 ndma_nr_max_channels;
+ u32 ndma_nr_max_vchans;
+
+ u32 ddma_nr_max_channels;
+ u32 ddma_nr_max_vchans;
+
+ u32 dma_nr_max_channels;
+
+ void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
+ void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
+ int (*convert_burst)(u32 maxburst);
+
+ u8 ndma_drq_sdram;
+ u8 ddma_drq_sdram;
+
+ u8 max_burst;
+ bool has_reset;
+};
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -170,7 +219,7 @@ struct sun4i_dma_contract {
};
struct sun4i_dma_dev {
- DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+ unsigned long *pchans_used;
struct dma_device slave;
struct sun4i_dma_pchan *pchans;
struct sun4i_dma_vchan *vchans;
@@ -178,6 +227,8 @@ struct sun4i_dma_dev {
struct clk *clk;
int irq;
spinlock_t lock;
+ const struct sun4i_dma_config *cfg;
+ struct reset_control *rst;
};
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
@@ -200,7 +251,27 @@ static struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static int convert_burst(u32 maxburst)
+static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static int convert_burst_a10(u32 maxburst)
{
if (maxburst > 8)
return -EINVAL;
@@ -209,6 +280,15 @@ static int convert_burst(u32 maxburst)
return (maxburst >> 2);
}
+static int convert_burst_f1c100s(u32 maxburst)
+{
+ if (maxburst > 4)
+ return -EINVAL;
+
+ /* 1 -> 0, 4 -> 1 */
+ return (maxburst >> 2);
+}
+
static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
@@ -233,15 +313,15 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
int i, max;
/*
- * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
- * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+ * pchans 0-priv->cfg->ndma_nr_max_channels are normal, and
+ * priv->cfg->ndma_nr_max_channels+ are dedicated ones
*/
if (vchan->is_dedicated) {
- i = SUN4I_NDMA_NR_MAX_CHANNELS;
- max = SUN4I_DMA_NR_MAX_CHANNELS;
+ i = priv->cfg->ndma_nr_max_channels;
+ max = priv->cfg->dma_nr_max_channels;
} else {
i = 0;
- max = SUN4I_NDMA_NR_MAX_CHANNELS;
+ max = priv->cfg->ndma_nr_max_channels;
}
spin_lock_irqsave(&priv->lock, flags);
@@ -444,6 +524,7 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig,
enum dma_transfer_direction direction)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -467,13 +548,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
sconfig->src_addr_width, sconfig->dst_addr_width);
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -482,13 +563,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -510,6 +591,7 @@ static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -524,13 +606,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -539,13 +621,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -622,6 +704,7 @@ static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -638,8 +721,8 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
*/
sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- sconfig->src_maxburst = 8;
- sconfig->dst_maxburst = 8;
+ sconfig->src_maxburst = priv->cfg->max_burst;
+ sconfig->dst_maxburst = priv->cfg->max_burst;
if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, src, dest, len, sconfig);
@@ -654,11 +737,13 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
/* Configure memcpy mode */
if (vchan->is_dedicated) {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
} else {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
}
/* Fill the contract with our only promise */
@@ -673,6 +758,7 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -696,11 +782,11 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -793,6 +879,7 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -818,11 +905,11 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV)
@@ -1150,6 +1237,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->cfg = of_device_get_match_data(&pdev->dev);
+ if (!priv->cfg)
+ return -ENODEV;
+
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1164,6 +1255,13 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
+ if (priv->cfg->has_reset) {
+ priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
+ "Failed to get reset control\n");
+ }
+
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
@@ -1197,23 +1295,26 @@ static int sun4i_dma_probe(struct platform_device *pdev)
priv->slave.dev = &pdev->dev;
- priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+ priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
- if (!priv->vchans || !priv->pchans)
+ priv->pchans_used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!priv->vchans || !priv->pchans || !priv->pchans_used)
return -ENOMEM;
/*
- * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
- * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+ * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
+ * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
* dedicated ones
*/
- for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+ for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
priv->pchans[i].base = priv->base +
SUN4I_NDMA_CHANNEL_REG_BASE(i);
- for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+ for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
priv->pchans[i].base = priv->base +
SUN4I_DDMA_CHANNEL_REG_BASE(j);
priv->pchans[i].is_dedicated = 1;
@@ -1284,8 +1385,51 @@ static void sun4i_dma_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
}
+static struct sun4i_dma_config sun4i_a10_dma_cfg = {
+ .ndma_nr_max_channels = SUN4I_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUN4I_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUN4I_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUN4I_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUN4I_DMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_a10,
+ .set_src_data_width = set_src_data_width_a10,
+ .convert_burst = convert_burst_a10,
+
+ .ndma_drq_sdram = SUN4I_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUN4I_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUN4I_MAX_BURST,
+ .has_reset = false,
+};
+
+static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
+ .ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUNIV_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUNIV_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS +
+ SUNIV_DDMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_f1c100s,
+ .set_src_data_width = set_src_data_width_f1c100s,
+ .convert_burst = convert_burst_f1c100s,
+
+ .ndma_drq_sdram = SUNIV_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUNIV_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUNIV_MAX_BURST,
+ .has_reset = true,
+};
+
static const struct of_device_id sun4i_dma_match[] = {
- { .compatible = "allwinner,sun4i-a10-dma" },
+ { .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
+ { .compatible = "allwinner,suniv-f1c100s-dma",
+ .data = &suniv_f1c100s_dma_cfg },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2953008d42ef..ce80ac4b1a1b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -43,6 +43,10 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
+#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
+#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
@@ -79,7 +83,9 @@ struct tegra_adma;
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
+ * @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
+ * @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@@ -95,7 +101,9 @@ struct tegra_adma_chip_data {
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
+ unsigned int max_page;
bool has_outstanding_reqs;
+ void (*set_global_pg_config)(struct tegra_adma *tdma);
};
/*
@@ -151,6 +159,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ void __iomem *ch_base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long *dma_chan_mask;
@@ -159,6 +168,7 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ unsigned int ch_page_no;
const struct tegra_adma_chip_data *cdata;
@@ -176,6 +186,11 @@ static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
+static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
+}
+
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
@@ -217,13 +232,30 @@ static int tegra_adma_slave_config(struct dma_chan *dc,
return 0;
}
+static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
+{
+ /*
+ * Clear the default page 1 channel group configs and program
+ * the global registers based on the actual page usage.
+ */
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
int ret;
- /* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
+ /* Clear any channel group global interrupts */
+ tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);
+
+ if (!tdma->base_addr)
+ return 0;
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
@@ -237,6 +269,9 @@ static int tegra_adma_init(struct tegra_adma *tdma)
if (ret)
return ret;
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+
/* Enable global ADMA registers */
tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
@@ -736,7 +771,9 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
struct tegra_adma_chan *tdc;
int i;
- tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (tdma->base_addr)
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
if (!tdma->global_cmd)
goto clk_disable;
@@ -777,7 +814,11 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
}
- tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->base_addr) {
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+ }
if (!tdma->global_cmd)
return 0;
@@ -816,7 +857,9 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
+ .max_page = 0,
.has_outstanding_reqs = false,
+ .set_global_pg_config = NULL,
};
static const struct tegra_adma_chip_data tegra186_chip_data = {
@@ -832,7 +875,9 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
+ .max_page = 4,
.has_outstanding_reqs = true,
+ .set_global_pg_config = tegra186_adma_global_page_config,
};
static const struct of_device_id tegra_adma_of_match[] = {
@@ -846,6 +891,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
+ struct resource *res_page, *res_base;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -865,9 +911,46 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tdma->base_addr))
- return PTR_ERR(tdma->base_addr);
+ res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
+ if (res_page) {
+ tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
+ if (IS_ERR(tdma->ch_base_addr))
+ return PTR_ERR(tdma->ch_base_addr);
+
+ res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
+ if (res_base) {
+ resource_size_t page_offset, page_no;
+ unsigned int ch_base_offset;
+
+ if (res_page->start < res_base->start)
+ return -EINVAL;
+ page_offset = res_page->start - res_base->start;
+ ch_base_offset = cdata->ch_base_offset;
+ if (!ch_base_offset)
+ return -EINVAL;
+
+ page_no = div_u64(page_offset, ch_base_offset);
+ if (!page_no || page_no > INT_MAX)
+ return -EINVAL;
+
+ tdma->ch_page_no = page_no - 1;
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ }
+ } else {
+ /* If no 'page' entry is found, the reg DT binding is the legacy one */
+ res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_base) {
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ } else {
+ return -ENODEV;
+ }
+
+ tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
+ }
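A worked example of the page arithmetic above, using made-up register addresses:

	/*
	 * "global" at 0x2930000, "page" at 0x2950000, ch_base_offset 0x10000
	 * (all hypothetical):
	 *   page_offset = 0x2950000 - 0x2930000 = 0x20000
	 *   page_no     = 0x20000 / 0x10000     = 2
	 *   ch_page_no  = 2 - 1                 = 1
	 * so tegra186_adma_global_page_config() programs the
	 * TEGRA186_ADMA_GLOBAL_PAGE_* registers at offset + 1 * 0x4.
	 */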
tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
if (IS_ERR(tdma->ahub_clk)) {
@@ -900,8 +983,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (!test_bit(i, tdma->dma_chan_mask))
continue;
- tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
- + (cdata->ch_reg_size * i);
+ tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 343e986e66e7..4ece125b2ae7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -208,7 +208,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2460,19 +2459,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
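After the fix, the loop follows the usual phandle-iteration contract: every successful of_parse_phandle_with_fixed_args() takes a reference on args.np that the caller must drop. A minimal sketch of the pattern:

	struct of_phandle_args args;
	int i;

	for (i = 0; i < num; i++) {
		if (of_parse_phandle_with_fixed_args(node, "ti,tptcs", 1, i, &args))
			break;
		/* ... consume args.args[0] ... */
		of_node_put(args.np);	/* drop the reference the parser took */
	}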
/* See if we have optional dma-channel-mask array */
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b3f27b3f9209..7ed1956b4642 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4404,6 +4404,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4435,6 +4447,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, udma_of_match);
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 93772abc3b49..0d88b1a670e1 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -390,15 +390,11 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
*/
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
- int ret;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
- ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
- CHAN_CTRL_RUN_STOP);
- if (ret)
- return ret;
- return ret;
+ return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
}
/**
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 1bdd57de87a6..108a7287f4cd 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1404,16 +1404,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- j = chan->desc_submitcount;
- reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
- if (chan->direction == DMA_MEM_TO_DEV) {
- reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
- } else {
- reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ if (config->park) {
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ }
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
}
- dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 32019dc33cca..1877201d1aa9 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -505,7 +505,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err_xa_alloc;
return pin;
err_xa_alloc:
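The one-character change matters because xa_alloc_cyclic() returns 1, not 0, when the cyclic counter wraps around — still a successful allocation. Treating any non-zero return as failure would wrongly unwind on wrap. The intended check, sketched with generic names:

	ret = xa_alloc_cyclic(&xa, &id, entry, xa_limit_32b, &next, GFP_KERNEL);
	if (ret < 0)	/* 0 = allocated, 1 = allocated after a wrap */
		goto err;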
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 06f7b43a6f78..19ad3c3b675d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,9 +75,38 @@ config EDAC_GHES
In doubt, say 'Y'.
+config EDAC_SCRUB
+ bool "EDAC scrub feature"
+ help
+ The EDAC scrub feature is optional and is designed to control the
+ memory scrubbers in the system. The common sysfs scrub interface
+ abstracts the control of arbitrary scrubbing functionalities
+ into a unified set of functions.
+ Say 'y' or 'n' to enable or disable the EDAC scrub feature.
+
+config EDAC_ECS
+ bool "EDAC ECS (Error Check Scrub) feature"
+ help
+ The EDAC ECS feature is optional and is designed to control on-die
+ error check scrub (e.g., DDR5 ECS) in the system. The common sysfs
+ ECS interface abstracts the control of various ECS functionalities
+ into a unified set of functions.
+ Say 'y' or 'n' to enable or disable the EDAC ECS feature.
+
+config EDAC_MEM_REPAIR
+ bool "EDAC memory repair feature"
+ help
+ The EDAC memory repair feature is optional and is designed to control
+ the memory devices with repair features, such as Post Package Repair
+ (PPR), memory sparing etc. The common sysfs memory repair interface
+ abstracts the control of various memory repair functionalities into
+ a unified set of functions.
+ Say 'y' or 'n' to enable or disable the EDAC memory repair feature.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ depends on AMD_NODE
imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
@@ -167,7 +196,7 @@ config EDAC_I3200
config EDAC_IE31200
tristate "Intel e312xx"
- depends on PCI && X86
+ depends on PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
E3-1200 based DRAM controllers.
@@ -303,14 +332,6 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platform without a hypervisor
-
config EDAC_CPC925
tristate "IBM CPC925 Memory Controller (PPC970FX)"
depends on PPC64
@@ -546,5 +567,13 @@ config EDAC_VERSAL
Support injecting both correctable and uncorrectable errors
for debugging purposes.
+config EDAC_LOONGSON
+ tristate "Loongson Memory Controller"
+ depends on LOONGARCH && ACPI
+ help
+ Support for error detection and correction on the Loongson
+ family memory controller. This driver reports single bit
+ errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
+ are compatible.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9cf19d8d13d..a8f2d8f6c894 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,9 @@ edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
+edac_core-$(CONFIG_EDAC_SCRUB) += scrub.o
+edac_core-$(CONFIG_EDAC_ECS) += ecs.o
+edac_core-$(CONFIG_EDAC_MEM_REPAIR) += mem_repair.o
ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
@@ -62,8 +65,6 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -86,3 +87,4 @@ obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
+obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5d356b7c4589..90f0eb7cc5b9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -1170,22 +1172,21 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
#define CS_EVEN_PRIMARY BIT(0)
@@ -1352,14 +1353,14 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
umc_debug_display_dimm_sizes(pvt, i);
}
@@ -1370,11 +1371,11 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -1397,7 +1398,7 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -2026,15 +2027,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -3207,8 +3208,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3352,12 +3352,9 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en)
- return false;
- else
- return true;
+ return ecc_en && nb_mce_en;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
@@ -3377,7 +3374,7 @@ static bool umc_ecc_enabled(struct amd64_pvt *pvt)
}
}
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
return ecc_en;
}
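
All of the conversions above, like the str_plural() change in debugfs.c further
down, switch open-coded ternaries to the helpers from <linux/string_choices.h>,
so each string pair is spelled out once instead of at every call site. A
standalone mock-up of the three helpers touched by this series, runnable outside
the kernel purely for illustration:

	#include <stdio.h>

	/* Userspace stand-ins for the <linux/string_choices.h> helpers. */
	static const char *str_yes_no(int v)           { return v ? "yes" : "no"; }
	static const char *str_enabled_disabled(int v) { return v ? "enabled" : "disabled"; }
	static const char *str_plural(long num)        { return num == 1 ? "" : "s"; }

	int main(void)
	{
		unsigned int dclr = 1u << 19;	/* pretend DCLR has BIT(19) set */
		int errcount = 3;

		printf("All DIMMs support ECC: %s\n", str_yes_no(dclr & (1u << 19)));
		printf("PAR/ERR parity: %s\n", str_enabled_disabled(dclr & (1u << 8)));
		printf("%d error%s injected\n", errcount, str_plural(errcount));
		return 0;
	}

The kernel versions take bool, so expressions such as (dclr & BIT(19)) collapse
to true/false at the call site; the output is identical either way.
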
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
deleted file mode 100644
index c2420e2287ff..000000000000
--- a/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_module.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = mci->csrows[0];
- struct dimm_info *dimm;
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
- int j;
- u32 nr_pages;
-
- for_each_node_by_name(np, "memory") {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + nr_pages - 1;
-
- for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j]->dimm;
- dimm->mtype = MEM_XDR;
- dimm->edac_mode = EDAC_SECDED;
- dimm->nr_pages = nr_pages / csrow->nr_channels;
- }
- dev_dbg(mci->pdev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, nr_pages);
- break;
- }
- of_node_put(np);
-}
-
-static int cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask, num_chans;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- num_chans = chanmask == 3 ? 2 : 1;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = num_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
- sizeof(struct cell_edac_priv));
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static void cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- },
- .probe = cell_edac_probe,
- .remove = cell_edac_remove,
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 4804332d9946..8195fc9c9354 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/string_choices.h>
+
#include "edac_module.h"
static struct dentry *edac_debugfs;
@@ -22,7 +25,7 @@ static ssize_t edac_fake_inject_write(struct file *file,
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
- errcount > 1 ? "s" : "",
+ str_plural(errcount),
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
new file mode 100644
index 000000000000..1d51838a60c1
--- /dev/null
+++ b/drivers/edac/ecs.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic ECS driver is designed to support control of on-die error
+ * check scrub (e.g., DDR5 ECS). The common sysfs ECS interface abstracts
+ * the control of various ECS functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+#define EDAC_ECS_FRU_NAME "ecs_fru"
+
+enum edac_ecs_attributes {
+ ECS_LOG_ENTRY_TYPE,
+ ECS_MODE,
+ ECS_RESET,
+ ECS_THRESHOLD,
+ ECS_MAX_ATTRS
+};
+
+struct edac_ecs_dev_attr {
+ struct device_attribute dev_attr;
+ int fru_id;
+};
+
+struct edac_ecs_fru_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_ecs_dev_attr dev_attr[ECS_MAX_ATTRS];
+ struct attribute *ecs_attrs[ECS_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+struct edac_ecs_context {
+ u16 num_media_frus;
+ struct edac_ecs_fru_context *fru_ctxs;
+};
+
+#define TO_ECS_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_ecs_dev_attr, dev_attr)
+
+#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_ECS_ATTR_SHOW(log_entry_type, get_log_entry_type, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(mode, get_mode, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(threshold, get_threshold, u32, "%u\n")
+
+#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_ECS_ATTR_STORE(log_entry_type, set_log_entry_type, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(mode, set_mode, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(reset, reset, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(threshold, set_threshold, unsigned long, kstrtoul)
+
+static umode_t ecs_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops;
+
+ switch (attr_id) {
+ case ECS_LOG_ENTRY_TYPE:
+ if (ops->get_log_entry_type) {
+ if (ops->set_log_entry_type)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_MODE:
+ if (ops->get_mode) {
+ if (ops->set_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_RESET:
+ if (ops->reset)
+ return a->mode;
+ break;
+ case ECS_THRESHOLD:
+ if (ops->get_threshold) {
+ if (ops->set_threshold)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_ECS_ATTR_RO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_WO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_RW(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .fru_id = _fru_id })
+
+static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{
+ struct edac_ecs_context *ecs_ctx;
+ u32 fru;
+
+ ecs_ctx = devm_kzalloc(ecs_dev, sizeof(*ecs_ctx), GFP_KERNEL);
+ if (!ecs_ctx)
+ return -ENOMEM;
+
+ ecs_ctx->num_media_frus = num_media_frus;
+ ecs_ctx->fru_ctxs = devm_kcalloc(ecs_dev, num_media_frus,
+ sizeof(*ecs_ctx->fru_ctxs),
+ GFP_KERNEL);
+ if (!ecs_ctx->fru_ctxs)
+ return -ENOMEM;
+
+ for (fru = 0; fru < num_media_frus; fru++) {
+ struct edac_ecs_fru_context *fru_ctx = &ecs_ctx->fru_ctxs[fru];
+ struct attribute_group *group = &fru_ctx->group;
+ int i;
+
+ fru_ctx->dev_attr[ECS_LOG_ENTRY_TYPE] = EDAC_ECS_ATTR_RW(log_entry_type, fru);
+ fru_ctx->dev_attr[ECS_MODE] = EDAC_ECS_ATTR_RW(mode, fru);
+ fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
+ fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
+
+ for (i = 0; i < ECS_MAX_ATTRS; i++)
+ fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+
+ sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
+ group->name = fru_ctx->name;
+ group->attrs = fru_ctx->ecs_attrs;
+ group->is_visible = ecs_attr_visible;
+
+ attr_groups[fru] = group;
+ }
+
+ return 0;
+}
+
+/**
+ * edac_ecs_get_desc - get EDAC ECS descriptors
+ * @ecs_dev: client device that supports the ECS feature
+ * @attr_groups: pointer to attribute group container
+ * @num_media_frus: number of media FRUs in the device
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups, u16 num_media_frus)
+{
+ if (!ecs_dev || !attr_groups || !num_media_frus)
+ return -EINVAL;
+
+ return ecs_create_desc(ecs_dev, attr_groups, num_media_frus);
+}
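
A driver exposing ECS controls supplies the callbacks that the
EDAC_ECS_ATTR_SHOW/EDAC_ECS_ATTR_STORE macros above dispatch through, then hands
them to the EDAC core (normally via edac_dev_register(), added in edac_device.c
below). A hypothetical sketch, assuming the edac_ecs_ops prototypes implied by
those macros (parent device, driver data, FRU id, value); the my_* names and
register layout are invented:

	/* Illustrative only; MY_ECS_MODE() and struct my_ecs_hw are assumed. */
	static int my_ecs_get_mode(struct device *dev, void *drv_data,
				   int fru_id, u32 *mode)
	{
		struct my_ecs_hw *hw = drv_data;

		*mode = readl(hw->regs + MY_ECS_MODE(fru_id));
		return 0;
	}

	static int my_ecs_set_mode(struct device *dev, void *drv_data,
				   int fru_id, u32 mode)
	{
		struct my_ecs_hw *hw = drv_data;

		writel(mode, hw->regs + MY_ECS_MODE(fru_id));
		return 0;
	}

	static const struct edac_ecs_ops my_ecs_ops = {
		.get_mode = my_ecs_get_mode,
		.set_mode = my_ecs_set_mode,
		/*
		 * Callbacks left NULL make the matching sysfs attribute
		 * invisible, or read-only when only the getter exists,
		 * via ecs_attr_visible() above.
		 */
	};
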
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 621dc2a5d034..0734909b08a4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -570,3 +570,188 @@ void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
+
+static void edac_dev_release(struct device *dev)
+{
+ struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
+
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+ kfree(ctx->dev.groups);
+ kfree(ctx);
+}
+
+static const struct device_type edac_dev_type = {
+ .name = "edac_dev",
+ .release = edac_dev_release,
+};
+
+static void edac_dev_unreg(void *data)
+{
+ device_unregister(data);
+}
+
+/**
+ * edac_dev_register - register device for RAS features with EDAC
+ * @parent: parent device.
+ * @name: name of the folder under /sys/bus/edac/devices/,
+ * derived from the parent device,
+ * e.g. /sys/bus/edac/devices/cxl_mem0/
+ * @private: parent driver's data, if any, to store in the context.
+ * @num_features: number of RAS features to register.
+ * @ras_features: list of RAS features to register.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ *
+ */
+int edac_dev_register(struct device *parent, char *name,
+ void *private, int num_features,
+ const struct edac_dev_feature *ras_features)
+{
+ const struct attribute_group **ras_attr_groups;
+ struct edac_dev_data *dev_data;
+ struct edac_dev_feat_ctx *ctx;
+ int mem_repair_cnt = 0;
+ int attr_gcnt = 0;
+ int ret = -ENOMEM;
+ int scrub_cnt = 0;
+ int feat;
+
+ if (!parent || !name || !num_features || !ras_features)
+ return -EINVAL;
+
+ /* Double parse to make space for attributes */
+ for (feat = 0; feat < num_features; feat++) {
+ switch (ras_features[feat].ft_type) {
+ case RAS_FEAT_SCRUB:
+ attr_gcnt++;
+ scrub_cnt++;
+ break;
+ case RAS_FEAT_ECS:
+ attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ attr_gcnt++;
+ mem_repair_cnt++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ras_attr_groups = kcalloc(attr_gcnt + 1, sizeof(*ras_attr_groups), GFP_KERNEL);
+ if (!ras_attr_groups)
+ goto ctx_free;
+
+ if (scrub_cnt) {
+ ctx->scrub = kcalloc(scrub_cnt, sizeof(*ctx->scrub), GFP_KERNEL);
+ if (!ctx->scrub)
+ goto groups_free;
+ }
+
+ if (mem_repair_cnt) {
+ ctx->mem_repair = kcalloc(mem_repair_cnt, sizeof(*ctx->mem_repair), GFP_KERNEL);
+ if (!ctx->mem_repair)
+ goto data_mem_free;
+ }
+
+ attr_gcnt = 0;
+ scrub_cnt = 0;
+ mem_repair_cnt = 0;
+ for (feat = 0; feat < num_features; feat++, ras_features++) {
+ switch (ras_features->ft_type) {
+ case RAS_FEAT_SCRUB:
+ if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->scrub[scrub_cnt];
+ dev_data->instance = scrub_cnt;
+ dev_data->scrub_ops = ras_features->scrub_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ scrub_cnt++;
+ attr_gcnt++;
+ break;
+ case RAS_FEAT_ECS:
+ if (!ras_features->ecs_ops) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->ecs;
+ dev_data->ecs_ops = ras_features->ecs_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->ecs_info.num_media_frus);
+ if (ret)
+ goto data_mem_free;
+
+ attr_gcnt += ras_features->ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ if (!ras_features->mem_repair_ops ||
+ mem_repair_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->mem_repair[mem_repair_cnt];
+ dev_data->instance = mem_repair_cnt;
+ dev_data->mem_repair_ops = ras_features->mem_repair_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ mem_repair_cnt++;
+ attr_gcnt++;
+ break;
+ default:
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+ }
+
+ ctx->dev.parent = parent;
+ ctx->dev.bus = edac_get_sysfs_subsys();
+ ctx->dev.type = &edac_dev_type;
+ ctx->dev.groups = ras_attr_groups;
+ ctx->private = private;
+ dev_set_drvdata(&ctx->dev, ctx);
+
+ ret = dev_set_name(&ctx->dev, "%s", name);
+ if (ret)
+ goto data_mem_free;
+
+ ret = device_register(&ctx->dev);
+ if (ret) {
+ put_device(&ctx->dev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
+
+data_mem_free:
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+groups_free:
+ kfree(ras_attr_groups);
+ctx_free:
+ kfree(ctx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(edac_dev_register);
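
Combined with the ECS descriptor code above, a caller describes each RAS feature
in a struct edac_dev_feature array and registers everything with one call. The
field names below follow their usage inside edac_dev_register(); my_scrub_ops,
my_ecs_ops and the hw context are placeholders:

	/* Hypothetical registration of one scrub instance plus ECS. */
	static int my_ras_init(struct device *parent, void *hw)
	{
		struct edac_dev_feature features[] = {
			{
				.ft_type   = RAS_FEAT_SCRUB,
				.instance  = 0,	/* must match registration order */
				.scrub_ops = &my_scrub_ops,
				.ctx       = hw,
			},
			{
				.ft_type  = RAS_FEAT_ECS,
				.ecs_ops  = &my_ecs_ops,
				.ctx      = hw,
				.ecs_info = { .num_media_frus = 2 },
			},
		};

		/*
		 * Creates /sys/bus/edac/devices/my_ras0/ with one attribute
		 * group per scrub instance and one per ECS FRU. Teardown is
		 * bound to @parent through devm_add_action_or_reset().
		 */
		return edac_dev_register(parent, "my_ras0", NULL,
					 ARRAY_SIZE(features), features);
	}
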
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d6eed727b0cd..0959320fe51c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,7 +214,7 @@ static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
unsigned int row, chn;
/*
- * Alocate and fill the csrow/channels structs
+ * Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4200aec04831..0f338adf7d93 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -422,7 +422,7 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
return nr_pages;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
@@ -449,7 +449,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return 0;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
@@ -636,7 +636,7 @@ static void dimm_release(struct device *dev)
*/
}
-/* Create a DIMM object under specifed memory controller device */
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 51556c72a967..355a977019e9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void)
continue;
} else {
d->imc[lmc].mdev = mdev;
+ if (res_cfg->type == SPR)
+ skx_set_mc_mapping(d, i, lmc);
lmc++;
}
}
@@ -938,16 +940,18 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &i10nm_cfg1),
+
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -1010,7 +1014,7 @@ static struct notifier_block i10nm_mce_dec = {
static int __init i10nm_init(void)
{
- u8 mc = 0, src_id = 0, node_id = 0;
+ u8 mc = 0, src_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
@@ -1070,19 +1074,14 @@ static int __init i10nm_init(void)
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
-
- edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
- d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].src_id = src_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
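
The table rewrite keeps X86_MATCH_VFM_STEPS() only where a model genuinely splits
its configuration by stepping (Tremont-D and Icelake-X) and uses the simpler
X86_MATCH_VFM() wherever one config covers all steppings. The consumer side is
unchanged; a condensed sketch of the existing lookup in i10nm_init():

	const struct x86_cpu_id *id;
	struct res_config *cfg;

	id = x86_match_cpu(i10nm_cpuids);	/* NULL when no entry matches */
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;	/* e.g. &gnr_cfg */
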
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4b5a71f8739d..4a1bebc1ff14 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -338,11 +338,11 @@ struct i5000_pvt {
u16 mir0, mir1, mir2;
- u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
@@ -1210,7 +1210,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
- /* Only if we have 2 branchs (4 channels) */
+ /* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 49b4499269fb..b5cf25905b05 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -899,7 +900,7 @@ static void decode_mtr(int slot_row, u16 mtr)
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 61adaa872ba7..69068f8d0cad 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -620,7 +621,7 @@ static int decode_mtr(struct i7300_pvt *pvt,
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
@@ -871,9 +872,9 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
edac_dbg(0, "Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4fc16922dc1a..204834149579 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -51,6 +51,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/mce.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -84,44 +85,23 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
-/* Test if HB is for Skylake or later. */
-#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
- (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_12) || \
- (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
- PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-
-#define IE31200_DIMMS 4
-#define IE31200_RANKS 8
-#define IE31200_RANKS_PER_CHANNEL 4
+/* Raptor Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+
+#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
+#define IE31200_IMC_NUM 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
-#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
-#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
@@ -130,68 +110,60 @@
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
-/*
- * Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-
-#define IE31200_C0ECCERRLOG 0x40c8
-#define IE31200_C1ECCERRLOG 0x44c8
-#define IE31200_C0ECCERRLOG_SKL 0x4048
-#define IE31200_C1ECCERRLOG_SKL 0x4448
-#define IE31200_ECCERRLOG_CE BIT(0)
-#define IE31200_ECCERRLOG_UE BIT(1)
-#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
-#define IE31200_ECCERRLOG_RANK_SHIFT 27
-#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
-#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
-
-#define IE31200_ECCERRLOG_SYNDROME(log) \
- ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
- IE31200_ECCERRLOG_SYNDROME_SHIFT)
-
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
-#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
-#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
-#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
-#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
-
-/* Skylake reports 1GB increments, everything else is 256MB */
-#define IE31200_PAGES(n, skl) \
- (n << (28 + (2 * skl) - PAGE_SHIFT))
+/* Non-constant mask variant of FIELD_GET() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
+struct res_config {
+ enum mem_type mtype;
+ bool cmci;
+ int imc_num;
+ /* Host MMIO configuration register */
+ u64 reg_mchbar_mask;
+ u64 reg_mchbar_window_size;
+ /* ECC error log register */
+ u64 reg_eccerrlog_offset[IE31200_CHANNELS];
+ u64 reg_eccerrlog_ce_mask;
+ u64 reg_eccerrlog_ce_ovfl_mask;
+ u64 reg_eccerrlog_ue_mask;
+ u64 reg_eccerrlog_ue_ovfl_mask;
+ u64 reg_eccerrlog_rank_mask;
+ u64 reg_eccerrlog_syndrome_mask;
+ /* MSR to clear ECC error log register */
+ u32 msr_clear_eccerrlog_offset;
+ /* DIMM characteristics register */
+ u64 reg_mad_dimm_size_granularity;
+ u64 reg_mad_dimm_offset[IE31200_CHANNELS];
+ u32 reg_mad_dimm_size_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_rank_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_width_mask[IE31200_DIMMS_PER_CHANNEL];
+};
+
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
+ struct res_config *cfg;
+ struct mem_ctl_info *mci;
+ struct pci_dev *pdev;
+ struct device dev;
};
+static struct ie31200_pvt {
+ struct ie31200_priv *priv[IE31200_IMC_NUM];
+} ie31200_pvt;
+
enum ie31200_chips {
IE31200 = 0,
+ IE31200_1 = 1,
};
struct ie31200_dev_info {
@@ -202,18 +174,22 @@ struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
+ u64 erraddr;
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
+ [IE31200_1] = {
+ .ctl_name = "IE31200_1"
+ },
};
struct dimm_data {
- u8 size; /* in multiples of 256MB, except Skylake is 1GB */
- u8 dual_rank : 1,
- x16_width : 2; /* 0 means x8 width */
+ u64 size; /* in bytes */
+ u8 ranks;
+ enum dev_type dtype;
};
static int how_many_channels(struct pci_dev *pdev)
@@ -251,29 +227,54 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(u64 log)
-{
- return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
-}
+#define mci_to_pci_dev(mci) (((struct ie31200_priv *)(mci)->pvt_info)->pdev)
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
+
+ /*
+ * The PCI ERRSTS register is deprecated. Write the MSR to clear
+ * the ECC error log registers in all memory controllers.
+ */
+ if (cfg->msr_clear_eccerrlog_offset) {
+ if (wrmsr_safe(cfg->msr_clear_eccerrlog_offset,
+ cfg->reg_eccerrlog_ce_mask |
+ cfg->reg_eccerrlog_ce_ovfl_mask |
+ cfg->reg_eccerrlog_ue_mask |
+ cfg->reg_eccerrlog_ue_ovfl_mask, 0) < 0)
+ ie31200_printk(KERN_ERR, "Failed to wrmsr.\n");
+
+ return;
+ }
+
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
- pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+ pci_write_bits16(mci_to_pci_dev(mci), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = mci_to_pci_dev(mci);
struct ie31200_priv *priv = mci->pvt_info;
- pdev = to_pci_dev(mci->pdev);
+ /*
+ * The PCI ERRSTS register is deprecated, directly read the
+ * MMIO-mapped ECC error log registers.
+ */
+ if (priv->cfg->msr_clear_eccerrlog_offset) {
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
+
+ ie31200_clear_error_info(mci);
+ return;
+ }
/*
* This is a mess because there is no atomic way to read all the
@@ -309,46 +310,56 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
int channel;
u64 log;
- if (!(info->errsts & IE31200_ERRSTS_BITS))
- return;
+ if (!cfg->msr_clear_eccerrlog_offset) {
+ if (!(info->errsts & IE31200_ERRSTS_BITS))
+ return;
- if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "");
- info->errsts = info->errsts2;
+ if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
+ info->errsts = info->errsts2;
+ }
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
- if (log & IE31200_ECCERRLOG_UE) {
+ if (log & cfg->reg_eccerrlog_ue_mask) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- 0, 0, 0,
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0, 0,
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 UE", "");
- } else if (log & IE31200_ECCERRLOG_CE) {
+ } else if (log & cfg->reg_eccerrlog_ce_mask) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0,
- IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0,
+ field_get(cfg->reg_eccerrlog_syndrome_mask, log),
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 CE", "");
}
}
}
-static void ie31200_check(struct mem_ctl_info *mci)
+static void __ie31200_check(struct mem_ctl_info *mci, struct mce *mce)
{
struct ie31200_error_info info;
+ info.erraddr = mce ? mce->addr : 0;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
-static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+ __ie31200_check(mci, NULL);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
union {
u64 mchbar;
@@ -361,7 +372,8 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= IE31200_MCHBAR_MASK;
+ u.mchbar &= cfg->reg_mchbar_mask;
+ u.mchbar += cfg->reg_mchbar_window_size * mc;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
@@ -369,7 +381,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, cfg->reg_mchbar_window_size);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
@@ -377,155 +389,108 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
-static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
+ struct res_config *cfg)
{
- dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
- dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
- (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+ dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) * cfg->reg_mad_dimm_size_granularity;
+ dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
+ dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
-static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void ie31200_get_dimm_config(struct mem_ctl_info *mci, void __iomem *window,
+ struct res_config *cfg, int mc)
{
- dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
- dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
-}
+ struct dimm_data dimm_info;
+ struct dimm_info *dimm;
+ unsigned long nr_pages;
+ u32 addr_decode;
+ int i, j, k;
-static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
- bool skl)
-{
- if (skl)
- __skl_populate_dimm_info(dd, addr_decode, chan);
- else
- __populate_dimm_info(dd, addr_decode, chan);
-}
+ for (i = 0; i < IE31200_CHANNELS; i++) {
+ addr_decode = readl(window + cfg->reg_mad_dimm_offset[i]);
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+ for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+ populate_dimm_info(&dimm_info, addr_decode, j, cfg);
+ edac_dbg(0, "mc: %d, channel: %d, dimm: %d, size: %lld MiB, ranks: %d, DRAM chip type: %d\n",
+ mc, i, j, dimm_info.size >> 20,
+ dimm_info.ranks,
+ dimm_info.dtype);
-static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ nr_pages = MiB_TO_PAGES(dimm_info.size >> 20);
+ if (nr_pages == 0)
+ continue;
+
+ nr_pages = nr_pages / dimm_info.ranks;
+ for (k = 0; k < dimm_info.ranks; k++) {
+ dimm = edac_get_dimm(mci, (j * dimm_info.ranks) + k, i, 0);
+ dimm->nr_pages = nr_pages;
+ edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+ dimm->grain = 8; /* just a guess */
+ dimm->mtype = cfg->mtype;
+ dimm->dtype = dimm_info.dtype;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
+ }
+ }
+}
+
+static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
- int i, j, ret;
- struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
- void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
-
- /*
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
- * this logic when adding new CPU support.
- */
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
-
- edac_dbg(0, "MC:\n");
-
- if (!ecc_capable(pdev)) {
- ie31200_printk(KERN_INFO, "No ECC support\n");
- return -ENODEV;
- }
+ struct mem_ctl_info *mci;
+ void __iomem *window;
+ int ret;
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = IE31200_DIMMS;
+ layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
- window = ie31200_map_mchbar(pdev);
+ window = ie31200_map_mchbar(pdev, cfg, mc);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
- mci->pdev = &pdev->dev;
- if (skl)
- mci->mtype_cap = MEM_FLAG_DDR4;
- else
- mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->mtype_cap = BIT(cfg->mtype);
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+ mci->ctl_name = ie31200_devs[mc].ctl_name;
mci->dev_name = pci_name(pdev);
- mci->edac_check = ie31200_check;
+ mci->edac_check = cfg->cmci ? NULL : ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
- if (skl) {
- priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
- priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
- } else {
- priv->c0errlog = window + IE31200_C0ECCERRLOG;
- priv->c1errlog = window + IE31200_C1ECCERRLOG;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET;
- }
-
- /* populate DIMM info */
- for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
- (i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
- }
-
+ priv->c0errlog = window + cfg->reg_eccerrlog_offset[0];
+ priv->c1errlog = window + cfg->reg_eccerrlog_offset[1];
+ priv->cfg = cfg;
+ priv->mci = mci;
+ priv->pdev = pdev;
+ device_initialize(&priv->dev);
/*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
+ * The EDAC core uses mci->pdev (a pointer to a struct device)
+ * as the memory controller ID. The SoCs attach one or more memory
+ * controllers to a single pci_dev (a single pci_dev->dev can
+ * correspond to multiple memory controllers).
+ *
+ * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
+ * for the first memory controller and assign a unique priv->dev
+ * to mci->pdev for each additional memory controller.
*/
- for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
- for (j = 0; j < IE31200_CHANNELS; j++) {
- struct dimm_info *dimm;
- unsigned long nr_pages;
-
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
- if (nr_pages == 0)
- continue;
-
- if (dimm_info[j][i].dual_rank) {
- nr_pages = nr_pages / 2;
- dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* just a guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- dimm = edac_get_dimm(mci, i * 2, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* same guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- }
+ mci->pdev = mc ? &priv->dev : &pdev->dev;
+ ie31200_get_dimm_config(mci, window, cfg, mc);
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
@@ -534,16 +499,115 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
goto fail_unmap;
}
- /* get this far and it's successful */
- edac_dbg(3, "MC: success\n");
+ ie31200_pvt.priv[mc] = priv;
return 0;
-
fail_unmap:
iounmap(window);
-
fail_free:
edac_mc_free(mci);
+ return ret;
+}
+
+static void mce_check(struct mce *mce)
+{
+ struct ie31200_priv *priv;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+
+ __ie31200_check(priv->mci, mce);
+ }
+}
+
+static int mce_handler(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory related error.
+ * Don't check MCI_STATUS_ADDRV since it's not set on some CPUs.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ type = mce->mcgstatus & MCG_STATUS_MCIP ? "Exception" : "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+ mce_check(mce);
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ie31200_mce_dec = {
+ .notifier_call = mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static void ie31200_unregister_mcis(void)
+{
+ struct ie31200_priv *priv;
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+ mci = priv->mci;
+ edac_mc_del_mc(mci->pdev);
+ iounmap(priv->window);
+ edac_mc_free(mci);
+ }
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
+{
+ int i, ret;
+
+ edac_dbg(0, "MC:\n");
+
+ if (!ecc_capable(pdev)) {
+ ie31200_printk(KERN_INFO, "No ECC support\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->imc_num; i++) {
+ ret = ie31200_register_mci(pdev, cfg, i);
+ if (ret)
+ goto fail_register;
+ }
+
+ if (cfg->cmci) {
+ mce_register_decode_chain(&ie31200_mce_dec);
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ /* get this far and it's successful. */
+ edac_dbg(3, "MC: success\n");
+ return 0;
+
+fail_register:
+ ie31200_unregister_mcis();
return ret;
}
@@ -555,7 +619,7 @@ static int ie31200_init_one(struct pci_dev *pdev,
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
- rc = ie31200_probe1(pdev, ent->driver_data);
+ rc = ie31200_probe1(pdev, (struct res_config *)ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
@@ -564,43 +628,112 @@ static int ie31200_init_one(struct pci_dev *pdev,
static void ie31200_remove_one(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct ie31200_priv *priv;
+ struct ie31200_priv *priv = ie31200_pvt.priv[0];
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
- priv = mci->pvt_info;
- iounmap(priv->window);
- edac_mc_free(mci);
+ if (priv->cfg->cmci)
+ mce_unregister_decode_chain(&ie31200_mce_dec);
+ ie31200_unregister_mcis();
}
+static struct res_config snb_cfg = {
+ .mtype = MEM_DDR3,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x40c8,
+ .reg_eccerrlog_offset[1] = 0x44c8,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(28),
+ .reg_mad_dimm_offset[0] = 0x5004,
+ .reg_mad_dimm_offset[1] = 0x5008,
+ .reg_mad_dimm_size_mask[0] = GENMASK(7, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(15, 8),
+ .reg_mad_dimm_rank_mask[0] = BIT(17),
+ .reg_mad_dimm_rank_mask[1] = BIT(18),
+ .reg_mad_dimm_width_mask[0] = BIT(19),
+ .reg_mad_dimm_width_mask[1] = BIT(20),
+};
+
+static struct res_config skl_cfg = {
+ .mtype = MEM_DDR4,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x4048,
+ .reg_eccerrlog_offset[1] = 0x4448,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(30),
+ .reg_mad_dimm_offset[0] = 0x500c,
+ .reg_mad_dimm_offset[1] = 0x5010,
+ .reg_mad_dimm_size_mask[0] = GENMASK(5, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(21, 16),
+ .reg_mad_dimm_rank_mask[0] = BIT(10),
+ .reg_mad_dimm_rank_mask[1] = BIT(26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(9, 8),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
+static struct res_config rpl_s_cfg = {
+ .mtype = MEM_DDR5,
+ .cmci = true,
+ .imc_num = 2,
+ .reg_mchbar_mask = GENMASK_ULL(41, 17),
+ .reg_mchbar_window_size = BIT_ULL(16),
+ .reg_eccerrlog_offset[0] = 0xe048,
+ .reg_eccerrlog_offset[1] = 0xe848,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ce_ovfl_mask = BIT_ULL(1),
+ .reg_eccerrlog_ue_mask = BIT_ULL(2),
+ .reg_eccerrlog_ue_ovfl_mask = BIT_ULL(3),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .msr_clear_eccerrlog_offset = 0x791,
+ .reg_mad_dimm_offset[0] = 0xd80c,
+ .reg_mad_dimm_offset[1] = 0xd810,
+ .reg_mad_dimm_size_granularity = BIT_ULL(29),
+ .reg_mad_dimm_size_mask[0] = GENMASK(6, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(22, 16),
+ .reg_mad_dimm_rank_mask[0] = GENMASK(10, 9),
+ .reg_mad_dimm_rank_mask[1] = GENMASK(27, 26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(8, 7),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
static const struct pci_device_id ie31200_pci_tbl[] = {
- { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_12), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_1), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_2), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_3), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_4), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_5), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_6), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_7), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_11), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_12), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
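[Annotation] The rewritten table stashes a pointer to the per-generation register layout (snb/skl/rpl_s) in each entry's driver_data via PCI_VDEVICE(). A minimal sketch of how a probe routine recovers it; the struct and helper names here are illustrative assumptions, not the driver's actual symbols:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* driver_data carries the (kernel_ulong_t) config pointer from the table */
	const struct ie31200_cfg *cfg = (const struct ie31200_cfg *)ent->driver_data;

	return example_register_mci(pdev, cfg);	/* hypothetical helper */
}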
@@ -617,12 +750,10 @@ static int __init ie31200_init(void)
int pci_rc, i;
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
- goto fail0;
+ return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@@ -633,11 +764,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
+
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
- goto fail1;
+ goto fail0;
}
+
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@@ -645,12 +778,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
- return 0;
+ return 0;
fail1:
- pci_unregister_driver(&ie31200_driver);
-fail0:
pci_dev_put(mci_pdev);
+fail0:
+ pci_unregister_driver(&ie31200_driver);
return pci_rc;
}
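[Annotation] The label swap above restores the usual unwind rule: tear down in reverse order of setup, so the device reference taken after pci_register_driver() is dropped before the driver is unregistered. A minimal sketch of the idiom, with illustrative names:

static int __init example_init(void)
{
	int ret;

	ret = pci_register_driver(&example_driver);	/* step 1 */
	if (ret < 0)
		return ret;

	example_pdev = pci_get_device(vid, did, NULL);	/* step 2: takes a ref */
	if (!example_pdev) {
		ret = -ENODEV;
		goto fail0;
	}

	ret = example_setup(example_pdev);		/* step 3 */
	if (ret < 0)
		goto fail1;

	return 0;
fail1:
	pci_dev_put(example_pdev);		/* undo step 2 first */
fail0:
	pci_unregister_driver(&example_driver);	/* then undo step 1 */
	return ret;
}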
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index fdf3a84fe698..5807517ee32d 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static struct res_config {
+static const struct res_config {
bool machine_check;
int num_imc;
u32 imc_base;
@@ -472,7 +472,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static struct res_config ehl_cfg = {
+static const struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -482,7 +482,7 @@ static struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config icl_cfg = {
+static const struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -492,7 +492,7 @@ static struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config tgl_cfg = {
+static const struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -506,7 +506,7 @@ static struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static struct res_config adl_cfg = {
+static const struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -517,7 +517,7 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config adl_n_cfg = {
+static const struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -528,7 +528,7 @@ static struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config rpl_p_cfg = {
+static const struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -540,7 +540,7 @@ static struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_ps_cfg = {
+static const struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -551,7 +551,7 @@ static struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_p_cfg = {
+static const struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
- /* Clear CE/UE bits by writing 1s */
- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
- return ecclog;
- }
+ /*
+ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain
+ * the invalid value ~0. This will result in a flood of invalid
+ * error reports in polling mode. Skip it.
+ */
+ if (ecclog == ~0)
+ return 0;
- return 0;
+ /* Neither a CE nor a UE. Skip it. */
+ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
+ return 0;
+
+ /* Clear CE/UE bits by writing 1s */
+ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
+
+ return ecclog;
}
static void errsts_clear(struct igen6_imc *imc)
@@ -1374,7 +1383,7 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
-static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
+static void opstate_set(const struct res_config *cfg, const struct pci_device_id *ent)
{
/*
* Quirk: Certain SoCs' error reporting interrupts don't work.
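[Annotation] With the reordering in ecclog_read_and_clear() above, the ~0 quirk is filtered out before the CE/UE test, so a polling caller can treat a zero return as "nothing to report". A hedged sketch of such a consumer (handle_ce/handle_ue are hypothetical helpers, not the driver's code):

static void example_check(struct igen6_imc *imc)
{
	u64 ecclog = ecclog_read_and_clear(imc);

	if (!ecclog)
		return;	/* nothing logged, or the ~0 quirk was hit */

	if (ecclog & ECC_ERROR_LOG_CE)
		handle_ce(imc, ecclog);
	if (ecclog & ECC_ERROR_LOG_UE)
		handle_ue(imc, ecclog);
}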
diff --git a/drivers/edac/loongson_edac.c b/drivers/edac/loongson_edac.c
new file mode 100644
index 000000000000..38745800ed01
--- /dev/null
+++ b/drivers/edac/loongson_edac.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/edac.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+
+#define ECC_CS_COUNT_REG 0x18
+
+struct loongson_edac_pvt {
+ void __iomem *ecc_base;
+
+ /*
+ * The ECC register in this controller records the number of errors
+ * encountered since reset and cannot be zeroed. So, to be able to
+ * report the error count at each check, this records the previous
+ * register state.
+ */
+ int last_ce_count;
+};
+
+static int read_ecc(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ u64 ecc;
+ int cs;
+
+ ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
+ /* cs0 -- cs3 */
+ cs = ecc & 0xff;
+ cs += (ecc >> 8) & 0xff;
+ cs += (ecc >> 16) & 0xff;
+ cs += (ecc >> 24) & 0xff;
+
+ return cs;
+}
+
+static void edac_check(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ int new, add;
+
+ new = read_ecc(mci);
+ add = new - pvt->last_ce_count;
+ pvt->last_ce_count = new;
+ if (add <= 0)
+ return;
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add,
+ 0, 0, 0, 0, 0, -1, "error", "");
+}
+
+static void dimm_config_init(struct mem_ctl_info *mci)
+{
+ struct dimm_info *dimm;
+ u32 size, npages;
+
+ /* size not used */
+ size = -1;
+ npages = MiB_TO_PAGES(size);
+
+ dimm = edac_get_dimm(mci, 0, 0, 0);
+ dimm->nr_pages = npages;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "MC#%uChannel#%u_DIMM#%u", mci->mc_idx, 0, 0);
+ dimm->grain = 8;
+}
+
+static void pvt_init(struct mem_ctl_info *mci, void __iomem *vbase)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+
+ pvt->ecc_base = vbase;
+ pvt->last_ce_count = read_ecc(mci);
+}
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ int ret;
+
+ vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbase))
+ return PTR_ERR(vbase);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct loongson_edac_pvt));
+ if (mci == NULL)
+ return -ENOMEM;
+
+ mci->mc_idx = edac_device_alloc_index();
+ mci->mtype_cap = MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "loongson_edac.c";
+ mci->ctl_name = "loongson_edac_ctl";
+ mci->dev_name = "loongson_edac_dev";
+ mci->ctl_page_to_phys = NULL;
+ mci->pdev = &pdev->dev;
+ mci->error_desc.grain = 8;
+ mci->edac_check = edac_check;
+
+ pvt_init(mci, vbase);
+ dimm_config_init(mci);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return ret;
+ }
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+}
+
+static void edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (mci)
+ edac_mc_free(mci);
+}
+
+static const struct acpi_device_id loongson_edac_acpi_match[] = {
+ {"LOON0010", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_edac_acpi_match);
+
+static struct platform_driver loongson_edac_driver = {
+ .probe = edac_probe,
+ .remove = edac_remove,
+ .driver = {
+ .name = "loongson-mc-edac",
+ .acpi_match_table = loongson_edac_acpi_match,
+ },
+};
+module_platform_driver(loongson_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qunqin <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("EDAC driver for loongson memory controller");
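[Annotation] read_ecc() above sums four per-chip-select 8-bit counters packed into one 64-bit register. The same unpacking in loop form, shown only as a sketch:

static int read_ecc_alt(struct loongson_edac_pvt *pvt)
{
	u64 ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
	int n, count = 0;

	for (n = 0; n < 4; n++)			/* cs0 -- cs3 */
		count += (ecc >> (8 * n)) & 0xff;

	return count;
}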
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
new file mode 100755
index 000000000000..3b1a845457b0
--- /dev/null
+++ b/drivers/edac/mem_repair.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC memory repair driver is designed to control the memory
+ * devices with memory repair features, such as Post Package Repair (PPR),
+ * memory sparing etc. The common sysfs memory repair interface abstracts
+ * the control of various arbitrary memory repair functionalities into a
+ * unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_mem_repair_attributes {
+ MR_TYPE,
+ MR_PERSIST_MODE,
+ MR_SAFE_IN_USE,
+ MR_HPA,
+ MR_MIN_HPA,
+ MR_MAX_HPA,
+ MR_DPA,
+ MR_MIN_DPA,
+ MR_MAX_DPA,
+ MR_NIBBLE_MASK,
+ MR_BANK_GROUP,
+ MR_BANK,
+ MR_RANK,
+ MR_ROW,
+ MR_COLUMN,
+ MR_CHANNEL,
+ MR_SUB_CHANNEL,
+ MEM_DO_REPAIR,
+ MR_MAX_ATTRS
+};
+
+struct edac_mem_repair_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_mem_repair_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_mem_repair_dev_attr mem_repair_dev_attr[MR_MAX_ATTRS];
+ struct attribute *mem_repair_attrs[MR_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_MR_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
+
+#define MR_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+MR_ATTR_SHOW(repair_type, get_repair_type, const char *, "%s\n")
+MR_ATTR_SHOW(persist_mode, get_persist_mode, bool, "%u\n")
+MR_ATTR_SHOW(repair_safe_when_in_use, get_repair_safe_when_in_use, bool, "%u\n")
+MR_ATTR_SHOW(hpa, get_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_hpa, get_min_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_hpa, get_max_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(dpa, get_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_dpa, get_min_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_dpa, get_max_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(nibble_mask, get_nibble_mask, u32, "0x%x\n")
+MR_ATTR_SHOW(bank_group, get_bank_group, u32, "%u\n")
+MR_ATTR_SHOW(bank, get_bank, u32, "%u\n")
+MR_ATTR_SHOW(rank, get_rank, u32, "%u\n")
+MR_ATTR_SHOW(row, get_row, u32, "0x%x\n")
+MR_ATTR_SHOW(column, get_column, u32, "%u\n")
+MR_ATTR_SHOW(channel, get_channel, u32, "%u\n")
+MR_ATTR_SHOW(sub_channel, get_sub_channel, u32, "%u\n")
+
+#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_ATTR_STORE(persist_mode, set_persist_mode, unsigned long, kstrtoul)
+MR_ATTR_STORE(hpa, set_hpa, u64, kstrtou64)
+MR_ATTR_STORE(dpa, set_dpa, u64, kstrtou64)
+MR_ATTR_STORE(nibble_mask, set_nibble_mask, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank_group, set_bank_group, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank, set_bank, unsigned long, kstrtoul)
+MR_ATTR_STORE(rank, set_rank, unsigned long, kstrtoul)
+MR_ATTR_STORE(row, set_row, unsigned long, kstrtoul)
+MR_ATTR_STORE(column, set_column, unsigned long, kstrtoul)
+MR_ATTR_STORE(channel, set_channel, unsigned long, kstrtoul)
+MR_ATTR_STORE(sub_channel, set_sub_channel, unsigned long, kstrtoul)
+
+#define MR_DO_OP(attrib, cb) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops; \
+ unsigned long data; \
+ int ret; \
+ \
+ ret = kstrtoul(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_DO_OP(repair, do_repair)
+
+static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ u8 inst = TO_MR_DEV_ATTR(dev_attr)->instance;
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops;
+
+ switch (attr_id) {
+ case MR_TYPE:
+ if (ops->get_repair_type)
+ return a->mode;
+ break;
+ case MR_PERSIST_MODE:
+ if (ops->get_persist_mode) {
+ if (ops->set_persist_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SAFE_IN_USE:
+ if (ops->get_repair_safe_when_in_use)
+ return a->mode;
+ break;
+ case MR_HPA:
+ if (ops->get_hpa) {
+ if (ops->set_hpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_HPA:
+ if (ops->get_min_hpa)
+ return a->mode;
+ break;
+ case MR_MAX_HPA:
+ if (ops->get_max_hpa)
+ return a->mode;
+ break;
+ case MR_DPA:
+ if (ops->get_dpa) {
+ if (ops->set_dpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_DPA:
+ if (ops->get_min_dpa)
+ return a->mode;
+ break;
+ case MR_MAX_DPA:
+ if (ops->get_max_dpa)
+ return a->mode;
+ break;
+ case MR_NIBBLE_MASK:
+ if (ops->get_nibble_mask) {
+ if (ops->set_nibble_mask)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK_GROUP:
+ if (ops->get_bank_group) {
+ if (ops->set_bank_group)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK:
+ if (ops->get_bank) {
+ if (ops->set_bank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_RANK:
+ if (ops->get_rank) {
+ if (ops->set_rank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_ROW:
+ if (ops->get_row) {
+ if (ops->set_row)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_COLUMN:
+ if (ops->get_column) {
+ if (ops->set_column)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_CHANNEL:
+ if (ops->get_channel) {
+ if (ops->set_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SUB_CHANNEL:
+ if (ops->get_sub_channel) {
+ if (ops->set_sub_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MEM_DO_REPAIR:
+ if (ops->do_repair)
+ return a->mode;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define MR_ATTR_RO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_WO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_RW(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int mem_repair_create_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{
+ struct edac_mem_repair_context *ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_mem_repair_dev_attr dev_attr[] = {
+ [MR_TYPE] = MR_ATTR_RO(repair_type, instance),
+ [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance),
+ [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance),
+ [MR_HPA] = MR_ATTR_RW(hpa, instance),
+ [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance),
+ [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance),
+ [MR_DPA] = MR_ATTR_RW(dpa, instance),
+ [MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance),
+ [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance),
+ [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance),
+ [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance),
+ [MR_BANK] = MR_ATTR_RW(bank, instance),
+ [MR_RANK] = MR_ATTR_RW(rank, instance),
+ [MR_ROW] = MR_ATTR_RW(row, instance),
+ [MR_COLUMN] = MR_ATTR_RW(column, instance),
+ [MR_CHANNEL] = MR_ATTR_RW(channel, instance),
+ [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance),
+ [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance)
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < MR_MAX_ATTRS; i++) {
+ memcpy(&ctx->mem_repair_dev_attr[i],
+ &dev_attr[i], sizeof(dev_attr[i]));
+ ctx->mem_repair_attrs[i] =
+ &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(ctx->name, "%s%d", "mem_repair", instance);
+ group = &ctx->group;
+ group->name = ctx->name;
+ group->attrs = ctx->mem_repair_attrs;
+ group->is_visible = mem_repair_attr_visible;
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_mem_repair_get_desc - get EDAC memory repair descriptors
+ * @dev: client device with memory repair feature
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's memory repair instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!dev || !attr_groups)
+ return -EINVAL;
+
+ return mem_repair_create_desc(dev, attr_groups, instance);
+}
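[Annotation] A client wires this up by providing an edac_mem_repair_ops table; mem_repair_attr_visible() then exposes only the attributes whose callbacks exist, read-only when a getter has no matching setter. A hedged sketch with callback signatures inferred from the macro expansions above (the prototypes in <linux/edac.h> are authoritative):

static int my_get_repair_type(struct device *dev, void *drv_data,
			      const char **type)
{
	*type = "ppr";		/* illustrative repair type string */
	return 0;
}

static int my_do_repair(struct device *dev, void *drv_data, u32 val)
{
	/* trigger the repair operation in hardware */
	return 0;
}

static const struct edac_mem_repair_ops my_repair_ops = {
	.get_repair_type = my_get_repair_type,	/* repair_type becomes visible */
	.do_repair	 = my_do_repair,	/* repair becomes write-only */
};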
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index f93f2f2b1cf2..af14c8a3279f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -372,7 +372,7 @@ static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
- const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ static const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
@@ -489,7 +489,7 @@ static int dnv_get_registers(void)
*/
static int get_registers(void)
{
- const int intlv[] = { 10, 11, 12, 12 };
+ static const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
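[Annotation] On the two pnd2 hunks above: without static, a function-local const array is rebuilt on the stack at every call; static const emits it once into .rodata. Compilers can often optimize either form, but the explicit version is preferred. A minimal illustration:

int sum_stack(void)
{
	const int tbl[] = { 1, 2, 3 };		/* built on the stack per call */

	return tbl[0] + tbl[1] + tbl[2];
}

int sum_rodata(void)
{
	static const int tbl[] = { 1, 2, 3 };	/* emitted once into .rodata */

	return tbl[0] + tbl[1] + tbl[2];
}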
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 04c42c83a2ba..f3da9385ca0d 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
new file mode 100755
index 000000000000..e421d3ebd959
--- /dev/null
+++ b/drivers/edac/scrub.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC scrub driver controls the memory scrubbers in the
+ * system. The common sysfs scrub interface abstracts the control of
+ * various arbitrary scrubbing functionalities into a unified set of
+ * functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_scrub_attributes {
+ SCRUB_ADDRESS,
+ SCRUB_SIZE,
+ SCRUB_ENABLE_BACKGROUND,
+ SCRUB_MIN_CYCLE_DURATION,
+ SCRUB_MAX_CYCLE_DURATION,
+ SCRUB_CUR_CYCLE_DURATION,
+ SCRUB_MAX_ATTRS
+};
+
+struct edac_scrub_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_scrub_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
+ struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_SCRUB_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)
+
+#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
+
+#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
+EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
+
+static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
+
+ switch (attr_id) {
+ case SCRUB_ADDRESS:
+ if (ops->read_addr) {
+ if (ops->write_addr)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_SIZE:
+ if (ops->read_size) {
+ if (ops->write_size)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_ENABLE_BACKGROUND:
+ if (ops->get_enabled_bg) {
+ if (ops->set_enabled_bg)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_MIN_CYCLE_DURATION:
+ if (ops->get_min_cycle)
+ return a->mode;
+ break;
+ case SCRUB_MAX_CYCLE_DURATION:
+ if (ops->get_max_cycle)
+ return a->mode;
+ break;
+ case SCRUB_CUR_CYCLE_DURATION:
+ if (ops->get_cycle_duration) {
+ if (ops->set_cycle_duration)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int scrub_create_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ struct edac_scrub_context *scrub_ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_scrub_dev_attr dev_attr[] = {
+ [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
+ [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
+ [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
+ [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
+ [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
+ [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
+ };
+
+ scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
+ if (!scrub_ctx)
+ return -ENOMEM;
+
+ group = &scrub_ctx->group;
+ for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
+ memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
+ }
+ sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
+ group->name = scrub_ctx->name;
+ group->attrs = scrub_ctx->scrub_attrs;
+ group->is_visible = scrub_attr_visible;
+
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_scrub_get_desc - get EDAC scrub descriptors
+ * @scrub_dev: client device, with scrub support
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's scrub instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!scrub_dev || !attr_groups)
+ return -EINVAL;
+
+ return scrub_create_desc(scrub_dev, attr_groups, instance);
+}
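[Annotation] The same visibility convention applies here: a client that implements only a getter gets a read-only attribute. A hedged sketch of a minimal scrub client, with the signature inferred from EDAC_SCRUB_ATTR_SHOW above:

static int my_get_enabled_bg(struct device *dev, void *drv_data, bool *enabled)
{
	*enabled = true;	/* illustrative */
	return 0;
}

static const struct edac_scrub_ops my_scrub_ops = {
	/* getter without setter: enable_background surfaces as 0444 */
	.get_enabled_bg = my_get_enabled_bg,
};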
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 14cfd394b469..29897b21fb8e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -600,7 +600,7 @@ static int __init skx_init(void)
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
- u8 mc = 0, src_id, node_id;
+ u8 mc = 0, src_id;
struct skx_dev *d;
edac_dbg(2, "\n");
@@ -650,15 +650,12 @@ static int __init skx_init(void)
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
- edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < SKX_NUM_IMC; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 6cf17af7d911..fa5b442b1844 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -19,6 +19,7 @@
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
+#include <asm/uv/uv.h>
#include "edac_module.h"
#include "skx_common.h"
@@ -120,6 +121,35 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d)
+{
+ /*
+ * By default, the BIOS presents all memory controllers within each
+ * socket to the EDAC driver. The physical indices are the same as
+ * the logical indices of the memory controllers enumerated by the
+ * EDAC driver.
+ */
+ for (int i = 0; i < NUM_IMC; i++)
+ d->mc_mapping[i] = i;
+}
+
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
+{
+ edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ d->mc_mapping[pmc] = lmc;
+}
+EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
+
+static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+{
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, d->mc_mapping[pmc]);
+
+ return d->mc_mapping[pmc];
+}
+
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
struct skx_dev *d;
@@ -187,6 +217,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
+ res->imc = skx_get_mc_mapping(d, res->imc);
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -221,33 +253,51 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
}
EXPORT_SYMBOL_GPL(skx_set_decode);
-int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
{
- u32 reg;
+ int node;
+ int cpu;
- if (pci_read_config_dword(d->util_all, off, &reg)) {
- skx_printk(KERN_ERR, "Failed to read src id\n");
- return -ENODEV;
+ node = pcibus_to_node(d->util_all->bus);
+ if (numa_valid_node(node)) {
+ for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ *id = c->topo.pkg_id;
+ return 0;
+ }
+ }
}
- *id = GET_BITFIELD(reg, 12, 14);
- return 0;
+ skx_printk(KERN_ERR, "Failed to get package ID from NUMA information\n");
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(skx_get_src_id);
-int skx_get_node_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
- skx_printk(KERN_ERR, "Failed to read node id\n");
+ /*
+ * The 3-bit source IDs in PCI configuration space registers are limited
+ * to 8 unique IDs, and each ID is local to a UPI/QPI domain.
+ *
+ * Source IDs cannot be used to map devices to sockets on UV systems
+ * because they can exceed 8 sockets and have multiple UPI/QPI domains
+ * with identical, repeating source IDs.
+ */
+ if (is_uv_system())
+ return skx_get_pkg_id(d, id);
+
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
+ skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
- *id = GET_BITFIELD(reg, 0, 2);
+ *id = GET_BITFIELD(reg, 12, 14);
return 0;
}
-EXPORT_SYMBOL_GPL(skx_get_node_id);
+EXPORT_SYMBOL_GPL(skx_get_src_id);
static int get_width(u32 mtr)
{
@@ -307,6 +357,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
+
+ skx_init_mc_mapping(d);
}
if (list)
@@ -507,7 +559,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
- imc->node_id, imc->lmc);
+ imc->src_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 54bba8a62f72..ca5408803f87 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -93,6 +93,16 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error address using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used by the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping[NUM_IMC];
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -103,7 +113,7 @@ struct skx_dev {
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
- u8 src_id, node_id;
+ u8 src_id;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -242,9 +252,9 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
-int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
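[Annotation] To make the mapping concrete: if the BIOS hides physical memory controller 0, EDAC enumerates physical controller 1 as logical 0, and the probe path records that so skx_adxl_decode() can rewrite the ADXL's physical index. A hedged illustration:

static void example_mc_setup(struct skx_dev *d)
{
	/* BIOS hid physical MC0; EDAC's first (logical 0) MC is physical MC1 */
	skx_set_mc_mapping(d, 1, 0);
}

/* Later, in the decode path (see skx_adxl_decode() above):
 *	res->imc = skx_get_mc_mapping(d, res->imc);
 */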
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 699c7d29d80c..9955396c9a52 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -1407,7 +1408,7 @@ static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
- info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+ str_read_write(info & REQTYPE_MASK), ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
@@ -1489,19 +1490,19 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
@@ -1560,7 +1561,7 @@ rb_skip:
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
- REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_F2_RD(err_addr_hi)),
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
@@ -1611,7 +1612,7 @@ chk_iob_axi0:
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
@@ -1625,7 +1626,7 @@ chk_iob_axi1:
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
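[Annotation] For reference, the string_choices helpers used above amount to roughly the following (see include/linux/string_choices.h for the real definitions):

static inline const char *str_read_write(bool v)
{
	return v ? "read" : "write";
}
/* str_write_read(v) makes the same choice with the sense inverted */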
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index e458ce0c45ab..b11b43171063 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -350,7 +350,7 @@ static const struct dev_pm_ops fsa9480_pm_ops = {
};
static const struct i2c_device_id fsa9480_id[] = {
- { "fsa9480", 0 },
+ { "fsa9480" },
{}
};
MODULE_DEVICE_TABLE(i2c, fsa9480_id);
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 4616da7e5430..78ad86c4a3be 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -338,7 +338,7 @@ static const struct of_device_id ptn5150_dt_match[] = {
MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
static const struct i2c_device_id ptn5150_i2c_id[] = {
- { "ptn5150", 0 },
+ { "ptn5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c
index bdc2b7b3a246..82b60b927e41 100644
--- a/drivers/extcon/extcon-rtk-type-c.c
+++ b/drivers/extcon/extcon-rtk-type-c.c
@@ -1369,6 +1369,8 @@ static int extcon_rtk_type_c_probe(struct platform_device *pdev)
}
type_c->type_c_cfg = devm_kzalloc(dev, sizeof(*type_c_cfg), GFP_KERNEL);
+ if (!type_c->type_c_cfg)
+ return -ENOMEM;
memcpy(type_c->type_c_cfg, type_c_cfg, sizeof(*type_c_cfg));
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index a99fe35f1f0d..ec3e21ad2025 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -988,7 +988,7 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
-static int compare_configuration_rom(struct device *dev, void *data)
+static int compare_configuration_rom(struct device *dev, const void *data)
{
const struct fw_device *old = fw_device(dev);
const u32 *config_rom = data;
@@ -1039,7 +1039,7 @@ static void fw_device_init(struct work_struct *work)
//
// serialize config_rom access.
scoped_guard(rwsem_read, &fw_device_rwsem) {
- found = device_find_child(card->device, (void *)device->config_rom,
+ found = device_find_child(card->device, device->config_rom,
compare_configuration_rom);
}
if (found) {
diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
index 2f123c6b0a16..97478a96d1c9 100644
--- a/drivers/firewire/device-attribute-test.c
+++ b/drivers/firewire/device-attribute-test.c
@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c02aed11b590..edaedd156a6d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3301,8 +3301,7 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
}
}
-#ifdef CONFIG_PM
-static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
@@ -3319,7 +3318,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
-#endif
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
@@ -3888,39 +3886,25 @@ static void pci_remove(struct pci_dev *dev)
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused pci_suspend(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
software_reset(ohci);
- err = pci_save_state(dev);
- if (err) {
- ohci_err(ohci, "pci_save_state failed\n");
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
- pmac_ohci_off(dev);
+ pmac_ohci_off(pdev);
return 0;
}
-static int pci_resume(struct pci_dev *dev)
+
+static int __maybe_unused pci_resume(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
int err;
- pmac_ohci_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- ohci_err(ohci, "pci_enable_device failed\n");
- return err;
- }
+ pmac_ohci_on(pdev);
/* Some systems don't setup GUID register on resume from ram */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
@@ -3937,7 +3921,6 @@ static int pci_resume(struct pci_dev *dev)
return 0;
}
-#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
@@ -3946,15 +3929,14 @@ static const struct pci_device_id pci_table[] = {
MODULE_DEVICE_TABLE(pci, pci_table);
+static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
+
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
-#ifdef CONFIG_PM
- .resume = pci_resume,
- .suspend = pci_suspend,
-#endif
+ .driver.pm = &pci_pm_ops,
};
static int __init fw_ohci_init(void)
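[Annotation] The conversion works because the PCI core performs pci_save_state()/pci_set_power_state() and the restore/enable dance around dev_pm_ops callbacks, so only device-specific work remains in the driver. A hedged sketch of the pattern, with illustrative names:

static int __maybe_unused example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	example_quiesce(priv);	/* hypothetical device-side quiesce only */
	return 0;		/* PCI state save and power transition: PCI core */
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_ids,	/* device table elided */
	.driver.pm	= &example_pm_ops,
};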
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 827dee0f57dd..1a19828114cf 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1490,7 +1490,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
return retval;
}
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+static int sbp2_scsi_sdev_init(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1506,8 +1506,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int sbp2_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sbp2_scsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1590,8 +1590,8 @@ static const struct scsi_host_template scsi_driver_template = {
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
- .slave_alloc = sbp2_scsi_slave_alloc,
- .device_configure = sbp2_scsi_device_configure,
+ .sdev_init = sbp2_scsi_sdev_init,
+ .sdev_configure = sbp2_scsi_sdev_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..9f35f69e0f9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -106,7 +106,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 157172a5f2b5..a3386bf36de5 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -238,10 +238,10 @@ static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
return 0;
}
-static int scmi_match_by_id_table(struct device *dev, void *data)
+static int scmi_match_by_id_table(struct device *dev, const void *data)
{
struct scmi_device *sdev = to_scmi_dev(dev);
- struct scmi_device_id *id_table = data;
+ const struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
(id_table->name && !strcmp(sdev->name, id_table->name));
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 48b12f81141d..10ea7962323e 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -442,7 +442,7 @@ struct scmi_transport_core_operations {
*/
struct scmi_transport {
struct device *supplier;
- struct scmi_desc *desc;
+ struct scmi_desc desc;
struct scmi_transport_core_operations **core_ops;
};
@@ -468,7 +468,7 @@ static int __tag##_probe(struct platform_device *pdev) \
device_set_of_node_from_dev(&spdev->dev, dev); \
\
strans.supplier = dev; \
- strans.desc = &(__desc); \
+ memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \
strans.core_ops = &(__core_ops); \
\
ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
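[Annotation] The switch from a desc pointer to an embedded copy matters because platform_device_add_data() duplicates the blob it is given; each SCMI platform device then owns a private scmi_desc that the DT overrides in scmi_transport_setup() can modify safely, where the old pointer would have let every instance patch one shared descriptor. Sketch of the copy semantics relied on here:

/*	platform_device_add_data(spdev, &strans, sizeof(strans))
 * duplicates 'strans' (including the embedded desc), and
 *	trans = dev_get_platdata(dev)
 * in scmi_transport_setup() retrieves that per-device copy.
 */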
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1b5fb2c4ce86..60050da54bf2 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
@@ -43,6 +44,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
+#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
+
static DEFINE_IDA(scmi_id);
static DEFINE_XARRAY(scmi_protocols);
@@ -276,6 +279,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
}
static const struct scmi_protocol *
+scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto;
+
+ proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ if (!proto) {
+ int ret;
+
+ pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
+ protocol_id, version->vendor_id);
+
+ /* Note that vendor_id is mandatory for vendor protocols */
+ ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
+ protocol_id, version->vendor_id);
+ if (ret) {
+ pr_warn("Problem loading module for protocol 0x%x\n",
+ protocol_id);
+ return NULL;
+ }
+
+ /* Look it up again, once the module has loaded */
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ }
+
+ if (proto)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
const struct scmi_protocol *proto = NULL;
@@ -283,10 +324,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
proto = xa_load(&scmi_protocols, protocol_id);
else
- proto = scmi_vendor_protocol_lookup(protocol_id,
- version->vendor_id,
- version->sub_vendor_id,
- version->impl_ver);
+ proto = scmi_vendor_protocol_get(protocol_id, version);
+
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -294,11 +333,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
- if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
- pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
- protocol_id, proto->vendor_id ?: "",
- proto->sub_vendor_id ?: "", proto->impl_ver);
-
return proto;
}
@@ -366,7 +400,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return ret;
}
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+ pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
+ proto->id, proto->vendor_id, proto->sub_vendor_id,
+ proto->impl_ver);
return 0;
}
@@ -3028,7 +3064,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
int ret;
trans = dev_get_platdata(dev);
- if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ if (!trans || !trans->supplier || !trans->core_ops)
return NULL;
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
@@ -3043,33 +3079,33 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
- &trans->desc->max_rx_timeout_ms);
+ &trans->desc.max_rx_timeout_ms);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
- &trans->desc->max_msg_size);
+ &trans->desc.max_msg_size);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg",
- &trans->desc->max_msg);
+ &trans->desc.max_msg);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg DT property.\n");
dev_info(dev,
"SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
- trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
- trans->desc->max_msg);
+ trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
+ trans->desc.max_msg);
/* System wide atomic threshold for atomic ops .. if any */
if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
- &trans->desc->atomic_threshold))
+ &trans->desc.atomic_threshold))
dev_info(dev,
"SCMI System wide atomic threshold set to %u us\n",
- trans->desc->atomic_threshold);
+ trans->desc.atomic_threshold);
- return trans->desc;
+ return &trans->desc;
}
static int scmi_probe(struct platform_device *pdev)
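[Annotation] The request_module() call pairs with a module alias that each vendor protocol advertises. An illustrative pairing (the protocol ID and vendor string here are made up; the i.MX protocol diffs below are real users):

/* In the vendor protocol module: */
MODULE_ALIAS("scmi-protocol-0x81-VENDOR");

/* In the core, scmi_vendor_protocol_get() effectively issues: */
request_module("scmi-protocol-0x%02x-%s", 0x81, "VENDOR");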
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 9e89a6a763da..7cc0d616b8de 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -886,10 +886,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
- u8 id;
struct scmi_raw_mode_info *raw;
struct scmi_dbg_raw_data *rd;
- const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
if (!inode->i_private)
return -ENODEV;
@@ -915,8 +913,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
}
/* Grab channel ID from debugfs entry naming if any */
- if (!kstrtou8(id_str, 16, &id))
- rd->chan_id = id;
+ /* If not set, this reassigns the 0 we already had after kzalloc() */
+ rd->chan_id = debugfs_get_aux_num(filp);
rd->raw = raw;
filp->private_data = rd;
@@ -1225,10 +1223,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
snprintf(cdir, 8, "0x%02X", channels[i]);
chd = debugfs_create_dir(cdir, top_chans);
- debugfs_create_file("message", 0600, chd, raw,
+ debugfs_create_file_aux_num("message", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_fops);
- debugfs_create_file("message_async", 0600, chd, raw,
+ debugfs_create_file_aux_num("message_async", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
}
}
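[Annotation] The aux_num pair used above stores a small integer with the debugfs file at creation and fetches it back in the fops, replacing the fragile parse of the parent directory's name:

/* At creation (per channel): */
debugfs_create_file_aux_num("message", 0600, chd, raw, channels[i],
			    &scmi_dbg_raw_mode_message_fops);

/* In open(): */
rd->chan_id = debugfs_get_aux_num(filp);	/* 0 if none was set */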
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index b66df2981456..bd041c99b92b 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -378,6 +378,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
scmi_mailbox_desc, scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f632a62cfb3e..21abb571e4f2 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -301,6 +301,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "qcom,scmi-smc" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index 41aea33776a9..cb934db9b2b4 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -921,6 +921,7 @@ static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
index 17799eacf06c..aa176c1a5eef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -374,10 +374,11 @@ static const struct scmi_protocol scmi_imx_bbm = {
.ops = &scmi_imx_bbm_proto_ops,
.events = &scmi_imx_bbm_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_bbm);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI BBM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a86ab9b35953..a8915d3b4df5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
if (num > max_num)
return -EINVAL;
- ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
- 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
+ sizeof(*in) + num * sizeof(__le32), 0, &t);
if (ret)
return ret;
@@ -309,10 +309,11 @@ static const struct scmi_protocol scmi_imx_misc = {
.ops = &scmi_imx_misc_proto_ops,
.events = &scmi_imx_misc_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_misc);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI MISC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..0a883091259a 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,21 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+ depends on KUNIT && REGMAP
+ select FW_CS_DSP
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT && REGMAP
+ default KUNIT_ALL_TESTS
+ select FW_CS_DSP
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 5365e9a43000..42433c19eb30 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0) {
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
- goto out_fw;
- }
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0)
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
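
The conversions above replace asynchronous regmap I/O with plain synchronous calls: regmap_raw_write() does not return until the bus transfer has finished, so each failure is reported at the offending block and the trailing regmap_async_complete() barrier becomes redundant. A minimal sketch of the resulting download pattern, with struct fw_block as a hypothetical stand-in for the parsed wmfw regions:

#include <linux/regmap.h>

struct fw_block {
	unsigned int reg;	/* destination register address */
	const void *data;	/* payload */
	size_t len;		/* payload length in bytes */
};

static int download_blocks(struct regmap *map,
			   const struct fw_block *blk, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		/* Synchronous write: errors surface immediately */
		ret = regmap_raw_write(map, blk[i].reg,
				       blk[i].data, blk[i].len);
		if (ret)
			return ret;
	}

	return 0;	/* no regmap_async_complete() needed */
}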
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..49d84f7e59e6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, strlen(info)); /* the kzalloc'd buffer supplies the zero padding */
+ info = tmp;
+ }
+
+ cs_dsp_mock_bin_add_raw_block(builder, 0, 0, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
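
A sketch of how a test case might drive this builder end to end (the algorithm ID, versions and payload below are arbitrary example values, and the test is assumed to have stored its struct cs_dsp_test pointer in test->priv):

static void example_bin_usage(struct kunit *test)
{
	struct cs_dsp_test *priv = test->priv;
	struct cs_dsp_mock_bin_builder *bin;
	struct firmware *fw;
	static const u8 payload[8];	/* dummy, zero-filled patch data */

	/* bin format version 1, firmware version 1.2.3 */
	bin = cs_dsp_mock_bin_init(priv, 1, 0x010203);

	/* One patch block targeting YM of an example algorithm */
	cs_dsp_mock_bin_add_patch(bin, 0xfafa, 0x100, WMFW_ADSP2_YM, 0,
				  payload, sizeof(payload));

	/* Wrap the blob so it can be passed to cs_dsp as a bin file */
	fw = cs_dsp_mock_bin_get_firmware(bin);
	KUNIT_EXPECT_GT(test, fw->size, 0);
}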
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..161272e47bda
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* XM */
+ 0x2000, /* YM */
+ 0x2000, /* ZM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
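
Combined with cs_dsp_mock_base_addr_for_mem(), this gives the standard recipe for turning an unpacked DSP word offset into a register address; a sketch:

/* Sketch: register address of unpacked XM word 'word_offset' */
static unsigned int example_xm_word_to_reg(struct cs_dsp_test *priv,
					   unsigned int word_offset)
{
	return cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
	       (word_offset *
		cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
}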
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
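
For example, inside a test case (a quick check of the 3-registers-per-4-words arithmetic):

KUNIT_EXPECT_EQ(test, cs_dsp_mock_num_dsp_words_to_num_packed_regs(12), 9);
KUNIT_EXPECT_EQ(test, cs_dsp_mock_num_dsp_words_to_num_packed_regs(5), 3); /* rounds down */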
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_id_hdr adsp2;
+ struct wmfw_v3_id_hdr halo;
+ } hdr;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
+ return be32_to_cpu(hdr.adsp2.ver);
+ case WMFW_HALO:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
+ return be32_to_cpu(hdr.halo.ver);
+ default:
+ KUNIT_FAIL(priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
+ "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ regmap_raw_read(priv->dsp->regmap,
+ xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs);
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+ /* Assume we're starting with a bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+ header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
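
A sketch of typical use, with arbitrary example values, field names taken from the struct cs_dsp_mock_alg_def usage above, and the test->priv wiring assumed:

static void example_xm_header(struct kunit *test)
{
	struct cs_dsp_test *priv = test->priv;
	static const struct cs_dsp_mock_alg_def algs[] = {
		{ .id = 0xfafa, .ver = 0x100001,
		  .xm_size_words = 16, .ym_size_words = 8 },
	};
	struct cs_dsp_mock_xm_header *hdr;

	hdr = cs_dsp_create_mock_xm_header(priv, algs, ARRAY_SIZE(algs));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdr);

	/* Make the header visible to cs_dsp via the mock regmap */
	KUNIT_ASSERT_EQ(test, 0, cs_dsp_mock_xm_header_write_to_regmap(hdr));
}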
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0xffe30, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0x1130, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+ { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true the DSP system regs will be dropped from
+ * the cache before checking for dirty.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Returns: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success the priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
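
Taken together, the cache-only regmap and the dirty check form the core verification pattern of these tests; a minimal sketch inside a test case, where expected_reg and expected_len_bytes are hypothetical values describing the region the firmware should have written:

/* Drop everything the download was expected to write... */
cs_dsp_mock_regmap_drop_bytes(priv, expected_reg, expected_len_bytes);

/* ...then any remaining dirty cache entry is an unexpected write */
KUNIT_EXPECT_FALSE(priv->test, cs_dsp_mock_regmap_is_dirty(priv, true));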
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5a3ac03ac37f
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for wmfw file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, strlen(info)); /* the kzalloc'd buffer supplies the zero padding */
+ info = tmp;
+ }
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+ bytes_needed = sizeof(__le32) * 2; /* type, offset and size */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32) * 2; /* flags, type and length */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
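+
+/*
+ * For reference, the wmfw v3 coefficient descriptor emitted by the default
+ * case above has this layout (a sketch inferred from the builder code, not
+ * a normative description of the wmfw format):
+ *
+ *   __le32  offset_dsp_words | (mem_type << 16)
+ *   __le32  size of the remainder of the descriptor
+ *   shortname   (u8 length + bytes, padded to a __le32 boundary)
+ *   fullname    (u8 length + bytes, padded to a __le32 boundary)
+ *   description (__le16 length + bytes, padded to a __le32 boundary)
+ *   __le32  ctl type | (flags << 16)
+ *   __le32  control length in bytes
+ */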
+
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version, or a negative value to
+ *	use the default version for the target core.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+	/* If the format version isn't given, use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
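+
+/*
+ * Typical usage of this builder, as a minimal sketch. See the declarations
+ * in cs_dsp_test_utils.h for the exact signatures; the get-firmware helper
+ * named here is assumed to mirror the bin builder's equivalent:
+ *
+ *   builder = cs_dsp_mock_wmfw_init(priv, -1);  (negative = default version)
+ *   cs_dsp_mock_wmfw_start_alg_info_block(builder, alg_id, "ALG", "desc");
+ *   cs_dsp_mock_wmfw_add_coeff_desc(builder, &coeff_def);
+ *   cs_dsp_mock_wmfw_end_alg_info_block(builder);
+ *   wmfw = cs_dsp_mock_wmfw_get_firmware(builder);
+ */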
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..1e161bbc5b4a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create an XM header with an algorithm list in the cached regmap.
+ * 3) Create a dummy wmfw file to satisfy cs_dsp.
+ * 4) Create bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Readback the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 7) All the registers that are expected to have been written are dropped from
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP, pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this, the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address differs from that of unpacked registers.
+ * Packed registers can only be accessed as a group of
+ * multiple registers, therefore can only read/write a group
+ * of multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
+
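+/*
+ * Illustrative example of the two schemes on a Halo Core DSP, assuming the
+ * usual 24-bit packed DSP word and 32-bit registers:
+ *
+ * unpacked: DSP word N lives at mem base + (N * 4); one word per 32-bit
+ *           register.
+ * packed:   4 DSP words (96 bits) fill exactly 3 32-bit registers, so a
+ *           packed-aligned word N starts at mem base + ((N * 3) / 4) * 4.
+ *           E.g. word 8 is packed register 6, byte address base + 24.
+ */
+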
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
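+/*
+ * Each test case is parameterized by the memory region to patch, the word
+ * offset within the target algorithm's region, and the index of the mock
+ * algorithm to patch.
+ */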
+struct bin_test_param {
+ const char *name;
+ int mem_type;
+ unsigned int offset_words;
+ int alg_idx;
+};
+
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
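+
+/*
+ * The mock XM header is built from these definitions, so the base address
+ * of each algorithm can be recovered in the tests below with
+ * cs_dsp_mock_xm_header_get_alg_base_in_words().
+ */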
+
+/*
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ * There are 3 registers for every 4 packed words.
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
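+
+/*
+ * E.g. 10 DSP words -> (10 * 3) / 4 = 7: assuming 24-bit packed words,
+ * ten words are 240 bits, which is 7.5 32-bit registers, rounded down to 7.
+ */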
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of consecutive
+ * words, but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+ /* Add words to XM, YM and ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
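+	/*
+	 * E.g. if alg_base_words + offset_words == 10 the patch starts at
+	 * word 12, which is packed register (12 * 3) / 4 = 9 (illustrative
+	 * values only).
+	 */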
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
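+ /*
+ * Assuming the four-words-in-three-registers packing, each step of
+ * 4 DSP words advances 3 packed registers, so consecutive payloads
+ * land 12 bytes apart.
+ */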
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /* For each algorithm patch one DSP word to a value from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+ * For each algorithm index in alg_order[] patch one DSP word in
+ * that algorithm to a value from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that are on a packed boundary
+ * are written as a packed block. Offsets that are not on a packed boundary
+ * are written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+ * If the offset is on a packed boundary use a packed payload,
+ * else use an unpacked word
+ */
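+ /*
+ * For example, hypothetically taking alg_base_words == 0: offset 68
+ * lands on a packed boundary (68 % 4 == 0) and is written as a
+ * packed block, whereas offset 58 (58 % 4 == 2) is written as a
+ * single unpacked word.
+ */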
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+ * Read back each packed block that should have been written and
+ * check that it matches the corresponding entry in payload->packed[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+ * Read back each unpacked word that should have been written and
+ * check that it matches the corresponding entry in payload->unpacked[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Bin file with name and multiple info blocks */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
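+ /*
+ * strlcat() returns the total length of the string it tried to
+ * create, so this loop stops once the 512-byte buffer is full.
+ */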
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/* Parameterize on choice of packed XM or YM with a range of word offsets */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
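+
+/*
+ * The generated case descriptions are "<region>@<offset>", e.g.
+ * something like "XM@4" (the exact region string comes from
+ * cs_dsp_mem_region_name()).
+ */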
+
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory tests */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..5dcf62f19faf
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+
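+ /*
+ * A valid bin starts with the magic marker "WMDR"; each write below
+ * corrupts part or all of it, so every load should fail.
+ */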
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate-limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
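+ /* Create a bin builder with the firmware version reported by the mock XM header */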
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
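+ /* Run and stop a second time to check the callbacks fire on every cycle */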
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
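+ /* ctl_list is head-inserted, so walking in reverse visits controls in add order */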
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+ /* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..83386cc978e3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Memory allocated for control cache must be large enough.
+ * This creates multiple controls of different sizes so only works on
+ * wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled so should be
+ * readable through the control.
+ */
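+ /* Fill readback with junk first so any zeros must have come from the cache */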
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmware with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmware with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
+ def.length_bytes = 8;
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
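+ /*
+ * With the backing registers dropped from the regmap cache, a read
+ * that went to the registers rather than the control cache could
+ * not return the original values.
+ */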
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the DSP has been powered-up and
+ * then powered-down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, then the DSP has been powered-down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
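+ /* Queue a cleanup action so the firmware is stopped even if the test exits early */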
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data in the cache, not from the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Change the values in the registers backing the control then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Control should read back the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
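+ /* Releasing the action runs _cs_dsp_stop_wrapper() now rather than at cleanup */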
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This is for backwards compatibility with old firmware versions)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
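+ /*
+ * The registers again hold the initial zeros, so matching
+ * new_reg_vals below shows the read comes from the cache.
+ */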
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
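+ /* Zeroed initial content means the random data written later reads as a change */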
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control; it should be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
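+ /* A return value of 1 indicates the control value changed */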
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * If the control is write-only, the cache will have been zero-initialized,
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control; cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * If the control is write-only, the cache will have been zero-initialized,
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control; cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started, stopped, and then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP and start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop firmware and zero the registers backing the control */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+	/* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+	/* Write new data to the control; it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power-down DSP then power-up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+	/* Download the firmware again; the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run,
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+	KUNIT_EXPECT_TRUE(test, ctl->set);	/* cached value still flagged for write-out */
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+	/* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+	local = kunit_kzalloc(test, sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
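+
+/*
+ * For example, the first all_pop_varying_len_cases entry below is
+ * described as something like "alg:0xfafa YM@1 len:4 flags:0"; the
+ * exact region string comes from cs_dsp_mem_region_name().
+ */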
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
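+
+/*
+ * KUNIT_ARRAY_PARAM() emits an all_pop_varying_len_gen_params()
+ * generator (and likewise for the arrays below), which the
+ * KUNIT_CASE_PARAM() entries in the test case tables pass as their
+ * parameter source.
+ */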
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+	  .flags = 0,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+	  .flags = 0,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile write-only control of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
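+
+/*
+ * The V3 table below has no ZM parameter sets: wmfw V3 is only used
+ * with Halo core DSPs here, and Halo does not have a ZM region.
+ */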
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..cb90964740ea
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
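+/*
+ * Wrap put_device() and cs_dsp_remove() so they can be registered as
+ * KUnit deferred actions and run automatically when a test case exits.
+ */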
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
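+
+/*
+ * Each test case takes a local copy of this template and overrides
+ * only the fields it is exercising, for example:
+ *
+ *   struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ *   def.fullname = "Q";
+ */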
+
+/* Algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names; the name field in the coefficient entry
+ * should be ignored. Test with a maximum length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *name;
+
+ name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+ def.fullname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Short name from coeff descriptor should be used as control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a maximum length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char *name;
+ struct firmware *wmfw;
+
+	name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+
+ def.shortname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a variable
+ * length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a variable
+ * length field, so it affects the position of subsequent fields.
+ * Test with a maximum length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname;
+
+	fullname = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a variable
+ * length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The description in the coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent
+ * fields. Test with a maximum-length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *description;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * The full name and description in the coeff descriptor are
+ * variable-length fields, so they affect the position of subsequent
+ * fields. Test with a maximum-length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname, *description;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
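+ * For example (an illustrative sketch, inferred from the "4 bytes
+ * including the length byte" rule noted in the tests below, not from
+ * the parser itself): a 5-character name plus its length byte
+ * occupies 6 bytes and would be padded with 2 filler bytes so that
+ * the following field starts on a 4-byte boundary.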
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to lookup various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmware files
+ * and test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the value of the (alg id, memory type) tuple is parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This creates multiple algorithms with one control each, which will
+ * work on both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This tests >=V2 firmware that can have multiple named controls in
+ * the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames, the full length of both strings is
+ * considered, not only the characters of the shorter string, so
+ * that "LEFT" is not the same as "LEFT2".
+ * This is specifically to test for the bug that was fixed by commit:
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check")
+ */
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
+
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+/* Note: some control types mandate specific flag settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOSTBUFFER must be system + volatile + readable or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE},
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
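+/* Adapt cleanup functions to the KUnit deferred-action calling convention */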
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
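+/*
+ * Mock algorithm descriptors used to build the test wmfw. They cover
+ * multiple algorithm IDs and XM/YM/ZM regions of varying base and size.
+ */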
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
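+/* Map an algorithm ID to its index in cs_dsp_ctl_rw_test_algs[] */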
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
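+/* Return the DSP-word base of the given memory region for an algorithm */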
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
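+/*
+ * Build a second, minimal wmfw containing only an XM header. Used by the
+ * "not current firmware" tests to reboot the DSP with different firmware.
+ */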
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
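+/*
+ * The tests below share a common skeleton: describe a coefficient control
+ * in the mock wmfw, power up the DSP with that firmware, then exercise
+ * cs_dsp_coeff_lock_and_read_ctrl()/cs_dsp_coeff_lock_and_write_ctrl()
+ * in the DSP state under test.
+ */
+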
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/*
+	 * Write new data to the control; it should be written to the registers,
+	 * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate
+	 * that the control content changed.
+	 */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/* Read the control; it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+	/*
+	 * Change the register content and read the control; it should return
+	 * the new register content.
+	 */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
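+
+/*
+ * For reference only (not part of the tests): a typical caller would look
+ * up a control by name instead of walking ctl_list. Sketch, with "NAME",
+ * alg_id, buf and len as placeholders:
+ *
+ *	ctl = cs_dsp_get_ctl(dsp, "NAME", WMFW_CTL_TYPE_BYTES, alg_id);
+ *	if (ctl)
+ *		ret = cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, buf, len);
+ */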
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+	/* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+	/* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+	/* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+	/* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+	/* There should not have been any writes to the registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+	/* There should not have been any writes to the registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+	/* There should not have been any writes to the registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+	/* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+	/* There should not have been any writes to the registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+	/* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+	/* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+	/* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+	/* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+	/* There should not have been any writes to the registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
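+	/* Seek offsets are in DSP words; lengths are in bytes */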
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
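+	/* Only the words at and after the seek position should be rewritten */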
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ seek_words = def.length_bytes / sizeof(u32);
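+	/* A seek of the whole control length in words is one word past the end */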
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
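+	/* Request one byte more than the control length */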
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ seek_words = def.length_bytes / sizeof(u32);
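+	/* A seek of the whole control length in words is one word past the end */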
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
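+	/* Attempt to write one byte more than the control length */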
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+	 * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag only indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
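+	/* Halo firmware uses wmfw format version 3 */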
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
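+/* KUNIT_ARRAY_PARAM() emits the all_pop_varying_len_gen_params generator used in the case lists */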
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Readback the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 5) All the registers that are expected to have been written are dropped from
+ * the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty there have been unexpected writes.
+ */
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/*
+ * wmfw that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+	/* The XM header payload was added to the wmfw by the test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload spanning param->num_blocks register blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write several smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several smallest possible payloads of the given memory type
+ * in reverse address order
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads, each spanning param->num_blocks register blocks.
+ * The payloads are not in address order and collectively do not patch
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add multiple payloads at "random", non-overlapping locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
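+ /*
+ * Illustrative arithmetic: with 4-byte registers and a 16-byte
+ * payload, payload i starts random_offsets[i] * 4 registers past the
+ * base of the region, plus any XM header offset.
+ */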
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
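+ /*
+ * The whole PM region is likely too large for kmalloc, so the buffers
+ * are vmalloc'd and freed through the _vfree_wrapper deferred action.
+ */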
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
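+ /*
+ * As an example of the encoding above: { WMFW_ADSP2_PM, 11, 60 }
+ * writes a payload of 60 minimum-sized register blocks starting at an
+ * offset of 11 such blocks into PM.
+ */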
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
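+ /*
+ * For example (sizes illustrative only): a 22-word XM header with 4
+ * DSP words per packed block rounds mem_offset_dsp_words up from 22
+ * to 24, the next packed-block boundary.
+ */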
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
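+ /*
+ * Illustrative arithmetic: if each unpacked DSP word occupies one
+ * register, a packed payload of 12 DSP words leaves the trailing
+ * unpacked word 12 registers past the unpacked-space equivalent of
+ * the packed payload's start.
+ */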
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ /* Add a bigger info block then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
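+ /* infobuf is now filled to capacity with repeated copies of the string */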
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so
+ * create a dummy one that tests can use, extracted to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
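+ /* 0xf5, 0xc0 and 0x33 are arbitrary block types that cs_dsp doesn't handle */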
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
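+ /* "WMDR" is a valid magic, but for a coefficient (bin) file, not a wmfw */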
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
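+ /* Every truncated header length must be rejected */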
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
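+ /* Header lengths that run past the end of the file must also be rejected */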
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
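+ /* None of these core codes match the core type of the DSP under test */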
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
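+ /* A wmfw containing no blocks is exactly the size of the file header */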
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
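+ /* All of these lengths run past the end of the file */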
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(alg_data->name), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp's string handling walks past the end of the alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set description string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
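+ /* Layout is [ alg_id ][name_len]abc[desc_len]de so ncoeff is at [3] */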
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
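+ /* The coefficient descriptor follows [ alg_id ][name]abc[desc]de[ ncoeff ] */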
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
+ coeff = (void *)alg_data->data;
+
+ /* Write a string to the coeff name that overflows the array */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow coeff->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(coeff->name), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+ /*
+ * V1 controls do not have names, but cs_dsp parses the name
+ * field. It should load the file successfully and create the
+ * control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp's string handling walks past the end of the coeff name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
+ shortlen = round_up((le32_to_cpu(coeff[2]) & 0xff) + sizeof(u8), sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
+
+ /* Fullname increases in blocks of __le32 so increase past the current __le32 */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up((le32_to_cpu(coeff[2]) & 0xff) + sizeof(u8), sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up((le32_to_cpu(fullname[0]) & 0xff) + sizeof(u8), sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+ /* Description increases in blocks of __le32 so increase past the current __le32 */
+ description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+ description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
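
The KUNIT_ARRAY_PARAM()/KUNIT_CASE_PARAM() pairing used in the tables above is a generic KUnit idiom: KUNIT_ARRAY_PARAM() turns a const array of parameter structs into a generated name##_gen_params() iterator, and KUNIT_CASE_PARAM() binds that iterator to a test function so each array entry becomes a separate test invocation. A minimal self-contained sketch, with hypothetical my_* names rather than anything from this patch:

#include <kunit/test.h>

struct my_param {
	int value;
};

static const struct my_param my_cases[] = {
	{ .value = 1 },
	{ .value = 2 },
};

static void my_case_desc(const struct my_param *p, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value:%d", p->value);
}

/* Generates my_gen_params(), the iterator KUNIT_CASE_PARAM() consumes */
KUNIT_ARRAY_PARAM(my, my_cases, my_case_desc);

static void my_test(struct kunit *test)
{
	const struct my_param *p = test->param_value;

	KUNIT_EXPECT_GT(test, p->value, 0);
}

static struct kunit_case my_test_cases[] = {
	KUNIT_CASE_PARAM(my_test, my_gen_params),
	{ } /* terminator */
};

static struct kunit_suite my_suite = {
	.name = "my_param_demo",
	.test_cases = my_test_cases,
};
kunit_test_suite(my_suite);

The desc callback is what puts a human-readable parameter summary (here block_type:%#x in the tests above) into each reported test name.
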
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
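
The quoted-string forms of MODULE_IMPORT_NS() follow the current symbol-namespace API, where namespaces are plain string literals. The exporting side of the contract looks roughly like the sketch below; demo_helper is an illustrative symbol, not one from this patch:

#include <linux/module.h>

/* Library module side: publish a helper into a restricted namespace.
 * demo_helper is a hypothetical symbol, not one from this patch. */
int demo_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(demo_helper, "FW_CS_DSP_KUNIT_TEST_UTILS");

Without the matching MODULE_IMPORT_NS() in the consumer, modpost refuses to link against the namespaced symbol at module build time.
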
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index fa9c1c3bf168..f0a63d09d3c4 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
index 438ed9eff6d0..3949d7b5e808 100644
--- a/drivers/firmware/efi/cper-x86.c
+++ b/drivers/firmware/efi/cper-x86.c
@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
- int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 937be269fee8..13ea141c0def 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
return 0;
}
-static int __init match_pci_dev(struct device *dev, void *data)
+static int __init match_pci_dev(struct device *dev, const void *data)
{
- unsigned int devfn = *(unsigned int *)data;
+ unsigned int devfn = *(const unsigned int *)data;
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 60c64b81d2c3..7309394b8fc9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -148,9 +148,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_X86))
- str = efi_systab_show_arch(str);
-
return str - buf;
}
@@ -937,13 +934,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
- EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
+ EFI_MEMORY_RUNTIME))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
attr & EFI_MEMORY_SP ? "SP" : "",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ed4e8ddbe76a..1141cd06011f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index c0c81ca4237e..fd6dc790c5a8 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len;
+ char *buf __free(efi_pool) = NULL;
efi_status_t status;
- char *str, *buf;
+ size_t len;
+ char *str;
if (!cmdline)
return EFI_SUCCESS;
@@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_parse_option_graphics(val + strlen("efifb:"));
}
}
- efi_bs_call(free_pool, buf);
return EFI_SUCCESS;
}
@@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
u64, const union efistub_event *);
struct { u32 hash_log_extend_event; } mixed_mode;
} method;
- struct efistub_measured_event *evt;
+ struct efistub_measured_event *evt __free(efi_pool) = NULL;
int size = struct_size(evt, tagged_event.tagged_event_data,
events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
@@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
if (status == EFI_SUCCESS)
return EFI_SUCCESS;
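
The conversions in this hunk (and throughout the series) rely on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() names a cleanup policy, and __free() arms it on a local so every return path releases the resource without explicit error-unwind labels. A minimal sketch under those assumptions, with a hypothetical policy name:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Hypothetical policy; the kernel already ships DEFINE_FREE(kfree, ...). */
DEFINE_FREE(demo_buf, void *, if (_T) kfree(_T))

static int demo(size_t len)
{
	void *buf __free(demo_buf) = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (len < 16)
		return -EINVAL;	/* buf is freed here automatically */

	return 0;		/* ...and freed here too */
}
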
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 382b54f40603..874f63b4a383 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,6 +10,7 @@
*/
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si)
static struct screen_info *setup_graphics(void)
{
- efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- struct screen_info *si = NULL;
+ struct screen_info *si, tmp = {};
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info();
- if (!si)
- return NULL;
- status = efi_setup_gop(si, &gop_proto, size);
- if (status != EFI_SUCCESS) {
- free_screen_info(si);
- return NULL;
- }
- }
+ if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ return NULL;
+
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+
+ *si = tmp;
return si;
}
@@ -112,8 +104,8 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
+ char *cmdline __free(efi_pool) = NULL;
efi_status_t status;
- char *cmdline;
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -128,25 +120,24 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
status = efi_parse_options(cmdline);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse EFI load options\n");
+ return status;
+ }
}
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
cmdline[0] == 0) {
status = efi_parse_options(CONFIG_CMDLINE);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse built-in command line\n");
+ return status;
+ }
}
- *cmdline_ptr = cmdline;
+ *cmdline_ptr = no_free_ptr(cmdline);
return EFI_SUCCESS;
-
-fail_free_cmdline:
- efi_err("Failed to parse options\n");
- efi_bs_call(free_pool, cmdline);
- return status;
}
efi_status_t efi_stub_common(efi_handle_t handle,
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 76e44c185f29..d96d4494070d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -4,6 +4,7 @@
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/kern_levels.h>
@@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
+#define for_each_efi_handle(handle, array, num) \
+ for (int __i = 0; __i < (num) && \
+ ((handle = efi_get_handle_at((array), __i)) || true); \
+ __i++)
static inline
void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
-#define EFI_MMAP_NR_SLACK_SLOTS 8
+#define EFI_MMAP_NR_SLACK_SLOTS 32
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
@@ -314,7 +314,9 @@ union efi_boot_services {
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
- void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t **);
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
@@ -1053,6 +1055,7 @@ void efi_puts(const char *str);
__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T));
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
@@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+efi_status_t efi_setup_gop(struct screen_info *si);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index ea5da307d542..3785fb4986b4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option)
static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
unsigned long info_size;
-
u32 max_mode, cur_mode;
+ efi_status_t status;
int pf;
mode = efi_table_attr(gop, mode);
@@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cur_mode;
}
- status = efi_call_proto(gop, query_mode, cmdline.mode,
- &info_size, &info);
+ status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info);
if (status != EFI_SUCCESS) {
efi_err("Couldn't get mode information\n");
return cur_mode;
}
pf = info->pixel_format;
-
- efi_bs_call(free_pool, info);
-
if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
efi_err("Invalid PixelFormat\n");
return cur_mode;
@@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cmdline.mode;
}
+static u32 choose_mode(efi_graphics_output_protocol_t *gop,
+ bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *),
+ void *ctx)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
+
+ for (u32 m = 0; m < max_mode; m++) {
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
+ unsigned long info_size;
+ efi_status_t status;
+
+ status = efi_call_proto(gop, query_mode, m, &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (match(info, m, ctx))
+ return m;
+ }
+ return (unsigned long)ctx;
+}
+
static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
{
if (pixel_format == PIXEL_BIT_MASK) {
@@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
return 32;
}
-static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+static bool match_res(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
{
- efi_status_t status;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- mode = efi_table_attr(gop, mode);
+ return cmdline.res.width == info->horizontal_resolution &&
+ cmdline.res.height == info->vertical_resolution &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi));
+}
- cur_mode = efi_table_attr(mode, mode);
- info = efi_table_attr(mode, info);
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ if (match_res(efi_table_attr(mode, info), cur_mode, NULL))
return cur_mode;
- max_mode = efi_table_attr(mode, max_mode);
-
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return choose_mode(gop, match_res, (void *)cur_mode);
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+struct match {
+ u32 mode;
+ u32 area;
+ u8 depth;
+};
- efi_bs_call(free_pool, info);
+static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ u32 area = info->horizontal_resolution * info->vertical_resolution;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
+ u8 depth = pixel_bpp(pf, pi);
+ struct match *m = ctx;
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
- return m;
- }
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- efi_err("Couldn't find requested mode\n");
+ if (area > m->area || (area == m->area && depth > m->depth))
+ *m = (struct match){ mode, area, depth };
- return cur_mode;
+ return false;
}
static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode, best_mode, area;
- u8 depth;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h, a;
- u8 d;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ struct match match = {};
- info = efi_table_attr(mode, info);
-
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- best_mode = cur_mode;
- area = w * h;
- depth = pixel_bpp(pf, pi);
+ choose_mode(gop, match_auto, &match);
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return match.mode;
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ u32 cur_mode = (unsigned long)ctx;
+ int pf = info->pixel_format;
+ const char *dstr;
+ u8 depth = 0;
+ bool valid;
- efi_bs_call(free_pool, info);
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- a = w * h;
- if (a < area)
- continue;
- d = pixel_bpp(pf, pi);
- if (a > area || d > depth) {
- best_mode = m;
- area = a;
- depth = d;
- }
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ depth = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
- return best_mode;
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ mode,
+ (mode == cur_mode) ? '*' : ' ',
+ !valid ? '-' : ' ',
+ info->horizontal_resolution,
+ info->vertical_resolution,
+ dstr, depth);
+
+ return false;
}
static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
- u8 d;
- const char *dstr;
- bool valid;
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
efi_input_key_t key;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ efi_status_t status;
efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
efi_puts(" * = current mode\n"
" - = unusable mode\n");
- for (m = 0; m < max_mode; m++) {
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- efi_bs_call(free_pool, info);
-
- valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- d = 0;
- switch (pf) {
- case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
- dstr = "rgb";
- break;
- case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
- dstr = "bgr";
- break;
- case PIXEL_BIT_MASK:
- dstr = "";
- d = pixel_bpp(pf, pi);
- break;
- case PIXEL_BLT_ONLY:
- dstr = "blt";
- break;
- default:
- dstr = "xxx";
- break;
- }
-
- efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
- m,
- m == cur_mode ? '*' : ' ',
- !valid ? '-' : ' ',
- w, h, dstr, d);
- }
+ choose_mode(gop, match_list, (void *)cur_mode);
efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
@@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_graphics_output_protocol_t *
-find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *find_gop(unsigned long num,
+ const efi_handle_t handles[])
{
efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
- int i;
first_gop = NULL;
- for_each_efi_handle(h, handles, size, i) {
+ for_each_efi_handle(h, handles, num) {
efi_status_t status;
efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
-
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
void *dummy = NULL;
- status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID,
+ (void **)&gop);
if (status != EFI_SUCCESS)
continue;
@@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
if (status == EFI_SUCCESS)
return gop;
@@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
return first_gop;
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+efi_status_t efi_setup_gop(struct screen_info *si)
{
- efi_graphics_output_protocol_t *gop;
+ efi_handle_t *handles __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
+ efi_graphics_output_protocol_t *gop;
+ efi_status_t status;
+ unsigned long num;
- gop = find_gop(proto, size, handles);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num,
+ &handles);
+ if (status != EFI_SUCCESS)
+ return status;
- /* Did we find any GOPs? */
+ gop = find_gop(num, handles);
if (!gop)
return EFI_NOT_FOUND;
@@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
return EFI_SUCCESS;
}
-
-/*
- * See if we have Graphics Output Protocol
- */
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
-{
- efi_status_t status;
- void **gop_handle = NULL;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&gop_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
- &size, gop_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- status = setup_gop(si, proto, size, gop_handle);
-
-free_handle:
- efi_bs_call(free_pool, gop_handle);
- return status;
-}
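
After this refactor all three mode-selection policies are predicates funnelled through choose_mode(), so adding a policy is one callback plus one wrapper. A hedged sketch of a hypothetical extra policy, reusing the signatures introduced above:

/* Hypothetical policy: first mode with a usable pixel format, falling
 * back to cur_mode when nothing matches (choose_mode() returns ctx). */
static bool match_first_usable(const efi_graphics_output_mode_info_t *info,
			       u32 mode, void *ctx)
{
	int pf = info->pixel_format;

	return pf != PIXEL_BLT_ONLY && pf < PIXEL_FORMAT_MAX;
}

static u32 choose_mode_first(efi_graphics_output_protocol_t *gop, u32 cur_mode)
{
	return choose_mode(gop, match_first_usable,
			   (void *)(unsigned long)cur_mode);
}
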
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 6318c40bda38..4bc963e999eb 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
*/
static bool check_image_region(u64 base, u64 size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
bool ret = false;
int map_offset;
@@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, map);
-
return ret;
}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 4f1fa302234d..9c82259eea81 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -20,10 +20,10 @@
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl)
{
+ struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL;
int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
: EFI_LOADER_DATA;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
- struct efi_boot_memmap *m, tmp;
efi_status_t status;
unsigned long size;
@@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
*/
status = efi_bs_call(install_configuration_table, &tbl_guid, m);
if (status != EFI_SUCCESS)
- goto free_map;
+ return status;
}
m->buff_size = m->map_size = size;
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
&m->desc_size, &m->desc_ver);
- if (status != EFI_SUCCESS)
- goto uninstall_table;
+ if (status != EFI_SUCCESS) {
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+ return status;
+ }
- *map = m;
+ *map = no_free_ptr(m);
return EFI_SUCCESS;
-
-uninstall_table:
- if (install_cfg_tbl)
- efi_bs_call(install_configuration_table, &tbl_guid, NULL);
-free_map:
- efi_bs_call(free_pool, m);
- return status;
}
/**
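
The *map = no_free_ptr(m) assignment above is the ownership-transfer half of the cleanup idiom: on success the pointer is handed out and the local is NULLed, so the scope-exit hook sees NULL and frees nothing. In isolation, using the stock kfree policy:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int make_buffer(void **out, size_t len)
{
	void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	*out = no_free_ptr(buf);	/* ownership moves to the caller */
	return 0;
}
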
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index 99fb25d2bcf5..1dccf77958d3 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -16,37 +16,20 @@
void efi_pci_disable_bridge_busmaster(void)
{
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long pci_handle_size = 0;
- efi_handle_t *pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
+ unsigned long pci_handle_num;
efi_handle_t handle;
efi_status_t status;
u16 class, command;
- int i;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL) {
- if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- efi_err("Failed to locate PCI I/O handles'\n");
- return;
- }
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
- (void **)&pci_handle);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &pci_handle_num, &pci_handle);
if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to locate PCI I/O handles\n");
return;
}
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, pci_handle);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to locate PCI I/O handles'\n");
- goto free_handle;
- }
-
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
unsigned long segment_nr, bus_nr, device_nr, func_nr;
@@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void)
efi_bs_call(disconnect_controller, handle, NULL, NULL);
}
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
status = efi_bs_call(handle_protocol, handle, &pci_proto,
@@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_SUCCESS)
efi_err("Failed to disable PCI busmastering\n");
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
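
locate_handle_buffer lets the firmware size and allocate the handle array itself, collapsing the old probe-size/allocate/locate-again sequence into a single call whose result the efi_pool cleanup releases. The pattern in isolation (count_pci_handles is a hypothetical helper):

static void count_pci_handles(void)
{
	efi_handle_t *handles __free(efi_pool) = NULL;
	efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
	unsigned long num;
	efi_status_t status;

	/* One call replaces probe-size, allocate, locate-again. */
	status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
			     &pci_proto, NULL, &num, &handles);
	if (status != EFI_SUCCESS)
		return;

	efi_printk("%lu PCI I/O handles\n", num);
}
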
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index c41e7b2091cd..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ return 0;
+
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return 0;
@@ -59,9 +62,9 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long alloc_min,
unsigned long alloc_max)
{
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- struct efi_boot_memmap *map;
efi_status_t status;
int map_offset;
@@ -72,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
@@ -130,7 +137,5 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, map);
-
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index d694bcfa1074..d4264bfb6dc1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,14 +23,14 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
unsigned long nr_pages;
int i;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
/*
* Enforce minimum alignment that EFI or Linux requires when
@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ continue;
+
if (efi_soft_reserve_enabled() &&
(desc->attribute & EFI_MEMORY_SP))
continue;
@@ -79,11 +82,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
if (i == map->map_size / map->desc_size)
- status = EFI_NOT_FOUND;
+ return EFI_NOT_FOUND;
- efi_bs_call(free_pool, map);
-fail:
- return status;
+ return EFI_SUCCESS;
}
/**
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 188c8000d245..863910e9eefc 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol {
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
- struct pci_setup_rom *rom = NULL;
+ struct pci_setup_rom *rom __free(efi_pool) = NULL;
efi_status_t status;
unsigned long size;
uint64_t romsize;
@@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
rom->pcilen = romsize;
- *__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->vendor\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->devid\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
&rom->device, &rom->function);
if (status != EFI_SUCCESS)
- goto free_struct;
+ return status;
memcpy(rom->romdata, romimage, romsize);
- return status;
-
-free_struct:
- efi_bs_call(free_pool, rom);
- return status;
+ *__rom = no_free_ptr(rom);
+ return EFI_SUCCESS;
}
/*
@@ -119,38 +115,23 @@ free_struct:
static void setup_efi_pci(struct boot_params *params)
{
efi_status_t status;
- void **pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long size = 0;
struct setup_data *data;
+ unsigned long num;
efi_handle_t h;
- int i;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&pci_handle);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
- return;
- }
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
- }
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &num, &pci_handle);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- for_each_efi_handle(h, pci_handle, size, i) {
+ for_each_efi_handle(h, pci_handle, num) {
efi_pci_io_protocol_t *pci = NULL;
struct pci_setup_rom *rom;
@@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params)
data = (struct setup_data *)rom;
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -405,116 +383,13 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t
-setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- efi_uga_draw_protocol_t *uga = NULL, *first_uga;
- efi_handle_t handle;
- int i;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- first_uga = NULL;
- for_each_efi_handle(handle, uga_handle, size, i) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
-
- status = efi_bs_call(handle_protocol, handle, uga_proto,
- (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- pciio = NULL;
- efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
-
- if (!width && !height)
- goto free_handle;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
-
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
-
-free_handle:
- efi_bs_call(free_pool, uga_handle);
-
- return status;
-}
-
static void setup_graphics(struct boot_params *boot_params)
{
- efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- struct screen_info *si;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- void **uga_handle = NULL;
-
- si = &boot_params->screen_info;
- memset(si, 0, sizeof(*si));
+ struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &graphics_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- status = efi_setup_gop(si, &graphics_proto, size);
-
- if (status != EFI_SUCCESS) {
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &uga_proto, NULL, &size, uga_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- setup_uga(si, &uga_proto, size);
- }
+ efi_setup_gop(si);
}
-
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
{
efi_bs_call(exit, handle, status, 0, NULL);
@@ -737,7 +612,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
__u32 nr_desc;
@@ -751,13 +626,14 @@ static efi_status_t allocate_e820(struct boot_params *params,
EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
}
- if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
- status = allocate_unaccepted_bitmap(nr_desc, map);
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return allocate_unaccepted_bitmap(nr_desc, map);
- efi_bs_call(free_pool, map);
- return status;
+ return EFI_SUCCESS;
}
struct exit_boot_struct {
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 5ed0602c2f75..208db29613c6 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
+ struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
- unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
- struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
- mokvar_entry = va + cur_offset;
- map_size_needed = cur_offset + sizeof(*mokvar_entry);
- if (map_size_needed > map_size) {
- if (va)
- early_memunmap(va, map_size);
- /*
- * Map a little more than the fixed size entry
- * header, anticipating some data. It's safe to
- * do so as long as we stay within current memory
- * descriptor.
- */
- map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
- offset_limit);
- va = early_memremap(efi.mokvar_table, map_size);
- if (!va) {
- pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
- efi.mokvar_table, map_size);
- return;
- }
- mokvar_entry = va + cur_offset;
+ if (va)
+ early_memunmap(va, sizeof(*mokvar_entry));
+ va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
+ efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ return;
}
-
+ mokvar_entry = va;
+next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
- /* Sanity check that the name is null terminated */
- size = strnlen(mokvar_entry->name,
- sizeof(mokvar_entry->name));
- if (size >= sizeof(mokvar_entry->name))
- break;
+ /* Enforce that the name is NUL terminated */
+ mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
- cur_offset = map_size_needed + mokvar_entry->data_size;
+ size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ cur_offset += size;
+
+ /*
+ * Don't bother remapping if the current entry header and the
+ * next one end on the same page.
+ */
+ next_entry = (void *)((unsigned long)mokvar_entry + size);
+ if (((((unsigned long)(mokvar_entry + 1) - 1) ^
+ ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
+ mokvar_entry = next_entry;
+ goto next;
+ }
}
if (va)
- early_memunmap(va, map_size);
+ early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
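
The goto-based fast path above skips a remap whenever the current entry header and the next one end on the same page. The test works because XOR clears every bit two addresses share, so masking with PAGE_MASK leaves zero exactly when the page numbers are equal; as a standalone helper (hypothetical name):

#include <linux/types.h>
#include <asm/page.h>

/* Page-number bits survive the XOR only when a and b differ in page,
 * so a zero result means both addresses fall on the same page. */
static inline bool same_page_addr(unsigned long a, unsigned long b)
{
	return ((a ^ b) & PAGE_MASK) == 0;
}
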
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index cc807ed35aed..1e509595ac03 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
_ret_; \
})
+#ifdef CONFIG_EFI
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
-#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(void)
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 66042160b361..773d05078e0a 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -30,7 +30,7 @@ static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
}
static ssize_t mem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -40,7 +40,7 @@ static ssize_t mem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t mem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -53,7 +53,7 @@ static ssize_t mem_write(struct file *filp, struct kobject *kobj,
memcpy(entry->mem_file_buf + pos, buf, count);
return count;
}
-static BIN_ATTR_ADMIN_RW(mem, 0);
+static const BIN_ATTR_ADMIN_RW(mem, 0);
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -79,14 +79,14 @@ static struct attribute *attrs[] = {
NULL,
};
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_mem,
NULL,
};
static const struct attribute_group cbmem_entry_group = {
.attrs = attrs,
- .bin_attrs = bin_attrs,
+ .bin_attrs_new = bin_attrs,
};
static const struct attribute_group *dev_groups[] = {
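
These hunks are part of the sysfs constification transition: the read()/write() callbacks move to read_new()/write_new(), which take a const struct bin_attribute, and groups reference the attributes through .bin_attrs_new. The shape of a converted attribute, as a standalone hedged sketch:

static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr, char *buf,
			 loff_t pos, size_t count)
{
	return 0;	/* attr->private would carry per-attribute state */
}

static const struct bin_attribute bin_attr_blob = {
	.attr = { .name = "blob", .mode = 0444 },
	.read_new = blob_read,
};

static const struct bin_attribute *const demo_bin_attrs[] = {
	&bin_attr_blob,
	NULL,
};

static const struct attribute_group demo_group = {
	.bin_attrs_new = demo_bin_attrs,
};
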
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 24e666d5c3d1..e8fb00dcaf65 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -488,7 +488,7 @@ static const struct efivar_operations efivar_ops = {
#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct gsmi_set_eventlog_param param = {
@@ -528,9 +528,9 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
}
-static struct bin_attribute eventlog_bin_attr = {
+static const struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
- .write = eventlog_write,
+ .write_new = eventlog_write,
};
static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index b9d99fe1ff0f..d957af6f9349 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -14,7 +14,7 @@
#include "memconsole.h"
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
@@ -28,7 +28,7 @@ static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
- .read = memconsole_read,
+ .read_new = memconsole_read,
};
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1749529f63d4..254ac6545d68 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -56,7 +56,7 @@ static struct vpd_section ro_vpd;
static struct vpd_section rw_vpd;
static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_attrib_info *info = bin_attr->private;
@@ -121,7 +121,7 @@ static int vpd_section_attrib_add(const u8 *key, u32 key_len,
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
info->bin_attr.size = value_len;
- info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.read_new = vpd_attrib_read;
info->bin_attr.private = info;
info->value = value;
@@ -156,7 +156,7 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
}
static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_section *sec = bin_attr->private;
@@ -201,7 +201,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->bin_attr.attr.name = sec->raw_name;
sec->bin_attr.attr.mode = 0444;
sec->bin_attr.size = size;
- sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.read_new = vpd_section_read;
sec->bin_attr.private = sec;
err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 907cd149c40a..c964f4924359 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,6 +25,7 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
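
The added of_node_put() closes a device-node refcount leak: of_parse_phandle_with_args() hands back args.np with its refcount raised, and the caller must drop it once the node has been inspected. A sketch of the balanced pattern (node_is_fast_ipc is illustrative):

#include <linux/of.h>

static bool node_is_fast_ipc(struct device_node *np)
{
	struct of_phandle_args args;
	bool fast;

	if (of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &args))
		return false;

	fast = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
	of_node_put(args.np);	/* drop the reference taken by the parser */
	return fast;
}
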
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
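
The guard prevents a shift wider than the 32-bit type when the iBFT carries a corrupt prefix length. A fully portable variant of the conversion clamps the prefix and does the arithmetic in 64 bits, roughly as below; prefix_to_mask is a hypothetical helper, not part of this fix:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Clamp out-of-range prefixes and shift in 64 bits so prefix == 0
 * does not invoke the undefined 32-bit shift by 32. */
static inline __be32 prefix_to_mask(unsigned int prefix)
{
	if (prefix >= 32)
		return cpu_to_be32(~0U);

	return cpu_to_be32(~(u32)((1ULL << (32 - prefix)) - 1));
}
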
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 2b4c2826f572..574930729ddd 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -152,7 +152,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
- struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
void *args_virt __free(qcom_tzmem) = NULL;
@@ -173,6 +172,11 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
+
+ if (!mempool)
+ return -EINVAL;
+
args_virt = qcom_tzmem_alloc(mempool,
SCM_SMC_N_EXT_ARGS * sizeof(u64),
flag);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 72bf87ddcd96..fc4d67e4c4a6 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -217,7 +217,10 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
- return __scm ? __scm->mempool : NULL;
+ if (!qcom_scm_is_available())
+ return NULL;
+
+ return __scm->mempool;
}
static enum qcom_scm_convention __get_convention(void)
@@ -1279,6 +1282,220 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
+bool qcom_scm_has_wrapped_key_support(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_DERIVE_SW_SECRET) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_GENERATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_PREPARE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_IMPORT_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
+
+/**
+ * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ * @sw_secret_size: size of the software secret to derive in bytes
+ *
+ * Derive a software secret from an ephemerally-wrapped key for software crypto
+ * operations. This is done by calling into the secure execution environment,
+ * which then calls into the hardware to unwrap and derive the secret.
+ *
+ * For more information on sw_secret, see the "Hardware-wrapped keys" section of
+ * Documentation/block/inline-encryption.rst.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ sw_secret_size,
+ GFP_KERNEL);
+ if (!sw_secret_buf)
+ return -ENOMEM;
+
+ memcpy(eph_key_buf, eph_key, eph_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[1] = eph_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
+ desc.args[3] = sw_secret_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(sw_secret, sw_secret_buf, sw_secret_size);
+
+ memzero_explicit(eph_key_buf, eph_key_size);
+ memzero_explicit(sw_secret_buf, sw_secret_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
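
[Editor's note — the helpers above wipe key material with memzero_explicit() rather than memset(). A short sketch of why, with hypothetical foo_* names: a plain memset() on a buffer about to die is a dead store the compiler may elide, while the _explicit variant adds a barrier so the wipe survives.]

#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>

static void foo_consume_secret(const u8 *s, size_t n) { /* hypothetical sink */ }

static void foo_handle_secret(void)
{
        u8 secret[32];

        get_random_bytes(secret, sizeof(secret));
        foo_consume_secret(secret, sizeof(secret));

        /* memset(secret, 0, sizeof(secret)) could be optimized away
         * here; memzero_explicit() guarantees the clear happens. */
        memzero_explicit(secret, sizeof(secret));
}
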
+
+/**
+ * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Generate a key using the built-in HW module in the SoC. The resulting key is
+ * returned wrapped with the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
+
+/**
+ * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
+ * added protection. The resulting key will only be valid for the current boot.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ memcpy(lt_key_buf, lt_key, lt_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[3] = eph_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(eph_key, eph_key_buf, eph_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ memzero_explicit(eph_key_buf, eph_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
+
+/**
+ * qcom_scm_import_ice_key() - Import key for storage encryption
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
+ * wrap the raw key using the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ raw_key_size,
+ GFP_KERNEL);
+ if (!raw_key_buf)
+ return -ENOMEM;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ memcpy(raw_key_buf, raw_key, raw_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
+ desc.args[1] = raw_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[3] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(raw_key_buf, raw_key_size);
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
+
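
[Editor's note — a hedged sketch of how the four exported calls above chain together for a wrapped-key lifecycle. The buffer sizes are hypothetical placeholders; real wrapped-key sizes are SoC-specific, as the kernel-doc notes.]

static int foo_setup_wrapped_key(void)
{
        u8 lt_key[128], eph_key[128], sw_secret[32]; /* sizes hypothetical */
        int ret;

        if (!qcom_scm_has_wrapped_key_support())
                return -EOPNOTSUPP;

        /* 1. Generate a long-term key that is never visible in the clear. */
        ret = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
        if (ret)
                return ret;

        /* 2. Re-wrap it with the per-boot ephemeral key before use. */
        ret = qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
                                       eph_key, sizeof(eph_key));
        if (ret)
                return ret;

        /* 3. Derive the software secret for crypto done outside ICE. */
        return qcom_scm_derive_sw_secret(eph_key, sizeof(eph_key),
                                         sw_secret, sizeof(sw_secret));
}
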
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
@@ -1768,18 +1985,23 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
+ any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "asus,vivobook-s15" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,omnibook-x14" },
+ { .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
{ .compatible = "microsoft,arcata", },
+ { .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
+ { .compatible = "qcom,x1p42100-crd" },
{ }
};
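
[Editor's note — a hedged sketch of how an allowlist like the one above is typically consulted: match the machine's root node compatible against the table. The helper name is hypothetical and the actual driver logic may differ.]

#include <linux/of.h>

static bool foo_machine_is_allowed(const struct of_device_id *allowlist)
{
        struct device_node *root = of_find_node_by_path("/");
        bool allowed;

        if (!root)
                return false;

        allowed = !!of_match_node(allowlist, root);
        of_node_put(root);
        return allowed;
}
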
@@ -1867,7 +2089,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!READ_ONCE(__scm);
+ /* Paired with smp_store_release() in qcom_scm_probe(). */
+ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
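
[Editor's note — a minimal generic sketch of the acquire/release pairing adopted above, with hypothetical foo_* names. The release store publishes the fully initialized object; the paired acquire load guarantees a reader that sees the pointer also sees every write that preceded the publish.]

struct foo { int ready; };

static struct foo *foo_instance;  /* plays the role of __scm */

static void foo_publish(struct foo *f)
{
        f->ready = 1;                         /* ordinary init writes... */
        smp_store_release(&foo_instance, f);  /* ...ordered before the pointer */
}

static bool foo_is_available(void)
{
        /* Paired acquire: if we observe the pointer, ready == 1 is visible. */
        return !!smp_load_acquire(&foo_instance);
}
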
@@ -2024,18 +2247,22 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Let all above stores be available after this */
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- if (irq != -ENXIO)
- return irq;
+ if (irq != -ENXIO) {
+ ret = irq;
+ goto err;
+ }
} else {
ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", __scm);
- if (ret < 0)
- return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ if (ret < 0) {
+ dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ goto err;
+ }
}
__get_convention();
@@ -2054,14 +2281,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV)
- return dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+ goto err;
+ }
ret = qcom_tzmem_enable(__scm->dev);
- if (ret)
- return dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
+ if (ret) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+ goto err;
+ }
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = 0;
@@ -2069,9 +2300,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
pool_config.max_size = SZ_256K;
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool))
- return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ if (IS_ERR(__scm->mempool)) {
+ ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+ goto err;
+ }
/*
* Initialize the QSEECOM interface.
@@ -2087,6 +2320,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
return 0;
+
+err:
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ smp_store_release(&__scm, NULL);
+
+ return ret;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index e36b2f67607f..097369d38b84 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -128,6 +128,10 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07
+#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08
+#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09
+#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c5c78b869561..3c52cb73237a 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -967,18 +967,15 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
/* first client will create kernel thread */
if (!chan->ctrl->task) {
chan->ctrl->task =
- kthread_create_on_node(svc_normal_to_secure_thread,
- (void *)chan->ctrl,
- cpu_to_node(cpu),
- "svc_smc_hvc_thread");
+ kthread_run_on_cpu(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu, "svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
- kthread_bind(chan->ctrl->task, cpu);
- wake_up_process(chan->ctrl->task);
}
pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
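
[Editor's note — kthread_run_on_cpu() folds the create/bind/wake triple removed above into one call, avoiding the window where the thread exists but is not yet bound to its CPU. A hedged sketch; the thread function and names are hypothetical.]

#include <linux/delay.h>
#include <linux/kthread.h>

static int foo_thread_fn(void *data)
{
        while (!kthread_should_stop())
                msleep(100);    /* placeholder work loop */
        return 0;
}

static struct task_struct *foo_start_worker(unsigned int cpu, void *data)
{
        /* Equivalent to kthread_create_on_node() + kthread_bind() +
         * wake_up_process(), done atomically from the caller's view. */
        return kthread_run_on_cpu(foo_thread_fn, data, cpu, "foo_worker/%u");
}
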
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..5aa7b8884374 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@
#include "dfl-afu.h"
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
afu->dma_regions = RB_ROOT;
}
/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;
ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ unlock_vm:
/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node **new, *parent = NULL;
- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);
new = &afu->dma_regions.rb_node;
@@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
struct dfl_afu *afu;
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}
/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;
while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
rb_erase(node, &afu->dma_regions);
if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);
if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
node = rb_next(node);
kfree(region);
@@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}
/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;
@@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;
/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}
/* Only accept continuous pages, otherwise return an error */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}
/* Pages are continuous, so start DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}
*iova = region->iova;
- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}
return 0;
unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +372,34 @@ free_region:
/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;
- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}
if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);
return 0;
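
[Editor's note — a hedged sketch of the containment test behind afu_dma_region_find(): a region matches when it fully covers the queried [iova, iova+size) window, and size 0 degenerates to an exact-start lookup as used by afu_dma_region_find_iova(). This mirrors dma_region_check_iova() under stated assumptions; overflow handling in the real helper is omitted.]

#include <linux/types.h>

static bool foo_region_covers(u64 region_iova, u64 region_length,
                              u64 iova, u64 size)
{
        if (!size)
                return region_iova == iova;   /* exact-start lookup */

        return iova >= region_iova &&
               iova + size <= region_iova + region_length;
}
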
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)
/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}
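
[Editor's note — the hunks through the rest of this patch all apply one refactor: per-device state moves from platform data reached via dev_get_platdata() to a feature dev data object reached from the struct device itself. An illustrative sketch; the real struct dfl_feature_dev_data lives in the DFL core and carries more state (features, disable_count, ...), and the accessor's actual implementation may differ.]

#include <linux/device.h>
#include <linux/mutex.h>

struct foo_feature_dev_data {
        struct platform_device *dev;
        struct mutex lock;
        /* ... */
};

static inline struct foo_feature_dev_data *
to_foo_feature_dev_data(struct device *dev)
{
        /* One plausible scheme: the enumeration code stores the pointer
         * as drvdata when it creates the feature device. */
        return dev_get_drvdata(dev);
}
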
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;
- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;
/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);
/* Clear errors if the err input matches the current port errors. */
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);
/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);
done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;
return attr->mode;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 2fd4f07ed081..3bf8e7338dbe 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@
/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clearing the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);
- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}
@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)
/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, which puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
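
[Editor's note — both port enable and disable above wait for the reset-ack bit with readq_poll_timeout(). A minimal sketch of the pattern; the register constants are hypothetical. The macro returns 0 once the condition holds, or -ETIMEDOUT if the timeout expires first.]

#include <linux/bits.h>
#include <linux/iopoll.h>

#define FOO_RST_ACK     BIT(4)
#define FOO_POLL_US     10
#define FOO_TIMEOUT_US  1000

static int foo_wait_reset_ack(void __iomem *ctrl)
{
        u64 v;

        return readq_poll_timeout(ctrl, v, v & FOO_RST_ACK,
                                  FOO_POLL_US, FOO_TIMEOUT_US);
}
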
@@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempt at MMIO access to the AFU while in reset will
* result in errors reported via the port error reporting sub feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;
- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}
static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
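
[Editor's note — port_get_id() extracts the port number with FIELD_GET(). A short sketch of the mechanism with a hypothetical mask standing in for PORT_CAP_PORT_NUM: FIELD_GET() masks and right-shifts according to the mask's bit position, so no manual shift constants are needed.]

#include <linux/bitfield.h>
#include <linux/bits.h>

#define FOO_PORT_NUM    GENMASK_ULL(7, 4)

static unsigned int foo_port_num(u64 cap)
{
        /* Extracts bits 7:4 of cap, shifted down to bit 0. */
        return FIELD_GET(FOO_PORT_NUM, cap);
}
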
@@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +180,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +221,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -241,15 +242,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +259,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +297,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -317,18 +318,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -338,15 +339,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +357,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);
- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;
return attr->mode;
@@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
@@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
return 0;
}
-static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
void __user *arg)
{
struct dfl_fpga_port_region_info rinfo;
@@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;
- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
if (ret)
return ret;
@@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
@@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;
- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;
if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}
- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
}
static long
-afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_unmap unmap;
unsigned long minsz;
@@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(fdata, unmap.iova);
}
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *f;
long ret;
dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return afu_ioctl_check_extension(pdata, arg);
+ return afu_ioctl_check_extension(fdata, arg);
case DFL_FPGA_PORT_GET_INFO:
- return afu_ioctl_get_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_GET_REGION_INFO:
- return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_region_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_MAP:
- return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ return afu_ioctl_dma_map(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_UNMAP:
- return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd. The
@@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub-feature, and returns 0 or another
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = {
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_feature_dev_data *fdata;
struct dfl_afu_mmio_region region;
u64 offset;
int ret;
@@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
if (ret)
return ret;
@@ -851,46 +854,45 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_afu *afu;
afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
if (!afu)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);
return 0;
}
static int afu_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 2e7b41629406..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@
/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
INIT_LIST_HEAD(&afu->regions);
}
@@ -39,7 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
@@ -47,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;
- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
@@ -63,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
/* check if @index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -80,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}
/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;
list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}
/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -118,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -127,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -144,7 +145,7 @@ exit:
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -152,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -162,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 7bef3e300aa2..03be4f0969c7 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -76,27 +76,27 @@ struct dfl_afu {
struct rb_root dma_regions;
};
-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when call __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);
extern const struct dfl_feature_ops port_err_ops;
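
The lock annotation for __afu_port_enable/disable encodes a caller-holds-lock contract: port disable, reconfiguration, and re-enable must happen inside one critical section so the port is never observed half-programmed. A small pthread-based model of that contract; the names here are illustrative, not the DFL API:

#include <pthread.h>
#include <stdio.h>

struct feature_dev_data {
	pthread_mutex_t lock;
	unsigned int disable_count;
};

/* Caller must hold fdata->lock, mirroring __afu_port_enable/disable. */
static void __port_disable(struct feature_dev_data *fdata)
{
	fdata->disable_count++;
}

static void __port_enable(struct feature_dev_data *fdata)
{
	if (fdata->disable_count)
		fdata->disable_count--;
}

int main(void)
{
	struct feature_dev_data fdata = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* The lock spans the whole disable/reconfigure/enable sequence. */
	pthread_mutex_lock(&fdata.lock);
	__port_disable(&fdata);
	/* ... reconfigure the port while it is quiesced ... */
	__port_enable(&fdata);
	pthread_mutex_unlock(&fdata.lock);

	printf("disable_count=%u\n", fdata.disable_count);
	return 0;
}
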
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 950c606c59d4..28b0f9d062ac 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};
static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;
- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;
- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}
- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;
priv->port_ops = ops;
}
- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}
static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -85,8 +85,6 @@ static void fme_br_remove(struct platform_device *pdev)
fpga_bridge_unregister(br);
- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
}
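
fme_bridge_enable_set() resolves its port lazily: on first use it looks up the port's feature dev data by port id and caches it in priv, then caches the port ops, and later calls reuse both pointers. A standalone sketch of that resolve-once-then-cache pattern, with a stand-in port table instead of the real cdev lookup:

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

struct port { int id; bool enabled; };

static int port_enable_set(struct port *p, bool enable)
{
	p->enabled = enable;
	return 0;
}

struct bridge_priv {
	int port_id;
	struct port *port;			/* resolved lazily */
	int (*enable_set)(struct port *, bool);	/* cached ops */
};

static struct port ports[] = { { .id = 0 }, { .id = 1 } };

static struct port *find_port(int id)
{
	for (size_t i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
		if (ports[i].id == id)
			return &ports[i];
	return NULL;
}

static int bridge_enable_set(struct bridge_priv *priv, bool enable)
{
	if (!priv->port) {			/* first call: resolve */
		priv->port = find_port(priv->port_id);
		if (!priv->port)
			return -1;		/* -ENODEV in the driver */
	}
	if (!priv->enable_set)			/* first call: cache ops */
		priv->enable_set = port_enable_set;

	return priv->enable_set(priv->port, enable);
}

int main(void)
{
	struct bridge_priv priv = { .port_id = 1 };

	bridge_enable_set(&priv, false);	/* resolves and caches */
	bridge_enable_set(&priv, true);		/* reuses the cache */
	printf("port1 enabled=%d\n", ports[1].enabled);
	return 0;
}
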
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if the related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;
return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}
static int fme_global_err_init(struct platform_device *pdev,
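
The *_errors_store() handlers above all share one write-1-to-clear sequence under fdata->lock: mask the error source, re-read the live error register, clear it only if userspace wrote back exactly the value it had read (so a newly raised error bit is never cleared unseen), then unmask. A sketch of that sequence with plain variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the PCIE0_ERROR and PCIE0_ERROR_MASK registers. */
static uint64_t error_reg = 0x5;	/* two error bits pending */
static uint64_t error_mask;

/* Write-1-to-clear, guarded against racing with newly raised errors. */
static int errors_store(uint64_t val)
{
	int ret = 0;

	error_mask = ~0ULL;		/* 1: mask error reporting */

	if (error_reg == val)		/* 2: only clear what was seen */
		error_reg &= ~val;	/*    hardware clears W1C bits */
	else
		ret = -1;		/*    -EINVAL in the driver */

	error_mask = 0;			/* 3: unmask again */
	return ret;
}

int main(void)
{
	printf("stale value: %d\n", errors_store(0x1));	/* rejected */
	printf("exact value: %d\n", errors_store(0x5));	/* cleared */
	printf("error_reg now 0x%llx\n", (unsigned long long)error_reg);
	return 0;
}
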
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index f8d89a4a6ccb..8aca2fb20e87 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -28,10 +28,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -47,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_ID);
@@ -65,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_MD);
@@ -79,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -94,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -109,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -135,10 +141,10 @@ static const struct attribute_group fme_hdr_group = {
.attrs = fme_hdr_attrs,
};
-static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -147,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
return dfl_fpga_cdev_release_port(cdev, port_id);
}
-static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -163,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_FME_PORT_RELEASE:
- return fme_hdr_ioctl_release_port(pdata, arg);
+ return fme_hdr_ioctl_release_port(fdata, arg);
case DFL_FPGA_FME_PORT_ASSIGN:
- return fme_hdr_ioctl_assign_port(pdata, arg);
+ return fme_hdr_ioctl_assign_port(fdata, arg);
}
return -ENODEV;
@@ -411,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;
val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
switch (attr) {
case hwmon_power_max:
@@ -438,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -589,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
},
};
-static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -598,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static int fme_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
- filp->private_data = pdata;
+ dfl_feature_dev_use_count(fdata));
+ filp->private_data = fdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;
@@ -650,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return fme_ioctl_check_extension(pdata, arg);
+ return fme_ioctl_check_extension(fdata, arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd.
@@ -658,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -672,27 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme *fme;
fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
if (!fme)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);
return 0;
}
static void fme_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}
static const struct file_operations fme_fops = {
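
fme_ioctl() dispatches by convention: generic commands are handled inline, everything else is offered to each sub-feature in turn, and -ENODEV from a sub-feature's ioctl means "not mine, try the next one" while any other return value is final. A compact model of that convention; the feature set and command numbers are invented for illustration:

#include <stddef.h>
#include <stdio.h>

#define ENODEV 19

struct feature {
	long (*ioctl)(unsigned int cmd, unsigned long arg);
};

static long err_ioctl(unsigned int cmd, unsigned long arg)
{
	return cmd == 2 ? (long)arg : -ENODEV;	/* handles cmd 2 only */
}

static long pr_ioctl(unsigned int cmd, unsigned long arg)
{
	return cmd == 3 ? 0 : -ENODEV;		/* handles cmd 3 only */
}

static struct feature features[] = {
	{ .ioctl = err_ioctl },
	{ .ioctl = pr_ioctl },
};

static long dev_ioctl(unsigned int cmd, unsigned long arg)
{
	/* -ENODEV means "not handled here", so try the next feature. */
	for (size_t i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
		long ret = features[i].ioctl(cmd, arg);
		if (ret != -ENODEV)
			return ret;
	}
	return -ENODEV;		/* nobody claimed the command */
}

int main(void)
{
	printf("cmd 3 -> %ld\n", dev_ioctl(3, 0));
	printf("cmd 9 -> %ld\n", dev_ioctl(9, 0));
	return 0;
}
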
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index cdcf6dea4cc9..b878b260af38 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -164,16 +163,16 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
+ * @fdata: fme feature dev data
* @feature: sub feature info
- * @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;
@@ -209,11 +208,11 @@ create_mgr_err:
/**
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);
- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;
fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -274,11 +273,11 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
/**
* dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, and error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;
@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);
/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);
/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;
/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);
/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}
static long fme_pr_ioctl(struct platform_device *pdev,
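
pr_mgmt_init() above is a partial-construction unwind: the manager comes first, then a bridge and a region per port, and a failure anywhere jumps to labels that destroy whatever was already built, in reverse. A reduced sketch of that goto-based unwind shape, abstracting the mgr/bridge/region objects into numbered steps:

#include <stdio.h>

static int create_step(int i, int fail_at)
{
	if (i == fail_at)
		return -1;
	printf("created %d\n", i);
	return 0;
}

static void destroy_upto(int n)
{
	while (n--)			/* tear down in reverse order */
		printf("destroyed %d\n", n);
}

static int mgmt_init(int nports, int fail_at)
{
	int i, ret = 0;

	for (i = 0; i < nports; i++) {
		ret = create_step(i, fail_at);
		if (ret)
			goto destroy;	/* unwind everything built */
	}
	return 0;

destroy:
	destroy_upto(i);
	return ret;
}

int main(void)
{
	int ret = mgmt_init(4, 2);	/* fails at step 2, unwinds 0..1 */

	printf("init returned %d\n", ret);
	return 0;
}
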
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c406b949026f..7022657243c0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -119,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}
-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -156,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list);
/**
* dfl_fpga_port_ops_get - get matched port ops from the global list
- * @pdev: platform device to match with associated port ops.
+ * @fdata: feature dev data to match with associated port ops.
* Return: matched port ops on success, NULL otherwise.
*
* Please note that dfl_fpga_port_ops_put() must be called after use of the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;
@@ -171,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -222,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
* dfl_fpga_check_port_id - check the port id
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
* @pport_id: port id to compare.
*
* Return: 1 if the port device matches the given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;
- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;
- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;
- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);
- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
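
dfl_fpga_check_port_id() memoizes the port id: FEATURE_DEV_ID_UNUSED marks "never queried", the first check fetches the id through the port ops and stores it in fdata->id, and every later check is a plain comparison. The same idea in a tiny standalone form, with a stub in place of port_ops->get_id():

#include <stdio.h>

#define ID_UNUSED (-1)

struct port_data {
	int id;			/* ID_UNUSED until first queried */
};

static int hw_read_port_id(void)
{
	return 3;		/* stand-in for port_ops->get_id() */
}

static int check_port_id(struct port_data *fdata, int want)
{
	if (fdata->id == ID_UNUSED)
		fdata->id = hw_read_port_id();	/* memoize once */
	return fdata->id == want;
}

int main(void)
{
	struct port_data fdata = { .id = ID_UNUSED };

	printf("want 2 -> %d\n", check_port_id(&fdata, 2));
	printf("want 3 -> %d\n", check_port_id(&fdata, 3));	/* cached */
	return 0;
}
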
@@ -351,10 +339,10 @@ static void release_dfl_dev(struct device *dev)
}
static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -380,11 +368,11 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;
- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
ddev->dfh_version = feature->dfh_version;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->cdev = fdata->dfl_cdev;
if (feature->param_size) {
ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
if (!ddev->params) {
@@ -435,11 +423,11 @@ put_dev:
return ERR_PTR(ret);
}
-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -447,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}
-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;
@@ -462,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}
- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -474,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;
err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}
@@ -504,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
*/
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature *feature;
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -520,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
- struct dfl_feature_platform_data *pdata,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
@@ -579,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature,
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;
while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
- ret = dfl_feature_instance_init(pdev, pdata,
- feature, drv);
+ ret = dfl_feature_instance_init(pdev, feature, drv);
if (ret)
goto exit;
}
@@ -596,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;
@@ -695,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -708,7 +694,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;
- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -743,50 +729,62 @@ struct dfl_feature_info {
u64 params[];
};
-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}
-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static void dfl_id_free_action(void *arg)
+{
+ struct dfl_feature_dev_data *fdata = arg;
+
+ dfl_id_free(fdata->type, fdata->pdev_id);
+}
+
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;
- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->type = type;
- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
/*
@@ -795,25 +793,15 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for the port device,
* and it should always be 0 for the fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
feature->dfh_version = finfo->dfh_version;
@@ -823,7 +811,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
finfo->params, finfo->param_size,
GFP_KERNEL);
if (!feature->params)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
feature->param_size = finfo->param_size;
}
@@ -840,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ return ERR_CAST(feature->ioaddr);
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
@@ -864,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}
- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;
- return ret;
+ return fdata;
}
-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * Register the current feature device. This is called when we need to switch
+ * to parsing another feature or when we have parsed all features on the given
+ * device feature list.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = {};
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;
- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;
- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;
- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);
- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;
+
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
+
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;
+
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ struct dfl_feature *feature;
+
+ platform_device_unregister(fdata->dev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+}
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ /* reset the binfo for the next FIU */
+ binfo->type = DFL_ID_MAX;
return 0;
}
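
binfo_create_feature_dev_data() above ties the allocated id to the enumeration device with devm_add_action_or_reset(), so the id is released automatically on any later failure or on device teardown, replacing the old hand-rolled dfl_id_free() paths. A user-space sketch of such an undo-action stack (devres itself is kernel-only; this only models the registration and reverse-order release):

#include <stdio.h>

typedef void (*action_fn)(void *arg);

struct action { action_fn fn; void *arg; };

static struct action actions[8];
static int nactions;

/* Register an undo action; on failure run it at once (the *_or_reset part). */
static int add_action_or_reset(action_fn fn, void *arg)
{
	if (nactions == 8) {
		fn(arg);	/* could not register: undo immediately */
		return -1;
	}
	actions[nactions++] = (struct action){ fn, arg };
	return 0;
}

/* Run registered actions in reverse order, like devres teardown. */
static void release_all(void)
{
	for (int i = nactions - 1; i >= 0; i--)
		actions[i].fn(actions[i].arg);
	nactions = 0;
}

static void id_free(void *arg)
{
	printf("freed id %d\n", *(int *)arg);
}

int main(void)
{
	int id = 42;

	if (add_action_or_reset(id_free, &id))
		return 1;
	/* ... enumeration continues; any exit path below is covered ... */
	release_all();
	return 0;
}
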
@@ -921,22 +948,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;
- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}
- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}
@@ -1025,7 +1041,7 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* Instead, features with interrupt functionality provide
* the information in feature specific registers.
*/
- type = feature_dev_id_type(binfo->feature_dev);
+ type = binfo->type;
if (type == PORT_ID) {
switch (fid) {
case PORT_FEATURE_ID_UINT:
@@ -1217,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1227,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}
return 0;
@@ -1273,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1294,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);
ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1515,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
return 0;
}
@@ -1573,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}
+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;
@@ -1614,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);
- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);
remove_feature_devs(cdev);
@@ -1643,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
- * __dfl_fpga_cdev_find_port - find a port under given container device
+ * __dfl_fpga_cdev_find_port_data - find a port under given container device
*
* @cdev: container device
* @data: data passed to match function
@@ -1656,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
* NOTE: you will need to drop the device reference with put_device() after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;
- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
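
__dfl_fpga_cdev_find_port_data() is now a pure lookup: walk the port list and return the first entry accepted by the caller-supplied match(), with no reference taken. A generic sketch of that predicate-driven find over a linked list:

#include <stddef.h>
#include <stdio.h>

struct node {
	int port_id;
	struct node *next;
};

static struct node *
find_node(struct node *head, void *data, int (*match)(struct node *, void *))
{
	for (struct node *n = head; n; n = n->next)
		if (match(n, data))
			return n;
	return NULL;
}

static int match_port_id(struct node *n, void *data)
{
	return n->port_id == *(int *)data;
}

int main(void)
{
	struct node b = { .port_id = 1, .next = NULL };
	struct node a = { .port_id = 0, .next = &b };
	int want = 1;

	struct node *hit = find_node(&a, &want, match_port_id);

	printf("found port %d\n", hit ? hit->port_id : -1);
	return 0;
}
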
static int __init dfl_fpga_init(void)
{
@@ -1706,33 +1704,28 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;
- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1752,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);
cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
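
After this rework a released port is simply one whose fdata->dev is NULL: release takes exclusive use and unregisters the feature device, assign re-registers it and drops the exclusive use, and released_port_num tracks the balance. A state-machine sketch of that pairing; the -EBUSY returns mirror the driver's checks:

#include <stdbool.h>
#include <stdio.h>

#define EBUSY 16

struct port { bool registered; };

struct cdev_state {
	struct port port;
	int released_port_num;
};

static int release_port(struct cdev_state *c)
{
	if (!c->port.registered)
		return -EBUSY;		/* already released */
	c->port.registered = false;	/* feature_dev_unregister() */
	c->released_port_num++;
	return 0;
}

static int assign_port(struct cdev_state *c)
{
	if (c->port.registered)
		return -EBUSY;		/* still owned by the host */
	c->port.registered = true;	/* feature_dev_register() */
	c->released_port_num--;
	return 0;
}

int main(void)
{
	struct cdev_state c = { .port = { .registered = true } };

	printf("release -> %d\n", release_port(&c));
	printf("release -> %d\n", release_port(&c));	/* -EBUSY */
	printf("assign  -> %d\n", assign_port(&c));
	printf("balance  = %d\n", c.released_port_num);
	return 0;
}
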
@@ -1789,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_PORT_OFST(port_id));
@@ -1816,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1842,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;
mutex_lock(&cdev->lock);
@@ -1856,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1993,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -2013,9 +2002,9 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
if (IS_ERR(fds))
return PTR_ERR(fds);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
kfree(fds);
return ret;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5063d73b0d82..95539f1213cb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -206,6 +207,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -219,15 +222,15 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);
/**
* struct dfl_feature_id - dfl private feature id
@@ -300,26 +303,32 @@ struct dfl_feature {
#define FEATURE_DEV_ID_UNUSED (-1)
/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -327,55 +336,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @cdev: cdev of feature dev.
+ * @fdata: dfl enumeration data for the dfl feature device.
+ */
+struct dfl_feature_platform_data {
+ struct cdev cdev;
+ struct dfl_feature_dev_data *fdata;
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;
if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;
- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;
return 0;
}
static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;
- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;
- pdata->open_count--;
+ fdata->open_count--;
}
static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}
static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}
static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}
struct dfl_feature_ops {
@@ -398,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev,
struct module *owner);
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
-static inline
-struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+static inline struct dfl_feature_dev_data *
+dfl_fpga_inode_to_feature_dev_data(struct inode *inode)
{
struct dfl_feature_platform_data *pdata;
pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata;
}
-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)
-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;
return NULL;
}
-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);
if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -437,10 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
+}
+
static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}
static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -522,26 +551,21 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *));
-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);
- return pdev;
+ return fdata;
}
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
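A minimal sketch (not from the patch) of how a DFL sub-driver might use the new accessors after this conversion: platform data is reduced to a cdev-plus-fdata wrapper, so drivers reach their enumeration data via to_dfl_feature_dev_data() and hang state off it with the renamed fdata helpers. The probe function and my_priv type below are hypothetical:

#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dfl.h"

struct my_priv {				/* hypothetical driver state */
	int opened;
};

static int my_feature_probe(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Private data now hangs off the enumeration data, not the pdata. */
	dfl_fpga_fdata_set_private(fdata, priv);
	return 0;
}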
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 93ee3aa092f8..98b4d1633b25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
select IRQ_DOMAIN
help
@@ -529,9 +530,9 @@ config GPIO_OCTEON
family of SOCs.
config GPIO_OMAP
- tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
+ tristate "TI OMAP GPIO support"
+ depends on ARCH_OMAP || COMPILE_TEST
default y if ARCH_OMAP
- depends on ARM
select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
help
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 65f41cc3eafc..d668ddb2e81d 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr)
- return -ENOMEM;
+ if (!aggr) {
+ res = -ENOMEM;
+ goto put_module;
+ }
memcpy(aggr->args, buf, count + 1);
@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}
aggr->pdev = pdev;
+ module_put(THIS_MODULE);
return count;
remove_table:
@@ -175,6 +181,8 @@ free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
+put_module:
+ module_put(THIS_MODULE);
return res;
}
@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
- if (!aggr)
+ if (!aggr) {
+ module_put(THIS_MODULE);
return -ENOENT;
+ }
gpio_aggregator_free(aggr);
+ module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
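In isolation, the module-pinning pattern added above looks like the sketch below; my_store and do_work are placeholders, not gpio-aggregator code. Holding a reference across the store callback keeps the module from being unloaded while the callback is still executing:

#include <linux/device/driver.h>
#include <linux/module.h>

static ssize_t do_work(const char *buf, size_t count)
{
	return count;			/* stand-in for the real work */
}

static ssize_t my_store(struct device_driver *drv, const char *buf,
			size_t count)
{
	ssize_t ret;

	if (!try_module_get(THIS_MODULE))	/* module is unloading */
		return -ENOENT;

	ret = do_work(buf, count);

	module_put(THIS_MODULE);		/* drop the pin again */
	return ret;
}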
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 73e660c5e38a..17ab039c7413 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -32,14 +32,12 @@
* will be blocked until the current one completes.
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
-* @mapped_irq : kernel mapped irq number.
*/
struct altera_gpio_chip {
struct gpio_chip gc;
void __iomem *regs;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
- int mapped_irq;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
@@ -235,6 +233,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
int reg, ret;
struct altera_gpio_chip *altera_gc;
struct gpio_irq_chip *girq;
+ int mapped_irq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
@@ -271,8 +270,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
if (IS_ERR(altera_gc->regs))
return dev_err_probe(dev, PTR_ERR(altera_gc->regs), "failed to ioremap memory resource\n");
- altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
- if (altera_gc->mapped_irq < 0)
+ mapped_irq = platform_get_irq_optional(pdev, 0);
+ if (mapped_irq < 0)
goto skip_irq;
if (device_property_read_u32(dev, "altr,interrupt-type", &reg)) {
@@ -296,7 +295,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- girq->parents[0] = altera_gc->mapped_irq;
+ girq->parents[0] = mapped_irq;
skip_irq:
ret = devm_gpiochip_add_data(dev, &altera_gc->gc, altera_gc);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 5321ef98f442..64908f1a5e7f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
struct bcm_kona_gpio_bank {
int id;
int irq;
+ /*
+ * Used to keep track of lock/unlock operations for each GPIO in the
+ * bank.
+ *
+ * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
+ * unlock count for all GPIOs is 0 by default. Each unlock increments
+ * the counter, and each lock decrements the counter.
+ *
+ * The lock function only locks the GPIO once its unlock counter is
+ * down to 0. This is necessary because the GPIO is unlocked in two
+ * places in this driver: once for requested GPIOs, and once for
+ * requested IRQs. Since it is possible for a GPIO to be requested
+ * as both a GPIO and an IRQ, we need to ensure that we don't lock it
+ * too early.
+ */
+ u8 gpio_unlock_count[GPIO_PER_BANK];
/* Used in the interrupt handler */
struct bcm_kona_gpio *kona_gpio;
};
@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ dev_err(kona_gpio->gpio_chip.parent,
+ "Unbalanced locks for GPIO %u\n", gpio);
+ return;
+ }
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val |= BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ if (--bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val |= BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val &= ~BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val &= ~BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
+
+ ++bank->gpio_unlock_count[bit];
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
- return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ /*
+ * We need to unlock the GPIO before any other operations are performed
+ * on the relevant GPIO configuration registers
+ */
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
+
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+
+ /* Once we no longer use it, lock the GPIO again */
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
- gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
}
static struct irq_chip bcm_gpio_irq_chip = {
@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank->irq = platform_get_irq(pdev, i);
bank->kona_gpio = kona_gpio;
if (bank->irq < 0) {
- dev_err(dev, "Couldn't get IRQ for bank %d", i);
+ dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
ret = -ENOENT;
goto err_irq_domain;
}
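The unlock counting described in the new bcm-kona comment boils down to a per-line reference count: unlock the hardware on the first user, lock it again when the last user goes away. A reduced sketch, with hw_lock()/hw_unlock() standing in for the PWD register writes:

#include <linux/bug.h>
#include <linux/types.h>

#define NGPIO_PER_BANK 32

struct bank {
	u8 unlock_count[NGPIO_PER_BANK];
};

static void hw_lock(struct bank *b, int bit) { /* write PWD regs */ }
static void hw_unlock(struct bank *b, int bit) { /* write PWD regs */ }

static void gpio_unlock(struct bank *b, int bit)
{
	if (b->unlock_count[bit]++ == 0)
		hw_unlock(b, bit);	/* first user opens the gate */
}

static void gpio_lock(struct bank *b, int bit)
{
	if (WARN_ON(b->unlock_count[bit] == 0))
		return;			/* unbalanced lock */

	if (--b->unlock_count[bit] == 0)
		hw_lock(b, bit);	/* last user closes it again */
}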
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 24417c3247b0..0cd4c36ae8aa 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -285,6 +285,7 @@ static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
};
static const struct of_device_id mpc8xxx_gpio_ids[] = {
+ { .compatible = "fsl,mpc8314-gpio", },
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
{ .compatible = "fsl,mpc8610-gpio", },
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4cb455b2bdee..619b6fb9d833 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,8 +490,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
- pdev->id * 32;
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 272febc3230e..d63c1030e6ac 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
@@ -1088,7 +1069,8 @@ static int pca953x_probe(struct i2c_client *client)
*/
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio))
- return PTR_ERR(reset_gpio);
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset gpio\n");
}
chip->client = client;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2ecee3269a0c..a7a1cdf7ac66 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
- *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
+ if (ret) {
+ *npins = RCAR_MAX_GPIO_PER_BANK;
+ } else {
+ *npins = args.args[2];
+ of_node_put(args.np);
+ }
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
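The gpio-ranges hunk above also plugs a refcount leak: a successful of_parse_phandle_with_fixed_args() returns args.np with an elevated reference that the caller owns. The pattern in isolation (count_lines is an illustrative name):

#include <linux/of.h>

static int count_lines(struct device_node *np, unsigned int *npins)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
	if (ret)
		return ret;		/* property absent or malformed */

	*npins = args.args[2];		/* third cell: number of lines */
	of_node_put(args.np);		/* parsing took a reference, drop it */
	return 0;
}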
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 71684dee2ca5..05f8781b5204 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -262,6 +262,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
chip->set = gpio_regmap_set_with_clear;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 686ae3d11ba3..b6c230fab840 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -413,11 +413,6 @@ static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip)
return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip);
}
-static int gpio_sim_dev_match_fwnode(struct device *dev, void *data)
-{
- return device_match_fwnode(dev, data);
-}
-
static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
{
struct gpio_sim_chip *chip;
@@ -503,7 +498,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (ret)
return ret;
- chip->dev = device_find_child(dev, swnode, gpio_sim_dev_match_fwnode);
+ chip->dev = device_find_child(dev, swnode, device_match_fwnode);
if (!chip->dev)
return -ENODEV;
@@ -1033,20 +1028,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
struct configfs_subsystem *subsys = dev->group.cg_subsys;
struct gpio_sim_bank *bank;
struct gpio_sim_line *line;
+ struct config_item *item;
/*
- * The device only needs to depend on leaf line entries. This is
+ * The device only needs to depend on leaf entries. This is
* sufficient to lock up all the configfs entries that the
* instantiated, alive device depends on.
*/
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
if (lock)
- WARN_ON(configfs_depend_item_unlocked(
- subsys, &line->group.cg_item));
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
else
- configfs_undepend_item_unlocked(
- &line->group.cg_item);
+ configfs_undepend_item_unlocked(item);
}
}
}
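The gpio-sim hunk relies on device_find_child() accepting the generic device_match_fwnode() helper directly, which makes local trampoline wrappers redundant (this assumes the constified match-callback signature that the change implies). Sketched:

#include <linux/device.h>
#include <linux/property.h>

/* Look up the child bound to a firmware node; caller must put_device(). */
static struct device *find_by_fwnode(struct device *parent,
				     struct fwnode_handle *fwnode)
{
	return device_find_child(parent, fwnode, device_match_fwnode);
}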
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 75a3633ceddb..222279a9d82b 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
- int i, j;
+ int ret, i, j;
/*
* STMPE1600: to be able to get IRQ from pins,
@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
* GPSR or GPCR registers
*/
if (stmpe->partnum == STMPE1600) {
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
+ goto err;
+ }
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
+ goto err;
+ }
}
for (i = 0; i < CACHE_NR_REGS; i++) {
@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
}
}
+err:
mutex_unlock(&stmpe_gpio->irq_lock);
}
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index cd1f17041f8c..526640c39a11 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -15,8 +15,6 @@
#define TPS65219_GPIO0_DIR_MASK BIT(3)
#define TPS65219_GPIO0_OFFSET 2
#define TPS65219_GPIO0_IDX 0
-#define TPS65219_GPIO_DIR_IN 1
-#define TPS65219_GPIO_DIR_OUT 0
struct tps65219_gpio {
struct gpio_chip gpio_chip;
@@ -61,7 +59,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
* status bit.
*/
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return -ENOTSUPP;
return ret;
@@ -124,10 +122,10 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs
return -ENOTSUPP;
}
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_IN)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_IN);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
@@ -136,10 +134,10 @@ static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int off
if (offset != TPS65219_GPIO0_IDX)
return 0;
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_OUT);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
static const struct gpio_chip tps65219_template_chip = {
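The tps65219 conversion is a reminder that .get_direction() must speak gpiolib's GPIO_LINE_DIRECTION_* values rather than driver-local 0/1 encodings. A minimal conforming callback, with a made-up regmap register layout:

#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define MY_DIR_REG	0x02		/* hypothetical register */
#define MY_DIR_BIT(n)	BIT(n)		/* 1 = output in this layout */

static int my_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	struct regmap *map = gpiochip_get_data(gc);
	unsigned int val;
	int ret;

	ret = regmap_read(map, MY_DIR_REG, &val);
	if (ret)
		return ret;

	return (val & MY_DIR_BIT(offset)) ? GPIO_LINE_DIRECTION_OUT
					  : GPIO_LINE_DIRECTION_IN;
}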
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 5e26eb3adabb..18f523a15b3c 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -29,18 +29,22 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
-#define TQMX86_GPII_NONE 0
-#define TQMX86_GPII_FALLING BIT(0)
-#define TQMX86_GPII_RISING BIT(1)
-/* Stored in irq_type as a trigger type, but not actually valid as a register
- * value, so the name doesn't use "GPII"
+/*
+ * NONE, FALLING and RISING use the same bit patterns that can be programmed to
+ * the GPII register (after passing them to the TQMX86_GPIIC_ macros to shift
+ * them to the right position)
*/
-#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
-#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
-#define TQMX86_GPII_BITS 2
+#define TQMX86_INT_TRIG_NONE 0
+#define TQMX86_INT_TRIG_FALLING BIT(0)
+#define TQMX86_INT_TRIG_RISING BIT(1)
+#define TQMX86_INT_TRIG_BOTH (BIT(0) | BIT(1))
+#define TQMX86_INT_TRIG_MASK (BIT(0) | BIT(1))
/* Stored in irq_type with GPII bits */
#define TQMX86_INT_UNMASKED BIT(2)
+#define TQMX86_GPIIC_CONFIG(i, v) ((v) << (2 * (i)))
+#define TQMX86_GPIIC_MASK(i) TQMX86_GPIIC_CONFIG(i, TQMX86_INT_TRIG_MASK)
+
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
@@ -48,7 +52,7 @@ struct tqmx86_gpio_data {
/* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
DECLARE_BITMAP(output, TQMX86_NGPIO);
- u8 irq_type[TQMX86_NGPI];
+ u8 irq_type[TQMX86_NGPIO];
};
static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg)
@@ -62,6 +66,18 @@ static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val,
iowrite8(val, gd->io_base + reg);
}
+static void tqmx86_gpio_clrsetbits(struct tqmx86_gpio_data *gpio,
+ u8 clr, u8 set, unsigned int reg)
+ __must_hold(&gpio->spinlock)
+{
+ u8 val = tqmx86_gpio_read(gpio, reg);
+
+ val &= ~clr;
+ val |= set;
+
+ tqmx86_gpio_write(gpio, val, reg);
+}
+
static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
@@ -69,127 +85,137 @@ static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset));
}
+static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
+ int value)
+ __must_hold(&gpio->spinlock)
+{
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+}
+
static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- __assign_bit(offset, gpio->output, value);
- tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
- /* Direction cannot be changed. Validate is an input. */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return 0;
- else
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ tqmx86_gpio_clrsetbits(gpio, BIT(offset), 0, TQMX86_GPIODD);
+
+ return 0;
}
static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset,
int value)
{
- /* Direction cannot be changed, validate is an output */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
+ tqmx86_gpio_clrsetbits(gpio, 0, BIT(offset), TQMX86_GPIODD);
- tqmx86_gpio_set(chip, offset, value);
return 0;
}
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- if (TQMX86_DIR_INPUT_MASK & BIT(offset))
- return GPIO_LINE_DIRECTION_IN;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ u8 val;
+
+ val = tqmx86_gpio_read(gpio, TQMX86_GPIODD);
+
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
-static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int hwirq)
__must_hold(&gpio->spinlock)
{
- u8 type = TQMX86_GPII_NONE, gpiic;
+ u8 type = TQMX86_INT_TRIG_NONE;
+ int gpiic_irq = hwirq - TQMX86_NGPO;
- if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
- type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+ if (gpio->irq_type[hwirq] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK;
- if (type == TQMX86_INT_BOTH)
- type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
- ? TQMX86_GPII_FALLING
- : TQMX86_GPII_RISING;
+ if (type == TQMX86_INT_TRIG_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, hwirq)
+ ? TQMX86_INT_TRIG_FALLING
+ : TQMX86_INT_TRIG_RISING;
}
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
- gpiic |= type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ tqmx86_gpio_clrsetbits(gpio,
+ TQMX86_GPIIC_MASK(gpiic_irq),
+ TQMX86_GPIIC_CONFIG(gpiic_irq, type),
+ TQMX86_GPIIC);
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
+ }
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
}
static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
- unsigned long flags;
u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
- new_type = TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_TRIG_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- new_type = TQMX86_GPII_FALLING;
+ new_type = TQMX86_INT_TRIG_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_INT_BOTH;
+ new_type = TQMX86_INT_TRIG_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
- gpio->irq_type[offset] |= new_type;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_TRIG_MASK;
+ gpio->irq_type[data->hwirq] |= new_type;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
return 0;
}
@@ -199,8 +225,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits, flags;
- int i;
+ unsigned long irq_bits;
+ int i, hwirq;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -210,32 +236,38 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
irq_bits = irq_status;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
- /*
- * Edge-both triggers are implemented by flipping the edge
- * trigger after each interrupt, as the controller only supports
- * either rising or falling edge triggers, but not both.
- *
- * Internally, the TQMx86 GPIO controller has separate status
- * registers for rising and falling edge interrupts. GPIIC
- * configures which bits from which register are visible in the
- * interrupt status register GPIIS and defines what triggers the
- * parent IRQ line. Writing to GPIIS always clears both rising
- * and falling interrupt flags internally, regardless of the
- * currently configured trigger.
- *
- * In consequence, we can cleanly implement the edge-both
- * trigger in software by first clearing the interrupt and then
- * setting the new trigger based on the current GPIO input in
- * tqmx86_gpio_irq_config() - even if an edge arrives between
- * reading the input and setting the trigger, we will have a new
- * interrupt pending.
- */
- if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
- tqmx86_gpio_irq_config(gpio, i);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ hwirq = i + TQMX86_NGPO;
+
+ /*
+ * Edge-both triggers are implemented by flipping the
+ * edge trigger after each interrupt, as the controller
+ * only supports either rising or falling edge triggers,
+ * but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate
+ * status registers for rising and falling edge
+ * interrupts. GPIIC configures which bits from which
+ * register are visible in the interrupt status register
+ * GPIIS and defines what triggers the parent IRQ line.
+ * Writing to GPIIS always clears both rising and
+ * falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the
+ * edge-both trigger in software by first clearing the
+ * interrupt and then setting the new trigger based on
+ * the current GPIO input in tqmx86_gpio_irq_config() -
+ * even if an edge arrives between reading the input and
+ * setting the trigger, we will have a new interrupt
+ * pending.
+ */
+ if ((gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK) ==
+ TQMX86_INT_TRIG_BOTH)
+ tqmx86_gpio_irq_config(gpio, hwirq);
+ }
}
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
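Several tqmx86 hunks above replace open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scope-based helpers from <linux/cleanup.h>. Both forms used by the patch, reduced to a sketch (my_lock is illustrative):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);

static void whole_scope(void)
{
	/* Lock (IRQs off) held until the end of the enclosing scope. */
	guard(raw_spinlock_irqsave)(&my_lock);

	/* ... critical section ... */
}

static void partial_scope(void)
{
	scoped_guard(raw_spinlock_irqsave, &my_lock) {
		/* ... critical section, dropped at the closing brace ... */
	}

	/* ... continues with the lock released ... */
}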
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 6c3fbf382dba..b9171bf66168 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -22,7 +22,7 @@
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret = 0;
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
@@ -46,7 +46,7 @@ static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
u8 gpoctl;
@@ -91,7 +91,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, twl6040);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
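The twl6040 fix trades a fragile two-level dev_get_drvdata() walk for the pointer registered with the chip itself, so callbacks recover their context with gpiochip_get_data(). The general shape, with hypothetical my_* names:

#include <linux/gpio/driver.h>

struct my_device {
	void __iomem *base;		/* hypothetical device state */
};

static int my_read_bit(struct my_device *my, unsigned int offset)
{
	return 0;			/* stand-in for a register read */
}

static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	/* Returns the pointer passed to devm_gpiochip_add_data(). */
	struct my_device *my = gpiochip_get_data(chip);

	return my_read_bit(my, offset);
}

/* In probe: devm_gpiochip_add_data(dev, &my_chip, my); */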
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index c4f34a347cb6..c36a9dbccd4d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -36,6 +36,7 @@ struct vf610_gpio_port {
struct clk *clk_port;
struct clk *clk_gpio;
int irq;
+ spinlock_t lock; /* protect gpio direction registers */
};
#define GPIO_PDOR 0x00
@@ -124,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
u32 val;
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val &= ~mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -142,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio
vf610_gpio_set(chip, gpio, value);
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -297,6 +300,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
port->sdata = device_get_match_data(dev);
+ spin_lock_init(&port->lock);
dual_base = port->sdata->have_dual_base;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c6a8f2c82680..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 1f9fe50bba00..f7746c57ba76 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1689,6 +1689,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "PNP0C50:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 40f76a90fd7d..107d75558b5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2729,8 +2729,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ ret = raw_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2754,8 +2755,9 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2779,8 +2781,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
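The cdev hunks replace the self-locking atomic notifier with a raw notifier chain guarded by the new rwlock: register/unregister take the write lock, notification takes the read lock. The core of the pattern, with shortened names:

#include <linux/cleanup.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>

struct dev_state {
	rwlock_t lock;			/* protects the chain */
	struct raw_notifier_head chain;	/* no internal locking */
};

static void dev_state_init(struct dev_state *s)
{
	rwlock_init(&s->lock);
	RAW_INIT_NOTIFIER_HEAD(&s->chain);
}

static int add_listener(struct dev_state *s, struct notifier_block *nb)
{
	int ret;

	/* Writers get exclusive access to the chain. */
	scoped_guard(write_lock_irqsave, &s->lock)
		ret = raw_notifier_chain_register(&s->chain, nb);

	return ret;
}

static void notify(struct dev_state *s, unsigned long action, void *data)
{
	/* Readers may run concurrently with each other, not with writers. */
	guard(read_lock_irqsave)(&s->lock);
	raw_notifier_call_chain(&s->chain, action, data);
}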
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 679ed764cb14..0c00ed2ab431 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -904,13 +904,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
}
if (gc->ngpio == 0) {
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
+ dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
return -EINVAL;
}
if (gc->ngpio > FASTPATH_NGPIO)
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
- gc->ngpio, FASTPATH_NGPIO);
+ dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
return 0;
}
@@ -1025,7 +1025,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1056,13 +1057,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
- if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- assign_bit(FLAG_IS_OUT,
- &desc->flags, !gc->get_direction(gc, desc_index));
- } else {
+ /*
+ * We would typically want to check the return value of
+ * get_direction() here, but we must not bail out on errors,
+ * as pin controllers can have pins configured to alternate
+ * functions and legitimately return -EINVAL. Also: there's no
+ * need to take the SRCU lock here.
+ */
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
+ assign_bit(FLAG_IS_OUT, &desc->flags,
+ !gc->get_direction(gc, desc_index));
+ else
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
- }
}
ret = of_gpiochip_add(gc);
@@ -2701,7 +2708,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
int gpiod_direction_input_nonotify(struct gpio_desc *desc)
{
- int ret = 0;
+ int ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2728,13 +2735,18 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
if (guard.gc->direction_input) {
ret = guard.gc->direction_input(guard.gc,
gpio_chip_hwgpio(desc));
- } else if (guard.gc->get_direction &&
- (guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc)) != 1)) {
- gpiod_warn(desc,
- "%s: missing direction_input() operation and line is output\n",
- __func__);
- return -EIO;
+ } else if (guard.gc->get_direction) {
+ dir = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_IN) {
+ gpiod_warn(desc,
+ "%s: missing direction_input() operation and line is output\n",
+ __func__);
+ return -EIO;
+ }
}
if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
@@ -2748,7 +2760,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
- int val = !!value, ret = 0;
+ int val = !!value, ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2771,12 +2783,18 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
- if (guard.gc->get_direction &&
- guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) {
- gpiod_warn(desc,
- "%s: missing direction_output() operation\n",
- __func__);
- return -EIO;
+ if (guard.gc->get_direction) {
+ dir = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_OUT) {
+ gpiod_warn(desc,
+ "%s: missing direction_output() operation\n",
+ __func__);
+ return -EIO;
+ }
}
/*
* If we can't actively set the direction, we are some
@@ -3129,6 +3147,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->get_multiple)
return gc->get_multiple(gc, mask, bits);
if (gc->get) {
@@ -3159,6 +3179,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int ret, i = 0;
/*
@@ -3170,10 +3191,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
- ret = gpio_chip_get_multiple(array_info->chip,
- array_info->get_mask,
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
+
+ ret = gpio_chip_get_multiple(gc, array_info->get_mask,
value_bitmap);
if (ret)
return ret;
@@ -3454,6 +3480,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
static void gpio_chip_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->set_multiple) {
gc->set_multiple(gc, mask, bits);
} else {
@@ -3471,6 +3499,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int i = 0;
/*
@@ -3482,14 +3511,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
- value_bitmap);
+ gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -4155,8 +4189,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- atomic_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ guard(read_lock_irqsave)(&desc->gdev->line_state_lock);
+
+ raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc);
}
/**
@@ -4751,9 +4786,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
{
struct gpio_desc *desc;
struct gpio_descs *descs;
+ struct gpio_device *gdev;
struct gpio_array *array_info = NULL;
- struct gpio_chip *gc;
int count, bitmap_size;
+ unsigned long dflags;
size_t descs_size;
count = gpiod_count(dev, con_id);
@@ -4774,7 +4810,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- gc = gpiod_to_chip(desc);
+ gdev = gpiod_to_gpio_device(desc);
/*
* If the pin hardware number of array member 0 is also 0, select
* its chip as a candidate for the fast bitmap processing path.
@@ -4782,8 +4818,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
- gc->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
+ gdev->ngpio : count);
array = krealloc(descs, descs_size +
struct_size(array_info, invert_mask, 3 * bitmap_size),
@@ -4803,7 +4839,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = gc;
+ array_info->gdev = gdev;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -4816,7 +4852,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
continue;
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info->chip != gc) {
+ if (array_info->gdev != gdev) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -4839,9 +4875,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->set_mask);
}
} else {
+ dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
- gpiochip_line_is_open_source(gc, descs->ndescs))
+ if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require inversion */
@@ -4853,7 +4890,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (array_info)
dev_dbg(dev,
"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
- array_info->chip->label, array_info->size,
+ array_info->gdev->label, array_info->size,
*array_info->get_mask, *array_info->set_mask,
*array_info->invert_mask);
return descs;
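Because struct gpio_chip can go away at runtime, the array fast path now dereferences it only inside the GPIO device's SRCU read section. The access pattern on its own (gpio_device is gpiolib's internal structure from gpiolib.h):

#include <linux/cleanup.h>
#include <linux/srcu.h>

#include "gpiolib.h"

static int with_live_chip(struct gpio_device *gdev)
{
	struct gpio_chip *gc;

	guard(srcu)(&gdev->srcu);	/* chip cannot be freed under us */

	gc = srcu_dereference(gdev->chip, &gdev->srcu);
	if (!gc)
		return -ENODEV;		/* chip was already removed */

	/* ... gc->get()/gc->set_multiple() etc. are safe here ... */
	return 0;
}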
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 83690f72f7e5..c129a03e2040 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
@@ -45,6 +46,7 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_lock: RW-spinlock protecting the line state notifier
* @line_state_wq: used to emit line state events from a separate thread in
* process context
* @device_notifier: used to notify character device wait queues about the GPIO
@@ -72,7 +74,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct atomic_notifier_head line_state_notifier;
+ struct raw_notifier_head line_state_notifier;
+ rwlock_t line_state_lock;
struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
@@ -114,7 +117,7 @@ extern const char *const gpio_suffixes[];
*
* @desc: Array of pointers to the GPIO descriptors
* @size: Number of elements in desc
- * @chip: Parent GPIO chip
+ * @gdev: Parent GPIO device
* @get_mask: Get mask used in fastpath
* @set_mask: Set mask used in fastpath
* @invert_mask: Invert mask used in fastpath
@@ -126,7 +129,7 @@ extern const char *const gpio_suffixes[];
struct gpio_array {
struct gpio_desc **desc;
unsigned int size;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long *get_mask;
unsigned long *set_mask;
unsigned long invert_mask[];
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 772fc7625639..fbef3f471bd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -103,10 +103,15 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
+config DRM_DRAW
+ bool
+ depends on DRM
+
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
select FONT_SUPPORT
+ select DRM_DRAW
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs. It's useful when using a user-space
@@ -218,77 +223,7 @@ config DRM_CLIENT
option. Drivers that support the default clients should
select DRM_CLIENT_SELECTION instead.
-config DRM_CLIENT_LIB
- tristate
- depends on DRM
- select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
- select FB_CORE if DRM_FBDEV_EMULATION
- help
- This option enables the DRM client library and selects all
- modules and components according to the enabled clients.
-
-config DRM_CLIENT_SELECTION
- tristate
- depends on DRM
- select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
- help
- Drivers that support in-kernel DRM clients have to select this
- option.
-
-config DRM_CLIENT_SETUP
- bool
- depends on DRM_CLIENT_SELECTION
- help
- Enables the DRM client selection. DRM drivers that support the
- default clients should select DRM_CLIENT_SELECTION instead.
-
-menu "Supported DRM clients"
- depends on DRM_CLIENT_SELECTION
-
-config DRM_FBDEV_EMULATION
- bool "Enable legacy fbdev support for your modesetting driver"
- depends on DRM_CLIENT_SELECTION
- select DRM_CLIENT
- select DRM_CLIENT_SETUP
- select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default FB
- help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provides the linux console
- support on top of your modesetting driver.
-
- If in doubt, say "Y".
-
-config DRM_FBDEV_OVERALLOC
- int "Overallocation of the fbdev buffer"
- depends on DRM_FBDEV_EMULATION
- default 100
- help
- Defines the fbdev buffer overallocation in percent. Default
- is 100. Typical values for double buffering will be 200,
- triple buffering 300.
-
-config DRM_FBDEV_LEAK_PHYS_SMEM
- bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
- depends on DRM_FBDEV_EMULATION && EXPERT
- default n
- help
- In order to keep user-space compatibility, we want in certain
- use-cases to keep leaking the fbdev physical address to the
- user-space program handling the fbdev buffer.
- This affects, not only, Amlogic, Allwinner or Rockchip devices
- with ARM Mali GPUs using an userspace Blob.
- This option is not supported by upstream developers and should be
- removed as soon as possible and be considered as a broken and
- legacy behaviour from a modern fbdev device driver.
-
- Please send any bug reports when using this to your proprietary
- software vendor that requires this.
-
- If in doubt, say "N" or spread the word to your closed source
- library vendor.
-
-endmenu
+source "drivers/gpu/drm/clients/Kconfig"
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -359,6 +294,7 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
@@ -367,6 +303,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
@@ -375,6 +312,7 @@ config DRM_GEM_DMA_HELPER
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
@@ -530,6 +468,10 @@ config DRM_HYPERV
config DRM_EXPORT_FOR_TESTS
bool
+# Separate option as not all DRM drivers use it
+config DRM_PANEL_BACKLIGHT_QUIRKS
+ tristate
+
config DRM_LIB_RANDOM
bool
default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 463afad1b5ca..19fb370fbc56 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -91,10 +91,12 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_DRAW) += drm_draw.o
drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
+obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
# Memory-management helpers
@@ -149,14 +151,6 @@ drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#
-# DRM clients
-#
-
-drm_client_lib-y := drm_client_setup.o
-drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
-obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
-
-#
# Drivers and the rest
#
@@ -165,6 +159,7 @@ obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-y += arm/
+obj-y += clients/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 41fa3377d9cf..1a11cab741ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -26,6 +26,7 @@ config DRM_AMDGPU
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
select DRM_EXEC
+ select DRM_PANEL_BACKLIGHT_QUIRKS
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7b18c52825d..5b21674b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -105,7 +105,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o umc_v8_14.o
# add IH block
amdgpu-y += \
@@ -200,6 +200,7 @@ amdgpu-y += \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
vcn_v5_0_0.o \
+ vcn_v5_0_1.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -208,7 +209,8 @@ amdgpu-y += \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
jpeg_v4_0_5.o \
- jpeg_v5_0_0.o
+ jpeg_v5_0_0.o \
+ jpeg_v5_0_1.o
# add VPE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index f44de9d4b6a1..e13fbd974141 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -334,6 +334,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
+ /* TBD: ideally this should clear only the GFX and SDMA blocks */
+ amdgpu_ras_clear_err_state(tmp_adev);
r = aldebaran_mode2_restore_ip(tmp_adev);
if (r)
goto end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4653a8d2823a..69895fccb474 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -880,6 +880,7 @@ struct amdgpu_device {
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
+ struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct debugfs_blob_wrapper debugfs_discovery_blob;
@@ -1174,7 +1175,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- bool job_hang;
bool dc_enabled;
/* Mask of active clusters */
uint32_t aid_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 5ef6b745f222..f3289d289913 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -71,6 +71,11 @@ struct ras_query_context;
#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index ec5e0dcf8613..deb0785350e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -140,7 +140,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -157,7 +157,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -236,7 +236,7 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
} else if (r) {
return r;
@@ -508,7 +508,7 @@ static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -565,7 +565,7 @@ static int acp_suspend(struct amdgpu_ip_block *ip_block)
/* power up on suspend */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -575,7 +575,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
/* power down again on resume */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -584,19 +584,19 @@ static bool acp_is_idle(void *handle)
return true;
}
-static int acp_set_clockgating_state(void *handle,
+static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int acp_set_powergating_state(void *handle,
+static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
return 0;
}
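This hunk is one instance of a tree-wide interface change visible throughout this section: the amd_ip_funcs clock/powergating callbacks now receive the struct amdgpu_ip_block itself instead of an opaque void *handle. A hedged sketch of the converted shape, with a hypothetical "foo" block and helper:

static int foo_apply_gating(struct amdgpu_device *adev, bool enable)
{
	return 0;	/* hardware programming elided in this sketch */
}

static int foo_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	/* The device is reached through the block rather than a cast
	 * of an opaque handle, and per-block state is available too.
	 */
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_PG_STATE_GATE);

	return foo_apply_gating(adev, enable);
}

Dispatchers accordingly pass &adev->ip_blocks[i] instead of (void *)adev, as the amdgpu_device.c hunks later in this patch show.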
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3afcd1e8aa54..2c1b38c5cfc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -368,7 +368,7 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(*bo, true);
+ (void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
@@ -724,7 +725,9 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
/* Disable GFXOFF and PG. Temporary workaround
* to fix some compute applications issue on GFX9.
*/
- adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
+ struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (gfx_block != NULL)
+ gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
}
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
@@ -834,7 +837,7 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!kiq_ring->sched.ready || adev->job_hang)
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 4b80ad860639..8af67f18500a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -433,6 +433,9 @@ void kgd2kfd_unlock_kfd(void);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault);
+
#else
static inline int kgd2kfd_init(void)
{
@@ -518,5 +521,12 @@ static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
}
+
+static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ return false;
+}
+
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
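The new kgd2kfd_vmfault_fast_path() declaration follows the file's existing idiom: when the subsystem is compiled out, a static inline stub with the same signature returns a safe default, so callers need no #ifdefs. The idiom in isolation, with a hypothetical config symbol and function:

#include <linux/types.h>

struct foo_dev;

#ifdef CONFIG_FOO
bool foo_fast_path(struct foo_dev *fdev, bool retry);
#else
/* Compiled-out stub: identical signature, safe default, no codegen. */
static inline bool foo_fast_path(struct foo_dev *fdev, bool retry)
{
	return false;
}
#endif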
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index cc66ebb7bae1..441568163e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1131,6 +1131,9 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1179,6 +1182,9 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t low, high, pipe_reset_data = 0;
uint64_t queue_addr = 0;
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f30548f4c3b3..1e998f972c30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -730,7 +730,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
return;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
@@ -779,7 +779,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -989,7 +989,7 @@ unwind:
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
- amdgpu_bo_reserve(bo[i], true);
+ (void)amdgpu_bo_reserve(bo[i], true);
if (--attachment[i]->bo_va->ref_count == 0)
amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
@@ -1259,11 +1259,11 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
return -EBUSY;
}
- amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
- amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+ (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
return 0;
}
@@ -2352,7 +2352,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
- amdgpu_bo_reserve(bo, true);
+ (void)amdgpu_bo_reserve(bo, true);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
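The (void) casts added in the hunks above mark calls whose return values are declared __must_check but are deliberately ignored on these teardown paths, where no recovery is possible; the cast documents the intent to reviewers (and satisfies some static checkers). A minimal sketch with a hypothetical function:

#include <linux/compiler.h>
#include <linux/errno.h>

static int __must_check try_reserve(void)
{
	return -EBUSY;		/* stand-in for a fallible operation */
}

static void teardown(void)
{
	/* Best effort: on the teardown path there is nothing sensible
	 * to do if this fails, so the result is discarded on purpose.
	 */
	(void)try_reserve();
}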
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 45affc02548c..423fd2eebe1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -47,35 +47,37 @@
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
-static bool check_atom_bios(uint8_t *bios, size_t size)
+static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
{
uint16_t tmp, bios_header_start;
+ uint8_t *bios = adev->bios;
if (!bios || size < 0x49) {
- DRM_INFO("vbios mem is null or mem size is wrong\n");
+ dev_dbg(adev->dev, "VBIOS mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
- DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+ dev_dbg(adev->dev, "VBIOS signature incorrect %x %x\n", bios[0],
+ bios[1]);
return false;
}
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
- DRM_INFO("Can't locate bios header\n");
+ dev_dbg(adev->dev, "Can't locate VBIOS header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
- DRM_INFO("BIOS header is broken\n");
+ dev_dbg(adev->dev, "VBIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
- DRM_DEBUG("ATOMBIOS detected\n");
+ dev_dbg(adev->dev, "ATOMBIOS detected\n");
return true;
}
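Restating the layout the function validates, independent of the amdgpu helpers: byte 0x48 of the image holds a little-endian 16-bit offset to the BIOS header, and the "ATOM"/"MOTA" signature sits four bytes past that header. A hedged sketch (the bounds checks here are slightly stricter than the driver's):

#include <linux/string.h>
#include <linux/types.h>

static bool is_atom_image(const u8 *bios, size_t size)
{
	u16 hdr;
	size_t sig;

	if (!bios || size < 0x4a)
		return false;

	hdr = bios[0x48] | (bios[0x49] << 8);	/* little-endian */
	sig = (size_t)hdr + 4;
	if (!hdr || size < sig + 4)
		return false;

	return !memcmp(bios + sig, "ATOM", 4) ||
	       !memcmp(bios + sig, "MOTA", 4);
}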
@@ -118,7 +120,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -146,7 +148,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -186,7 +188,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
- if (!check_atom_bios(adev->bios, len)) {
+ if (!check_atom_bios(adev, len)) {
kfree(adev->bios);
return false;
}
@@ -216,7 +218,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, romlen);
iounmap(bios);
- if (!check_atom_bios(adev->bios, romlen))
+ if (!check_atom_bios(adev, romlen))
goto free_bios;
adev->bios_size = romlen;
@@ -324,7 +326,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -389,7 +391,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
vhdr->ImageLength,
GFP_KERNEL);
- if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
+ if (!check_atom_bios(adev, vhdr->ImageLength)) {
kfree(adev->bios);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 16153d275d7a..68bce6a6d09d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw,
+ AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5df21529b3b1..5cc5f59e3018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1105,7 +1105,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
* We can't use gang submit with reserved VMIDs when the VM changes
* can't be invalidated by more than one engine at the same time.
*/
- if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
struct drm_gpu_scheduler *sched = entity->rq->sched;
@@ -1189,7 +1189,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a68338cb7b4a..49ca8c814455 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2095,6 +2095,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+ amdgpu_debugfs_vcn_sched_mask_init(adev);
amdgpu_debugfs_jpeg_sched_mask_init(adev);
amdgpu_debugfs_gfx_sched_mask_init(adev);
amdgpu_debugfs_compute_sched_mask_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cd4fac120834..018dfccd771b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -199,14 +199,16 @@ void amdgpu_set_init_level(struct amdgpu_device *adev,
}
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data);
/**
* DOC: pcie_replay_count
*
* The amdgpu driver provides a sysfs API for reporting the total number
- * of PCIe replays (NAKs)
+ * of PCIe replays (NAKs).
* The file pcie_replay_count is used for this and returns the total
- * number of replays as a sum of the NAKs generated and NAKs received
+ * number of replays as a sum of the NAKs generated and NAKs received.
*/
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
@@ -432,8 +434,8 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
* @dev: drm_device pointer
*
* Return:
- * 1 if the device supporte BACO;
- * 3 if the device support MACO (only works if BACO is supported)
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
int amdgpu_device_supports_baco(struct drm_device *dev)
@@ -580,7 +582,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_aper_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -671,7 +673,7 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
* here is that the GPU reset is not running on another thread in parallel.
*
* For this we trylock the read side of the reset semaphore, if that succeeds
- * we know that the reset is not running in paralell.
+ * we know that the reset is not running in parallel.
*
* If the trylock fails we assert that we are either already holding the read
* side of the lock or are the reset thread itself and hold the write side of
@@ -1402,6 +1404,7 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
@@ -1635,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");
@@ -1736,7 +1746,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
uint32_t fw_ver;
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
- /* force vPost if error occured */
+ /* force vPost if error occurred */
if (err)
return true;
@@ -2168,7 +2178,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2202,7 +2212,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2362,8 +2372,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
- ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s>\n",
+ adev->num_ip_blocks, ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2381,7 +2391,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
* the module parameter virtual_display. This feature provides a virtual
* display hardware on headless boards or in virtualized environments.
* This function parses and validates the configuration string specified by
- * the user and configues the virtual display configuration (number of
+ * the user and configures the virtual display configuration (number of
* virtual connectors, crtcs, etc.) specified.
*/
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
@@ -2444,7 +2454,7 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Parses the asic configuration parameters specified in the gpu info
- * firmware and makes them availale to the driver for use in configuring
+ * firmware and makes them available to the driver for use in configuring
* the asic.
* Returns 0 on success, -EINVAL on failure.
*/
@@ -2485,6 +2495,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
@@ -2504,7 +2515,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
/*
- * Should be droped when DAL no longer needs it.
+ * Should be dropped when DAL no longer needs it.
*/
if (adev->asic_type == CHIP_NAVI12)
goto parse_soc_bounding_box;
@@ -3064,7 +3075,7 @@ init_failed:
*
* Writes a reset magic value to the gart pointer in VRAM. The driver calls
* this function before a GPU reset. If the value is retained after a
- * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents.
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
*/
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
@@ -3140,7 +3151,7 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
@@ -3177,7 +3188,7 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
- r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
@@ -3379,7 +3390,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_amdkfd_suspend(adev, false);
- /* Workaroud for ASICs need to disable SMC first */
+ /* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
@@ -3481,7 +3492,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
}
@@ -4309,7 +4320,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/*
* Reset domain needs to be present early, before XGMI hive discovered
- * (if any) and intitialized to use reset sem and in_gpu reset flag
+ * (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
@@ -4599,6 +4610,11 @@ fence_driver_init:
amdgpu_device_check_iommu_direct_map(adev);
+ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+ r = register_pm_notifier(&adev->pm_nb);
+ if (r)
+ goto failed;
+
return 0;
release_ras_con:
@@ -4663,6 +4679,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
+ unregister_pm_notifier(&adev->pm_nb);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
@@ -4781,8 +4799,8 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
int ret;
- /* No need to evict vram on APUs for suspend to ram or s2idle */
- if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs unless going to S4 */
+ if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
@@ -4795,6 +4813,41 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: pointer passed along the notifier chain (unused here)
+ *
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to evict resources from the device before the system goes to
+ * sleep while there is still access to swap.
+ */
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
+{
+ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
+ int r;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ adev->in_s4 = true;
+ fallthrough;
+ case PM_SUSPEND_PREPARE:
+ r = amdgpu_device_evict_resources(adev);
+ /*
+ * This is considered non-fatal at this point because
+ * amdgpu_device_prepare() will evict resources again and treat any
+ * failure there as fatal.
+ * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
+ */
+ if (r)
+ drm_warn(adev_to_drm(adev), "Failed to evict resources (%d); freeze active processes if problems occur\n", r);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
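For readers new to PM notifiers, a condensed sketch of the registration pattern the driver adopts here; my_dev and my_dev_evict are hypothetical, while the event constants and register/unregister calls come from <linux/suspend.h>:

#include <linux/notifier.h>
#include <linux/suspend.h>

struct my_dev {
	struct notifier_block pm_nb;
};

static void my_dev_evict(struct my_dev *md)
{
	/* device-specific eviction while swap is still reachable */
}

static int my_pm_notifier(struct notifier_block *nb, unsigned long mode,
			  void *data)
{
	struct my_dev *md = container_of(nb, struct my_dev, pm_nb);

	switch (mode) {
	case PM_HIBERNATION_PREPARE:	/* entering S4 */
	case PM_SUSPEND_PREPARE:	/* entering S3/s2idle */
		my_dev_evict(md);
		break;
	}
	return NOTIFY_DONE;	/* never veto the transition here */
}

static int my_dev_init(struct my_dev *md)
{
	md->pm_nb.notifier_call = my_pm_notifier;
	/* pair with unregister_pm_notifier() in the teardown path */
	return register_pm_notifier(&md->pm_nb);
}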
+
+/**
* amdgpu_device_prepare - prepare for device suspend
*
* @dev: drm dev pointer
@@ -4833,7 +4886,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
return 0;
unprepare:
- adev->in_s0ix = adev->in_s3 = false;
+ adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
return r;
}
@@ -5184,7 +5237,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_ras_set_fed(adev, false);
+ amdgpu_ras_clear_err_state(adev);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5241,16 +5294,18 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_has_job_running - check if there is any job in mirror list
+ * amdgpu_device_has_job_running - check if there is any unfinished job
*
* @adev: amdgpu_device pointer
*
- * check if there is any job in mirror list
+ * check if there is any job running on the device when the guest driver
+ * receives an FLR notification from the host driver. If jobs are still
+ * running, the guest driver will not respond to the FLR reset. Instead, it
+ * lets the job hit the timeout and then issues the reset request itself.
*/
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
int i;
- struct drm_sched_job *job;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -5258,11 +5313,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.pending_list,
- struct drm_sched_job, list);
- spin_unlock(&ring->sched.job_list_lock);
- if (job)
+ if (amdgpu_fence_count_emitted(ring))
return true;
}
return false;
@@ -5487,7 +5538,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
- amdgpu_ras_set_fed(tmp_adev, false);
+ amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
@@ -5821,6 +5872,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
@@ -5903,7 +5966,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
/*
- * Mark these ASICs to be reseted as untracked first
+ * Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
@@ -6102,19 +6165,56 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * AMD dGPU, which may sit behind a virtual upstream bridge.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
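The traversal above reduces to a pci_upstream_bridge() walk that keeps overwriting the result for every AMD-owned bridge it passes, so the topmost link internal to the dGPU wins. Stripped to its core as a sketch (the immediate-parent gate is dropped, and the caller is assumed to pre-seed *speed/*width with the endpoint's own caps, matching the fallback branch):

#include <linux/pci.h>

static void walk_amd_bridges(struct pci_dev *pdev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	struct pci_dev *parent = pdev;

	while ((parent = pci_upstream_bridge(parent))) {
		if (parent->vendor != PCI_VENDOR_ID_ATI)
			continue;
		/* last AMD bridge on the way up wins */
		*speed = pcie_get_speed_cap(parent);
		*width = pcie_get_width_cap(parent);
	}
}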
+
+/**
* amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
* @adev: amdgpu_device pointer
*
- * Fetchs and stores in the driver the PCIE capabilities (gen speed
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
* and lanes) of the slot the device is in. Handles APUs and
* virtualized environments where PCIE config space may not be available.
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- struct pci_dev *pdev;
enum pci_bus_speed speed_cap, platform_speed_cap;
- enum pcie_link_width platform_link_width;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -6136,11 +6236,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
&platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
- pdev = adev->pdev;
- speed_cap = pcie_get_speed_cap(pdev);
if (speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
@@ -6196,51 +6295,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* platform caps */
if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
switch (platform_link_width) {
case PCIE_LNK_X32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
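Two things changed in this block. First, '=' became '|=' so that the asic-cap switch added just above and this platform-cap switch accumulate into the same pcie_mlw_mask instead of the second overwriting the first. Second, each case still expands one reported width into "that width plus everything narrower"; a compact way to express the same expansion is a fallthrough ladder, sketched here with hypothetical bit assignments (x12/x32 omitted for brevity):

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/pci.h>

static u32 width_mask(enum pcie_link_width w)
{
	u32 mask = 0;

	switch (w) {
	case PCIE_LNK_X16:
		mask |= BIT(4);		/* x16 */
		fallthrough;
	case PCIE_LNK_X8:
		mask |= BIT(3);		/* x8 */
		fallthrough;
	case PCIE_LNK_X4:
		mask |= BIT(2);		/* x4 */
		fallthrough;
	case PCIE_LNK_X2:
		mask |= BIT(1);		/* x2 */
		fallthrough;
	case PCIE_LNK_X1:
		mask |= BIT(0);		/* x1 */
		break;
	default:			/* unknown width: no caps */
		break;
	}
	return mask;
}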
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1040204ac8b9..949d74eff294 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -104,7 +104,9 @@
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
+#include "jpeg_v5_0_1.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -1340,7 +1342,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
*/
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
@@ -1705,7 +1707,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
* so this won't overflow.
*/
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
- adev->vcn.vcn_codec_disable_mask[v] =
+ adev->vcn.inst[v].vcn_codec_disable_mask =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
@@ -1836,6 +1838,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1890,6 +1893,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2013,6 +2017,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
@@ -2184,6 +2189,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2238,6 +2244,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -2361,6 +2368,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
break;
+ case IP_VERSION(5, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2405,6 +2416,7 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
aqua_vanjaram_init_soc_config(adev);
break;
default:
@@ -2652,6 +2664,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b119d27271c1..35c778426a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -33,6 +33,7 @@
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
#include <asm/div64.h>
#include <linux/pci.h>
@@ -1788,3 +1789,82 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
return 0;
}
+/* panic_abo is set in amdgpu_display_get_scanout_buffer() and only used in
+ * amdgpu_display_set_pixel(); both are called from the panic handler and
+ * protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO to write each pixel to the GPU VRAM.
+ * This is a simplified version of amdgpu_device_mm_access().
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+ unsigned int x,
+ unsigned int y,
+ u32 color)
+{
+ struct amdgpu_res_cursor cursor;
+ unsigned long offset;
+ struct amdgpu_bo *abo = panic_abo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ uint32_t tmp;
+
+ offset = x * 4 + y * sb->pitch[0];
+ amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+ tmp = cursor.start >> 31;
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+ if (tmp != 0xffffffff)
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct amdgpu_bo *abo;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ if (!fb)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!abo)
+ return -EINVAL;
+
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+ sb->format = drm_format_info(fb->format->format);
+ if (!sb->format)
+ return -EINVAL;
+
+ sb->pitch[0] = fb->pitches[0];
+
+ if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+ if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+ drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+ return -EINVAL;
+ }
+ /* Only handle 32-bit formats, to simplify MMIO access */
+ if (fb->format->cpp[0] != 4) {
+ drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+ return -EINVAL;
+ }
+ sb->set_pixel = amdgpu_display_set_pixel;
+ panic_abo = abo;
+ return 0;
+ }
+ if (!abo->kmap.virtual &&
+ ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+ drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+ if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+ return 0;
+}
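The panic path above has two modes: if the buffer is CPU-visible it hands drm_panic an iosys_map and lets it draw directly; otherwise it registers the set_pixel() callback, which resolves each pixel with the usual linear-framebuffer arithmetic before the indirect MMIO write. The addressing in isolation, as a trivial sketch assuming the 32-bit-per-pixel format the function enforces:

#include <linux/types.h>

/* Byte offset of pixel (x, y) in a linear 32bpp scanout buffer. */
static unsigned long pixel_offset(unsigned int x, unsigned int y,
				  unsigned int pitch)
{
	return (unsigned long)y * pitch + (unsigned long)x * 4;
}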
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 9d19940f73c8..dfa0d642ac16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
+#include <drm/drm_panic.h>
+
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
@@ -49,4 +51,7 @@ amdgpu_lookup_format_info(u32 format, uint64_t modifier);
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8e81a83d37d8..9f627caedc3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -36,6 +36,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
@@ -60,6 +61,8 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ amdgpu_vm_bo_update_shared(bo);
+
return 0;
}
@@ -345,7 +348,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
/* FIXME: This should be after the "if", but needs a fix to make sure
* DMABuf imports are initialized in the right VM list.
*/
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 38686203bea6..c0ddbe7d6f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,7 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
@@ -119,9 +119,11 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+ * - 3.61.0 - Contains fix for RV/PCO compute queues
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 61
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -280,7 +282,7 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
- * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
+ * The default is -1 (use the value specified by TTM).
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -399,7 +401,7 @@ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
* the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
*/
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
/**
* DOC: bapm (int)
@@ -457,7 +459,7 @@ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
* Enable experimental hw support (1 = enable). The default is 0 (disabled).
*/
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
* DOC: dc (int)
@@ -568,14 +570,14 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
* DOC: emu_mode (int)
* Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
*/
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
-module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
/**
* DOC: ras_enable (int)
@@ -730,7 +732,7 @@ module_param_named(noretry, amdgpu_noretry, int, 0644);
*/
MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
/**
* DOC: use_xgmi_p2p (int)
@@ -749,7 +751,7 @@ module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
* assigns queues to HQDs.
*/
int sched_policy = KFD_SCHED_POLICY_HWS;
-module_param(sched_policy, int, 0444);
+module_param_unsafe(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
@@ -799,7 +801,7 @@ MODULE_PARM_DESC(send_sigterm,
* Setting 1 enables halt on hang.
*/
int halt_if_hws_hang;
-module_param(halt_if_hws_hang, int, 0644);
+module_param_unsafe(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
@@ -808,7 +810,7 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
* check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
-module_param(hws_gws_support, bool, 0444);
+module_param_unsafe(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
@@ -841,7 +843,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
*/
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
-module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -849,7 +851,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
*/
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
-module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
/**
* DOC: pcie_p2p (bool)
@@ -953,7 +955,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0644);
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
/**
 * DOC: bad_page_threshold (int) Bad page threshold specifies the
@@ -1049,7 +1051,7 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* - 0x4: Disable GPU soft recovery, always do a full reset
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
-module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
@@ -2552,9 +2554,7 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2566,8 +2566,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ adev->in_s4 = false;
+
+ return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2580,6 +2585,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
@@ -2916,7 +2924,6 @@ static const struct drm_driver amdgpu_kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -2940,7 +2947,6 @@ const struct drm_driver amdgpu_partition_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index 5bc2cb661af7..2d86cc6f7f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -40,7 +40,6 @@
#define DRIVER_NAME "amdgpu"
#define DRIVER_DESC "AMD GPU"
-#define DRIVER_DATE "20150101"
extern const struct drm_driver amdgpu_partition_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index df2cf5c33925..91d638098889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,7 +60,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
ktime_t usage[AMDGPU_HW_IP_NUM];
const char *pl_name[] = {
[TTM_PL_VRAM] = "vram",
@@ -72,15 +72,8 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_DOORBELL] = "doorbell",
};
unsigned int hw_ip, i;
- int ret;
-
- ret = amdgpu_bo_reserve(vm->root.bo, false);
- if (ret)
- return;
-
- amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
- amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_vm_get_memory(vm, stats);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
/*
@@ -114,9 +107,11 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
- stats[TTM_PL_VRAM].requested/1024UL);
+ (stats[TTM_PL_VRAM].drm.shared +
+ stats[TTM_PL_VRAM].drm.private) / 1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
- stats[TTM_PL_TT].requested/1024UL);
+ (stats[TTM_PL_TT].drm.shared +
+ stats[TTM_PL_TT].drm.private) / 1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ceb5163480f4..09c9194d5bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -384,7 +384,7 @@ int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
{
- if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info)
+ if (!adev->fru_info)
return;
sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index bc58dca18035..98f3196599ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -32,7 +32,7 @@ struct amdgpu_fru_info {
char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
char manufacturer_name[32];
- char fru_id[32];
+ char fru_id[50];
};
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1a5df8b94661..69429df09477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -42,6 +42,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
@@ -87,10 +88,8 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (aobj) {
- amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
- }
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -179,6 +178,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
if (r)
return r;
+ amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
@@ -252,6 +252,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
+ amdgpu_vm_bo_update_shared(bo);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
@@ -839,7 +840,6 @@ error:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@@ -899,7 +899,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
- amdgpu_vm_bo_invalidate(adev, robj, true);
+ amdgpu_vm_bo_invalidate(robj, true);
amdgpu_bo_unreserve(robj);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 69a6b6dba0a5..c1f35ded684e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -515,7 +515,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev))
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;
spin_lock(&kiq->ring_lock);
@@ -567,7 +567,7 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
+ if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
return 0;
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
@@ -806,7 +806,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
+ AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
@@ -818,7 +818,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.gfx_off_state &&
- !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {
@@ -1484,6 +1484,24 @@ static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
return 0;
}
+/**
+ * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * Provides the sysfs interface to manually run a cleaner shader, which is
+ * used to clear the GPU state between different tasks. Writing a value to the
+ * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
+ * The value written corresponds to the partition index on multi-partition
+ * devices. On single-partition devices, the value should be '0'.
+ *
+ * The cleaner shader clears the Local Data Store (LDS) and General Purpose
+ * Registers (GPRs) to ensure data isolation between GPU workloads.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -1532,6 +1550,19 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
return count;
}
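
Per the kernel-doc above, triggering the cleaner shader is just a sysfs write of the partition index. A user-space sketch (the card0 path is an assumption; pick the device under test):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical path; substitute the right cardN for your device */
	const char *path = "/sys/class/drm/card0/device/run_cleaner_shader";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* "0" targets partition 0, the only valid value on single-partition GPUs */
	fprintf(f, "0\n");
	fclose(f);
	return 0;
}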
+/**
+ * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer to store the output data
+ *
+ * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
+ * feature for each GPU partition. Reading from the 'enforce_isolation'
+ * sysfs file returns the isolation settings for all partitions, where '0'
+ * indicates disabled and '1' indicates enabled.
+ *
+ * Return: The number of bytes read from the sysfs file.
+ */
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1555,6 +1586,20 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
return size;
}
+/**
+ * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * This function allows control over the 'enforce_isolation' feature, which
+ * serializes access to the graphics engine. Writing '1' or '0' to the
+ * 'enforce_isolation' sysfs file enables or disables process isolation for
+ * each partition. The input should specify the setting for all partitions.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1593,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
+ if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, false);
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, true);
- }
adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
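
Taken together, the two attributes form a read-modify-write interface: read the space-separated per-partition values, flip the ones you want, and write them back. A hedged user-space sketch assuming a card0 device with four partitions:

#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/class/drm/card0/device/enforce_isolation";
	char cur[128];
	FILE *f;

	f = fopen(attr, "r");
	if (f) {
		if (fgets(cur, sizeof(cur), f))
			printf("current: %s", cur);	/* e.g. "0 0 0 0" */
		fclose(f);
	}

	f = fopen(attr, "w");
	if (f) {
		fputs("1 1 1 1\n", f);	/* one value per partition */
		fclose(f);
	}
	return 0;
}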
@@ -1940,6 +1982,17 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
mutex_unlock(&adev->enforce_isolation_mutex);
}
+/**
+ * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the GPU partition
+ *
+ * When kernel submissions come in, the jobs are given a time slice and once
+ * that time slice is up, if there are KFD user queues active, kernel
+ * submissions are blocked until KFD has had its time slice. Once the KFD time
+ * slice is up, KFD user queues are preempted and kernel submissions are
+ * unblocked and allowed to run again.
+ */
static void
amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
u32 idx)
@@ -1985,10 +2038,20 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
msleep(GFX_SLICE_PERIOD_MS);
}
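
A toy model of the slicing described above, with sleeps standing in for the driver's fence waits and preemption calls (the constant and loop structure are illustrative; the real driver waits on completion signals rather than sleeping):

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define SLICE_MS 10	/* stand-in for the driver's slice period */

int main(void)
{
	bool kfd_queues_active = true;
	int round;

	for (round = 0; round < 3; round++) {
		printf("kernel submissions run for one slice\n");
		usleep(SLICE_MS * 1000);
		if (kfd_queues_active) {
			printf("block kernel submissions, KFD runs a slice\n");
			usleep(SLICE_MS * 1000);
			printf("preempt KFD queues, unblock kernel submissions\n");
		}
	}
	return 0;
}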
+/**
+ * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring begin_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2007,15 +2070,28 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
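
Note the shape of the change: the decision is captured in sched_work while enforce_isolation_mutex is held, and amdgpu_gfx_kfd_sch_ctrl() runs only after the unlock. A generic sketch of that decide-under-lock, act-outside-lock pattern (the pthread names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool isolation_enabled = true;

static void sch_ctrl(bool enable)
{
	/* may sleep or take other locks; must not run under 'lock' */
	printf("sch_ctrl(%d)\n", enable);
}

static void begin_use(void)
{
	bool sched_work = false;

	pthread_mutex_lock(&lock);
	if (isolation_enabled)
		sched_work = true;	/* record the decision only */
	pthread_mutex_unlock(&lock);

	if (sched_work)
		sch_ctrl(false);	/* act after dropping the lock */
}

int main(void)
{
	begin_use();
	return 0;
}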
+/**
+ * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring end_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2031,9 +2107,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
/*
@@ -2050,7 +2129,7 @@ static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->gfx.num_gfx_rings) - 1;
+ mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
if ((val & mask) == 0)
return -EINVAL;
@@ -2078,7 +2157,7 @@ static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
ring = &adev->gfx.gfx_ring[i];
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= 1ULL << i;
}
*val = mask;
@@ -2120,7 +2199,7 @@ static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->gfx.num_compute_rings) - 1;
+ mask = (1ULL << adev->gfx.num_compute_rings) - 1;
if ((val & mask) == 0)
return -EINVAL;
@@ -2149,7 +2228,7 @@ static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
ring = &adev->gfx.compute_ring[i];
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= 1ULL << i;
}
*val = mask;
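
The mask fixes above matter once ring counts approach the width of int. A small demonstration of why the ULL suffix is needed when the accumulator is 64-bit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bad = 0, good = 0;
	int i = 31;

	/* 1 << 31 overflows a signed 32-bit int (undefined behavior); on
	 * common ABIs the negative result then sign-extends when widened,
	 * corrupting the upper half of the 64-bit mask.
	 */
	bad |= 1 << i;
	good |= 1ULL << i;	/* shift performed in 64 bits, as intended */

	printf("bad  = %#llx\n", (unsigned long long)bad);
	printf("good = %#llx\n", (unsigned long long)good);
	return 0;
}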
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..2ea98ec60220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,16 +89,14 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/**
* amdgpu_ib_free - free an IB (Indirect Buffer)
*
- * @adev: amdgpu_device pointer
* @ib: IB object to free
 * @f: the fence the SA bo needs to wait on for the IB allocation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f)
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
+ amdgpu_sa_bo_free(&ib->sa_bo, f);
}
/**
@@ -193,8 +191,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
@@ -299,7 +297,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
if (ring->funcs->emit_wave_limit &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3b0aaf3ebc6..901f8b12c672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -298,3 +298,9 @@ uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
return dw1 | ((u64)(dw2 & 0xffff) << 32);
}
+
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+{
+ return ih == &adev->irq.ih ? "ih" : ih == &adev->irq.ih_soft ? "sw ih" :
+ ih == &adev->irq.ih1 ? "ih1" : ih == &adev->irq.ih2 ? "ih2" : "unknown";
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 508f02eb0cf8..7d4395a5d8ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -110,4 +110,5 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
signed int offset);
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 263ce1811cc8..732744488b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -77,7 +77,8 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
sizeof(ucode_prefix));
/* read isp fw */
- r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
amdgpu_ucode_release(&adev->isp.fw);
return r;
@@ -128,13 +129,13 @@ static bool isp_is_idle(void *handle)
return true;
}
-static int isp_set_clockgating_state(void *handle,
+static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int isp_set_powergating_state(void *handle,
+static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a21c510c408e..100f04475943 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -102,8 +102,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
- adev->job_hang = true;
-
/*
* Do the coredump immediately after a job timeout to get a very
* close dump/snapshot/representation of GPU's current error status
@@ -181,7 +179,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
- adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -197,11 +194,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
- /*
- * Initialize the scheduler to at least some ring so that we always
- * have a pointer to adev.
- */
- (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -267,7 +259,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(NULL, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -366,6 +358,13 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
+ /*
+ * The VM structure might be released after the VMID is
+ * assigned; we had multiple problems with people trying to use
+ * the VM pointer, so better set it to NULL.
+ */
+ if (!fence)
+ job->vm = NULL;
}
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 3eb4a4653fce..d9cb343a8708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -27,7 +27,8 @@
#include "amdgpu_ras.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
-#define AMDGPU_MAX_JPEG_RINGS 8
+#define AMDGPU_MAX_JPEG_RINGS 10
+#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 016a6f6c4267..98528ee94c15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -846,7 +846,7 @@ out:
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
- uint32_t pcie_gen_mask;
+ uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
@@ -934,15 +934,18 @@ out:
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
- pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 59ec20b07a6a..709c11cbeabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1610,10 +1610,12 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mes.bin", ucode_prefix);
}
@@ -1679,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1701,6 +1704,23 @@ error:
return r;
}
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c6f93cbd6739..e98ea7ede1ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -40,7 +40,7 @@
#define AMDGPU_MES_VERSION_MASK 0x00000fff
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
-#define AMDGPU_MES_MSCRATCH_SIZE 0x8000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6852d50caa89..96f4b8904e9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -41,6 +41,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: amdgpu_object
@@ -324,6 +325,9 @@ error_free:
*
* Allocates and pins a BO for kernel internal use.
*
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to create and access the kernel BO.
+ *
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
* Returns:
@@ -347,6 +351,76 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
+EXPORT_SYMBOL(amdgpu_bo_create_kernel);
+
+/**
+ * amdgpu_bo_create_isp_user - create user BO for isp
+ *
+ * @adev: amdgpu device object
+ * @dma_buf: DMABUF handle for isp buffer
+ * @domain: where to place it
+ * @bo: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a gpu_addr for the BO, making it
+ * accessible through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device external to drm device
+ * to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
+ u64 *gpu_addr)
+
+{
+ struct drm_gem_object *gem_obj;
+ int r;
+
+ gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
+ *bo = gem_to_amdgpu_bo(gem_obj);
+ if (!(*bo)) {
+ dev_err(adev->dev, "failed to get valid isp user bo\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_reserve(*bo, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_pin(*bo, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo);
+ goto error_unpin;
+ }
+
+ if (!WARN_ON(!gpu_addr))
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo);
+
+ amdgpu_bo_unreserve(*bo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(*bo);
+error_unreserve:
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+
+ return r;
+}
+EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -423,6 +497,9 @@ error:
* @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
 * unmaps and unpins a BO for kernel internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the kernel BO.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
@@ -447,6 +524,30 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
+EXPORT_SYMBOL(amdgpu_bo_free_kernel);
+
+/**
+ * amdgpu_bo_free_isp_user - free BO for isp use
+ *
+ * @bo: amdgpu isp user BO to free
+ *
+ * unpins and unrefs the BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the isp user BO.
+ */
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ if (amdgpu_bo_reserve(bo, true) == 0) {
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ }
+ amdgpu_bo_unref(&bo);
+}
+EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
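
A hedged sketch of how a client such as the V4L2 isp driver might pair the two exports; everything except the two amdgpu_bo_*_isp_user() calls is hypothetical:

/* illustrative caller, not part of this patch */
static int isp_map_dmabuf_sketch(struct amdgpu_device *adev,
				 struct dma_buf *dbuf)
{
	struct amdgpu_bo *bo;
	u64 gpu_addr;
	int r;

	r = amdgpu_bo_create_isp_user(adev, dbuf, AMDGPU_GEM_DOMAIN_GTT,
				      &bo, &gpu_addr);
	if (r)
		return r;

	/* ... program the ISP HW with gpu_addr, run the pipeline ... */

	amdgpu_bo_free_isp_user(bo);	/* unpins and drops the reference */
	return 0;
}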
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -1150,7 +1251,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
@@ -1158,7 +1258,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = ttm_to_amdgpu_bo(bo);
- amdgpu_vm_bo_invalidate(adev, abo, evict);
+ amdgpu_vm_bo_move(abo, new_mem, evict);
amdgpu_bo_kunmap(abo);
@@ -1171,75 +1271,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
old_mem ? old_mem->mem_type : -1);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats,
- unsigned int sz)
-{
- const unsigned int domain_to_pl[] = {
- [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM,
- [ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT,
- [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM,
- [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS,
- [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS,
- [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA,
- [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
- };
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *res = bo->tbo.resource;
- struct drm_gem_object *obj = &bo->tbo.base;
- uint64_t size = amdgpu_bo_size(bo);
- unsigned int type;
-
- if (!res) {
- /*
- * If no backing store use one of the preferred domain for basic
- * stats. We take the MSB since that should give a reasonable
- * view.
- */
- BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
- TTM_PL_VRAM < TTM_PL_SYSTEM);
- type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
- if (!type)
- return;
- type--;
- if (drm_WARN_ON_ONCE(&adev->ddev,
- type >= ARRAY_SIZE(domain_to_pl)))
- return;
- type = domain_to_pl[type];
- } else {
- type = res->mem_type;
- }
-
- if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
- return;
-
- /* DRM stats common fields: */
-
- if (drm_gem_object_is_shared_for_memory_stats(obj))
- stats[type].drm.shared += size;
- else
- stats[type].drm.private += size;
-
- if (res) {
- stats[type].drm.resident += size;
-
- if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
- stats[type].drm.active += size;
- else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
- stats[type].drm.purgeable += size;
- }
-
- /* amdgpu specific stats: */
-
- if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
- stats[TTM_PL_VRAM].requested += size;
- if (type != TTM_PL_VRAM)
- stats[TTM_PL_VRAM].evicted += size;
- } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
- stats[TTM_PL_TT].requested += size;
- }
-}
-
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1455,6 +1486,45 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
+ * @bo: the buffer object we should look at
+ *
+ * A BO can have multiple preferred placements; to avoid double counting we want
+ * to file it under a single placement for memory stats.
+ * Luckily, if we take the highest set bit in preferred_domains the result is
+ * quite sensible.
+ *
+ * Returns:
+ * Which of the placements should the BO be accounted under.
+ */
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
+{
+ uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
+
+ if (!domain)
+ return TTM_PL_SYSTEM;
+
+ switch (rounddown_pow_of_two(domain)) {
+ case AMDGPU_GEM_DOMAIN_CPU:
+ return TTM_PL_SYSTEM;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ return TTM_PL_TT;
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ return TTM_PL_VRAM;
+ case AMDGPU_GEM_DOMAIN_GDS:
+ return AMDGPU_PL_GDS;
+ case AMDGPU_GEM_DOMAIN_GWS:
+ return AMDGPU_PL_GWS;
+ case AMDGPU_GEM_DOMAIN_OA:
+ return AMDGPU_PL_OA;
+ case AMDGPU_GEM_DOMAIN_DOORBELL:
+ return AMDGPU_PL_DOORBELL;
+ default:
+ return TTM_PL_SYSTEM;
+ }
+}
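
The helper leans on rounddown_pow_of_two() returning the highest set bit of the domain mask. The same selection in portable user-space C, with made-up domain values:

#include <stdio.h>

#define DOM_CPU		0x1	/* stand-ins for the AMDGPU_GEM_DOMAIN_* bits */
#define DOM_GTT		0x2
#define DOM_VRAM	0x4

static unsigned int top_bit(unsigned int x)
{
	while (x & (x - 1))
		x &= x - 1;	/* clear the lowest set bit until one remains */
	return x;
}

int main(void)
{
	unsigned int preferred = DOM_VRAM | DOM_GTT;	/* GTT fallback allowed */

	/* VRAM (0x4) outranks GTT (0x2): the BO is accounted under VRAM */
	printf("account under domain %#x\n", top_bit(preferred));
	return 0;
}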
+
+/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be6769852ece..375448627f7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -260,6 +260,10 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
@@ -271,6 +275,7 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -300,9 +305,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats,
- unsigned int size);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
@@ -337,8 +340,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct drm_suballoc **sa_bo,
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 448f9e742983..e5fc80ed06ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -208,6 +208,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
@@ -359,6 +360,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
int i;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
return false;
@@ -870,6 +872,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -2264,7 +2267,8 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
- ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
return -EINVAL;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
@@ -2385,6 +2389,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->spdm_drv)) &&
+ (psp->funcs->bootloader_load_spdm_drv != NULL)) {
+ ret = psp_bootloader_load_spdm_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spdm_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -3007,10 +3020,7 @@ static int psp_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
@@ -3135,6 +3145,10 @@ static int psp_resume(struct amdgpu_ip_block *ip_block)
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -3289,7 +3303,8 @@ int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
@@ -3311,7 +3326,8 @@ int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
@@ -3407,6 +3423,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ipkeymgr_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_SPDM_DRV:
+ psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spdm_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
@@ -3474,7 +3496,8 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3750,7 +3773,8 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3785,14 +3809,16 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
- goto out;
+ } else {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
}
- dev_err(adev->dev, "fail to initialize cap microcode\n");
+ goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3849,13 +3875,13 @@ int psp_config_sq_perfmon(struct psp_context *psp,
return ret;
}
-static int psp_set_clockgating_state(void *handle,
+static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int psp_set_powergating_state(void *handle,
+static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3867,10 +3893,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
uint32_t fw_ver;
int ret;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3899,8 +3927,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3908,7 +3938,8 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s", buf);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 567cb1f924ca..8d5acc415d38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -80,6 +80,7 @@ enum psp_bootloader_cmd {
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
+ PSP_BL__LOAD_SPDMDRV = 0x20000000,
};
enum psp_ring_type {
@@ -120,6 +121,7 @@ struct psp_funcs {
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
+ int (*bootloader_load_spdm_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -343,6 +345,7 @@ struct psp_context {
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
struct psp_bin_desc ipkeymgr_drv;
+ struct psp_bin_desc spdm_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -434,6 +437,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ipkeymgr_drv(psp) \
((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
(psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
+#define psp_bootloader_load_spdm_drv(psp) \
+ ((psp)->funcs->bootloader_load_spdm_drv ? \
+ (psp)->funcs->bootloader_load_spdm_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4c9fa24dd972..f0924aa3f4e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -36,6 +36,7 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
+#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -192,7 +193,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -2015,6 +2016,7 @@ static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = true;
break;
@@ -2156,6 +2158,16 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
+ /**
+ * If the current interrupt is caused by a non-fatal RAS error, skip
+ * the fatal error check. For fatal errors, the FED status of all devices
+ * in XGMI hive gets set when the first device gets fatal error
+ * interrupt. The error gets propagated to other devices as well, so
+ * make sure to ack the interrupt regardless of FED status.
+ */
+ if (!amdgpu_ras_get_fed_status(adev) &&
+ amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
+ return;
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
@@ -2185,6 +2197,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (ret)
return;
+ amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
@@ -2717,40 +2730,203 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t socket = 0;
+ int ret = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.socket_id = socket;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+
+ return ret;
+}
+
+static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t die_id, socket = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* although the die id is derived from the PA in nps1 mode, the id is
+ * suitable for any nps mode
+ */
+ if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
+ die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ else
+ return -EINVAL;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = die_id;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+ else
+ return -EINVAL;
+}
+
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages)
+ struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
+ struct ras_err_data err_data;
+ struct eeprom_table_record *err_rec;
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i, j, loop_cnt = 1;
+ bool find_pages_per_pa = false;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
+ if (from_rom) {
+ err_data.err_addr =
+ kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ err_rec = err_data.err_addr;
+ loop_cnt = adev->umc.retire_unit;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ }
+
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data)
- goto out;
+ if (!data) {
+ /* Returning 0 as the absence of eh_data is acceptable */
+ goto free;
+ }
for (i = 0; i < pages; i++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
+ if (from_rom &&
+ control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
+ if (!find_pages_per_pa) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ /* an old RAS TA may be in use; use the PA to find
+ * pages in one row
+ */
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page <<
+ AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ } else {
+ find_pages_per_pa = true;
+ }
+ } else {
+ /* unsupported cases */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ } else {
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+ } else {
+ if (from_rom && !find_pages_per_pa) {
+ if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
+ /* bad page in any NPS mode in eeprom */
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ } else {
+ /* legacy bad page in eeprom, generated only in
+ * NPS1 mode
+ */
+ if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
+ /* old RAS TA or ASICs which don't support
+ * converting the address via MCA address
+ */
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ find_pages_per_pa = true;
+ err_rec = &bps[i];
+ loop_cnt = 1;
+ } else {
+ /* non-nps1 mode, old RAS TA
+ * can't support it
+ */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ }
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
- goto out;
+ if (!find_pages_per_pa)
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ err_rec = &bps[i];
+ }
}
- amdgpu_ras_reserve_page(adev, bps[i].retired_page);
+ for (j = 0; j < loop_cnt; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ ret = -ENOMEM;
+ goto free;
+ }
- memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
- data->count++;
- data->space_left--;
+ amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(err_rec[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ }
}
+
+free:
+ if (from_rom)
+ kfree(err_data.err_addr);
out:
mutex_unlock(&con->recovery_lock);
@@ -2768,7 +2944,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count;
+ int save_count, unit_num, bad_page_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -2780,19 +2956,32 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- save_count = data->count - control->ras_num_recs;
+ bad_page_num = control->ras_num_bad_pages;
+ save_count = data->count - bad_page_num;
mutex_unlock(&con->recovery_lock);
+ unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
- *new_cnt = save_count / adev->umc.retire_unit;
+ *new_cnt = unit_num;
/* only new entries are saved */
if (save_count > 0) {
- if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
- dev_err(adev->dev, "Failed to save EEPROM table data!");
- return -EIO;
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[control->ras_num_recs],
+ save_count)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ } else {
+ for (i = 0; i < unit_num; i++) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[bad_page_num + i * adev->umc.retire_unit],
+ 1)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ }
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
@@ -2821,11 +3010,32 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
return -ENOMEM;
ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
- else
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
+ } else {
+ if (control->ras_num_recs > 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ if ((bps[0].address == bps[1].address) &&
+ (bps[0].mem_channel == bps[1].mem_channel))
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+ else
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ }
+
+ ret = amdgpu_ras_eeprom_check(control);
+ if (ret)
+ goto out;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev)) {
+ ret = -EHWPOISON;
+ goto out;
+ }
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ }
+
+out:
kfree(bps);
return ret;
}
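
The detection above relies on a property of PA-format tables: the expanded rows of one retire unit share a single MCA address and channel, so the first two records collide exactly when the table stores PAs. A sketch of that predicate, with the struct reduced to the two compared fields for illustration:

        #include <stdbool.h>
        #include <stdint.h>

        struct rec { uint64_t address; uint8_t mem_channel; };

        static bool table_stores_pa(const struct rec *bps, int nrecs)
        {
                if (nrecs < 2)
                        return false; /* too few records; caller keeps its default */
                return bps[0].address == bps[1].address &&
                       bps[0].mem_channel == bps[1].mem_channel;
        }
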
@@ -3205,31 +3415,36 @@ static int amdgpu_ras_page_retirement_thread(void *param)
int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
int ret;
if (!con || amdgpu_sriov_vf(adev))
return 0;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
-
+ control = &con->eeprom_control;
+ ret = amdgpu_ras_eeprom_init(control);
if (ret)
return ret;
- /* HW not usable */
- if (amdgpu_ras_is_rma(adev))
- return -EHWPOISON;
+ if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+
+ /* default status is MCA storage */
+ if (control->ras_num_recs <= 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
- if (con->eeprom_control.ras_num_recs) {
+ if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
return ret;
amdgpu_dpm_send_hbm_bad_pages_num(
- adev, con->eeprom_control.ras_num_recs);
+ adev, control->ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(
- adev, con->eeprom_control.bad_channel_bitmap);
+ adev, control->bad_channel_bitmap);
con->update_channel_flag = false;
}
}
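
A compact sketch of the rec_type defaulting above (illustrative only; the function name is hypothetical): without a convert_ras_err_addr callback only PA records are possible, an empty or single-record table defaults to MCA, and anything else is settled later by the load-path detection:

        enum rec_type { REC_PA, REC_MCA };

        static enum rec_type pick_default_rec_type(bool has_convert_cb, int num_recs)
        {
                if (!has_convert_cb)
                        return REC_PA;  /* old TA/ASIC: no MCA conversion available */
                if (num_recs <= 1)
                        return REC_MCA; /* default for an empty/near-empty table */
                return REC_MCA;         /* refined by amdgpu_ras_load_bad_pages() */
        }
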
@@ -3366,6 +3581,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -3378,7 +3594,9 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 3):
return true;
default:
return false;
@@ -3629,6 +3847,7 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
break;
@@ -3704,7 +3923,19 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
* check DF RAS */
adev->nbio.ras = &nbio_v4_3_ras;
break;
+ case IP_VERSION(6, 3, 1):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbif v6_3_1 only supports the fatal error interrupt
+ * to inform software that DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in that case. Instead,
+ * check DF RAS
+ */
+ adev->nbio.ras = &nbif_v6_3_1_ras;
+ break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
if (!adev->gmc.is_app_apu)
adev->nbio.ras = &nbio_v7_9_ras;
break;
@@ -4083,7 +4314,7 @@ bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
if (!ras)
return false;
- return atomic_read(&ras->fed);
+ return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
@@ -4091,8 +4322,48 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (status)
+ set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ else
+ clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ }
+}
+
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras)
+ ras->ras_err_state = 0;
+}
+
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
if (ras)
- atomic_set(&ras->fed, !!status);
+ set_bit(block, &ras->ras_err_state);
+}
+
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (block == AMDGPU_RAS_BLOCK__ANY)
+ return (ras->ras_err_state != 0);
+ else
+ return test_bit(block, &ras->ras_err_state) ||
+ test_bit(AMDGPU_RAS_BLOCK__LAST,
+ &ras->ras_err_state);
+ }
+
+ return false;
}
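
A standalone sketch of the bitmask scheme these helpers implement: one bit per RAS block, the __LAST bit doubling as the global fatal-error flag, and __ANY (-1) meaning "any bit set". The bit position here is a placeholder:

        #include <stdbool.h>

        #define BLOCK_LAST 20        /* stands in for AMDGPU_RAS_BLOCK__LAST */
        #define BLOCK_ANY  (-1)      /* stands in for AMDGPU_RAS_BLOCK__ANY */

        static unsigned long err_state;

        static bool is_err_state(int block)
        {
                if (block == BLOCK_ANY)
                        return err_state != 0;
                /* a fatal error (__LAST) poisons every block */
                return ((err_state >> block) & 1UL) ||
                       ((err_state >> BLOCK_LAST) & 1UL);
        }
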
static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6db772ecfee4..82db986c36a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -99,7 +99,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
- AMDGPU_RAS_BLOCK__LAST
+ AMDGPU_RAS_BLOCK__LAST,
+ AMDGPU_RAS_BLOCK__ANY = -1
};
enum amdgpu_ras_mca_block {
@@ -482,6 +483,8 @@ struct ras_ecc_err {
uint64_t ipid;
uint64_t addr;
uint64_t pa_pfn;
+ /* save global channel index across all UMC instances */
+ uint32_t channel_idx;
struct ras_err_pages err_pages;
};
@@ -558,8 +561,8 @@ struct amdgpu_ras {
struct ras_ecc_log_info umc_ecc_log;
struct delayed_work page_retirement_dwork;
- /* Fatal error detected flag */
- atomic_t fed;
+ /* ras errors detected */
+ unsigned long ras_err_state;
/* RAS event manager */
struct ras_event_manager __event_mgr;
@@ -750,7 +753,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages);
+ struct eeprom_table_record *bps, int pages, bool from_rom);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt);
@@ -952,6 +955,10 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev);
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block);
u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f28f6b4ba765..52c16bfeccaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -470,9 +470,10 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
res = __write_table_ras_info(control);
control->ras_num_recs = 0;
+ control->ras_num_bad_pages = 0;
control->ras_fri = 0;
- amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
control->bad_channel_bitmap = 0;
amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
@@ -559,7 +560,7 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
dev_warn(adev->dev,
"But GPU can be operated due to bad_page_threshold = -1.\n");
return false;
@@ -621,6 +622,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
+ struct amdgpu_device *adev = to_amdgpu_device(control);
u32 a, b, i;
u8 *buf, *pp;
int res;
@@ -723,6 +725,12 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
control->ras_num_recs = 1 + (control->ras_max_record_count + b
- control->ras_fri)
% control->ras_max_record_count;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
Out:
kfree(buf);
return res;
@@ -740,10 +748,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_recs >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
@@ -798,9 +806,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
*/
if (amdgpu_bad_page_threshold != 0 &&
control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_recs < ras->bad_page_cnt_threshold)
+ control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
- control->ras_num_recs) * 100) /
+ control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
/* Recalc the checksum.
@@ -841,7 +849,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ int res, i;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -855,6 +863,10 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
}
+ /* set the new channel index flag */
+ for (i = 0; i < num; i++)
+ record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+
mutex_lock(&control->ras_tbl_mutex);
res = amdgpu_ras_eeprom_append_table(control, record, num);
@@ -864,6 +876,11 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
+
+ /* clear channel index flag, the flag is only saved on eeprom */
+ for (i = 0; i < num; i++)
+ record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+
return res;
}
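
A standalone sketch of the bit-47 tagging done above: UMC_CHANNEL_IDX_V2 exists only in the EEPROM image, so it is ORed in right before the write and masked off again afterwards:

        #include <stdint.h>

        #define UMC_CHANNEL_IDX_V2 (1ULL << 47)

        static void tag_for_eeprom(uint64_t *retired_page, int num)
        {
                for (int i = 0; i < num; i++)
                        retired_page[i] |= UMC_CHANNEL_IDX_V2;
        }

        static void untag_after_write(uint64_t *retired_page, int num)
        {
                for (int i = 0; i < num; i++)
                        retired_page[i] &= ~UMC_CHANNEL_IDX_V2;
        }
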
@@ -1373,9 +1390,35 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ return 0;
+}
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
+
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_recs);
+ control->ras_num_bad_pages);
if (hdr->version == RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1390,9 +1433,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
/* Warn if we are at 90% of the threshold or above
*/
- if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
@@ -1403,10 +1446,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
res = __verify_ras_table_checksum(control);
- if (res)
- DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
+ if (res) {
+ dev_err(adev->dev, "RAS Table incorrect checksum or error:%d\n",
res);
- if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
+ return -EINVAL;
+ }
+ if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1416,13 +1461,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_info(adev->dev,
"records:%d threshold:%d, resetting "
"RAS table header signature",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
@@ -1431,7 +1476,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_err(adev->dev,
"RAS records:%d exceed threshold:%d, "
"GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
}
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index b9ebda577797..81d55cb7b397 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -43,6 +43,19 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
+/*
+ * One UMC MCA address can map to multiple physical addresses (PAs),
+ * e.g. 1:16. We use eeprom_table_record.address to store the MCA
+ * address and eeprom_table_record.retired_page to save the PA.
+ *
+ * AMDGPU_RAS_EEPROM_REC_PA: one record stores one PA
+ * AMDGPU_RAS_EEPROM_REC_MCA: one record stores one MCA address
+ */
+enum amdgpu_ras_eeprom_rec_type {
+ AMDGPU_RAS_EEPROM_REC_PA,
+ AMDGPU_RAS_EEPROM_REC_MCA,
+};
+
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -82,6 +95,11 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_recs;
+ /* the bad page number is ras_num_recs or
+ * ras_num_recs * umc.retire_unit
+ */
+ u32 ras_num_bad_pages;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -102,6 +120,7 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+ enum amdgpu_ras_eeprom_rec_type rec_type;
};
/*
@@ -145,6 +164,8 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index a0acb65f4b40..dabfbdf6f1ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -183,6 +183,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_init(adev);
break;
@@ -206,6 +207,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_fini(adev);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 36fc9578c53c..dee5a1b4e572 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -462,8 +462,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size,
enum amdgpu_ib_pool_type pool,
struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 10df731998b2..39070b2a4c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -93,8 +93,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return 0;
}
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
- struct dma_fence *fence)
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo, struct dma_fence *fence)
{
if (sa_bo == NULL || *sa_bo == NULL) {
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 113f0d242618..174badca27e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -219,9 +219,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (instance == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s%d.bin", ucode_prefix, instance);
if (err)
goto out;
@@ -261,6 +263,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 2) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 5)) &&
adev->firmware.load_type ==
AMDGPU_FW_LOAD_PSP &&
@@ -358,13 +362,13 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
if (!adev)
return -ENODEV;
- mask = (1 << adev->sdma.num_instances) - 1;
+ mask = BIT_ULL(adev->sdma.num_instances) - 1;
if ((val & mask) == 0)
return -EINVAL;
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
- if (val & (1 << i))
+ if (val & BIT_ULL(i))
ring->sched.ready = true;
else
ring->sched.ready = false;
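
The BIT_ULL change above avoids a classic shift overflow: with an int-typed 1, `1 << n` is undefined once n reaches 31, so the mask breaks for large instance counts. A minimal standalone demonstration:

        #include <stdio.h>

        int main(void)
        {
                /* (1 << 32) - 1 on int would be undefined behavior;
                 * 1ULL keeps the arithmetic in 64 bits.
                 */
                unsigned long long mask = (1ULL << 32) - 1; /* 0xffffffff */
                printf("mask for 32 instances: 0x%llx\n", mask);
                return 0;
        }
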
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 2db58b5812a8..5f60736051d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -107,6 +107,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src doorbell_invalid_irq;
struct amdgpu_irq_src pool_timeout_irq;
struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
int num_instances;
uint32_t sdma_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c8180cad0abd..262bd010a283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
@@ -1762,7 +1766,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
if (!adev->bios &&
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
reserve_size = max(reserve_size, (uint32_t)280 << 20);
else if (!reserve_size)
reserve_size = DISCOVERY_TMR_OFFSET;
@@ -2065,6 +2070,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
@@ -2275,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2852a6064c9a..208b7d1d8a27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,15 +26,15 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
-#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
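
A standalone sketch of the field packing AMDGPU_COPY_FLAGS_SET performs, using the new WRITE_COMPRESS_DISABLE field (mask 0x1, shift 14) from the header above:

        #include <stdint.h>
        #include <stdio.h>

        #define SET_FIELD(val, mask, shift) (((uint32_t)(val) & (mask)) << (shift))

        int main(void)
        {
                uint32_t flags = SET_FIELD(1, 0x1, 14); /* WRITE_COMPRESS_DISABLE */
                printf("copy_flags = 0x%x\n", flags);   /* prints 0x4000 */
                return 0;
        }
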
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4c7b53648a50..cf700824b960 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1434,6 +1434,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
+ * @required: whether the firmware is required
* @fmt: firmware name format string
* @...: variable arguments
*
@@ -1442,7 +1443,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...)
+ enum amdgpu_ucode_required required, const char *fmt, ...)
{
char fname[AMDGPU_UCODE_NAME_MAX];
va_list ap;
@@ -1456,16 +1457,24 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
return -EOVERFLOW;
}
- r = request_firmware(fw, fname, adev->dev);
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
if (r)
return -ENODEV;
r = amdgpu_ucode_validate(*fw);
- if (r) {
+ if (r)
+ /*
+ * amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+ * regardless of success/failure; amdgpu_ucode_release() takes care of
+ * releasing the firmware, so avoid a redundant release here.
+ */
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
- release_firmware(*fw);
- *fw = NULL;
- }
return r;
}
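
A hedged usage sketch of the new required/optional split (the enum and signature come from this patch; the `foo` block and file name are hypothetical): an optional blob is reported via drm_info() instead of a warning, and init can continue without it:

        r = amdgpu_ucode_request(adev, &adev->foo.fw, AMDGPU_UCODE_OPTIONAL,
                                 "amdgpu/%s_optional.bin", ucode_prefix);
        if (r)
                /* blob missing: noted with drm_info(), no warning, keep going */
                adev->foo.fw = NULL;
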
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4150ec0aa10d..4eedd92f000b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -126,6 +126,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
+ PSP_FW_TYPE_PSP_SPDM_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
@@ -551,6 +552,11 @@ enum amdgpu_firmware_load_type {
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO,
};
+enum amdgpu_ucode_required {
+ AMDGPU_UCODE_OPTIONAL,
+ AMDGPU_UCODE_REQUIRED,
+};
+
/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
@@ -604,9 +610,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-__printf(3, 4)
+__printf(4, 5)
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...);
+ enum amdgpu_ucode_required required, const char *fmt, ...);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 896f3609b0ee..eafe20d8fe0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -78,7 +78,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -166,10 +166,11 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
+ err_data->err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, &err_count);
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev,
+ con->eeprom_control.ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
@@ -444,3 +445,77 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr)
+{
+ struct ta_ras_query_address_output addr_out;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ addr_out.pa.pa = pa_addr;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
+ &addr_out, false);
+ else
+ return -EINVAL;
+}
+
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ int i, ret;
+ struct ras_err_data err_data;
+
+ err_data.err_addr = kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
+ return 0;
+ }
+
+ ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ if (i >= len)
+ goto out;
+
+ pfns[i] = err_data.err_addr[i].retired_page;
+ }
+ ret = i;
+
+out:
+ kfree(err_data.err_addr);
+ return ret;
+}
+
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
+ addr_out, dump_addr);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ return 0;
+}
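
A hedged usage sketch for the helpers above (signatures as declared in this patch; the surrounding variables and the array size are assumptions): expand one retired physical address into the full row of PFNs it retires:

        uint64_t pfns[16]; /* assumed upper bound for adev->umc.retire_unit */
        int i, n;

        n = amdgpu_umc_lookup_bad_pages_in_a_row(adev, pa_addr, pfns,
                                                 ARRAY_SIZE(pfns));
        for (i = 0; i < n; i++)
                dev_dbg(adev->dev, "row pfn[%d] = 0x%llx\n", i, pfns[i]);
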
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ce4179db2a6d..a4a7e61817aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,6 +54,22 @@
/* Page retirement tag */
#define UMC_ECC_NEW_DETECTED_TAG 0x1
+/*
+ * A flag to indicate that v2 of the channel index is stored in eeprom.
+ *
+ * v1 (legacy way): store the channel index within a umc instance in eeprom;
+ * range in UMC v12: 0 ~ 7
+ * v2: store the global channel index in eeprom;
+ * range in UMC v12: 0 ~ 127
+ *
+ * NOTE: it would be better to store it in eeprom_table_record.mem_channel,
+ * but there are only 8 bits in mem_channel and the channel number may
+ * increase in the future, so we save it in
+ * eeprom_table_record.retired_page instead. retired_page is unused in v2;
+ * we depend on eeprom_table_record.address rather than retired_page in v2.
+ * Only 48 bits are saved to eeprom, so use bit 47 here.
+ */
+#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -70,6 +86,13 @@ struct amdgpu_umc_ras {
enum amdgpu_mca_error_type type, void *ras_error_status);
int (*update_ecc_status)(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
+ int (*convert_ras_err_addr)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr);
+ uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page);
};
struct amdgpu_umc_funcs {
@@ -134,4 +157,12 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status);
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr);
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len);
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index bd2d3863c3ed..dde15c6a96e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,8 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 65bb26215e86..74758b5ffc6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 599d3ca4e0ef..b9060bcd4806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -503,7 +503,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
- amdgpu_ib_free(ring->adev, &ib_msg, f);
+ amdgpu_ib_free(&ib_msg, f);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3e94c3ba1ba2..83faf6e6788a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,6 +62,7 @@
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
+#define FIRMWARE_VCN5_0_1 "amdgpu/vcn_5_0_1.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -88,6 +89,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -99,11 +101,15 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
else
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
- amdgpu_ucode_release(&adev->vcn.fw[i]);
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
return r;
}
}
@@ -151,7 +157,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -270,7 +276,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
- amdgpu_ucode_release(&adev->vcn.fw[j]);
+ amdgpu_ucode_release(&adev->vcn.inst[j].fw);
}
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
@@ -282,7 +288,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
- int vcn_config = adev->vcn.vcn_config[vcn_instance];
+ int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
@@ -362,12 +368,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.fw[i]->data + offset,
+ adev->vcn.inst[i].fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -580,7 +586,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -591,7 +597,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -773,7 +779,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -784,7 +790,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -1014,7 +1020,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
- amdgpu_ib_free(adev, &ib, fence);
+ amdgpu_ib_free(&ib, fence);
dma_fence_put(fence);
return r;
@@ -1025,7 +1031,8 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
long r;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+ (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
if (r)
goto error;
@@ -1063,7 +1070,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
/* currently only support 2 FW instances */
if (i >= 2) {
dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1071,12 +1078,14 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
+ IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(5, 0, 1))
break;
}
}
@@ -1320,3 +1329,71 @@ void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
}
}
+
+/*
+ * debugfs to enable/disable vcn job submission to a specific core or
+ * instance. It is created only if the queue type is unified.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* publish the sched.ready flag update so it takes effect immediately across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
+ amdgpu_debugfs_vcn_sched_mask_get,
+ amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
+#endif
+
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ return;
+ sprintf(name, "amdgpu_vcn_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_vcn_sched_mask_fops);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1e32311c1dff..adaf4388ad28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -163,20 +163,30 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
- bool video_range, aon_range; \
+ bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
if (video_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
(VCN_VID_IP_ADDRESS)); \
else if (aon_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
(VCN_AON_IP_ADDRESS)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
else \
internal_reg_offset = (0xFFFFF & addr); \
\
@@ -297,6 +307,9 @@ struct amdgpu_vcn_inst {
atomic_t dpg_enc_submission_cnt;
struct amdgpu_vcn_fw_shared fw_shared;
uint8_t aid_id;
+ const struct firmware *fw; /* VCN firmware */
+ uint8_t vcn_config;
+ uint32_t vcn_codec_disable_mask;
};
struct amdgpu_vcn_ras {
@@ -306,15 +319,12 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
@@ -523,5 +533,6 @@ int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index c704e9803e11..0af469ec6fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1263,12 +1263,10 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
return 0;
- tmp = kmalloc(used_size, GFP_KERNEL);
+ tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
- memcpy(tmp, &host_telemetry->body.error_count, used_size);
-
if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 8bf28d336807..7507d9443028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -188,8 +188,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
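
The hrtimer_setup() form above fuses initialization and handler assignment into one call, so the timer can never be armed with its handler unset. Pattern sketch (`t` and `my_handler` are hypothetical):

        /* old two-step form */
        hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t.function = &my_handler;

        /* new fused form */
        hrtimer_setup(&t, &my_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
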
@@ -632,13 +632,13 @@ static bool amdgpu_vkms_is_idle(void *handle)
return true;
}
-static int amdgpu_vkms_set_clockgating_state(void *handle,
+static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int amdgpu_vkms_set_powergating_state(void *handle,
+static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c9c48b782ec1..5c07777d3239 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
+#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
@@ -311,6 +312,111 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_update_shared - helper to update shared memory stat
+ * @base: base structure for tracking BO usage in a VM
+ *
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be
+ * called as well.
+ */
+static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ uint64_t size = amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+ bool shared;
+
+ spin_lock(&vm->status_lock);
+ shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ if (base->shared != shared) {
+ base->shared = shared;
+ if (shared) {
+ vm->stats[bo_memtype].drm.shared += size;
+ vm->stats[bo_memtype].drm.private -= size;
+ } else {
+ vm->stats[bo_memtype].drm.shared -= size;
+ vm->stats[bo_memtype].drm.private += size;
+ }
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
+ * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
+ * @bo: amdgpu buffer object
+ *
+ * Update the per-VM stats of every VM the BO belongs to when it changes
+ * from private to shared or vice versa.
+ */
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *base;
+
+ for (base = bo->vm_bo; base; base = base->next)
+ amdgpu_vm_update_shared(base);
+}
+
+/**
+ * amdgpu_vm_update_stats_locked - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * The caller needs to hold the vm status_lock. Useful when multiple updates
+ * need to happen at the same time.
+ */
+static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ int64_t size = sign * amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+
+ /* For drm-total- and drm-shared-, BOs are accounted by their preferred
+ * placement, see also amdgpu_bo_mem_stats_placement.
+ */
+ if (base->shared)
+ vm->stats[bo_memtype].drm.shared += size;
+ else
+ vm->stats[bo_memtype].drm.private += size;
+
+ if (res && res->mem_type < __AMDGPU_PL_NUM) {
+ uint32_t res_memtype = res->mem_type;
+
+ vm->stats[res_memtype].drm.resident += size;
+ /* A BO only counts as purgeable if it is resident,
+ * since otherwise there's nothing to purge.
+ */
+ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ vm->stats[res_memtype].drm.purgeable += size;
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ vm->stats[bo_memtype].evicted += size;
+ }
+}
+
+/**
+ * amdgpu_vm_update_stats - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * Updates the basic memory stat when bo is added/deleted/moved.
+ */
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(base, res, sign);
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -333,6 +439,11 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
+ spin_lock(&vm->status_lock);
+ base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
+ spin_unlock(&vm->status_lock);
+
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
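
The hunk above is the heart of the new stats scheme: every BO transition is a pair of symmetric signed updates inside one status_lock critical section, which is what keeps the cached per-VM totals from drifting. A standalone sketch of the idea:

        struct stats { long long resident; };

        static void account(struct stats *s, long long size, int sign)
        {
                s->resident += sign * size;
        }

        /* move: subtract from the old placement, add to the new one */
        static void on_move(struct stats *from, struct stats *to, long long size)
        {
                account(from, size, -1);
                account(to, size, +1);
        }
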
@@ -1083,53 +1194,11 @@ error_free:
return r;
}
-static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
- struct amdgpu_mem_stats *stats,
- unsigned int size)
-{
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *bo = bo_va->base.bo;
-
- if (!bo)
- return;
-
- /*
- * For now ignore BOs which are currently locked and potentially
- * changing their location.
- */
- if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
- !dma_resv_trylock(bo->tbo.base.resv))
- return;
-
- amdgpu_bo_get_memory(bo, stats, size);
- if (!amdgpu_vm_is_bo_always_valid(vm, bo))
- dma_resv_unlock(bo->tbo.base.resv);
-}
-
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats,
- unsigned int size)
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- struct amdgpu_bo_va *bo_va, *tmp;
-
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats, size);
+ memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
}
@@ -2075,6 +2144,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (*base != &bo_va->base)
continue;
+ amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
*base = bo_va->base.next;
break;
}
@@ -2143,14 +2213,12 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
- * @adev: amdgpu_device pointer
* @bo: amdgpu buffer object
* @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted)
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
@@ -2176,6 +2244,32 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_move - handle BO move
+ *
+ * @bo: amdgpu buffer object
+ * @new_mem: the new placement of the BO move
+ * @evicted: is the BO evicted
+ *
+ * Update the memory stats for the new placement and mark @bo as invalid.
+ */
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted)
+{
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
+ amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
+ spin_unlock(&vm->status_lock);
+ }
+
+ amdgpu_vm_bo_invalidate(bo, evicted);
+}
+
+/**
* amdgpu_vm_get_block_size - calculate VM page table size as power of two
*
* @vm_size: VM size
@@ -2594,6 +2688,16 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->is_compute_context = false;
}
+static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
+{
+ for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
+ if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
+ vm->stats[i].evicted == 0))
+ return false;
+ }
+ return true;
+}
+
/**
* amdgpu_vm_fini - tear down a vm instance
*
@@ -2617,7 +2721,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_put_task_info(vm->task_info);
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
@@ -2666,6 +2769,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+
+ if (!amdgpu_vm_stats_is_zero(vm)) {
+ struct amdgpu_task_info *ti = vm->task_info;
+
+ dev_warn(adev->dev,
+ "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
+ ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ }
+
+ amdgpu_vm_put_task_info(vm->task_info);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 5d119ac26c4f..a3e128e373bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -35,6 +35,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
+#include "amdgpu_ttm.h"
struct drm_exec;
@@ -202,9 +203,13 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by spinlock */
+ /* protected by vm status_lock */
struct list_head vm_status;
+ /* whether the bo is counted as shared in mem stats,
+ * protected by vm status_lock */
+ bool shared;
+
/* protected by the BO being reserved */
bool moved;
};
@@ -324,10 +329,7 @@ struct amdgpu_vm_fault_info {
struct amdgpu_mem_stats {
struct drm_memory_stats drm;
- /* buffers that requested this placement */
- uint64_t requested;
- /* buffers that requested this placement
- * but are currently evicted */
+ /* buffers that requested this placement but are currently evicted */
uint64_t evicted;
};
@@ -345,6 +347,9 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
+ /* Memory statistics for this vm, protected by status_lock */
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
@@ -524,8 +529,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *new_res, int sign);
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
@@ -576,8 +585,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats,
- unsigned int size);
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index f78a0434a48f..b0bf21682115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -537,6 +537,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
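+ /* page table BOs are accounted in the VM stats like any other VM BO,
+ * so drop this entry's contribution before it is freed */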
+ amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 110b120d7375..121ee17b522b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -236,7 +236,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
int ret;
amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
- ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", fw_prefix);
if (ret)
goto out;
@@ -646,16 +647,16 @@ static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int vpe_set_clockgating_state(void *handle,
+static int vpe_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int vpe_set_powergating_state(void *handle,
+static int vpe_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
if (!adev->pm.dpm_enabled)
@@ -833,7 +834,7 @@ static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7d26a962f811..ff5e52025266 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
remaining_size -= size;
}
- mutex_unlock(&mgr->lock);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
(u64)vres->base.size,
&vres->blocks);
}
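+ /* the DCC trim/allocation above still operates on mgr->mm, so the
+ * lock can only be dropped once it is done */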
+ mutex_unlock(&mgr->lock);
vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index e209b5e101df..23b6f7a4aa4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -427,7 +427,7 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
return;
sched = entity->entity.rq->sched;
- if (sched->ready) {
+ if (drm_sched_wqueue_ready(sched)) {
ring = to_amdgpu_ring(entity->entity.rq->sched);
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index e2cb1f080e88..08d6787893b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2161,13 +2161,13 @@ static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cik_common_set_clockgating_state(void *handle,
+static int cik_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_common_set_powergating_state(void *handle,
+static int cik_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 1da17755ad53..444563486769 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -402,13 +402,13 @@ static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cik_ih_set_clockgating_state(void *handle,
+static int cik_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_ih_set_powergating_state(void *handle,
+static int cik_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ede1a028d48d..d9bd8f3f17e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -133,9 +133,11 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -696,7 +698,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1189,11 +1191,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1204,7 +1206,7 @@ static int cik_sdma_set_clockgating_state(void *handle,
return 0;
}
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index d72973bd570d..82586b76aeda 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -398,14 +398,14 @@ static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int cz_ih_set_clockgating_state(void *handle,
+static int cz_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
// TODO
return 0;
}
-static int cz_ih_set_powergating_state(void *handle,
+static int cz_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
// TODO
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5098c50d54c8..c5e3d2251b18 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2687,6 +2687,32 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v10_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
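+ /* the panic handler draws straight into the buffer, so the plane
+ * must not scan out a tiled layout when it flushes */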
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v10_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v10_0_panic_flush,
+};
+
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2734,6 +2760,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v10_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3302,13 +3329,13 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v10_0_set_clockgating_state(void *handle,
+static int dce_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v10_0_set_powergating_state(void *handle,
+static int dce_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c5680ff4ab9f..ea42a4472bf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2800,6 +2800,32 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v11_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v11_0_panic_flush,
+};
+
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2847,6 +2873,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3434,13 +3461,13 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v11_0_set_clockgating_state(void *handle,
+static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v11_0_set_powergating_state(void *handle,
+static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index eb7de9122d99..915804a6a1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2602,6 +2602,32 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v6_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v6_0_panic_flush,
+};
+
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2629,6 +2655,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3124,13 +3151,13 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_clockgating_state(void *handle,
+static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v6_0_set_powergating_state(void *handle,
+static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 04b79ff87f75..f2edc0fece5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2613,6 +2613,31 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v8_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v8_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v8_0_panic_flush,
+};
+
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2640,6 +2665,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v8_0_drm_primary_plane_helper_funcs);
return 0;
}
@@ -3212,13 +3238,13 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v8_0_set_clockgating_state(void *handle,
+static int dce_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v8_0_set_powergating_state(void *handle,
+static int dce_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 24dce803a829..5ba263fe5512 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -45,6 +45,7 @@
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
+#include "gfx_v10_0_cleaner_shader.h"
#include "nbio_v2_3.h"
/*
@@ -3673,7 +3674,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -4036,7 +4037,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -4138,18 +4139,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4173,6 +4177,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4180,6 +4185,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -4733,6 +4739,23 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
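+ /* the cleaner shader needs CP firmware support; only enable it
+ * on ME/PFP/MEC versions new enough to handle it */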
+ if (adev->gfx.me_fw_version >= 64 &&
+ adev->gfx.pfp_fw_version >= 100 &&
+ adev->gfx.mec_fw_version >= 122) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -5952,7 +5975,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
else
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- if (adev->job_hang && !enable)
+ if (amdgpu_in_reset(adev) && !enable)
return 0;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -6599,17 +6622,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp | 0x80);
break;
default:
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
break;
}
}
@@ -7457,7 +7476,7 @@ static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
* otherwise the gfxoff disallowing will be failed to set.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
- gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE);
+ gfx_v10_0_set_powergating_state(ip_block, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -8345,10 +8364,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -8383,10 +8402,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v10_0_set_clockgating_state(void *handle,
+static int gfx_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
new file mode 100644
index 000000000000..663c2572d440
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_10_3_0 */
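+/* (assembled from gfx_v10_3_0_cleaner_shader.asm, added alongside this header) */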
+static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020002,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
new file mode 100644
index 000000000000..0e1c246166c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To compile this shader program, rename this shader to main and rename the shader main below to main_1.
+
+// GFX10.3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible that not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1664 clks)
+//
+ S_BARRIER
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) term, since only COMPUTE_PGM_RSRC2.tg_size_en is set in the ucode
+ s_cbranch_scc0 label_0023 // Clean LDS only if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8 bytes)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2ae058a224f4..56c06b72a70a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -615,7 +615,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -639,6 +639,7 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -688,6 +689,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -705,6 +707,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -720,9 +723,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
adev->pdev->revision == 0xCE)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -735,6 +740,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1885,6 +1891,7 @@ static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1902,9 +1909,11 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
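+ /* derive the per-SA RB mask from the real RB count rather than
+ * assuming two RBs per SA (the old hardcoded 0x3) */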
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap &= global_active_rb_bitmap;
@@ -3918,9 +3927,7 @@ static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -5458,10 +5465,10 @@ static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
-static int gfx_v11_0_set_powergating_state(void *handle,
+static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5494,10 +5501,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v11_0_set_clockgating_state(void *handle,
+static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -6646,30 +6653,14 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, r = 0;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
-
- /* make sure dequeue is complete*/
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
- }
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
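+ /* hand the queue reset to the MES firmware instead of poking the HQD registers directly */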
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index da327ab48a57..48ff00427882 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -513,7 +513,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -537,6 +537,7 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -566,6 +567,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -573,6 +575,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -581,6 +584,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -593,6 +597,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1347,6 +1352,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (adev->gfx.me_fw_version >= 2480 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2680 &&
+ adev->mes.fw_version[0] >= 100)
+ adev->gfx.enable_cleaner_shader = true;
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1437,11 +1450,19 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if ((adev->gfx.me_fw_version >= 2660) &&
+ (adev->gfx.mec_fw_version >= 2920)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ break;
+ default:
+ break;
+ }
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
@@ -1610,6 +1631,7 @@ static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1627,12 +1649,14 @@ static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
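+ /* mask with (not OR into) the global bitmap so harvested RBs are not reported as active */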
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@@ -2413,7 +2437,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
@@ -2832,9 +2856,7 @@ static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -3864,10 +3886,10 @@ static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
}
#endif
-static int gfx_v12_0_set_powergating_state(void *handle,
+static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -3999,17 +4021,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
-
- data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
- data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
-
- /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
- if (adev->sdma.num_instances > 1) {
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
- }
}
}
@@ -4115,10 +4126,10 @@ static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v12_0_set_clockgating_state(void *handle,
+static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -5233,24 +5244,16 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int r, i;
+ int r;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
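+ /* as in gfx11, route the queue reset through the MES firmware */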
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ if (r) {
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
+ return r;
}
- soc24_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
index bcc9c72ccbde..f7184b2dc4e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
@@ -26,4 +26,6 @@
extern const struct amdgpu_ip_block_version gfx_v12_0_ip_block;
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 41f50bf380c4..f26e2cdec07a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -337,6 +337,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
@@ -345,6 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
@@ -353,6 +355,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -361,6 +364,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1906,7 +1910,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -3373,11 +3377,11 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v6_0_set_clockgating_state(void *handle,
+static int gfx_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -3395,11 +3399,11 @@ static int gfx_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v6_0_set_powergating_state(void *handle,
+static int gfx_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 824d5913103b..84745b2453ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -934,33 +934,39 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (err)
goto out;
}
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
out:
if (err) {
@@ -2324,7 +2330,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -4846,11 +4852,11 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v7_0_set_clockgating_state(void *handle,
+static int gfx_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -4869,11 +4875,11 @@ static int gfx_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v7_0_set_powergating_state(void *handle,
+static int gfx_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b7006c41e270..6a025438f9d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -914,7 +914,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -982,13 +982,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_pfp_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
if (err)
@@ -999,13 +1002,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_me_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
if (err)
@@ -1017,13 +1023,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_ce_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
if (err)
@@ -1044,6 +1053,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1093,13 +1103,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
if (err)
@@ -1112,13 +1125,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec2_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
if (!err) {
@@ -1640,7 +1656,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
@@ -4304,9 +4320,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32(mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
+ WREG32(mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
@@ -5321,7 +5335,7 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable, 0);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -5367,10 +5381,10 @@ static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
}
}
-static int gfx_v8_0_set_powergating_state(void *handle,
+static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5625,8 +5639,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
@@ -5720,8 +5732,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5731,8 +5741,6 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
@@ -5813,12 +5821,12 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
}
gfx_v8_0_wait_for_rlc_serdes(adev);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
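+ /* enter RLC safe mode once for the whole CG sequence; the helpers
+ * below no longer toggle it individually */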
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
@@ -5832,6 +5840,8 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5982,10 +5992,10 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v8_0_set_clockgating_state(void *handle,
+static int gfx_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0b6f09f2cc9b..0dce4421418c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1243,7 +1243,7 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -1429,18 +1429,21 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
int err;
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -1476,6 +1479,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc_am4.bin", chip_name);
else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
(smu_version >= 0x41e2b))
@@ -1483,9 +1487,11 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
*SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
*/
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_kicker_rlc.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1518,9 +1524,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_sjt_mec.bin", chip_name);
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
@@ -1531,9 +1539,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sjt_mec2.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -3488,9 +3498,7 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -4780,7 +4788,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
@@ -4956,8 +4964,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5022,8 +5028,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -5034,8 +5038,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* Enable 3D CGCG/CGLS */
if (enable) {
/* write cmd to clear cgcg/cgls ov */
@@ -5077,8 +5079,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5086,8 +5086,6 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -5129,13 +5127,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS
* === MGCG + MGLS ===
@@ -5155,6 +5152,7 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === MGCG + MGLS === */
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
}
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
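
The clock-gating hunks above hoist the RLC safe-mode bracket out of the three per-feature helpers and into their single caller, so safe mode is entered and left once per update instead of once per helper. A minimal standalone sketch of the resulting shape (names are illustrative; register programming is modeled as prints):

#include <stdbool.h>
#include <stdio.h>

static void enter_safe_mode(void) { puts("enter RLC safe mode"); }
static void exit_safe_mode(void)  { puts("exit RLC safe mode"); }

static void update_mgcg(bool enable) { printf("MGCG %d\n", enable); }
static void update_3d(bool enable)   { printf("3D   %d\n", enable); }
static void update_cgcg(bool enable) { printf("CGCG %d\n", enable); }

/* Before: each update_* bracketed itself (three round trips).
 * After: one bracket around the ordered sequence, as in the hunks above. */
static void update_gfx_clock_gating(bool enable)
{
        enter_safe_mode();
        if (enable) {
                /* CGCG/CGLS must follow MGCG/MGLS */
                update_mgcg(enable);
                update_3d(enable);
                update_cgcg(enable);
        } else {
                update_cgcg(enable);
                update_3d(enable);
                update_mgcg(enable);
        }
        exit_safe_mode();
}

int main(void) { update_gfx_clock_gating(true); return 0; }
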
@@ -5232,10 +5230,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
-static int gfx_v9_0_set_powergating_state(void *handle,
+static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -5277,10 +5275,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v9_0_set_clockgating_state(void *handle,
+static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
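
From here on, every set_clockgating_state/set_powergating_state callback in this series migrates from an untyped void *handle to a typed struct amdgpu_ip_block *, whose adev member replaces the old cast. A standalone sketch of the pattern, with a deliberately simplified struct layout:

#include <stdio.h>

struct amdgpu_device { const char *name; };

struct amdgpu_ip_block {
        struct amdgpu_device *adev;
};

/* old style: static int set_state(void *handle)
 *            { adev = (struct amdgpu_device *)handle; ... } */
static int set_state(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev; /* no cast needed */
        printf("gating change on %s\n", adev->name);
        return 0;
}

int main(void)
{
        struct amdgpu_device dev = { "gfx_v9_0" };
        struct amdgpu_ip_block blk = { &dev };
        return set_state(&blk);
}
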
@@ -7439,6 +7437,38 @@ static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+
+ /* Raven and PCO APUs seem to have stability issues
+ * with compute when gfxoff and gfx PG are enabled. Disable
+ * gfx PG during submission and re-enable it afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
+}
+
+static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ /* Raven and PCO APUs seem to have stability issues
+ * with compute when gfxoff and gfx PG are enabled. Disable
+ * gfx PG during submission and re-enable it afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
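
The new compute-ring hooks wrap each submission: begin_use ungates GFX PG on GC 9.1.0 parts and end_use gates it again, layered on top of the existing isolation enforcement. A standalone sketch of how such begin/end hooks bracket a job (names and prints are illustrative):

#include <stdio.h>

struct ring_funcs {
        void (*begin_use)(void);
        void (*end_use)(void);
};

static void compute_begin_use(void) { puts("ungate GFX PG (GC 9.1.0 only)"); }
static void compute_end_use(void)   { puts("gate GFX PG again"); }

static const struct ring_funcs compute_funcs = {
        .begin_use = compute_begin_use,
        .end_use   = compute_end_use,
};

static void submit_job(const struct ring_funcs *f)
{
        f->begin_use();
        puts("emit IB");   /* the actual submission happens here */
        f->end_use();
}

int main(void) { submit_job(&compute_funcs); return 0; }
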
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7615,8 +7645,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
.reset = gfx_v9_0_reset_kcq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v9_0_ring_begin_use_compute,
+ .end_use = gfx_v9_0_ring_end_use_compute,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 3f4fd2f08163..d81449f9d822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -412,7 +412,7 @@ static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
if (r) {
dev_err(adev->dev, "ib submit failed (%d).\n", r);
- amdgpu_ib_free(adev, ib, NULL);
+ amdgpu_ib_free(ib, NULL);
}
return r;
}
@@ -611,16 +611,16 @@ static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
}
disp2_failed:
- amdgpu_ib_free(adev, &disp_ibs[2], NULL);
+ amdgpu_ib_free(&disp_ibs[2], NULL);
dma_fence_put(fences[2]);
disp1_failed:
- amdgpu_ib_free(adev, &disp_ibs[1], NULL);
+ amdgpu_ib_free(&disp_ibs[1], NULL);
dma_fence_put(fences[1]);
disp0_failed:
- amdgpu_ib_free(adev, &disp_ibs[0], NULL);
+ amdgpu_ib_free(&disp_ibs[0], NULL);
dma_fence_put(fences[0]);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init SGPRS Failed\n");
@@ -687,10 +687,10 @@ static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
}
disp_failed:
- amdgpu_ib_free(adev, &disp_ib, NULL);
+ amdgpu_ib_free(&disp_ib, NULL);
dma_fence_put(fence);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init VGPRS Failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 54459254bd37..2ba185875baa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -43,8 +43,10 @@
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
@@ -54,10 +56,6 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
-#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
-#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
-#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
-
#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */
@@ -351,13 +349,17 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
} else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
+ /* Golden settings applied by driver for ASIC with rev_id 0 */
+ if (adev->rev_id == 0) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
+ REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
+ }
}
}
}
@@ -501,7 +503,7 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -545,6 +547,7 @@ static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -576,12 +579,19 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
{
int err;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_sjt_mec.bin", chip_name);
- else
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
+
+ if (err)
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
+ } else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_mec.bin", chip_name);
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
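
For SR-IOV this hunk adds a fallback: if the split-jump-table MEC image is absent, the plain MEC image is requested instead of failing outright. A standalone sketch of that try-then-fall-back shape (request_fw is a stand-in, not the amdgpu API):

#include <stdio.h>

static int request_fw(const char *name)
{
        printf("requesting %s\n", name);
        return -1; /* pretend the sjt image is missing */
}

static int init_mec_fw(int is_vf)
{
        int err;

        if (is_vf) {
                err = request_fw("amdgpu/gc_9_4_3_sjt_mec.bin");
                if (err) /* fall back to the plain image */
                        err = request_fw("amdgpu/gc_9_4_3_mec.bin");
        } else {
                err = request_fw("amdgpu/gc_9_4_3_mec.bin");
        }
        return err;
}

int main(void) { return init_mec_fw(1) ? 1 : 0; }
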
@@ -935,6 +945,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1785,9 +1796,7 @@ static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -2770,16 +2779,16 @@ static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};
-static int gfx_v9_4_3_set_powergating_state(void *handle,
+static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
-static int gfx_v9_4_3_set_clockgating_state(void *handle,
+static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
if (amdgpu_sriov_vf(adev))
@@ -4659,7 +4668,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- amdgpu_gfx_off_ctrl(adev, false);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count;
for (i = 0; i < reg_count; i++)
@@ -4667,7 +4675,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
GET_INST(GC, xcc_id)));
}
- amdgpu_gfx_off_ctrl(adev, true);
/* dump compute queue registers for all instances */
if (!adev->gfx.ip_dump_compute_queues)
@@ -4676,7 +4683,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
- amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->srbm_mutex);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count * num_inst;
@@ -4703,7 +4709,6 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
}
static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
@@ -4866,6 +4871,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* 9.4.3 removed all the GDS internal memory;
* the kernel only supports GWS opcodes such as
* barrier and semaphore. */
@@ -4879,6 +4885,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* deprecated for 9.4.3, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index ed8e130c7d19..5470cef7e9bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -368,7 +368,9 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 4));
+ IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0));
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 697599c46240..9bedca9a79c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1088,11 +1088,11 @@ static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v10_0_set_clockgating_state(void *handle,
+static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
@@ -1131,7 +1131,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
athub_v2_0_get_clockgating(adev, flags);
}
-static int gmc_v10_0_set_powergating_state(void *handle,
+static int gmc_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f893ab4c14df..72751ab4c766 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -996,11 +996,11 @@ static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v11_0_set_clockgating_state(void *handle,
+static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1018,7 +1018,7 @@ static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
athub_v3_0_get_clockgating(adev, flags);
}
-static int gmc_v11_0_set_powergating_state(void *handle,
+static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index d22b027fd0bb..c3c144a4f45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -40,7 +40,7 @@
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"
-
+#include "umc_v8_14.h"
static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -501,9 +501,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,25 +516,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
- (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -581,6 +564,18 @@ static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 14, 0):
+ adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
+ adev->umc.node_inst_num = 0;
+ adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
+ adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
+ adev->umc.ras = &umc_v8_14_ras;
+ break;
+ default:
+ break;
+ }
}
@@ -829,6 +824,10 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -980,11 +979,11 @@ static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v12_0_set_clockgating_state(void *handle,
+static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1002,7 +1001,7 @@ static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
athub_v4_1_0_get_clockgating(adev, flags);
}
-static int gmc_v12_0_set_powergating_state(void *handle,
+static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ca000b3d1afc..2245dda92021 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -131,7 +131,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
chip_name = "si58";
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s_mc.bin\"\n",
@@ -1094,13 +1095,13 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v6_0_set_clockgating_state(void *handle,
+static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int gmc_v6_0_set_powergating_state(void *handle,
+static int gmc_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index b6016f11956e..9aac4b1101e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -157,7 +157,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -1317,11 +1318,11 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v7_0_set_clockgating_state(void *handle,
+static int gmc_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1337,7 +1338,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v7_0_set_powergating_state(void *handle,
+static int gmc_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 12d5967ecd45..d06585207c33 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -259,7 +259,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -1658,10 +1659,10 @@ static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
}
}
-static int gmc_v8_0_set_clockgating_state(void *handle,
+static int gmc_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1679,7 +1680,7 @@ static int gmc_v8_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v8_0_set_powergating_state(void *handle,
+static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 50c5da3020cb..291549765c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -623,6 +623,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
}
+ if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
+ return 1;
+
if (!printk_ratelimit())
return 0;
@@ -645,7 +648,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
soc15_ih_clientid_name[entry->client_id]);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -795,7 +799,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1138,12 +1143,13 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
+ uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
bool is_local;
dma_resv_assert_held(bo->tbo.base.resv);
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (is_vram) {
@@ -1157,10 +1163,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 3)) &&
+ if (gc_ip_version == IP_VERSION(9, 4, 2) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
@@ -1184,6 +1187,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* Only local VRAM BOs or system memory on non-NUMA APUs
* can be assumed to be local in their entirety. Choose
* MTYPE_NC as safe fallback for all system memory BOs on
@@ -1208,7 +1212,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (adev->rev_id)
+ if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
mtype = is_local ? MTYPE_CC : MTYPE_UC;
else
mtype = MTYPE_UC;
@@ -1218,10 +1222,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* dGPU */
if (is_local)
mtype = mtype_local;
- else if (is_vram)
- mtype = MTYPE_NC;
- else
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram)
mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
}
break;
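
The reworked dGPU branch above now reserves MTYPE_UC for non-VRAM mappings on parts older than GC 9.5.0 and uses MTYPE_NC for everything else that is not local. A standalone sketch of the decision (enum values illustrative):

#include <stdbool.h>
#include <stdio.h>

enum mtype { MTYPE_NC, MTYPE_UC, MTYPE_LOCAL };

static enum mtype pick_mtype(bool is_local, bool is_vram, bool pre_9_5_0)
{
        if (is_local)
                return MTYPE_LOCAL;      /* mtype_local in the driver */
        if (pre_9_5_0 && !is_vram)
                return MTYPE_UC;         /* old behavior kept for older parts */
        return MTYPE_NC;                 /* GC 9.5.0 treats remote memory as NC */
}

int main(void)
{
        printf("%d\n", pick_mtype(false, false, true));  /* UC on older parts */
        printf("%d\n", pick_mtype(false, false, false)); /* NC on GC 9.5.0 */
        return 0;
}
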
@@ -1275,7 +1279,8 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
* memory can use more efficient MTYPEs.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 5, 0))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
@@ -1540,6 +1545,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
adev->mmhub.ras = &mmhub_v1_7_ras;
break;
case IP_VERSION(1, 8, 0):
+ case IP_VERSION(1, 8, 1):
adev->mmhub.ras = &mmhub_v1_8_ras;
break;
default:
@@ -1551,7 +1557,8 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1619,7 +1626,8 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1792,6 +1800,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -2070,7 +2079,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2154,6 +2164,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
NUM_XCC(adev->gfx.xcc_mask));
@@ -2220,7 +2231,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
r = gmc_v9_0_init_mem_ranges(adev);
if (r)
return r;
@@ -2250,7 +2262,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) ?
3 :
8;
@@ -2263,7 +2276,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2274,7 +2288,8 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2544,10 +2559,10 @@ static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int gmc_v9_0_set_clockgating_state(void *handle,
+static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->set_clockgating(adev, state);
@@ -2565,7 +2580,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
athub_v1_0_get_clockgating(adev, flags);
}
-static int gmc_v9_0_set_powergating_state(void *handle,
+static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 7f45e93c0397..8ac3d3282268 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,13 +392,13 @@ static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int iceland_ih_set_clockgating_state(void *handle,
+static int iceland_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int iceland_ih_set_powergating_state(void *handle,
+static int iceland_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 38f953fd65d9..f8a485164437 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -693,10 +693,10 @@ static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int ih_v6_0_set_clockgating_state(void *handle,
+static int ih_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -756,10 +756,10 @@ static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_0_set_powergating_state(void *handle,
+static int ih_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 61381e0c3795..dd0042efceec 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -674,10 +674,10 @@ static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v6_1_set_clockgating_state(void *handle,
+static int ih_v6_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_1_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -737,10 +737,10 @@ static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_1_set_powergating_state(void *handle,
+static int ih_v6_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index d2428cf5d385..8f9b15c171f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -664,10 +664,10 @@ static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v7_0_set_clockgating_state(void *handle,
+static int ih_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v7_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -727,10 +727,10 @@ static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v7_0_set_powergating_state(void *handle,
+static int ih_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index d4f72e47ae9e..aeca5c08ea2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -50,7 +50,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index 1341f0292031..df898dbb746e 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -47,7 +47,8 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 6e29b69894a5..7c9251c03815 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -35,7 +35,7 @@
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -154,7 +154,7 @@ static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -675,14 +675,14 @@ static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int jpeg_v2_0_set_clockgating_state(void *handle,
+static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(handle))
+ if (!jpeg_v2_0_is_idle(adev))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
@@ -692,10 +692,10 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 9ac421486f05..11f6af2646e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -38,7 +38,7 @@
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
@@ -219,7 +219,7 @@ static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
- jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
@@ -518,10 +518,10 @@ static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int jpeg_v2_5_set_clockgating_state(void *handle,
+static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -530,7 +530,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(handle))
+ if (!jpeg_v2_5_is_idle(adev))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
@@ -541,10 +541,10 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index e0df6800502c..4eca65ea9053 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -36,7 +36,7 @@
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -168,7 +168,7 @@ static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -466,14 +466,14 @@ static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v3_0_set_clockgating_state(void *handle,
+static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(handle))
+ if (!jpeg_v3_0_is_idle(adev))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
@@ -483,10 +483,10 @@ static int jpeg_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index eca1963c33b6..0aef1f64afd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -39,7 +39,7 @@
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -206,7 +206,7 @@ static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -635,14 +635,14 @@ static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v4_0_set_clockgating_state(void *handle,
+static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(handle))
+ if (!jpeg_v4_0_is_idle(adev))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
@@ -652,10 +652,10 @@ static int jpeg_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 67b51bcbacd1..88f9771c1686 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -43,7 +43,7 @@ enum jpeg_engin_status {
static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -76,7 +76,7 @@ static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
jpeg_v4_0_3_set_dec_ring_funcs(adev);
jpeg_v4_0_3_set_irq_funcs(adev);
@@ -321,7 +321,7 @@ static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
ring->wptr = 0;
@@ -379,7 +379,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return ret;
@@ -949,16 +949,16 @@ static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int jpeg_v4_0_3_set_clockgating_state(void *handle,
+static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(handle))
+ if (!jpeg_v4_0_3_is_idle(adev))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -968,10 +968,10 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
@@ -1231,9 +1231,95 @@ static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};
+static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
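
The parser above logs a fixed count of 1 for an uncorrected error and takes the correctable count from the MISC0 ERRCNT field. A standalone sketch of that counting rule; the ERRCNT bit position below is an assumption for illustration, the driver uses ACA_REG__MISC0__ERRCNT():

#include <stdint.h>
#include <stdio.h>

enum aca_type { ACA_UE, ACA_CE };

/* assumed field position; real extraction is ACA_REG__MISC0__ERRCNT(misc0) */
static uint64_t errcnt_from_misc0(uint64_t misc0)
{
        return (misc0 >> 32) & 0xfffULL;
}

static uint64_t bank_error_count(enum aca_type type, uint64_t misc0)
{
        return (type == ACA_UE) ? 1ULL : errcnt_from_misc0(misc0);
}

int main(void)
{
        printf("UE -> %llu\n", (unsigned long long)bank_error_count(ACA_UE, 0));
        printf("CE -> %llu\n",
               (unsigned long long)bank_error_count(ACA_CE, 5ULL << 32));
        return 0;
}
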
+/* error codes referenced from the SMU driver interface header file */
+static int jpeg_v4_0_3_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v4_0_3_err_codes,
+ ARRAY_SIZE(jpeg_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v4_0_3_aca_bank_ops,
+};
+
+static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
+ .ras_late_init = jpeg_v4_0_3_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 1d9e3b101c3a..6b3656984957 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -48,7 +48,7 @@
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -236,7 +236,7 @@ static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
- jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
return 0;
@@ -660,10 +660,10 @@ static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int jpeg_v4_0_5_set_clockgating_state(void *handle,
+static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -672,7 +672,7 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(handle))
+ if (!jpeg_v4_0_5_is_idle(adev))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
@@ -684,10 +684,10 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 58fb1e5fa89c..d5cf0f2799d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -31,12 +31,12 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "jpeg_v5_0_0.h"
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
@@ -74,7 +74,7 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ VCN_5_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
@@ -172,7 +172,7 @@ static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -560,14 +560,14 @@ static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v5_0_0_set_clockgating_state(void *handle,
+static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v5_0_0_is_idle(handle))
+ if (!jpeg_v5_0_0_is_idle(adev))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
@@ -577,10 +577,10 @@ static int jpeg_v5_0_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
@@ -612,7 +612,7 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__JPEG_DECODE:
+ case VCN_5_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
new file mode 100644
index 000000000000..40d4c32a8c2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+#include "jpeg_v5_0_1.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
+static int amdgpu_ih_srcid_jpeg[] = {
+ VCN_5_0__SRCID__JPEG_DECODE,
+ VCN_5_0__SRCID__JPEG1_DECODE,
+ VCN_5_0__SRCID__JPEG2_DECODE,
+ VCN_5_0__SRCID__JPEG3_DECODE,
+ VCN_5_0__SRCID__JPEG4_DECODE,
+ VCN_5_0__SRCID__JPEG5_DECODE,
+ VCN_5_0__SRCID__JPEG6_DECODE,
+ VCN_5_0__SRCID__JPEG7_DECODE,
+ VCN_5_0__SRCID__JPEG8_DECODE,
+ VCN_5_0__SRCID__JPEG9_DECODE,
+};
+
+static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
+{
+ if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
+ return ((0x40 * pipe) - 0xc80);
+ else
+ return ((0x40 * pipe) - 0x440);
+}
+
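
A quick standalone check of the offset arithmetic above: the per-pipe stride is 0x40, with two different rebases, so the returned delta relative to the pipe-0 register aperture can be negative. The constant 8 is an assumed value for AMDGPU_MAX_JPEG_RINGS_4_0_3:

#include <stdio.h>

#define MAX_JPEG_RINGS_4_0_3 8 /* assumed value */

static int core_reg_offset(int pipe)
{
        if (pipe <= MAX_JPEG_RINGS_4_0_3)
                return (0x40 * pipe) - 0xc80;
        return (0x40 * pipe) - 0x440;
}

int main(void)
{
        printf("pipe 1 -> %d\n", core_reg_offset(1)); /* 0x40  - 0xc80 = -3136 */
        printf("pipe 9 -> %d\n", core_reg_offset(9)); /* 0x240 - 0x440 = -512  */
        return 0;
}
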
+/**
+ * jpeg_v5_0_1_early_init - set function pointers
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
+ return -ENOENT;
+
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ jpeg_v5_0_1_set_dec_ring_funcs(adev);
+ jpeg_v5_0_1_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_sw_init - sw init for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->use_doorbell = false;
+ ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 1 + j + 11 * jpeg_inst;
+ } else {
+ if (j < 4)
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 4 + j + 32 * jpeg_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 8 + j + 32 * jpeg_inst;
+ }
+ sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[j] =
+ regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[j] =
+ SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
+ (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
+ }
+ }
+
+ return 0;
+}
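
The doorbell indexing in sw_init() above packs eleven doorbells per JPEG instance on bare metal but reserves a 32-slot block per instance under SR-IOV, with a four-slot gap between rings 3 and 4. A minimal sketch of the same arithmetic, taking the vcn_ring0_1 base as 0 purely for illustration:

#include <stdbool.h>
#include <stdio.h>

static int doorbell_index(int base, int inst, int ring, bool sriov)
{
	if (!sriov)
		return (base << 1) + 1 + ring + 11 * inst;
	if (ring < 4)
		return (base << 1) + 4 + ring + 32 * inst;
	return (base << 1) + 8 + ring + 32 * inst;
}

int main(void)
{
	int ring;

	for (ring = 0; ring < 10; ring++)
		printf("ring %d: bare-metal %d, sriov %d\n", ring,
		       doorbell_index(0, 0, ring, false),
		       doorbell_index(0, 0, ring, true));
	return 0;
}
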
+
+/**
+ * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Suspend the JPEG block and free its software state
+ */
+static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_hw_init - start and test JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Start the JPEG block and test the decode rings
+ */
+static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* jpeg_v5_0_1_start_sriov(adev); */
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ jpeg_v5_0_1_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ return 0;
+ }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ ring = adev->jpeg.inst[i].ring_dec;
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
+ adev->jpeg.inst[i].aid_id);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->use_doorbell)
+ WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
+ (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->doorbell_index <<
+ VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Stop the JPEG block and mark the rings as no longer ready
+ */
+static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+
+ return ret;
+}
+
+/**
+ * jpeg_v5_0_1_suspend - suspend JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = jpeg_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_resume - resume JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* keep the JPEG in static PG mode */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+ return 0;
+}
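
Both writes above go through WREG32_P(), amdgpu's masked read-modify-write helper: bits selected by the mask argument are preserved from the current register value and the remaining bits are taken from val. A small sketch of that semantic:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the WREG32_P pattern: keep the bits in 'mask', take the rest from 'val'. */
static uint32_t rmw(uint32_t old, uint32_t val, uint32_t mask)
{
	return (old & mask) | (val & ~mask);
}

int main(void)
{
	uint32_t status_mask = 0x1;	/* assumed bit position, for illustration */
	uint32_t reg = 0xffffffff;

	/* WREG32_P(reg, 0, ~status_mask) clears only the status bit */
	reg = rmw(reg, 0, ~status_mask);
	printf("after clear: 0x%08x\n", reg);	/* 0xfffffffe */
	return 0;
}
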
+
+static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, j, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ /* disable antihang */
+ r = jpeg_v5_0_1_disable_antihang(adev, i);
+ if (r)
+ return r;
+
+ /* MJPEG global tiling registers */
+		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
+			     adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+ u32 reg, data, mask;
+
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
+ WREG32_P(reg, data, mask);
+ } else {
+				data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12);
+				mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12));
+				WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ int i, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable antihang */
+ r = jpeg_v5_0_1_enable_antihang(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
+ regUVD_JRBC_RB_WPTR,
+ (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool ret = true;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+
+ ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC_STATUS, reg_offset) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+	int r, i, j;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+			r = SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
+					regUVD_JRBC_STATUS, reg_offset,
+					UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+					UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+			if (r)
+				return r;
+		}
+	}
+	return 0;
+}
+
+static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+	bool enable = (state == AMD_CG_STATE_GATE);
+
+ int i;
+
+ if (!enable)
+ return 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (!jpeg_v5_0_1_is_idle(adev))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_stop(adev);
+ else
+ ret = jpeg_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+ DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
+ if (adev->jpeg.inst[inst].aid_id == i)
+ break;
+
+ if (inst >= adev->jpeg.num_jpeg_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown JPEG instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
+ break;
+ case VCN_5_0__SRCID__JPEG1_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
+ break;
+ case VCN_5_0__SRCID__JPEG2_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
+ break;
+ case VCN_5_0__SRCID__JPEG3_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
+ break;
+ case VCN_5_0__SRCID__JPEG4_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
+ break;
+ case VCN_5_0__SRCID__JPEG5_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
+ break;
+ case VCN_5_0__SRCID__JPEG6_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
+ break;
+ case VCN_5_0__SRCID__JPEG7_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
+ break;
+ case VCN_5_0__SRCID__JPEG8_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
+ break;
+ case VCN_5_0__SRCID__JPEG9_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
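
Because amdgpu_ih_srcid_jpeg[] near the top of the file lists the source IDs in ring order, the switch above is equivalent to a lookup through that same table; a sketch of the alternative, with stand-in ID values since the real ones come from irqsrcs_vcn_5_0.h:

#include <stdio.h>

static int srcid_to_ring(const int *srcids, int n, int src_id)
{
	int i;

	for (i = 0; i < n; i++)
		if (srcids[i] == src_id)
			return i;
	return -1;	/* unknown source; the caller logs and ignores it */
}

int main(void)
{
	int srcids[] = { 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 };	/* stand-ins */

	printf("src_id 27 -> ring %d\n", srcid_to_ring(srcids, 10, 27));
	return 0;
}
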
+
+static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
+ .name = "jpeg_v5_0_1",
+ .early_init = jpeg_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v5_0_1_sw_init,
+ .sw_fini = jpeg_v5_0_1_sw_fini,
+ .hw_init = jpeg_v5_0_1_hw_init,
+ .hw_fini = jpeg_v5_0_1_hw_fini,
+ .suspend = jpeg_v5_0_1_suspend,
+ .resume = jpeg_v5_0_1_resume,
+ .is_idle = jpeg_v5_0_1_is_idle,
+ .wait_for_idle = jpeg_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
+ .set_powergating_state = jpeg_v5_0_1_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec[j].me = i;
+ adev->jpeg.inst[i].ring_dec[j].pipe = j;
+ }
+ jpeg_inst = GET_INST(JPEG, i);
+ adev->jpeg.inst[i].aid_id =
+ jpeg_inst / adev->jpeg.num_inst_per_aid;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
+ .set = jpeg_v5_0_1_set_interrupt_state,
+ .process = jpeg_v5_0_1_process_interrupt,
+};
+
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
+
+ adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &jpeg_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
new file mode 100644
index 000000000000..8ce146c00bb6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V5_0_1_H__
+#define __JPEG_V5_0_1_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 9c905b9e9376..f9a4d08eef92 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1505,9 +1505,7 @@ static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
@@ -1635,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 9ecc5d61e49b..0fd0fa6ed518 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
+#include "gfx_v12_0.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_12_0_0_offset.h"
@@ -350,6 +351,132 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
+{
+ u32 i, tmp, val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* Request with MeId=2, PipeId=0 */
+ tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+ WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+ val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+ if (req) {
+ if (val == tmp)
+ break;
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+ REQUEST, 1);
+
+ /* unlocked or locked by firmware */
+ if (val != tmp)
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
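
The helper above is a polled hardware mutex handshake: to acquire, write the client ID with REQUEST set and spin until the register reads back exactly the written value; to release, write REQUEST clear and spin until the readback no longer shows this client's request pending (unlocked, or held by firmware). A sketch of the acquire test with assumed field positions, since the real layout is defined by the CP_GFX_INDEX_MUTEX shift/mask macros:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, for illustration only. */
#define MUTEX_REQUEST      (1u << 0)
#define MUTEX_CLIENTID(c)  ((uint32_t)(c) << 1)

static uint32_t mutex_reg;	/* stands in for regCP_GFX_INDEX_MUTEX */

static bool try_acquire(unsigned int client)
{
	uint32_t want = MUTEX_REQUEST | MUTEX_CLIENTID(client);

	mutex_reg = want;		/* WREG32; hw may substitute the owner */
	return mutex_reg == want;	/* acquired iff the readback matches */
}

int main(void)
{
	/* The driver requests with CLIENTID 4 (MeId 2, PipeId 0). */
	printf("acquired: %d\n", try_acquire(4));
	return 0;
}
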
+
+static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value, reg;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v12_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v12_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait till dequeue take effects */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait till dequeue take effects */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
+ dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ switch (me_id) {
+ case 1:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
+ break;
+ case 0:
+ default:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
+ break;
+ }
+
+ value = 1 << queue_id;
+ WREG32(reg, value);
+ /* wait for queue reset done */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(reg) & value))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
+ r = -ETIMEDOUT;
+ }
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
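
Every branch of the MMIO reset above ends with the same bounded-poll idiom: trigger the reset, then probe the status register once per microsecond for at most adev->usec_timeout iterations before giving up with -ETIMEDOUT. Extracted as a standalone sketch, with reg_read standing in for the RREG32 variants:

#include <errno.h>
#include <stdint.h>

static int poll_clear(uint32_t (*reg_read)(void), uint32_t mask,
		      unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (!(reg_read() & mask))
			return 0;
		/* the driver calls udelay(1) here; elided in this sketch */
	}
	return -ETIMEDOUT;
}

static uint32_t always_idle(void)
{
	return 0;	/* pretend the busy bit is already clear */
}

int main(void)
{
	return poll_clear(always_idle, 0x1, 100) ? 1 : 0;
}
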
+
static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input)
{
@@ -629,7 +756,8 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr +
+ pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
if (enforce_isolation)
@@ -721,6 +849,11 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
+ if (input->use_mmio)
+ return mes_v12_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -851,29 +984,50 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;
if (enable) {
- data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (amdgpu_mes_log_enable) {
+ u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
+ /* In case uni mes is not enabled, only program for pipe 0 */
+ if (adev->mes.event_log_size >= (pipe + 1) * log_size) {
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
+ lower_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
+ upper_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+					dev_info(adev->dev, "Setup CP MES MSCRATCH address: hi 0x%x, lo 0x%x\n",
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
+ }
+ }
+
+ data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
+ if (pipe == 0)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
+ else
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
+
+ /* unhalt MES and activate one pipe each loop */
+ data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
+ if (pipe)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
+ dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data);
+
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
+
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* unhalt MES and activate pipe0 */
- data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
if (amdgpu_emu_mode)
msleep(100);
else if (adev->enable_uni_mes)
@@ -1347,8 +1501,9 @@ static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
-
+ adev->mes.event_log_size = adev->enable_uni_mes ?
+ (AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) :
+ (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
r = amdgpu_mes_init(adev);
if (r)
return r;
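
The two hunks above agree on one layout: the event-log BO is carved into per-pipe slots of AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE, with the MSCRATCH region placed after the log region inside each slot, which is the address mes_v12_0_enable() programs into CP_MES_MSCRATCH_LO/HI. A sketch of the arithmetic with stand-in sizes (the real constants live in amdgpu_mes.h):

#include <stdint.h>
#include <stdio.h>

#define LOG_BUFFER_SIZE 0x4000u	/* stand-in */
#define MSCRATCH_SIZE   0x4000u	/* stand-in */

int main(void)
{
	uint64_t base = 0x100000000ull;	/* hypothetical event_log_gpu_addr */
	unsigned int pipe;

	for (pipe = 0; pipe < 2; pipe++) {
		uint64_t slot = (uint64_t)pipe * (LOG_BUFFER_SIZE + MSCRATCH_SIZE);

		printf("pipe %u: log 0x%llx, mscratch 0x%llx\n", pipe,
		       (unsigned long long)(base + slot),
		       (unsigned long long)(base + slot + LOG_BUFFER_SIZE));
	}
	return 0;
}
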
@@ -1455,9 +1610,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
@@ -1590,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e9a6f33ca710..243eabda0607 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -356,7 +356,7 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GMC,
- enable);
+ enable, 0);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index b01bb759d0f4..e646e5cef0a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -33,7 +33,6 @@
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
-#define mmSMNAID_AID0_MCA_SMU 0x03b30400
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 0820ed62e2e8..62cdfe10e6f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -434,9 +434,8 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -667,17 +666,17 @@ static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int navi10_ih_set_clockgating_state(void *handle,
+static int navi10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
navi10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
-static int navi10_ih_set_powergating_state(void *handle,
+static int navi10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index 39919e0892c1..c92875ceb31f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -28,6 +28,7 @@
#include "nbif/nbif_6_3_1_sh_mask.h"
#include "pcie/pcie_6_1_0_offset.h"
#include "pcie/pcie_6_1_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
@@ -518,3 +519,83 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
.get_rom_offset = nbif_v6_3_1_get_rom_offset,
.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};
+
+static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+					     unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+	/* The ras_controller_irq enablement should be done in psp bl when it
+	 * tries to enable the ras feature. The driver only needs to set the
+	 * correct interrupt vector for the bare-metal and sriov use cases.
+	 */
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+
+ return 0;
+}
+
+static int nbif_v6_3_1_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for err_event_athub_irq should be written
+	 * to the bif ring. Since the bif ring is not enabled, just leave the
+	 * process callback as a dummy one.
+	 */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbif_v6_3_1_ras_err_event_athub_irq_funcs = {
+ .set = nbif_v6_3_1_set_ras_err_event_athub_irq_state,
+ .process = nbif_v6_3_1_process_err_event_athub_irq,
+};
+
+static void nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static int nbif_v6_3_1_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbif_v6_3_1_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt
+ * nbif v6_3_1 uses the same irq source as nbio v7_4
+ */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+struct amdgpu_nbio_ras nbif_v6_3_1_ras = {
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_err_event_athub_interrupt =
+ nbif_v6_3_1_init_ras_err_event_athub_interrupt,
+};
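
handle_ras_err_event_athub_intr_no_bifring() above is a check-ack-dispatch sequence: read the doorbell interrupt control register, and only if the status field is latched, write the clear field back and hand off to the global RAS ISR. A generic sketch of the pattern with assumed bit positions (the driver uses the BIF_BX0_BIF_DOORBELL_INT_CNTL field macros instead):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INT_STATUS (1u << 0)	/* assumed layout, illustration only */
#define INT_CLEAR  (1u << 1)

static uint32_t int_cntl = INT_STATUS;	/* pretend an error is latched */

static bool check_ack(void)
{
	uint32_t v = int_cntl;		/* RREG32 */

	if (!(v & INT_STATUS))
		return false;
	int_cntl = v | INT_CLEAR;	/* WREG32: ack while the ring is off */
	return true;			/* caller then runs the RAS ISR */
}

int main(void)
{
	printf("dispatch RAS isr: %d\n", check_ack());
	return 0;
}
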
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index b7f2e0d88905..9ac4831d39e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -29,5 +29,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
+extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 3bad565ded73..95c609317a8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -1039,10 +1039,10 @@ static bool nv_common_is_idle(void *handle)
return true;
}
-static int nv_common_set_clockgating_state(void *handle,
+static int nv_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1070,7 +1070,7 @@ static int nv_common_set_clockgating_state(void *handle,
return 0;
}
-static int nv_common_set_powergating_state(void *handle,
+static int nv_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* TODO */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index c4b775aaee9f..cc621064610f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -51,6 +51,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
@@ -122,6 +124,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
@@ -177,6 +180,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
retry_cnt =
((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ?
PSP_VMBX_POLLING_LIMIT :
10;
@@ -203,6 +207,7 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
int ret;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
ret = psp_v13_0_wait_for_vmbx_ready(psp);
if (ret)
@@ -288,6 +293,11 @@ static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
+static int psp_v13_0_bootloader_load_spdm_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->spdm_drv, PSP_BL__LOAD_SPDMDRV);
+}
+
static inline void psp_v13_0_init_sos_version(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -798,6 +808,7 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
return false;
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) &&
(!(adev->flags & AMD_IS_APU))) {
reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
@@ -857,6 +868,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
+ .bootloader_load_spdm_drv = psp_v13_0_bootloader_load_spdm_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_create = psp_v13_0_ring_create,
.ring_stop = psp_v13_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7948d74f8722..135c5099bfb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -145,9 +145,11 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -631,7 +633,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1080,14 +1082,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v2_4_set_clockgating_state(void *handle,
+static int sdma_v2_4_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* XXX handled via the smc on VI */
return 0;
}
-static int sdma_v2_4_set_powergating_state(void *handle,
+static int sdma_v2_4_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 9a3d729545a7..c611328671ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -305,9 +305,11 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -904,7 +906,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1483,10 +1485,10 @@ static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
}
}
-static int sdma_v3_0_set_clockgating_state(void *handle,
+static int sdma_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1506,7 +1508,7 @@ static int sdma_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v3_0_set_powergating_state(void *handle,
+static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c1f98f6cf20d..b48d9c0b2e1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1565,7 +1565,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1956,7 +1956,7 @@ static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false, 0);
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
@@ -1983,7 +1983,7 @@ static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v4_0_enable(adev, false);
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true, 0);
return 0;
}
@@ -2297,10 +2297,10 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
}
}
-static int sdma_v4_0_set_clockgating_state(void *handle,
+static int sdma_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -2312,10 +2312,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_0_set_powergating_state(void *handle,
+static int sdma_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index a38553f38fdc..5e0066cd6c51 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -189,6 +189,7 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
@@ -667,11 +668,12 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: used to restore wptr when restart
*
* Set up the gfx DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -698,16 +700,24 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ if (restore) {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ }
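
The << 2 in the restore path converts ring->wptr, which amdgpu tracks in dwords, into the byte units the RB_RPTR/RB_WPTR registers take, and programming rptr equal to wptr brings the restored ring up empty. A short illustration of the conversion and the 64-bit split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wptr_dw = 0x123456789ull;	/* ring wptr in dwords */
	uint64_t bytes = wptr_dw << 2;		/* register units: bytes */

	printf("lo 0x%08x, hi 0x%08x\n",
	       (uint32_t)(bytes & 0xffffffffu),	/* lower_32_bits() */
	       (uint32_t)(bytes >> 32));	/* upper_32_bits() */
	return 0;
}
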
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
@@ -755,11 +765,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: boolean to say restore needed or not
*
* Set up the page DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -775,10 +786,17 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ if (restore) {
+		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(ring->wptr << 2));
+		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(ring->wptr << 2));
+		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ }
/* set the wb address whether it's enabled or not */
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
@@ -792,7 +810,8 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
@@ -911,12 +930,13 @@ static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @inst_mask: mask of dma engine instances to be enabled
+ * @restore: boolean to say restore needed or not
*
* Set up the DMA engines and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
- uint32_t inst_mask)
+ uint32_t inst_mask, bool restore)
{
struct amdgpu_ring *ring;
uint32_t tmp_mask;
@@ -927,7 +947,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
sdma_v4_4_2_inst_enable(adev, false, inst_mask);
} else {
/* bypass sdma microcode loading on Gopher */
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
adev->sdma.instance[0].fw) {
r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
if (r)
@@ -946,17 +966,19 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
uint32_t temp;
WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
- sdma_v4_4_2_gfx_resume(adev, i);
+ sdma_v4_4_2_gfx_resume(adev, i, restore);
if (adev->sdma.has_page_queue)
- sdma_v4_4_2_page_resume(adev, i);
+ sdma_v4_4_2_page_resume(adev, i, restore);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
- /* enable context empty interrupt during initialization */
- temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
- WREG32_SDMA(i, regSDMA_CNTL, temp);
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
+ /* enable context empty interrupt during initialization */
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
/* unhalt engine */
@@ -1110,7 +1132,7 @@ static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1384,6 +1406,12 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
&adev->sdma.srbm_write_irq);
if (r)
return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
+ &adev->sdma.ctxt_empty_irq);
+ if (r)
+ return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1466,6 +1494,7 @@ static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_sdma_sysfs_reset_mask_fini(adev);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
@@ -1486,7 +1515,7 @@ static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
@@ -1514,7 +1543,7 @@ static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
@@ -1522,7 +1551,7 @@ static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev))
- sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ sdma_v4_4_2_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
return sdma_v4_4_2_hw_fini(ip_block);
}
@@ -1573,6 +1602,42 @@ static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+ u32 inst_mask;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ /* stop queue */
+ inst_mask = 1 << ring->me;
+ sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_inst_page_stop(adev, inst_mask);
+
+ r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
+ if (r)
+ return r;
+
+ udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ dev_err(adev->dev, "timed out waiting for SDMA%d unhalt after reset\n",
+ ring->me);
+ return -ETIMEDOUT;
+ }
+
+ return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+}
+
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1755,6 +1820,16 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* There is nothing useful to be done here, only kept for debug */
+	dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{
@@ -1821,10 +1896,10 @@ static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
}
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
if (amdgpu_sriov_vf(adev))
@@ -1839,7 +1914,7 @@ static int sdma_v4_4_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_4_2_set_powergating_state(void *handle,
+static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1895,7 +1970,6 @@ static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
if (!adev->sdma.ip_dump)
return;
- amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
instance_offset = i * reg_count;
for (j = 0; j < reg_count; j++)
@@ -1903,7 +1977,6 @@ static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
sdma_reg_list_4_4_2[j].reg_offset));
}
- amdgpu_gfx_off_ctrl(adev, true);
}
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
@@ -1955,6 +2028,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2038,6 +2112,10 @@ static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
.process = sdma_v4_4_2_process_srbm_write_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
+ .process = sdma_v4_4_2_process_ctxt_empty_irq,
+};
+
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
@@ -2046,6 +2124,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
@@ -2054,6 +2133,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+ adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
}
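
Hooking up the new ctxt_empty interrupt above follows the driver's usual three-part pattern: a static funcs table holding the .process handler, a per-instance num_types count, and the funcs pointer assignment in set_irq_funcs. A reduced sketch of that shape, with the amdgpu types boiled down to illustrative stand-ins:

#include <stdio.h>

struct iv_entry { int src; };

struct irq_src_funcs {
	int (*process)(struct iv_entry *entry);
};

struct irq_src {
	unsigned int num_types;
	const struct irq_src_funcs *funcs;
};

/* Handler: nothing to do for a context-empty event beyond logging. */
static int ctxt_empty_process(struct iv_entry *entry)
{
	printf("ctxt empty irq, src %d\n", entry->src);
	return 0;
}

static const struct irq_src_funcs ctxt_empty_funcs = {
	.process = ctxt_empty_process,
};

int main(void)
{
	struct irq_src src = { .num_types = 4, .funcs = &ctxt_empty_funcs };
	struct iv_entry e = { .src = 1 };

	return src.funcs->process(&e);
}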
/**
@@ -2167,7 +2247,7 @@ static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index fa9b40934957..b764550834a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1194,7 +1194,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1853,10 +1853,10 @@ static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_0_set_clockgating_state(void *handle,
+static int sdma_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1877,7 +1877,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_0_set_powergating_state(void *handle,
+static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index ba5160399ab2..b1818e87889a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1050,7 +1050,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1812,10 +1812,10 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_2_set_clockgating_state(void *handle,
+static int sdma_v5_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1841,7 +1841,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_2_set_powergating_state(void *handle,
+static int sdma_v5_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index d46128b0ec92..1a023b45f0be 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1063,7 +1063,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1601,13 +1601,13 @@ static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v6_0_set_clockgating_state(void *handle,
+static int sdma_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v6_0_set_powergating_state(void *handle,
+static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index d2ce6b6a7ff6..7e10e94624e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -490,162 +490,185 @@ static void sdma_v7_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ * sdma_v7_0_gfx_resume_instance - start/restart a specific SDMA engine
*
* @adev: amdgpu_device pointer
+ * @i: instance index
+ * @restore: whether to restore the cached wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v7_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 doorbell;
u32 doorbell_offset;
- u32 tmp;
+ u32 temp;
u64 wptr_gpu_addr;
- int i, r;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ int r;
- //if (!amdgpu_sriov_vf(adev))
- // WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ ring = &adev->sdma.instance[i].ring;
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- if (amdgpu_sriov_vf(adev))
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
- else
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- if (i == 0)
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
-
- if (amdgpu_sriov_vf(adev))
- sdma_v7_0_ring_set_wptr(ring);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- /* Set up sdma hang watchdog */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
- /* 100ms per unit */
- tmp = REG_SET_FIELD(tmp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
- max(adev->usec_timeout/100000, 1));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), tmp);
-
- /* Set up RESP_MODE to non-copy addresses */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), tmp);
-
- /* program default cache read and write policy */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- tmp &= 0xFF0FFF;
- tmp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), tmp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, HALT, 0);
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, RESET, 0);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), tmp);
- }
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+ if (i == 0)
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v7_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr programed */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, HALT, 0);
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, RESET, 0);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), temp);
+ }
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ ring->sched.ready = true;
- ring->sched.ready = true;
+	if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v7_0_ctx_switch_enable(adev, true);
+ sdma_v7_0_enable(adev, true);
+ }
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v7_0_ctx_switch_enable(adev, true);
- sdma_v7_0_enable(adev, true);
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ ring->sched.ready = false;
- r = amdgpu_ring_test_helper(ring);
- if (r) {
- ring->sched.ready = false;
- return r;
- }
+ return r;
+}
+
+/**
+ * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v7_0_gfx_resume_instance(adev, i, false);
+ if (r)
+ return r;
}
return 0;
+
}
/**
@@ -806,6 +829,31 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ if (r)
+ return r;
+
+ return sdma_v7_0_gfx_resume_instance(adev, i, true);
+}
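
The instance lookup in sdma_v7_0_reset_queue() above is a linear scan that identifies which SDMA instance owns the ring by comparing pointers into the per-instance array. The same idiom in miniature, with the structures reduced to stand-ins:

#include <stddef.h>
#include <stdio.h>

struct ring { int id; };
struct sdma_instance { struct ring ring; };

/* Return the index of the instance that owns r, or -1 if none does. */
static int find_instance(struct sdma_instance *insts, size_t n,
			 struct ring *r)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (r == &insts[i].ring)
			return (int)i;
	return -1;
}

int main(void)
{
	struct sdma_instance insts[2];

	printf("%d\n", find_instance(insts, 2, &insts[1].ring));	/* 1 */
	return 0;
}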
+
/**
* sdma_v7_0_start - setup and start the async dma engines
*
@@ -1060,7 +1108,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1316,6 +1364,13 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
/* Allocate memory for SDMA IP Dump buffer */
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr)
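
The supported_reset setup added in sw_init above composes a capability bitmask: start from the soft/full mask reported for the ring, OR in per-queue reset, and expose the result through sysfs. A small sketch of that composition, with invented flag values standing in for the real AMDGPU_RESET_TYPE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reset-capability bits, for illustration only. */
#define RESET_TYPE_FULL      (1u << 0)
#define RESET_TYPE_SOFT      (1u << 1)
#define RESET_TYPE_PER_QUEUE (1u << 2)

int main(void)
{
	uint32_t supported = RESET_TYPE_FULL;	/* from the soft/full query */

	supported |= RESET_TYPE_PER_QUEUE;	/* SDMA v7 adds queue reset */

	printf("per-queue reset supported: %s\n",
	       (supported & RESET_TYPE_PER_QUEUE) ? "yes" : "no");
	return 0;
}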
@@ -1334,6 +1389,7 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, true);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
@@ -1524,13 +1580,13 @@ static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v7_0_set_clockgating_state(void *handle,
+static int sdma_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v7_0_set_powergating_state(void *handle,
+static int sdma_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1636,6 +1692,7 @@ static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v7_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v7_0_ring_init_cond_exec,
.preempt_ib = sdma_v7_0_ring_preempt_ib,
+ .reset = sdma_v7_0_reset_queue,
};
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1684,11 +1741,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_cm;
max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+ write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
@@ -1705,7 +1763,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
- ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;
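
The write_cm change above turns the packed copy-flags word into one of two DCC write modes: 1 (compressed) by default, or 2 (compression bypass) when WRITE_COMPRESS_DISABLE is set, and the value is only emitted into the packet when WRITE_COMPRESSED is also requested. A self-contained sketch of the decode, with the flag bit positions invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag layout, for illustration only. */
#define FLAG_WRITE_COMPRESSED       (1u << 0)
#define FLAG_WRITE_COMPRESS_DISABLE (1u << 1)

/* 1 = compressed write, 2 = compression bypassed. */
static uint32_t pick_write_cm(uint32_t flags)
{
	return (flags & FLAG_WRITE_COMPRESS_DISABLE) ? 2 : 1;
}

int main(void)
{
	printf("%u\n", pick_write_cm(FLAG_WRITE_COMPRESSED));		/* 1 */
	printf("%u\n", pick_write_cm(FLAG_WRITE_COMPRESS_DISABLE));	/* 2 */
	return 0;
}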
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 00f63d3fbea7..77ef7da2e4fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2649,13 +2649,13 @@ static bool si_common_is_idle(void *handle)
return true;
}
-static int si_common_set_clockgating_state(void *handle,
+static int si_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_common_set_powergating_state(void *handle,
+static int si_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 47647a6083e8..dbd78d5345a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -286,7 +286,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -629,13 +629,13 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_set_clockgating_state(void *handle,
+static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
u32 orig, data, offset;
int i;
bool enable;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
enable = (state == AMD_CG_STATE_GATE);
@@ -672,12 +672,12 @@ static int si_dma_set_clockgating_state(void *handle,
return 0;
}
-static int si_dma_set_powergating_state(void *handle,
+static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
WREG32(DMA_PGFSM_WRITE, 0x00002000);
WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 2ec1ebe4db11..a32b6243c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -263,13 +263,13 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int si_ih_set_clockgating_state(void *handle,
+static int si_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_ih_set_powergating_state(void *handle,
+static int si_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ede072758dab..e98fb3fa36a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -171,6 +170,24 @@ static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
.codec_array = NULL,
};
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_encode_vcn0 = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+static const struct amdgpu_video_codec_info vcn_5_0_1_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(vcn_5_0_1_video_codecs_decode_array_vcn0),
+ .codec_array = vcn_5_0_1_video_codecs_decode_array_vcn0,
+};
+
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -209,6 +226,12 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
else
*codecs = &vcn_4_0_3_video_codecs_decode;
return 0;
+ case IP_VERSION(5, 0, 1):
+ if (encode)
+ *codecs = &vcn_5_0_1_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_5_0_1_video_codecs_decode_vcn0;
+ return 0;
default:
return -EINVAL;
}
@@ -327,6 +350,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
return 10000;
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
@@ -556,6 +580,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
/* Use gpu_recovery param to target a reset method.
* Enable triggering of GPU reset only if specified
* by module parameter.
@@ -1177,6 +1202,7 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->asic_funcs = &aqua_vanjaram_asic_funcs;
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
@@ -1385,10 +1411,10 @@ static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
-static int soc15_common_set_clockgating_state(void *handle,
+static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1453,6 +1479,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
@@ -1473,7 +1500,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
adev->df.funcs->get_clockgating_state(adev, flags);
}
-static int soc15_common_set_powergating_state(void *handle,
+static int soc15_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* todo */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d6999835918f..62ad67d0b598 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -928,10 +928,10 @@ static bool soc21_common_is_idle(void *handle)
return true;
}
-static int soc21_common_set_clockgating_state(void *handle,
+static int soc21_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(4, 3, 0):
@@ -954,10 +954,10 @@ static int soc21_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc21_common_set_powergating_state(void *handle,
+static int soc21_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index be96de92b2f5..6b8e078ee7c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -444,8 +444,18 @@ static int soc24_common_late_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+		/* No need to fail GPU late init if enabling the
+		 * athub_err_event interrupt fails; nbif v6_3_1 only
+		 * supports fatal error handling, so just enable the
+		 * interrupt directly.
+		 */
+ amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
/* Enable selfring doorbell aperture late because doorbell BAR
* aperture will change if resize BAR successfully in gmc sw_init.
@@ -501,8 +511,13 @@ static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
adev->nbio.funcs->enable_doorbell_aperture(adev, false);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_put_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
return 0;
}
@@ -522,10 +537,10 @@ static bool soc24_common_is_idle(void *handle)
return true;
}
-static int soc24_common_set_clockgating_state(void *handle,
+static int soc24_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 3, 1):
@@ -542,10 +557,10 @@ static int soc24_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc24_common_set_powergating_state(void *handle,
+static int soc24_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 21b71a427b1f..64891f099366 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -30,6 +30,9 @@
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+/* invalid node instance value */
+#define TA_RAS_INV_NODE 0xffff
+
/* RAS related enumerations */
/**********************************************************/
enum ras_command {
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index 00d8bdb8254f..9ec2e03d41c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -31,10 +31,12 @@
* Secure Display Command ID
*/
enum ta_securedisplay_command {
- /* Query whether TA is responding used only for validation purpose */
+	/* Query whether TA is responding. It is used only for validation purposes */
TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
/* Send region of Interest and CRC value to I2C */
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+	/* V2: send multiple regions of interest and their CRC values to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2 = 3,
/* Maximum Command ID */
TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
};
@@ -83,6 +85,8 @@ enum ta_securedisplay_ta_query_cmd_ret {
enum ta_securedisplay_buffer_size {
/* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+	/* 16 bytes = 8 byte (ROI) + 6 byte (CRC) + 1 byte (phy_id) + 1 byte (roi_idx) */
+ TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE = 16,
};
/** Input/output structures for Secure Display commands */
@@ -95,7 +99,15 @@ enum ta_securedisplay_buffer_size {
* Physical ID to determine which DIO scratch register should be used to get ROI
*/
struct ta_securedisplay_send_roi_crc_input {
- uint32_t phy_id; /* Physical ID */
+ /* Physical ID */
+ uint32_t phy_id;
+};
+
+struct ta_securedisplay_send_roi_crc_v2_input {
+ /* Physical ID */
+ uint32_t phy_id;
+ /* Region of interest index */
+ uint8_t roi_idx;
};
/** @union ta_securedisplay_cmd_input
@@ -104,6 +116,8 @@ struct ta_securedisplay_send_roi_crc_input {
union ta_securedisplay_cmd_input {
/* send ROI and CRC input buffer format */
struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ /* send ROI and CRC input buffer format, v2 adds a ROI index */
+ struct ta_securedisplay_send_roi_crc_v2_input send_roi_crc_v2;
uint32_t reserved[4];
};
@@ -128,6 +142,10 @@ struct ta_securedisplay_send_roi_crc_output {
uint8_t reserved;
};
+struct ta_securedisplay_send_roi_crc_v2_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE]; /* I2C buffer */
+};
+
/** @union ta_securedisplay_cmd_output
* Output buffer
*/
@@ -136,6 +154,8 @@ union ta_securedisplay_cmd_output {
struct ta_securedisplay_query_ta_output query_ta;
/* Send ROI CRC output buffer format used only for validation purpose */
struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+	/* Send ROI CRC V2 output buffer format used only for validation purposes */
+ struct ta_securedisplay_send_roi_crc_v2_output send_roi_crc_v2;
uint32_t reserved[4];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 5a04a6770138..0968e551f7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -448,13 +448,13 @@ static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int tonga_ih_set_clockgating_state(void *handle,
+static int tonga_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int tonga_ih_set_powergating_state(void *handle,
+static int tonga_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 1a8ea834efa6..a7b9c358a2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -173,156 +173,96 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- struct ta_ras_query_address_input *addr_in)
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr)
{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
+ uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t channel_index = 0, umc_inst = 0;
+ uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint64_t soc_pa, column, err_addr;
+ struct ta_ras_query_address_output addr_out_tmp;
+ struct ta_ras_query_address_output *paddr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ int ret = 0;
+
+ if (!addr_out)
+ paddr_out = &addr_out_tmp;
+ else
+ paddr_out = addr_out;
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
+ err_addr = bank = 0;
+ if (addr_in) {
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ ret = psp_ras_query_address(&adev->psp, addr_in, paddr_out);
+ if (ret) {
+		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx\n",
+ err_addr);
- return;
- }
+ goto out;
+ }
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
+ bank = paddr_out->pa.bank;
+ /* no need to care about umc inst if addr_in is NULL */
+ umc_inst = addr_in->ma.umc_inst;
}
-}
-static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
- struct ta_ras_query_address_output *addr_out,
- uint64_t err_addr)
-{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
-
- soc_pa = addr_out->pa.pa;
- bank = addr_out->pa.bank;
- channel_index = addr_out->pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- }
-}
+ loop_bits[0] = UMC_V12_0_PA_C2_BIT;
+ loop_bits[1] = UMC_V12_0_PA_C3_BIT;
+ loop_bits[2] = UMC_V12_0_PA_C4_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
- uint64_t pa_addr, uint64_t *pfns, int len)
-{
- uint64_t soc_pa, retired_page, column;
- uint32_t pos = 0;
-
- soc_pa = pa_addr;
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+	/* other nps modes are treated as nps1 */
+ if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
+ loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
+ loop_bits[2] = UMC_V12_0_PA_B0_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R11_BIT;
}
- return pos;
-}
-
-static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch, uint32_t umc,
- uint32_t node, uint32_t socket,
- uint64_t *addr, bool dump_addr)
-{
- struct ta_ras_query_address_input addr_in;
- struct ta_ras_query_address_output addr_out;
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch;
- addr_in.ma.umc_inst = umc;
- addr_in.ma.node_inst = node;
- addr_in.ma.socket_id = socket;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return -EINVAL;
+ soc_pa = paddr_out->pa.pa;
+ channel_index = paddr_out->pa.channel_idx;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa &= ~BIT_ULL(loop_bits[i]);
+
+ paddr_out->pa.pa = soc_pa;
+	/* get column bits 0 and 1 from the mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+
+ if (!err_data && !dump_addr)
+ goto out;
+
+ /* loop for all possibilities of retired bits */
+ for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ soc_pa = paddr_out->pa.pa;
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+ /* add row bit 13 */
+ row = ((column >> 3) << 13) | row_lower;
+
+ if (dump_addr)
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ soc_pa, row, col, bank, channel_index);
+
+ if (err_data)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ soc_pa, channel_index, umc_inst);
}
- if (dump_addr)
- umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
-
- *addr = addr_out.pa.pa;
-
- return 0;
+out:
+ return ret;
}
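
The retirement loop in umc_v12_0_convert_error_address() above enumerates every combination of the four loop bits by treating the loop counter as a 4-bit selector and scattering its bits into a cleared base address, yielding 2^4 = 16 candidate pages per channel (UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL). A standalone sketch of that bit-scatter expansion, with example bit positions rather than the NPS-dependent ones chosen by the driver:

#include <stdint.h>
#include <stdio.h>

#define LOOP_BITS 4

/* Example positions only; the driver picks them per NPS mode. */
static const unsigned int loop_bits[LOOP_BITS] = { 15, 16, 21, 35 };

int main(void)
{
	uint64_t base = 0x123456789abULL;
	unsigned int i;
	uint64_t sel, pa;

	/* Clear the loop bits once, then scatter each selector value. */
	for (i = 0; i < LOOP_BITS; i++)
		base &= ~(1ULL << loop_bits[i]);

	for (sel = 0; sel < (1ULL << LOOP_BITS); sel++) {
		pa = base;
		for (i = 0; i < LOOP_BITS; i++)
			pa |= ((sel >> i) & 0x1ULL) << loop_bits[i];
		printf("0x%llx\n", (unsigned long long)pa);
	}
	return 0;
}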
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
@@ -374,7 +314,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
addr_in.ma.umc_inst = umc_inst;
addr_in.ma.node_inst = node_inst;
- umc_v12_0_convert_error_address(adev, err_data, &addr_in);
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in, NULL, true);
}
/* clear umc status */
@@ -526,6 +466,9 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
+ struct ta_ras_query_address_output addr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -552,10 +495,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- ret = umc_v12_0_convert_mca_to_addr(adev,
+ ret = amdgpu_umc_mca_to_addr(adev,
err_addr, MCA_IPID_2_UMC_CH(ipid),
MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
- MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ MCA_IPID_2_SOCKET_ID(ipid), &addr_out, true);
if (ret)
return ret;
@@ -563,14 +506,21 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
if (!ecc_err)
return -ENOMEM;
+ pa_addr = addr_out.pa.pa;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
- ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->channel_idx = addr_out.pa.channel_idx;
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ shift_bit = UMC_V12_0_PA_B0_BIT;
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
- ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -586,7 +536,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
con->umc_ecc_log.de_queried_count++;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
pa_addr,
page_pfn, ARRAY_SIZE(page_pfn));
if (count <= 0) {
@@ -629,7 +579,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
return -EINVAL;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
page_pfn, ARRAY_SIZE(page_pfn));
@@ -637,7 +587,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
- MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ ecc_err->channel_idx,
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
break;
@@ -676,6 +626,31 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
mutex_unlock(&con->umc_ecc_log.lock);
}
+static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page)
+{
+ uint32_t die = 0;
+
+ /* we only calculate die id for nps1 mode right now */
+	die += ((((retired_page >> 12) & 0x1ULL) ^
+ ((retired_page >> 20) & 0x1ULL) ^
+ ((retired_page >> 27) & 0x1ULL) ^
+ ((retired_page >> 34) & 0x1ULL) ^
+ ((retired_page >> 41) & 0x1ULL)) << 0);
+
+ /* the original PA_C4 and PA_R13 may be cleared in retired_page, so
+ * get them from mca_addr.
+ */
+ die += ((((retired_page >> 13) & 0x1ULL) ^
+ ((mca_addr >> 5) & 0x1ULL) ^
+ ((retired_page >> 28) & 0x1ULL) ^
+ ((mca_addr >> 23) & 0x1ULL) ^
+ ((retired_page >> 42) & 0x1ULL)) << 1);
+ die &= 3;
+
+ return die;
+}
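
Each die-id bit computed above is a parity (XOR) over a fixed set of physical-address bits; die bit 0 folds address bits 12, 20, 27, 34 and 41. A generic helper for that XOR-reduction, runnable as an illustration:

#include <stdint.h>
#include <stdio.h>

/* XOR-reduce the bits of addr selected by positions[]. */
static uint32_t xor_bits(uint64_t addr, const unsigned int *positions,
			 unsigned int count)
{
	uint32_t parity = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		parity ^= (uint32_t)((addr >> positions[i]) & 0x1ULL);
	return parity;
}

int main(void)
{
	/* Bit positions for die bit 0 as used above. */
	const unsigned int die0_bits[] = { 12, 20, 27, 34, 41 };
	uint64_t page = 0x100000ULL;	/* example address; only bit 20 set */

	printf("die bit 0 = %u\n", xor_bits(page, die0_bits, 5));	/* 1 */
	return 0;
}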
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -686,5 +661,7 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
.update_ecc_status = umc_v12_0_update_ecc_status,
+ .convert_ras_err_addr = umc_v12_0_convert_error_address,
+ .get_die_id_from_pa = umc_v12_0_get_die_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index be5598d76c1d..9298018d938f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,12 +55,24 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+/* C2, C3, C4 and R13: four bits in the address that are looped over in retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C3_BIT 16
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R11_BIT 33
#define UMC_V12_0_PA_R13_BIT 35
+/* channel bit in SOC physical address */
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH5_BIT 13
+/* bank bit in SOC physical address */
+#define UMC_V12_0_PA_B0_BIT 19
+/* row bits in MCA address */
+#define UMC_V12_0_MA_R0_BIT 10
#define MCA_UMC_HWID_V12_0 0x96
#define MCA_UMC_MCATYPE_V12_0 0x0
@@ -81,11 +93,6 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
-#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
- ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
- (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
- (0x1ULL << UMC_V12_0_PA_R13_BIT)))
-
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
new file mode 100644
index 000000000000..eaca10a3c4a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_14.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+#include "umc/umc_8_14_0_offset.h"
+#include "umc/umc_8_14_0_sh_mask.h"
+
+static inline uint32_t get_umc_v8_14_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_V8_14_INST_DIST * umc_inst;
+}
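
The register offset above is a two-dimensional stride computation: channel index times the per-channel stride (UMC_V8_14_PER_CHANNEL_OFFSET, 0x400 per the header below) plus UMC instance index times UMC_V8_14_INST_DIST. Assuming, purely for illustration, an instance stride of 0x40000, channel 1 of instance 2 would map to 0x400 * 1 + 0x40000 * 2 = 0x80400.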
+
+static int umc_v8_14_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ /* clear error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_clear_error_count(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_clear_error_count_per_channel, NULL);
+}
+
+static void umc_v8_14_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static void umc_v8_14_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccUnCorrErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static int umc_v8_14_query_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ umc_v8_14_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_14_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+
+ return 0;
+}
+
+static void umc_v8_14_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_query_error_count_per_channel, ras_error_status);
+
+ umc_v8_14_clear_error_count(adev);
+}
+
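+/*
+ * Arm one channel for CE reporting: route the counter interrupt to
+ * the APIC and pre-load the counter with its initial value.
+ */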
+static int umc_v8_14_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_GeccErrCntSel,
+ GeccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_err_cnt_init(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_err_cnt_init_per_channel, NULL);
+}
+
+const struct amdgpu_ras_block_hw_ops umc_v8_14_ras_hw_ops = {
+ .query_ras_error_count = umc_v8_14_query_ras_error_count,
+};
+
+struct amdgpu_umc_ras umc_v8_14_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v8_14_ras_hw_ops,
+ },
+ .err_cnt_init = umc_v8_14_err_cnt_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
new file mode 100644
index 000000000000..20a258f0017a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_14_H__
+#define __UMC_V8_14_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* number of umc channel instances with memory map register access */
+#define UMC_V8_14_CHANNEL_INSTANCE_NUM 2
+/* number of umc instances with memory map register access */
+#define UMC_V8_14_UMC_INSTANCE_NUM(adev) ((adev)->umc.node_inst_num)
+
+/* Total channel instances for all available umc nodes */
+#define UMC_V8_14_TOTAL_CHANNEL_NUM(adev) \
+ (UMC_V8_14_CHANNEL_INSTANCE_NUM * (adev)->gmc.num_umc)
+
+/* UMC register per channel offset */
+#define UMC_V8_14_PER_CHANNEL_OFFSET 0x400
+
+#define UMC_V8_14_INST_DIST 0x40000
+
+/* EccErrCnt max value */
+#define UMC_V8_14_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_14_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V8_14_CE_CNT_INIT (UMC_V8_14_CE_CNT_MAX - UMC_V8_14_CE_INT_THRESHOLD)
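+/*
+ * Pre-loading the counter to (MAX - THRESHOLD) makes the hardware
+ * raise the CE interrupt after THRESHOLD further correctable errors;
+ * the query path subtracts CE_CNT_INIT again to report the delta.
+ */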
+
+extern struct amdgpu_umc_ras umc_v8_14_ras;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index bdbca25d80c4..5830e799c0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -790,13 +790,13 @@ static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block)
return uvd_v3_1_start(adev);
}
-static int uvd_v3_1_set_clockgating_state(void *handle,
+static int uvd_v3_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v3_1_set_powergating_state(void *handle,
+static int uvd_v3_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a836dc9cfcad..f93079e09215 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -44,7 +44,7 @@ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
@@ -708,13 +708,13 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v4_2_set_powergating_state(void *handle,
+static int uvd_v4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -724,7 +724,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index ab55fae3569e..050a0f309390 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -42,7 +42,7 @@ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -155,7 +155,7 @@ static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
int r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -790,16 +790,11 @@ static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
- struct amdgpu_ip_block *ip_block;
-
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
- if (!ip_block)
- return -EINVAL;
if (enable) {
/* wait for STATUS to clear */
@@ -817,7 +812,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v5_0_set_powergating_state(void *handle,
+static int uvd_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -827,7 +822,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 39f8c3d3a135..d9d036ee51fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -48,7 +48,7 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -467,7 +467,7 @@ static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -1450,17 +1450,12 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ip_block *ip_block;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
- if (!ip_block)
- return -EINVAL;
-
if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(ip_block))
@@ -1476,7 +1471,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v6_0_set_powergating_state(void *handle,
+static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -1486,7 +1481,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 3c8ab8698af8..9d237b5937fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1511,7 +1511,7 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v7_0_set_clockgating_state(void *handle,
+static int uvd_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c1ed91b39415..09fd6ef99b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -284,7 +284,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
if (!ip_block)
return -EINVAL;
@@ -578,13 +578,13 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v2_0_set_clockgating_state(void *handle,
+static int vce_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
bool sw_cg = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE) {
gate = true;
@@ -596,7 +596,7 @@ static int vce_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v2_0_set_powergating_state(void *handle,
+static int vce_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -606,7 +606,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v2_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6bb318a06f19..f8bddcd19b68 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -65,7 +65,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -497,7 +497,7 @@ static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
return r;
vce_v3_0_stop(adev);
- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ return vce_v3_0_set_clockgating_state(ip_block, AMD_CG_STATE_GATE);
}
static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block)
@@ -760,10 +760,10 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -801,7 +801,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v3_0_set_powergating_state(void *handle,
+static int vce_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -811,7 +811,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 79ee555768a5..335bda64ff5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -684,14 +684,14 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
-static int vce_v4_0_set_clockgating_state(void *handle,
+static int vce_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload */
return 0;
}
-static int vce_v4_0_set_powergating_state(void *handle,
+static int vce_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -701,7 +701,7 @@ static int vce_v4_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v4_0_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 10e99c926fb8..5ea96c983517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -85,7 +85,8 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -281,7 +282,7 @@ static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return 0;
@@ -303,7 +304,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
}
r = vcn_v1_0_hw_fini(ip_block);
@@ -344,7 +345,7 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -411,7 +412,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1394,15 +1395,15 @@ static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v1_0_set_clockgating_state(void *handle,
+static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(handle))
+ if (!vcn_v1_0_is_idle(adev))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
@@ -1799,7 +1800,7 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(void *handle,
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1810,7 +1811,7 @@ static int vcn_v1_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == adev->vcn.cur_state)
return 0;
@@ -1856,7 +1857,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
@@ -1886,7 +1887,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e0322cbca3ec..e42cfc731ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,7 +92,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -318,7 +318,7 @@ static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -372,7 +372,7 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -428,7 +428,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -978,7 +978,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
int i, j, r;
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
@@ -1235,7 +1235,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
power_off:
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
return 0;
}
@@ -1335,10 +1335,10 @@ static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v2_0_set_clockgating_state(void *handle,
+static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -1346,7 +1346,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(handle))
+ if (!vcn_v2_0_is_idle(adev))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {
@@ -1796,7 +1796,7 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1807,7 +1807,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
@@ -1920,7 +1920,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 6aa08281d094..b518202955ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,7 +95,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -399,7 +399,7 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
@@ -465,7 +465,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -514,7 +514,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1012,8 +1012,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
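+	/* DPM enablement is now requested per VCN instance */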
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1285,7 +1287,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* mc resume */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
@@ -1485,8 +1487,10 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1778,6 +1782,7 @@ static bool vcn_v2_5_is_idle(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1801,17 +1806,17 @@ static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v2_5_set_clockgating_state(void *handle,
+static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
- if (!vcn_v2_5_is_idle(handle))
+ if (!vcn_v2_5_is_idle(adev))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
@@ -1821,10 +1826,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 6732ad7f16f5..63ddd4cca910 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -105,7 +105,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -430,9 +430,9 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
- vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+ vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -490,7 +490,7 @@ static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -540,7 +540,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1141,8 +1141,10 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1373,7 +1375,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1632,8 +1634,10 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
vcn_v3_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -2132,10 +2136,10 @@ static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
return ret;
}
-static int vcn_v3_0_set_clockgating_state(void *handle,
+static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2155,10 +2159,10 @@ static int vcn_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index fcc8511e91ee..00551d6f0370 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -96,7 +96,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(void *handle,
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -366,9 +366,9 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
continue;
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -431,7 +431,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -491,7 +491,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1097,8 +1097,10 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1341,7 +1343,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1623,8 +1625,10 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
vcn_v4_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -2007,14 +2011,15 @@ static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2037,14 +2042,15 @@ static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_sta
/**
* vcn_v4_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 3f69b9b2bcd0..ecdc027f8220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -87,7 +87,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -349,7 +349,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -482,7 +482,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -957,6 +957,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
+ vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
+
memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
@@ -969,7 +971,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
@@ -1121,8 +1123,10 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev)
int i, j, k, r, vcn_inst;
uint32_t tmp;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1395,8 +1399,10 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
vcn_v4_0_3_enable_clock_gating(adev, i);
}
Done:
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1616,15 +1622,15 @@ static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_3_set_clockgating_state(void *handle,
+static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -1644,15 +1650,15 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle,
/**
* vcn_v4_0_3_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
@@ -1911,9 +1917,94 @@ static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
};
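+/*
+ * Parse one ACA bank into the RAS error cache: a UE bank is logged as
+ * a single error, a CE bank with the error count from its MISC0
+ * register.
+ */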
+static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* error codes below are taken from the SMU driver interface header file */
+static int vcn_v4_0_3_err_codes[] = {
+ 14, 15, /* VCN */
+};
+
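+/*
+ * A bank is relevant only if it was reported by the SMU MCA instance
+ * and carries one of the VCN error codes listed above.
+ */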
+static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v4_0_3_err_codes,
+ ARRAY_SIZE(vcn_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v4_0_3_aca_bank_ops,
+};
+
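+/*
+ * Common RAS late init followed by ACA binding; the late init is
+ * unwound if binding the ACA handle fails.
+ */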
+static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_3_ras_hw_ops,
+ .ras_late_init = vcn_v4_0_3_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 71961fb3f7ff..23d3c16c9d9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -95,7 +95,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(void *handle,
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -309,7 +309,7 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -370,7 +370,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -431,7 +431,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1000,8 +1000,10 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1277,8 +1279,10 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
vcn_v4_0_5_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1492,14 +1496,15 @@ static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1522,14 +1527,15 @@ static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v4_0_5_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index bd3d2bbdc16b..b6d78381ebfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -32,7 +32,7 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"
#include <drm/drm_drv.h>
@@ -78,7 +78,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(void *handle,
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -105,6 +105,21 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return amdgpu_vcn_early_init(adev);
}
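+/*
+ * Reserve one dump slot per register per VCN instance; allocation
+ * failure is non-fatal and simply disables the IP dump facility.
+ */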
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
+
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+}
+
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -117,8 +132,6 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -140,13 +153,13 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
if (r)
return r;
@@ -177,14 +190,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ vcn_v5_0_0_alloc_ip_dump(adev);
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -283,7 +289,7 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -344,7 +350,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -405,7 +411,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -771,8 +777,10 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1018,8 +1026,10 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
vcn_v5_0_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1229,14 +1239,15 @@ static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1259,14 +1270,15 @@ static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v5_0_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
@@ -1312,10 +1324,10 @@ static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgp
DRM_DEBUG("IH: VCN TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
- case VCN_4_0__SRCID_UVD_POISON:
+ case VCN_5_0__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
@@ -1351,7 +1363,8 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p)
{
struct amdgpu_device *adev = ip_block->adev;
int i, j;
@@ -1383,7 +1396,7 @@ static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
}
}
-static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
int i, j;
@@ -1424,8 +1437,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = vcn_v5_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_print_ip_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index 51bbccd4360f..b8927652bc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,6 +32,11 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p);
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
+
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
new file mode 100644
index 000000000000..8b463c977d08
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
+
+#include <drm/drm_drv.h>
+
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * vcn_v5_0_1_early_init - set function pointers and load microcode
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ * Load microcode from filesystem
+ */
+static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v5_0_1_set_unified_ring_funcs(adev);
+ vcn_v5_0_1_set_irq_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v5_0_1_sw_init - sw init for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ /* VCN UNIFIED TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ vcn_inst = GET_INST(VCN, i);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
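+ /* doorbell indices are stored in 64-bit units, hence the shift to a
+  * 32-bit slot; the 9-doorbell stride per instance presumably leaves
+  * room for the queues each firmware instance exposes
+  */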
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+
+ ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
+ sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = true;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ vcn_v5_0_0_alloc_ip_dump(adev);
+
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
+}
+
+/**
+ * vcn_v5_0_1_sw_fini - sw fini for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Suspend VCN and free up the software allocations
+ */
+static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r, idx;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = 0;
+ }
+
+ drm_dev_exit(idx);
+ }
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
+ kfree(adev->vcn.ip_dump);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_hw_init - start and test VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Cancel the delayed idle work and stop the VCN block
+ */
+static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_suspend - suspend VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = vcn_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_resume - resume VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+{
+ uint32_t offset, size, vcn_inst;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ vcn_inst = GET_INST(VCN, inst);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
+}
+
+/**
+ * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets in dpg mode
+ */
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
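+ /* with indirect SRAM loading the BAR addresses are left at zero;
+  * PSP is assumed to patch them when it programs the DPG image
+  */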
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
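+ /* clock gating is not wired up yet for VCN 5.0.1; intentionally a stub */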
+}
+
+/**
+ * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
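+ /* stub for the same reason as vcn_v5_0_1_disable_clock_gating() */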
+}
+
+/**
+ * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared =
+ adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+ uint32_t tmp;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
+
+ if (indirect) {
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+ /* Use dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
+ WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
+ adev->vcn.inst[inst_idx].aid_id, 0, true);
+ }
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
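+ /* hand the queued indirect register writes to PSP so it can program the DPG SRAM */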
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
+
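+ /* ring reset handshake: disable RB1, flag the reset to firmware,
+  * reprogram the ring pointers, then re-enable RB1 and clear the flags
+  */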
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+ int i, j, k, r, vcn_inst;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ vcn_inst = GET_INST(VCN, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_1_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
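+ /* UVD_STATUS bit 1 is assumed to be the VCPU boot-complete report */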
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
+ r = 0;
+ break;
+ }
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+ }
+
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t tmp;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+}
+
+/**
+ * vcn_v5_0_1_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ uint32_t tmp;
+ int i, r = 0, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
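+ /* ask firmware to hold off DPG power transitions while the block is stopped */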
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
+ .get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
+ .set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+ vcn_inst = GET_INST(VCN, i);
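+ /* derive which AID this physical instance belongs to */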
+ adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
+ }
+}
+
+/**
+ * vcn_v5_0_1_is_idle - check VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = state == AMD_CG_STATE_GATE;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (enable) {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v5_0_1_enable_clock_gating(adev, i);
+ } else {
+ vcn_v5_0_1_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v5_0_1_stop(adev);
+ else
+ ret = vcn_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t i, inst;
+
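+ /* translate the IH node id into a physical AID, then find the VCN instance that lives on it */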
+ i = node_id_to_phys_map[entry->node_id];
+
+ DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
+ if (adev->vcn.inst[inst].aid_id == i)
+ break;
+ if (inst >= adev->vcn.num_vcn_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown VCN instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
+ .process = vcn_v5_0_1_process_interrupt,
+};
+
+/**
+ * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ adev->vcn.inst->irq.num_types++;
+ adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+}
+
+static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
+ .name = "vcn_v5_0_1",
+ .early_init = vcn_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v5_0_1_sw_init,
+ .sw_fini = vcn_v5_0_1_sw_fini,
+ .hw_init = vcn_v5_0_1_hw_init,
+ .hw_fini = vcn_v5_0_1_hw_fini,
+ .suspend = vcn_v5_0_1_suspend,
+ .resume = vcn_v5_0_1_resume,
+ .is_idle = vcn_v5_0_1_is_idle,
+ .wait_for_idle = vcn_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
+ .set_powergating_state = vcn_v5_0_1_set_powergating_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &vcn_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
new file mode 100644
index 000000000000..82ac709f44bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_v5_0_1_H__
+#define __VCN_v5_0_1_H__
+
+extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
+
+#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 0fedadd0a6a4..98fc6941159e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -364,9 +364,8 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catchup.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -605,10 +604,10 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega10_ih_set_clockgating_state(void *handle,
+static int vega10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -616,7 +615,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
}
-static int vega10_ih_set_powergating_state(void *handle,
+static int vega10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1c9aff742e43..e9e3b2ed4b7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -366,6 +366,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* Enable IH Retry CAM */
if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 5))
WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
ENABLE, 1);
@@ -443,9 +444,8 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catchup.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -697,10 +697,10 @@ static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega20_ih_set_clockgating_state(void *handle,
+static int vega20_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -708,7 +708,7 @@ static int vega20_ih_set_clockgating_state(void *handle,
}
-static int vega20_ih_set_powergating_state(void *handle,
+static int vega20_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index a83505815d39..0c9c4d8b7b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
@@ -1945,10 +1938,10 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
return 0;
}
-static int vi_common_set_clockgating_state(void *handle,
+static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1988,7 +1981,7 @@ static int vi_common_set_clockgating_state(void *handle,
return 0;
}
-static int vi_common_set_powergating_state(void *handle,
+static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 02f7ba8c93cd..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,7 +274,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820258,
+ 0xbf820001, 0xbf820259,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -390,141 +390,98 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840063, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf84005f, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
- 0xd2890000, 0x00000900,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
+ 0x00000903, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
- 0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -544,137 +501,181 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200c7,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200c7, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1302,7 +1303,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202d4,
+ 0xbf820001, 0xbf8202d5,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1419,99 +1420,37 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1530,31 +1469,50 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1574,215 +1532,259 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbefc0080, 0xbf11017c,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf8200e3,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200e3, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbefc0080,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2a05, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202df,
+ 0xbf820001, 0xbf8202e0,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1899,99 +1901,37 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2010,31 +1950,50 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -2054,51 +2013,31 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2137,139 +2076,203 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -3151,7 +3154,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
- 0xbf820001, 0xbf8202db,
+ 0xbf820001, 0xbf8202dc,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -3266,99 +3269,37 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3377,31 +3318,50 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -3421,51 +3381,31 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3504,143 +3444,207 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b79,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3714,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3751,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3797,328 +3817,840 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
+};
+
+static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
+ 0xbf820001, 0xbf8202ca,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
+ 0x866eff78, 0x00002000,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001a,
+ 0x866eff7b, 0x00000400,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x03c00900,
+ 0xbf850011, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf850006,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf85003a,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8979ff79, 0x00800000,
+ 0x87796e79, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f7a8b79,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bba, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bfa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb5306, 0x867bc17b,
+ 0xbf840052, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf84004e, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85001d,
+ 0x24040682, 0xd86c0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000100, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffe5,
+ 0xbf820016, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbefe016a, 0xbf87fff6,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200f4,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf840025, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef5306,
+ 0x866fc16f, 0xbf840020,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0xe0510200, 0x781d0000,
+ 0xe0510300, 0x781d0000,
+ 0xe0510400, 0x781d0000,
+ 0x807cff7c, 0x00000500,
+ 0x8078ff78, 0x00000500,
+ 0xbf0a6f7c, 0xbf85fff0,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbef600ff, 0x01000000,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
+ 0x00000078, 0x80788478,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
+ 0x00000078, 0x80788478,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
+ 0x00000078, 0x80788478,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 44772eec9ef4..96fbb16ceb21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -34,41 +34,24 @@
* cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
* sp3 gfx11.sp3 -hex gfx11.hex
*
- * gfx12:
- * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
- * sp3 gfx12.sp3 -hex gfx12.hex
*/
#define CHIP_NAVI10 26
#define CHIP_SIENNA_CICHLID 30
#define CHIP_PLUM_BONITO 36
-#define CHIP_GFX12 37
#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
+#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
#define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-#if ASIC_FAMILY < CHIP_GFX12
#define S_COHERENCE glc:1
#define V_COHERENCE slc:1 glc:1
#define S_WAITCNT_0 s_waitcnt 0
-#else
-#define S_COHERENCE scope:SCOPE_SYS
-#define V_COHERENCE scope:SCOPE_SYS
-#define S_WAITCNT_0 s_wait_idle
-
-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
-#define HW_REG_MODE HW_REG_WAVE_MODE
-#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
var S_STATUS_HALT_MASK = SQ_WAVE_STATUS_HALT_MASK
var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
var S_SAVE_PC_HI_HT_MASK = 0x01000000
-#else
-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
-var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
-var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
-var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
-var SQ_WAVE_STATUS_WAVE64_SIZE = 1
-var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
-var S_STATUS_HWREG = HW_REG_WAVE_STATE_PRIV
-var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
-var S_STATUS_HALT_MASK = SQ_WAVE_STATE_PRIV_HALT_MASK
-var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
-#endif
var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
var S_TRAPSTS_HWREG = HW_REG_TRAPSTS
var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK
var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
-#else
-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
-
-var S_TRAPSTS_HWREG = HW_REG_WAVE_EXCP_FLAG_PRIV
-var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
-var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
-var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
-var BARRIER_STATE_SIGNAL_OFFSET = 16
-var BARRIER_STATE_VALID_OFFSET = 0
-#endif
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
L_HALTED:
// Host trap may occur while wave is halted.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
-#else
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
L_CHECK_SAVE:
@@ -336,7 +266,6 @@ L_NOT_HALTED:
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
// Maskable exceptions only cause the wave to enter the trap handler if
// their respective bit in mode.excp_en is set.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
s_cbranch_scc0 L_CHECK_TRAP_ID
@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
s_and_b32 ttmp2, ttmp2, ttmp3
s_cbranch_scc1 L_FETCH_2ND_TRAP
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
- s_cbranch_scc0 L_NOT_ADDR_WATCH
- s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
-
-L_NOT_ADDR_WATCH:
- s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
- s_and_b32 ttmp2, ttmp3, ttmp2
- s_cbranch_scc1 L_FETCH_2ND_TRAP
-#endif
L_CHECK_TRAP_ID:
// Check trap_id != 0
@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
#if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-#else
- // WAVE_TRAP_CTRL is already in ttmp3.
- s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
#endif
@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
s_cbranch_scc1 L_TRAP_CASE
// Host trap will not cause trap re-entry.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_EXIT_TRAP
s_or_b32 s_save_status, s_save_status, S_STATUS_HALT_MASK
@@ -457,16 +365,7 @@ L_EXIT_TRAP:
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
-#if ASIC_FAMILY < CHIP_GFX12
s_setreg_b32 hwreg(S_STATUS_HWREG), s_save_status
-#else
- // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
- // Only restore fields which the trap handler changes.
- s_lshr_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
- s_setreg_b32 hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
- SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
-#endif
-
s_rfe_b64 [ttmp0, ttmp1]
L_SAVE:
@@ -478,14 +377,6 @@ L_SAVE:
s_endpgm
L_HAVE_VGPRS:
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
- s_cbranch_scc0 L_HAVE_VGPRS
- s_endpgm
-L_HAVE_VGPRS:
-#endif
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -671,19 +562,6 @@ L_SAVE_HWREG:
s_mov_b32 m0, 0x0 //Next lane of v2 to write to
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- // Ensure no further changes to barrier or LDS state.
- // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
- s_barrier_signal -2
- s_barrier_wait -2
-
- // Re-read final state of BARRIER_COMPLETE field for save.
- s_getreg_b32 s_save_tmp, hwreg(S_STATUS_HWREG)
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_or_b32 s_save_status, s_save_status, s_save_tmp
-#endif
-
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
@@ -707,21 +585,6 @@ L_SAVE_HWREG:
s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_get_barrier_state s_save_tmp, -1
- s_wait_kmcnt (0)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-#endif
-
#if NO_SQC_STORE
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //LDS is used? wait for other waves in the same TG
-#endif
s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
@@ -1081,11 +942,6 @@ L_RESTORE:
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
-#if ASIC_FAMILY >= CHIP_GFX12
- // Save s_restore_spi_init_hi for later use.
- s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
-#endif
-
//determine it is wave32 or wave64
get_wave_size2(s_restore_size)
@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
// Clear DEBUG_EN before and restore MODE after the barrier.
s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
-#endif
/* restore HW registers */
L_RESTORE_HWREG:
@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
-#if ASIC_FAMILY >= CHIP_GFX12
- // Restore s_restore_spi_init_hi before the saved value gets clobbered.
- s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
-#endif
-
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
-#if ASIC_FAMILY >= CHIP_GFX12
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
-
- // Only the first wave needs to restore the workgroup barrier.
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // Skip over WAVE_STATUS, since there is no state to restore from it
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
-
- s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // extract the saved signal count from s_restore_tmp
- s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
-
- // We need to call s_barrier_signal repeatedly to restore the signal
- // count of the work group barrier. The member count is already
- // initialized with the number of waves in the work group.
-L_BARRIER_RESTORE_LOOP:
- s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
- s_barrier_signal -1
- s_add_i32 s_restore_tmp, s_restore_tmp, -1
- s_branch L_BARRIER_RESTORE_LOOP
-
-L_SKIP_BARRIER_RESTORE:
-#endif
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
s_setreg_b32 hwreg(S_STATUS_HWREG), s_restore_status // SCC is included, which was changed by the previous SALU instruction
-#if ASIC_FAMILY >= CHIP_GFX12
- // Make barrier and LDS state visible to all waves in the group.
- // STATE_PRIV.BARRIER_COMPLETE may change after this point.
- s_barrier_signal -2
- s_barrier_wait -2
-#endif
-
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
end
function get_wave_size2(s_reg)
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
-#else
- s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
-#endif
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
new file mode 100644
index 000000000000..5a1a1b1f897f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -0,0 +1,1136 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* To compile this assembly code:
+ *
+ * gfx12:
+ * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
+ * sp3 gfx12.sp3 -hex gfx12.hex
+ */
+
+#define CHIP_GFX12 37
+
+#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
+
+var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
+var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
+var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
+var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
+var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
+var SQ_WAVE_STATUS_WAVE64_SIZE = 1
+var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
+var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
+
+var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
+var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
+var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
+var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
+
+var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
+var BARRIER_STATE_SIGNAL_OFFSET = 16
+var BARRIER_STATE_VALID_OFFSET = 0
+
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_state_priv = ttmp12
+var s_save_excp_flag_priv = ttmp15
+var s_save_xnack_mask = s_save_excp_flag_priv
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp4
+var s_save_alloc_size = s_save_excp_flag_priv
+var s_save_tmp = ttmp14
+var s_save_m0 = ttmp5
+var s_save_ttmps_lo = s_save_tmp
+var s_save_ttmps_hi = s_save_excp_flag_priv
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp2
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = s_restore_tmp
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp4
+var s_restore_exec_hi = ttmp5
+var s_restore_state_priv = ttmp14
+var s_restore_excp_flag_priv = ttmp15
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp6
+var s_restore_ttmps_lo = s_restore_tmp
+var s_restore_ttmps_hi = s_restore_alloc_size
+var s_restore_spi_init_hi_save = s_restore_exec_hi
+
+shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
+
+ s_branch L_SKIP_RESTORE //not a restore; might be a regular trap or save
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+L_SKIP_RESTORE:
+ s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+
+ s_and_b32 ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ s_and_b32 ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ s_and_b32 ttmp2, ttmp3, ttmp2
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+#if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ // WAVE_TRAP_CTRL is already in ttmp3.
+ s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+#endif
+
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+L_FETCH_2ND_TRAP:
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+ // ttmp12 holds SQ_WAVE_STATUS
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_wait_idle
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
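+ // Sign-extend the TMA address: after the 8-bit shift, bit 15 of ttmp15 holds bit 47 of the 48-bit address.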
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS // second-level TBA
+ s_wait_idle
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS // second-level TMA
+ s_wait_idle
+
+ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+ s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler has not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_TRAP_CASE
+
+ // Host trap will not cause trap re-entry.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+ s_or_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
+ // If VGPRs have been deallocated then terminate the wavefront.
+ // It has no remaining program to run and cannot save without VGPRs.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+ s_cbranch_scc0 L_HAVE_VGPRS
+ s_endpgm
+L_HAVE_VGPRS:
+
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ /* inform SPI of readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+ s_wait_idle
+
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_wave_size2(s_save_ttmps_hi)
+ get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+ s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+ s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
+
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
+
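+ // Store lanes 0-13 of v0 (ttmp4-11 and ttmp13); lanes 14-15 hold the saved EXEC and are read back below.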
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
+
+ /* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes), although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ get_wave_size2(s_wave_size)
+
+ /* save first 4 VGPRs, needed for SGPR save */
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
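+ // The S_WAVE_SIZE bit of s_wave_size selects wave64 (1) or wave32 (0).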
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
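+ // In wave32, each VGPR occupies 32 lanes x 4 bytes = 128 bytes of save memory, hence the offset:128 stride below.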
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+ s_branch L_SAVE_HWREG
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
+ v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
+ v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
+
+ // Ensure no further changes to barrier or LDS state.
+ // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ // Re-read final state of BARRIER_COMPLETE field for save.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
+
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ v_writelane_b32 v2, s_save_tmp, 0x6
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ v_writelane_b32 v2, s_save_tmp, 0xD
+
+ s_get_barrier_state s_save_tmp, -1
+ s_wait_kmcnt (0)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
+
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
+ s_mov_b32 exec_hi, 0x0
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+
+ /* save SGPRs */
+ // Save SGPRs before the LDS save so that s0 to s4 can be used during the LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 ttmp13, 0x0 //next VGPR lane to copy SGPR into
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
+ s_mov_b32 ttmp13, 0x0
+ v_mov_b32 v2, 0x0
+L_SAVE_SGPR_SKIP_TCP_STORE:
+
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, 96 //scc = (m0 < 96) ? 1 : 0; first 96 SGPRs
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //loop until the first 96 SGPRs are saved
+
+ //save the remaining 12 SGPRs (96 + 12 = s_sgpr_save_num = 108)
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_v2(s0)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+	s_cbranch_scc0	L_SAVE_LDS_DONE	//no LDS used? jump to L_SAVE_LDS_DONE
+
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+ // first wave do LDS save;
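+	// LDS is shared by the whole workgroup, so saving it once is sufficient.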
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+	//load 0~63 * 4 (byte addresses) into vgpr v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
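+	// v0 now holds lane_id * 4, the per-lane LDS byte address for the copy loop.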
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3	//every loop iteration stores 128 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3	//every loop iteration stores 256 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
+
+L_SAVE_LDS_DONE:
+	/* save VGPRs - save the remaining VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+	s_mov_b32	s_save_mem_offset, (0+128*4)	// offset for the remaining VGPRs
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+	s_mov_b32	s_save_mem_offset, (0+256*4)	// offset for the remaining VGPRs
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+L_SAVE_SHARED_VGPR:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+	s_cbranch_scc0	L_SAVE_VGPR_END	//no shared_vgpr used? jump to L_SAVE_VGPR_END
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal vgpr count; add the shared_vgpr count to it to get the total.
+	//the shared_vgpr save will start from index m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
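+	// Shared VGPRs (wave64 only) are stored 32 lanes at a time, 128 bytes per register.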
+
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
+
+L_RESTORE:
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+
+ // Save s_restore_spi_init_hi for later use.
+ s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+
+ //determine it is wave32 or wave64
+ get_wave_size2(s_restore_size)
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+	s_add_u32	m0, m0, 128	// 128 bytes (32 lanes x 1 DW)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128	//mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+	s_add_u32	m0, m0, 256	// 256 bytes (64 lanes x 1 DW)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256	//mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ //determine it is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset	// restore starts with v4, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+L_RESTORE_SHARED_VGPR:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal vgpr count; add the shared_vgpr count to it to get the total.
+	//the shared_vgpr restore will start from index m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+ s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+
+ /* restore SGPRs */
+	//will be 4+8+16*6
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 are not saved
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
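+	// m0 counts down from the number of saved SGPRs; the reads below walk the save area backwards.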
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+	s_cmp_eq_u32	m0, 0	//scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ // s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ // Clear DEBUG_EN before and restore MODE after the barrier.
+ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // Restore s_restore_spi_init_hi before the saved value gets clobbered.
+ s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+
+ // Only the first wave needs to restore the workgroup barrier.
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // Skip over WAVE_STATUS, since there is no state to restore from it
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // extract the saved signal count from s_restore_tmp
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+
+ // We need to call s_barrier_signal repeatedly to restore the signal
+ // count of the work group barrier. The member count is already
+ // initialized with the number of waves in the work group.
+L_BARRIER_RESTORE_LOOP:
+ s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+ s_barrier_signal -1
+ s_add_i32 s_restore_tmp, s_restore_tmp, -1
+ s_branch L_BARRIER_RESTORE_LOOP
+
+L_SKIP_BARRIER_RESTORE:
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
+ // Only restore the other fields to avoid clobbering them.
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_MODE), s_restore_mode
+
+ // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+ s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
+ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
+ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
+ s_wait_idle
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv	// SCC is included; it was changed by the preceding SALU instructions
+
+ // Make barrier and LDS state visible to all waves in the group.
+ // STATE_PRIV.BARRIER_COMPLETE may change after this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
+ // Make sure that no wave of the workgroup can exit the trap handler
+ // before the workgroup barrier state is saved.
+ s_barrier_signal -2
+ s_barrier_wait -2
+ s_endpgm_saved
+end
+
+function write_16sgpr_to_v2(s, lane_offset)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
+ end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
+end
+
+function write_12sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
+ end
+ valu_sgpr_hazard()
+end
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
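+	// Pre-decrement the offset: the SGPR restore walks the save area from high addresses to low.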
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_bitcmp1_b32 s_size, S_WAVE_SIZE
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)	//VGPR size in bytes = (vgpr_size + 1) * 4 regs * 32 lanes * 4 bytes (non-zero value)
+ s_branch L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)	//VGPR size in bytes = (vgpr_size + 1) * 4 regs * 64 lanes * 4 bytes (non-zero value)
+L_SHIFT_DONE:
+end
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
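+	// bytes = shared_vgpr_size * 8 registers * 128 bytes per register (shift by 3+7)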
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
+
+function get_wave_size2(s_reg)
+ s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+ s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
+end
+
+function valu_sgpr_hazard
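+	// Pad with ds_nop wait states on ASICs where SGPR reads by VALU instructions
+	// (e.g. v_writelane) hazard against closely following SGPR accesses.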
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index bb26338204f4..6869e07a2fff 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -37,17 +37,28 @@
* gc_9_4_3:
* cpp -DASIC_FAMILY=GC_9_4_3 cwsr_trap_handler_gfx9.asm -P -o gc_9_4_3.sp3
* sp3 gc_9_4_3.sp3 -hex gc_9_4_3.hex
+ *
+ * gc_9_5_0:
+ * cpp -DASIC_FAMILY=GC_9_5_0 cwsr_trap_handler_gfx9.asm -P -o gc_9_5_0.sp3
+ * sp3 gc_9_5_0.sp3 -hex gc_9_5_0.hex
*/
#define CHIP_VEGAM 18
#define CHIP_ARCTURUS 23
#define CHIP_ALDEBARAN 25
#define CHIP_GC_9_4_3 26
+#define CHIP_GC_9_5_0 27
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
var SINGLE_STEP_MISSED_WORKAROUND = (ASIC_FAMILY <= CHIP_ALDEBARAN) //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+#if ASIC_FAMILY < CHIP_GC_9_4_3
+#define VMEM_MODIFIERS slc:1 glc:1
+#else
+#define VMEM_MODIFIERS sc0:1 nt:1
+#endif
+
/**************************************************************************/
/* variables */
/**************************************************************************/
@@ -62,7 +73,13 @@ var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 11
+var LDS_RESTORE_GRANULARITY_BYTES = 1280
+#else
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var LDS_RESTORE_GRANULARITY_BYTES = 512
+#endif
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
@@ -430,7 +447,9 @@ L_SAVE:
s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
+ // Clear VSKIP state now that MODE.VSKIP has been saved.
+	// If the user shader set it, vector instructions would be skipped.
+ s_setvskip 0,0
/* the first wave in the threadgroup */
 	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK	// extract first wave bit
@@ -557,12 +576,21 @@ if SAVE_AFTER_XNACK_ERROR
v_lshlrev_b32 v2, 2, v3
L_SAVE_LDS_LOOP_SQC:
+#if ASIC_FAMILY < CHIP_GC_9_5_0
ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
s_waitcnt lgkmcnt(0)
-
write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
v_add_u32 v2, 0x200, v2
+#else
+	// gfx950 needs to save in multiples of 256 bytes.
+ ds_read_b32 v0, v2
+ s_waitcnt lgkmcnt(0)
+ write_vgprs_to_mem_with_sqc(v0, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ v_add_u32 v2, 0x100, v2
+#endif
+
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
@@ -581,11 +609,14 @@ end
L_SAVE_LDS_LOOP_VECTOR:
ds_read_b64 v[0:1], v2 //x =LDS[a], byte address
s_waitcnt lgkmcnt(0)
- buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1
+ buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset VMEM_MODIFIERS offen:1
// s_waitcnt vmcnt(0)
// v_add_u32 v2, vcc[0:1], v2, v3
v_add_u32 v2, v2, v3
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ s_mov_b64 exec, vcc
+#endif
s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
// restore rsrc3
@@ -748,8 +779,13 @@ L_RESTORE:
L_RESTORE_LDS_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- s_add_u32 m0, m0, 256*2 // 128 DW
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:512 // third 64DW
+	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:768	// fourth 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:1024 // fifth 64DW
+#endif
+ s_add_u32 m0, m0, LDS_RESTORE_GRANULARITY_BYTES // 128/320 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, LDS_RESTORE_GRANULARITY_BYTES //mem offset increased by 128/320 DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete?
@@ -979,17 +1015,17 @@ L_TCP_STORE_CHECK_DONE:
end
function write_4vgprs_to_mem(s_rsrc, s_mem_offset)
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_store_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_store_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_store_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_store_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_store_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_store_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_store_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
end
function read_4vgprs_from_mem(s_rsrc, s_mem_offset)
- buffer_load_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_load_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_load_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_load_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_load_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_load_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_load_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_load_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
s_waitcnt vmcnt(0)
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index e5324c5bc6c7..693469c18c60 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1639,6 +1639,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
*pcache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 312dfa84f29f..a8abc3091801 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
+ struct amdgpu_device *adev = pdd->dev->adev;
+ int r;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
index 924d0fd85dfb..27aa1a5b120f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -79,6 +79,7 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0) ||
KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9b51dd75fefc..a29374c86405 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -85,6 +85,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
@@ -152,6 +153,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
break;
case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
+ case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
kfd->device_info.event_interrupt_class =
&event_interrupt_class_v9_4_3;
break;
@@ -356,6 +358,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
break;
+ case IP_VERSION(9, 5, 0):
+ gfx_target_version = 90500;
+ f2g = &gc_9_4_3_kfd2kgd;
+ break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
gfx_target_version = 100100;
@@ -515,6 +521,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
> KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
> KFD_CWSR_TMA_OFFSET);
@@ -567,6 +577,7 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x28) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b) ||
@@ -638,6 +649,14 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
struct kfd_node *knode;
unsigned int i;
+ /*
+	 * flush_workqueue ensures that there are no outstanding
+ * work-queue items that will access interrupt_ring. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_workqueue(kfd->ih_wq);
+ destroy_workqueue(kfd->ih_wq);
+
for (i = 0; i < num_nodes; i++) {
knode = kfd->nodes[i];
device_queue_manager_uninit(knode->dqm);
@@ -733,14 +752,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
- /* For GFX9.4.3, we need special handling for VMIDs depending on
- * partition mode.
+ /* For multi-partition capable GPUs, we need special handling for VMIDs
+ * depending on partition mode.
* In CPX mode, the VMID range needs to be shared between XCDs.
* Additionally, there are 13 VMIDs (3-15) available for KFD. To
* divide them equally, we change starting VMID to 4 and not use
* VMID 3.
- * If the VMID range changes for GFX9.4.3, then this code MUST be
- * revisited.
+ * If the VMID range changes for multi-partition capable GPUs, then
+ * this code MUST be revisited.
*/
if (kfd->adev->xcp_mgr) {
partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
@@ -805,14 +824,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
/*
- * For GFX9.4.3, the KFD abstracts all partitions within a socket as
- * xGMI connected in the topology so assign a unique hive id per
- * device based on the pci device location if device is in PCIe mode.
+ * For multi-partition capable GPUs, the KFD abstracts all partitions
+ * within a socket as xGMI connected in the topology so assign a unique
+ * hive id per device based on the pci device location if device is in
+ * PCIe mode.
*/
- if (!kfd->hive_id &&
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- kfd->num_nodes > 1)
+ if (!kfd->hive_id && kfd->num_nodes > 1)
kfd->hive_id = pci_dev_id(kfd->adev->pdev);
kfd->noretry = kfd->adev->gmc.noretry;
@@ -850,12 +867,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
}
- if ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
- /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
- * 4-9 and second XCD gets VMID range 10-15.
+ /* For multi-partition capable GPUs and CPX mode, first
+ * XCD gets VMID range 4-9 and second XCD gets VMID
+ * range 10-15.
*/
node->vm_info.first_vmid_kfd = (i%2 == 0) ?
@@ -879,8 +895,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
amdgpu_amdkfd_get_local_mem_info(kfd->adev,
&node->local_mem_info, node->xcp);
- if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4))
+ if (kfd->adev->xcp_mgr)
kfd_setup_interrupt_bitmap(node, i);
/* Initialize the KFD node */
@@ -1059,21 +1074,6 @@ static int kfd_resume(struct kfd_node *node)
return err;
}
-static inline void kfd_queue_work(struct workqueue_struct *wq,
- struct work_struct *work)
-{
- int cpu, new_cpu;
-
- cpu = new_cpu = smp_processor_id();
- do {
- new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
- if (cpu_to_node(new_cpu) == numa_node_id())
- break;
- } while (cpu != new_cpu);
-
- queue_work_on(new_cpu, wq, work);
-}
-
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -1099,7 +1099,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(node,
is_patched ? patched_ihre : ih_ring_entry)) {
- kfd_queue_work(node->ih_wq, &node->interrupt_work);
+ queue_work(node->kfd->ih_wq, &node->interrupt_work);
spin_unlock_irqrestore(&node->interrupt_lock, flags);
return;
}
@@ -1514,6 +1514,73 @@ bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
return kfd_compute_active(node);
}
+/**
+ * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
+ * @adev: amdgpu device
+ * @entry: vm fault interrupt vector
+ * @retry_fault: if this is retry fault
+ *
+ * retry fault -
+ * with CAM enabled, adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * adev soft_ring
+ * | gmc_v9_0_process_interrupt() worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * without CAM, adev primary ring1
+ *   | gmc_v9_0_process_interrupt worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * no-retry fault -
+ * adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops page faults
+ *	from the same process and does not copy the interrupt to the KFD node ih_fifo.
+ *	With the gdb debugger enabled, retry faults must be converted to no-retry faults
+ *	for the debugger, so the fast path cannot be used.
+ *
+ * Return:
+ * true - use the fast path to handle this fault
+ * false - use normal path to handle it
+ */
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ struct kfd_process *p;
+ u32 cam_index;
+
+ if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
+ p = kfd_lookup_process_by_pasid(entry->pasid);
+ if (!p)
+ return true;
+
+ if (p->gpu_page_fault && !p->debug_trap_enabled) {
+ if (retry_fault && adev->irq.retry_cam_enabled) {
+ cam_index = entry->src_data[2] & 0x3ff;
+ WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
+ }
+
+ kfd_unref_process(p);
+ return true;
+ }
+
+ /*
+	 * This is the first page fault; set the flag and then signal user space
+ */
+ p->gpu_page_fault = true;
+ kfd_unref_process(p);
+ }
+ return false;
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 16b5daaa272f..34c2c42c0f95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1230,11 +1230,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- retval = remove_queue_mes(dqm, q, qpd);
- if (retval) {
+ int err;
+
+ err = remove_queue_mes(dqm, q, qpd);
+ if (err) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- goto out;
+ retval = err;
}
}
}
@@ -2325,9 +2327,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
+ while (halt_if_hws_hang)
+ schedule();
if (reset_queues_on_hws_hang(dqm)) {
- while (halt_if_hws_hang)
- schedule();
dqm->is_hws_hang = true;
kfd_hws_hang(dqm);
retval = -ETIME;
@@ -2388,6 +2390,9 @@ static int wait_on_destroy_queue(struct device_queue_manager *dqm,
q->process);
int ret = 0;
+ if (WARN_ON(!pdd))
+ return ret;
+
if (pdd->qpd.is_debug)
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 210bcc048f4c..67137e674f1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -64,7 +64,8 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index ea3792249209..d075f24e5f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,6 +748,16 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
+ /*
+	 * If the id is valid but the slot is not signaled, the GPU may have signaled the
+	 * same event twice before the driver had a chance to process the first interrupt.
+	 * The signal slot is auto-reset after set_event wakes up user space, so drop the
+	 * second event since the application only needs to be woken once.
+ */
+ if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
+ partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
+ goto out_unlock;
+
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -776,6 +786,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
+out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index d46a13156ee9..0cb5c582ce7d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -184,6 +184,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__GFX);
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -213,6 +214,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__SDMA);
break;
default:
dev_warn(dev->adev->dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 9b6b6e882593..783c2f5a04e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -46,7 +46,7 @@
#include <linux/kfifo.h>
#include "kfd_priv.h"
-#define KFD_IH_NUM_ENTRIES 8192
+#define KFD_IH_NUM_ENTRIES 16384
static void interrupt_wq(struct work_struct *);
@@ -62,11 +62,14 @@ int kfd_interrupt_init(struct kfd_node *node)
return r;
}
- node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
- if (unlikely(!node->ih_wq)) {
- kfifo_free(&node->ih_fifo);
- dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
- return -ENOMEM;
+ if (!node->kfd->ih_wq) {
+ node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
+ node->kfd->num_nodes);
+ if (unlikely(!node->kfd->ih_wq)) {
+ kfifo_free(&node->ih_fifo);
+ dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
}
spin_lock_init(&node->interrupt_lock);
@@ -96,16 +99,6 @@ void kfd_interrupt_exit(struct kfd_node *node)
spin_lock_irqsave(&node->interrupt_lock, flags);
node->interrupts_active = false;
spin_unlock_irqrestore(&node->interrupt_lock, flags);
-
- /*
- * flush_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_workqueue(node->ih_wq);
-
- destroy_workqueue(node->ih_wq);
-
kfifo_free(&node->ih_fifo);
}
@@ -114,55 +107,48 @@ void kfd_interrupt_exit(struct kfd_node *node)
*/
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
- int count;
-
- count = kfifo_in(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
- if (count != node->kfd->device_info.ih_ring_entry_size) {
- dev_dbg_ratelimited(node->adev->dev,
- "Interrupt ring overflow, dropping interrupt %d\n",
- count);
+ if (kfifo_is_full(&node->ih_fifo)) {
+ dev_warn_ratelimited(node->adev->dev, "KFD node %d ih_fifo overflow\n",
+ node->node_id);
return false;
}
+ kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);
return true;
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
+static bool dequeue_ih_ring_entry(struct kfd_node *node, u32 **ih_ring_entry)
{
int count;
- count = kfifo_out(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
-
- WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
+ if (kfifo_is_empty(&node->ih_fifo))
+ return false;
+ count = kfifo_out_linear_ptr(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
+ WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);
return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
- struct kfd_node *dev = container_of(work, struct kfd_node,
- interrupt_work);
- uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+ struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
+ uint32_t *ih_ring_entry;
unsigned long start_jiffies = jiffies;
- if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
- dev_err_once(dev->adev->dev, "Ring entry too small\n");
- return;
- }
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
+ while (dequeue_ih_ring_entry(dev, &ih_ring_entry)) {
dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
+ kfifo_skip_count(&dev->ih_fifo, dev->kfd->device_info.ih_ring_entry_size);
+
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
* reschedule the worker to avoid soft-lockup warnings
*/
- queue_work(dev->ih_wq, &dev->interrupt_work);
+ queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4b275937d05e..d05d199b5e44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -278,10 +278,11 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ uint64_t npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
+ uint64_t mpages = 0;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
@@ -295,14 +296,16 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
- for (i = j = 0; i < npages; i++) {
+ for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
+ if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ mpages++;
+ }
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
@@ -353,9 +356,12 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- while (i--) {
+ for (i = 0; i < npages && mpages; i++) {
+ if (!dst[i])
+ continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 2eff37aaf827..1695dd78ede8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 68dbc0399c87..3c0ae28c5923 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index 2b72d5b4949b..565858b9044d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 84e8ea3a8a0c..3014925d95ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -78,7 +78,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) {
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
@@ -182,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -244,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
@@ -301,7 +305,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0))
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
@@ -885,7 +890,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
@@ -909,8 +915,10 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
+ mqd->check_preemption_failed = check_preemption_failed;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 37930629edc5..4984b41cd372 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -28,6 +28,10 @@
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
+#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
+#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
+#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
{
@@ -40,7 +44,7 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- bool *over_subscription)
+ int *over_subscription)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -58,17 +62,20 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- *over_subscription = false;
+ *over_subscription = 0;
if (node->max_proc_per_quantum > 1)
max_proc_per_quantum = node->max_proc_per_quantum;
- if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm) ||
- gws_queue_count > 1) {
- *over_subscription = true;
+ if (process_count > max_proc_per_quantum)
+ *over_subscription |= OVER_SUBSCRIPTION_PROCESS_COUNT;
+ if (compute_queue_count > get_cp_queues_num(pm->dqm))
+ *over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
+ if (gws_queue_count > 1)
+ *over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+
+ if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
- }
map_queue_size = pm->pmf->map_queues_size;
/* calculate run list ib allocation size */
@@ -89,7 +96,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- bool *is_over_subscription)
+ int *is_over_subscription)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -134,7 +141,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct qcm_process_device *qpd;
struct queue *q;
struct kernel_queue *kq;
- bool is_over_subscription;
+ int is_over_subscription;
rl_wptr = retval = processes_mapped = 0;
@@ -213,15 +220,20 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(
- dev,
- "Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "");
+
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
}
- pm->is_over_subscription = is_over_subscription;
+ pm->is_over_subscription = !!is_over_subscription;
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
@@ -248,7 +260,8 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
default:
if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 5, 0))
pm->pmf = &kfd_aldebaran_pm_funcs;
else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
pm->pmf = &kfd_v9_pm_funcs;
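The over-subscription tracking in pm_calc_rlib_size() widens from a bool to a bitmask so the one-time warning can name what tripped it. A compilable toy version of the accumulate-then-report pattern (the limits here are made up):

    #include <stdio.h>

    #define OVERSUB_PROCESS (1 << 0)
    #define OVERSUB_QUEUES  (1 << 1)
    #define OVERSUB_GWS     (1 << 2)

    static void report(int reasons)
    {
        if (!reasons)
            return;
        printf("runlist oversubscribed due to%s%s%s\n",
               reasons & OVERSUB_PROCESS ? " too many processes" : "",
               reasons & OVERSUB_QUEUES ? " too many queues" : "",
               reasons & OVERSUB_GWS ? " cooperative launch" : "");
    }

    int main(void)
    {
        int reasons = 0;
        int process_count = 9, max_procs = 8;   /* toy limits */
        int queue_count = 24, max_queues = 32;

        if (process_count > max_procs)
            reasons |= OVERSUB_PROCESS;
        if (queue_count > max_queues)
            reasons |= OVERSUB_QUEUES;
        report(reasons);        /* prints "... due to too many processes" */
        return 0;
    }
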
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9e5ca0b93b2a..d8cd913aa772 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -32,7 +32,7 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#include <linux/kfd_ioctl.h>
+#include <uapi/linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
@@ -207,7 +207,8 @@ enum cache_policy {
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)))
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)))
struct kfd_node;
@@ -273,7 +274,6 @@ struct kfd_node {
/* Interrupts */
struct kfifo ih_fifo;
- struct workqueue_struct *ih_wq;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
@@ -366,6 +366,8 @@ struct kfd_dev {
struct kfd_node *nodes[MAX_KFD_NODES];
unsigned int num_nodes;
+ struct workqueue_struct *ih_wq;
+
/* Kernel doorbells for KFD device */
struct amdgpu_bo *doorbells;
@@ -1002,6 +1004,9 @@ struct kfd_process {
struct semaphore runtime_enable_sema;
bool is_runtime_retry;
struct kfd_runtime_info runtime_info;
+
+ /* if gpu page fault sent to KFD */
+ bool gpu_page_fault;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -1150,7 +1155,8 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
uint32_t i;
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d0ee173acf82..083f83c94531 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1160,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
*/
synchronize_rcu();
ef = rcu_access_pointer(p->ef);
- dma_fence_signal(ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
@@ -2127,10 +2128,11 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
irq_drain_fence[3] = pdd->process->pasid;
/*
- * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
+ * For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
*/
if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
irq_drain_fence[3] |= node_id << 16;
}
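The drain-fence hunk above packs a node id into the upper half of IH cookie DW[3] on GFX 9.4.3/9.4.4/9.5.0. A self-contained sketch of that packing; the layout (pasid in the low 16 bits, node id above it) follows the hunk, everything else is assumed:

    #include <stdint.h>
    #include <strings.h>    /* ffs() */

    uint32_t drain_fence_dw3(uint16_t pasid, uint32_t interrupt_bitmap)
    {
        uint32_t dw3 = pasid;                           /* low 16 bits: pasid */
        int node_id = ffs((int)interrupt_bitmap) - 1;   /* first set bit */

        if (node_id >= 0)
            dw3 |= (uint32_t)node_id << 16;             /* high bits: node id */
        return dw3;
    }
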
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 59b92d66e958..bd36a75309e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
if (pdd->already_dequeued)
return;
-
+ /* The MES context flush needs to filter out the case in which the
+ * KFD process was created without ever setting up the MES context
+ * and queue for creating a compute queue.
+ */
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
- if (dev->kfd->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
@@ -131,8 +134,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
- if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
!dev->kfd->shared_resources.enable_mes) {
if (gws)
ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
@@ -197,6 +201,7 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info, pqn->q->gws);
@@ -295,7 +300,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
free_gang_ctx_bo:
- amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
uninit_queue(*q);
*q = NULL;
@@ -320,11 +325,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
unsigned int max_queues = 127; /* HWS limit */
/*
- * On GFX 9.4.3, increase the number of queues that
- * can be created to 255. No HWS limit on GFX 9.4.3.
+ * On GFX 9.4.3/9.5.0, increase the number of queues that
+ * can be created to 255. No HWS limit on GFX 9.4.3/9.5.0.
*/
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
max_queues = 255;
q = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ad29634f8b44..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear to be twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
@@ -266,8 +276,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
- properties->eop_buf_bo->tbo.base.size,
+ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+ properties->eop_ring_buffer_size,
topo_dev->node_props.eop_buffer_size);
err = -EINVAL;
goto out_err_unreserve;
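The pr_debug fix above swaps a size_t printed with %lx for the u32 that is actually being validated, keeping the format specifier and the operand type in sync. The same rule in plain userspace printf terms:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t eop_size = 0x1000;     /* u32 -> %x (or PRIx32) */
        size_t bo_size = 0x2000;        /* size_t -> %zx */

        printf("eop size 0x%" PRIx32 ", bo size 0x%zx\n", eop_size, bo_size);
        return 0;
    }
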
@@ -394,7 +404,8 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
- gfxv == 90008) /* GFX_VERSION_ARCTURUS */
+ gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
+ gfxv == 90500)
vgpr_size = 0x80000;
else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
@@ -405,9 +416,10 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
return vgpr_size;
}
-#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \
+#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props) \
(kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
- LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU)
+ (((gfxv) == 90500) ? (props->lds_size_in_kb << 10) : LDS_SIZE_PER_CU) +\
+ HWREG_SIZE_PER_CU)
#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \
((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10*/
@@ -431,7 +443,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
: cu_num * 32;
- wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE);
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE);
ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
PAGE_SIZE);
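For gfx v9.5.0 the per-CU workgroup context size above stops assuming a fixed LDS footprint and derives it from the topology properties instead. A hedged sketch of the revised computation; the per-CU constants are placeholders, only the lds_size_in_kb conversion mirrors the macro:

    #include <stdint.h>

    #define SGPR_SIZE_PER_CU  0x4000    /* placeholder values */
    #define LDS_SIZE_PER_CU   0x10000
    #define HWREG_SIZE_PER_CU 0x1000

    uint32_t wg_ctx_data_size_per_cu(uint32_t gfxv, uint32_t vgpr_size,
                                     uint32_t lds_size_in_kb)
    {
        uint32_t lds = (gfxv == 90500) ? (lds_size_in_kb << 10) /* KiB -> bytes */
                                       : LDS_SIZE_PER_CU;

        return vgpr_size + SGPR_SIZE_PER_CU + lds + HWREG_SIZE_PER_CU;
    }
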
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3e2911895c74..9477a4adcd36 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1195,6 +1195,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
+ uint32_t gc_ip_version = KFD_GC_VERSION(node);
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
@@ -1204,7 +1205,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_node = prange->svm_bo->node;
- switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
@@ -1241,8 +1242,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
+ mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1257,9 +1260,13 @@ svm_range_get_pte_flags(struct kfd_node *node,
*/
else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_NC;
- /* PCIe P2P or extended system scope coherence */
- else
+ /* PCIe P2P on GPUs pre-9.5.0 */
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
+ !svm_nodes_in_same_hive(bo_node, node))
mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ /* Other remote memory */
+ else
+ mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the APU */
} else if (node->adev->flags & AMD_IS_APU) {
/* On NUMA systems, locality is determined per-page
@@ -1271,18 +1278,15 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ if (gc_ip_version < IP_VERSION(9, 5, 0))
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
}
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
@@ -1299,7 +1303,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
pte_flags = AMDGPU_PTE_VALID;
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
- if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
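The svm hunks fold GC 9.5.0 into the 9.4.3/9.4.4 MTYPE logic; notably, extended-coherent local memory now defaults to CC, with only pre-9.5.0 rev-0 parts falling back to UC. A reduced sketch of just that branch, assuming the usual IP_VERSION encoding of major<<16 | minor<<8 | rev:

    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

    enum mtype { MTYPE_UC, MTYPE_CC };

    enum mtype ext_coherent_mtype_local(unsigned int gc_ip, unsigned int rev_id)
    {
        /* pre-9.5.0 rev 0 parts fall back to uncached */
        if (gc_ip < IP_VERSION(9, 5, 0) && !rev_id)
            return MTYPE_UC;
        return MTYPE_CC;
    }
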
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 9476e30d6baa..ceb9fb475ef1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1714,7 +1714,8 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 5, 0))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
@@ -1776,7 +1777,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
struct amdgpu_cu_info *cu_info = &kdev->adev->gfx.cu_info;
struct amdgpu_gfx_config *gfx_info = &kdev->adev->gfx.config;
int gpu_processor_id;
- struct kfd_cache_properties *props_ext;
+ struct kfd_cache_properties *props_ext = NULL;
int num_of_entries = 0;
int num_of_cache_types = 0;
struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 11e3f2f3b174..abd3b6564373 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,6 +8,8 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
depends on BROKEN || !CC_IS_CLANG || ARM64 || LOONGARCH || RISCV || SPARC64 || X86_64
+ select CEC_CORE
+ select CEC_NOTIFIER
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || LOONGARCH || RISCV))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 48be917e7bc5..39df45f652b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -93,10 +93,12 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <media/cec-notifier.h>
#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -243,6 +245,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -955,13 +961,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
}
-static int dm_set_clockgating_state(void *handle,
+static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dm_set_powergating_state(void *handle,
+static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1036,8 +1042,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
@@ -1614,75 +1622,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
- const struct dmi_system_id *dmi_id;
+ int dmi_id;
+ struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
+ init_data->flags.support_edp0_on_dp1 = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
+ if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ init_data->flags.support_edp0_on_dp1 = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
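retrieve_dmi_info() now runs the whole table once through dmi_check_system(), letting per-entry callbacks accumulate quirk flags instead of stopping at the first match. A userspace model of that callback-table pattern; it uses simplified exact-string matching, whereas the real DMI matcher works on substrings:

    #include <stdbool.h>
    #include <string.h>

    struct dmi_id {
        const char *vendor, *product;
        int (*callback)(const struct dmi_id *id);
    };

    static struct { bool aux_hpd_discon, edp0_on_dp1; } quirks;

    static int hpd_cb(const struct dmi_id *id) { quirks.aux_hpd_discon = true; return 0; }
    static int edp_cb(const struct dmi_id *id) { quirks.edp0_on_dp1 = true; return 0; }

    static const struct dmi_id table[] = {
        { "Dell Inc.", "Precision 3660", hpd_cb },
        { "HP", "HP Elite mt645 G8 Mobile Thin Client", edp_cb },
        { NULL, NULL, NULL },
    };

    /* Walk the whole table; every match fires its callback. */
    static int check_system(const struct dmi_id *tbl, const char *ven, const char *prod)
    {
        int matches = 0;

        for (; tbl->vendor; tbl++) {
            if (strcmp(tbl->vendor, ven) || strcmp(tbl->product, prod))
                continue;
            matches++;
            if (tbl->callback && tbl->callback(tbl))
                break;  /* a nonzero return stops the scan */
        }
        return matches;
    }

    int main(void)
    {
        check_system(table, "HP", "HP Elite mt645 G8 Mobile Thin Client");
        return quirks.edp0_on_dp1 ? 0 : 1;
    }
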
@@ -1990,7 +2053,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm);
+ retrieve_dmi_info(&adev->dm, &init_data);
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2030,6 +2093,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
+ adev->dm.dc->debug.force_disable_subvp = true;
+
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
@@ -2152,9 +2218,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs)
+ amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctx.crtc_ctx)
DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+ adev->dm.secure_display_ctx.support_mul_roi = true;
+
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -2197,15 +2267,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.secure_display_ctxs) {
+ if (adev->dm.secure_display_ctx.crtc_ctx) {
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc) {
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
}
}
- kfree(adev->dm.secure_display_ctxs);
- adev->dm.secure_display_ctxs = NULL;
+ kfree(adev->dm.secure_display_ctx.crtc_ctx);
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
}
#endif
if (adev->dm.hdcp_workqueue) {
@@ -2338,7 +2408,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2746,6 +2817,48 @@ out_fail:
mutex_unlock(&mgr->lock);
}
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_set_phys_addr(n,
+ connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(ddev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (suspend)
+ hdmi_cec_unset_edid(aconnector);
+ else
+ hdmi_cec_set_edid(aconnector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
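The new CEC plumbing publishes the EDID's source physical address to a cec_notifier and invalidates it while suspended. A minimal kernel-style sketch of just that decision, with the connector iteration elided; the two notifier calls are the in-kernel API from media/cec-notifier.h:

    #include <media/cec-notifier.h>

    static void cec_handle_pm(struct cec_notifier *n, u16 phys_addr, bool suspend)
    {
        if (!n)
            return;

        if (suspend)
            cec_notifier_phys_addr_invalidate(n);     /* address no longer valid */
        else
            cec_notifier_set_phys_addr(n, phys_addr); /* re-publish from EDID */
    }
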
@@ -3017,6 +3130,8 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (IS_ERR(adev->dm.cached_state))
return PTR_ERR(adev->dm.cached_state);
+ s3_handle_hdmi_cec(adev_to_drm(adev), true);
+
s3_handle_mst(adev_to_drm(adev), true);
amdgpu_dm_irq_suspend(adev);
@@ -3260,8 +3375,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
+
+ /* leave display off for S4 sequence */
+ if (adev->in_s4)
+ return 0;
+
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3289,6 +3415,8 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
*/
amdgpu_dm_irq_resume_early(adev);
+ s3_handle_hdmi_cec(ddev, false);
+
/* On resume we need to rewrite the MSTM control bits to enable MST*/
s3_handle_mst(ddev, false);
@@ -3457,6 +3585,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
+ int min_input_signal_override;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
@@ -3493,6 +3622,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_min_input_signal = 0;
caps->aux_max_input_signal = 512;
}
+
+ min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
+ if (min_input_signal_override >= 0)
+ caps->min_input_signal = min_input_signal_override;
}
void amdgpu_dm_update_connector_after_detect(
@@ -3598,6 +3731,7 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->drm_edid = NULL;
+ hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
@@ -3607,6 +3741,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
drm_edid_connector_update(connector, aconnector->drm_edid);
+ hdmi_cec_set_edid(aconnector);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
connector->display_info.source_physical_address);
@@ -3623,6 +3758,7 @@ void amdgpu_dm_update_connector_after_detect(
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
+ hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->num_modes = 0;
@@ -4785,6 +4921,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -4852,7 +4989,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
@@ -5306,7 +5442,8 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmub);
return r;
}
@@ -5522,8 +5659,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -5622,7 +5758,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address,
- tmz_surface, force_disable_dcc);
+ tmz_surface);
if (ret)
return ret;
@@ -5643,7 +5779,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
int ret;
- bool force_disable_dcc = false;
ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
@@ -5654,13 +5789,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state,
afb->tiling_flags,
&plane_info,
&dc_plane_state->address,
- afb->tmz_surface,
- force_disable_dcc);
+ afb->tmz_surface);
if (ret)
return ret;
@@ -7042,6 +7175,7 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+ cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -7176,8 +7310,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
- drm_edid = drm_edid_read(connector);
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7222,14 +7362,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
- drm_edid = drm_edid_read(connector);
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -8278,6 +8425,27 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_connector_info conn_info;
+ struct drm_device *ddev = aconnector->base.dev;
+ struct device *hdmi_dev = ddev->dev;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
+ drm_info(ddev, "HDMI-CEC feature masked\n");
+ return -EINVAL;
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
+ aconnector->notifier =
+ cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
+ if (!aconnector->notifier) {
+ drm_err(ddev, "Failed to create cec notifier\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
/*
* Note: this function assumes that dc_link_detect() was called for the
@@ -8341,6 +8509,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ amdgpu_dm_initialize_hdmi_connector(aconnector);
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
@@ -8400,16 +8572,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
- /*
- * We have no guarantee that the frontend index maps to the same
- * backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
- */
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
- adev,
- acrtc->crtc_id);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8435,28 +8597,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
} else {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -8927,6 +9068,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8953,14 +9095,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9097,7 +9240,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
afb->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- afb->tmz_surface, false);
+ afb->tmz_surface);
drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -9131,7 +9274,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9297,11 +9440,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -10060,14 +10203,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ uint8_t cnt;
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.window_param.update_win = true;
-
- /**
- * It takes 2 frames for HW to stably generate CRC when
- * resuming from suspend, so we set skip_frame_cnt 2.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
+ if (acrtc->dm_irq_params.window_param[cnt].enable) {
+ acrtc->dm_irq_params.window_param[cnt].update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt 2.
+ */
+ acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
+ }
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -11155,8 +11303,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11410,6 +11558,30 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state) || !old_plane_state) {
+ DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ return false;
+ }
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
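amdgpu_dm_crtc_mem_type_changed() walks every plane attached to the CRTC and rejects the async flip if any framebuffer moved between memory domains. A toy, compilable model of that per-plane comparison; the snapshot struct and the VRAM/GTT enum are stand-ins:

    #include <stdbool.h>

    enum mem_type { MEM_VRAM, MEM_GTT };

    struct plane_snap {
        bool has_fb;
        enum mem_type old_type, new_type;
    };

    /* Flag an async-flip blocker: any plane whose backing store moved. */
    bool mem_type_changed(const struct plane_snap *planes, int n)
    {
        for (int i = 0; i < n; i++)
            if (planes[i].has_fb &&
                planes[i].old_type != planes[i].new_type)
                return true;
        return false;
    }
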
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11607,10 +11779,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11905,9 +12073,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
@@ -12239,10 +12409,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+ }
+
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..d2703ca7dff3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -541,12 +541,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @secure_display_ctxs:
+ * @secure_display_ctx:
*
- * Store the ROI information and the work_struct to command dmub and psp for
- * all crtcs.
+ * Store secure display relevant info, e.g. the ROI information,
+ * the work_struct to command dmub, etc.
*/
- struct secure_display_context *secure_display_ctxs;
+ struct secure_display_context secure_display_ctx;
#endif
/**
* @hpd_rx_offload_wq:
@@ -671,6 +671,8 @@ struct amdgpu_dm_connector {
uint32_t connector_id;
int bl_idx;
+ struct cec_notifier *notifier;
+
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
const struct drm_edid *drm_edid;
@@ -697,6 +699,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
@@ -1010,4 +1014,8 @@ void dm_free_gpu_mem(struct amdgpu_device *adev,
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..033bd817d871 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -83,45 +84,274 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+static void update_phy_id_mapping(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
+ struct drm_connector_list_iter iter;
+ uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;
+
+ dm->secure_display_ctx.phy_mapping_updated = false;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->status != connector_status_connected)
+ continue;
+
+ if (idx >= AMDGPU_DM_MAX_CRTC) {
+ DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ sort_connector[idx] = aconnector;
+ idx++;
+ connector_cnt++;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* sort connectors by link_enc_hw_instance first */
+ for (idx = connector_cnt; idx > 1 ; idx--) {
+ for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
+ if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
+ sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
+ swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
+ }
+ }
+
+ /*
+ * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
+ * sorted together above.
+ */
+ for (idx = 0; idx < connector_cnt; /* do nothing */) {
+ if (sort_connector[idx]->mst_root) {
+ uint8_t i, j, k;
+ uint8_t mst_con_cnt = 1;
+
+ for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
+ if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
+ mst_con_cnt++;
+ else
+ break;
+ }
+
+ for (i = mst_con_cnt; i > 1; i--) {
+ for (j = idx; j < (idx + i - 2); j++) {
+ int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
+ int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
+ u8 *rad;
+ u8 *next_rad;
+ bool swap = false;
+
+ /* Sort by mst tree depth first. Then compare RAD if depth is the same */
+ if (mstb_lct > next_mstb_lct) {
+ swap = true;
+ } else if (mstb_lct == next_mstb_lct) {
+ if (mstb_lct == 1) {
+ if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
+ swap = true;
+ } else if (mstb_lct > 1) {
+ rad = sort_connector[j]->mst_output_port->parent->rad;
+ next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;
+
+ for (k = 0; k < mstb_lct - 1; k++) {
+ int shift = (k % 2) ? 0 : 4;
+ int port_num = (rad[k / 2] >> shift) & 0xf;
+ int next_port_num = (next_rad[k / 2] >> shift) & 0xf;
+
+ if (port_num > next_port_num) {
+ swap = true;
+ break;
+ }
+ }
+ } else {
+ DRM_ERROR("MST LCT shouldn't be set as < 1");
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+ }
+
+ if (swap)
+ swap(sort_connector[j], sort_connector[j + 1]);
+ }
+ }
+
+ idx += mst_con_cnt;
+ } else {
+ idx++;
+ }
+ }
+
+ /* Complete sorting. Assign relevant result to dm->secure_display_ctx.phy_id_mapping[] */
+ memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
+ for (idx = 0; idx < connector_cnt; idx++) {
+ aconnector = sort_connector[idx];
+
+ dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;
+
+ if (sort_connector[idx]->mst_root) {
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
+ dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
+ memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
+ aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
+ }
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
+ dm->secure_display_ctx.phy_mapping_updated = true;
+}
+
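Within update_phy_id_mapping() above, connectors at equal MST tree depth are ordered by walking the relative address (RAD) one nibble per hop, high nibble first. The same comparison as a standalone helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Port numbers are packed one nibble per hop, high nibble first. */
    bool rad_greater(const uint8_t *rad, const uint8_t *next_rad, int lct)
    {
        for (int k = 0; k < lct - 1; k++) {
            int shift = (k % 2) ? 0 : 4;
            int port = (rad[k / 2] >> shift) & 0xf;
            int next_port = (next_rad[k / 2] >> shift) & 0xf;

            if (port != next_port)
                return port > next_port;
        }
        return false;   /* equal RADs */
    }
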
+static bool get_phy_id(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
+{
+ int idx, idx_2;
+ bool found = false;
+
+ /*
+ * Assume secure display starts after all connectors are probed, and that
+ * the connection config is static as well.
+ */
+ if (!dm->secure_display_ctx.phy_mapping_updated) {
+ DRM_WARN("%s Should update the phy id table before get it's value", __func__);
+ return false;
+ }
+
+ for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
+ DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
+ return false;
+ }
+
+ if (aconnector->dc_link->link_enc_hw_inst ==
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
+ found = true;
+ goto out;
+ } else {
+ /* Could be caused by wrongly passing an mst root connector */
+ if (!aconnector->mst_output_port) {
+ DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
+ return false;
+ }
+
+ if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_primary == NULL) {
+ DRM_WARN("%s pass in a stale mst connector", __func__);
+ }
+
+ if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
+ aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
+ if (aconnector->mst_output_port->parent->lct == 1) {
+ found = true;
+ goto out;
+ } else if (aconnector->mst_output_port->parent->lct > 1) {
+ /* Check RAD */
+ for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
+ int shift = (idx_2 % 2) ? 0 : 4;
+ int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
+ int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
+
+ if (port_num != port_num2)
+ break;
+ }
+
+ if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
+ found = true;
+ goto out;
+ }
+ } else {
+ DRM_ERROR("lCT should be >= 1");
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+out:
+ if (found) {
+ DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
+ *phy_id = idx;
+ } else {
+ DRM_WARN("Can't find associated phy ID");
+ return false;
+ }
+
+ return true;
+}
+
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_dm_connector *aconnector;
bool was_activated;
+ uint8_t phy_id;
+ unsigned long flags;
+ int i;
- spin_lock_irq(&drm_dev->event_lock);
- was_activated = acrtc->dm_irq_params.window_param.activated;
- acrtc->dm_irq_params.window_param.x_start = 0;
- acrtc->dm_irq_params.window_param.y_start = 0;
- acrtc->dm_irq_params.window_param.x_end = 0;
- acrtc->dm_irq_params.window_param.y_end = 0;
- acrtc->dm_irq_params.window_param.activated = false;
- acrtc->dm_irq_params.window_param.update_win = false;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- spin_unlock_irq(&drm_dev->event_lock);
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ was_activated = acrtc->dm_irq_params.crc_window_activated;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ acrtc->dm_irq_params.window_param[i].x_start = 0;
+ acrtc->dm_irq_params.window_param[i].y_start = 0;
+ acrtc->dm_irq_params.window_param[i].x_end = 0;
+ acrtc->dm_irq_params.window_param[i].y_end = 0;
+ acrtc->dm_irq_params.window_param[i].enable = false;
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
+ }
+ acrtc->dm_irq_params.crc_window_activated = false;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
/* Disable secure_display if it was enabled */
- if (was_activated) {
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
/* stop ROI update on this crtc */
- flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
- flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
- dc_stream_forward_crc_window(stream, NULL, true);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
+ else
+ dc_stream_forward_crc_window(stream, NULL, phy_id, true);
+ } else {
+ DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
+ }
}
}
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct psp_context *psp;
struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
uint8_t phy_inst;
+ struct amdgpu_display_manager *dm;
+ struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t roi_idx = 0;
int ret;
+ int i;
- secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
@@ -133,21 +363,50 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
return;
}
+ dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
- phy_inst = stream->link->link_enc_hw_inst;
-
- /* need lock for multiple crtcs to use the command buffer */
- mutex_lock(&psp->securedisplay_context.mutex);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ if (!aconnector)
+ return;
- psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
- TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_inst)) {
+ DRM_WARN("%s Can't find mapping phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ /* need lock for multiple crtcs to use the command buffer */
+ mutex_lock(&psp->securedisplay_context.mutex);
/* PSP TA is expected to finish data transmission over I2C within the current frame,
* even if there are up to 4 CRTCs requesting to send in this frame.
*/
- ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (dm->secure_display_ctx.support_mul_roi) {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;
+
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ if (crc_cpy[i].crc_ready)
+ roi_idx |= 1 << i;
+ }
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+ } else {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ }
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
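The V2 command above packs per-window CRC readiness into roi_idx as a bitmask: bit i is set when CRC engine i holds a fresh value. A minimal decoding sketch for the receiving side, assuming the same MAX_CRC_WINDOW_NUM bound (consume_window_crc() is a hypothetical helper, not part of this patch):

	uint8_t roi_idx = msg->roi_idx;
	int i;

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		if (roi_idx & BIT(i))		/* window i carries a valid CRC */
			consume_window_crc(msg, i);	/* hypothetical */
	}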
@@ -160,22 +419,47 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct amdgpu_display_manager *dm;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
+ struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t phy_id;
- secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector)
+ return;
+
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_id)) {
+ DRM_WARN("%s Can't find matching phy id!\n", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, roi_cpy,
+ phy_id, false);
+ else
+ dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
+ phy_id, false);
mutex_unlock(&dm->dc_lock);
}
@@ -186,7 +470,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
- ret = acrtc->dm_irq_params.window_param.activated;
+ ret = acrtc->dm_irq_params.crc_window_activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
@@ -224,10 +508,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, NULL, enable, enable)) {
+ stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL;
goto unlock;
}
@@ -258,6 +546,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
struct drm_crtc_commit *commit;
struct dm_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+#endif
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct drm_dp_aux *aux = NULL;
bool enable = false;
@@ -357,6 +649,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +670,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
@@ -402,6 +696,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Initialize phy id mapping table for secure display */
+ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
+ !dm->secure_display_ctx.phy_mapping_updated)
+ update_phy_id_mapping(adev);
+#endif
+
cleanup:
if (commit)
drm_crtc_commit_put(commit);
@@ -456,7 +757,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
}
if (dm_is_crc_source_crtc(cur_crc_src)) {
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2]))
return;
@@ -472,8 +773,17 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct secure_display_context *secure_display_ctx = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
+ bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
+ uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
unsigned long flags1;
+ bool forward_roi_change = false;
+ bool notify_ta = false;
+ bool all_crc_ready = true;
+ struct dc_stream_state *stream_state;
+ int i;
if (crtc == NULL)
return;
@@ -481,78 +791,160 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
acrtc = to_amdgpu_crtc(crtc);
adev = drm_to_adev(crtc->dev);
drm_dev = crtc->dev;
+ stream_state = to_dm_crtc_state(crtc->state)->stream;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
- !dm_is_crc_source_crtc(cur_crc_src))
- goto cleanup;
-
- if (!acrtc->dm_irq_params.window_param.activated)
- goto cleanup;
+ !dm_is_crc_source_crtc(cur_crc_src)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
+ }
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
+ if (!acrtc->dm_irq_params.crc_window_activated) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
}
- secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
- if (WARN_ON(secure_display_ctx->crtc != crtc)) {
- /* We have set the crtc when creating secure_display_context,
+ crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
+ if (WARN_ON(crtc_ctx->crtc != crtc)) {
+ /* We set the crtc when creating the secure_display_crtc_context;
* don't expect it to be changed here.
*/
- secure_display_ctx->crtc = crtc;
+ crtc_ctx->crtc = crtc;
}
- if (acrtc->dm_irq_params.window_param.update_win) {
- /* prepare work for dmub to update ROI */
- secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
- secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
- acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
- acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&secure_display_ctx->forward_roi_work);
-
- acrtc->dm_irq_params.window_param.update_win = false;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ struct crc_params crc_window = {
+ .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ };
+
+ crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
+
+ if (!acrtc->dm_irq_params.window_param[i].enable) {
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- /* Statically skip 1 frame, because we may need to wait below things
- * before sending ROI to dmub:
- * 1. We defer the work by using system workqueue.
- * 2. We may need to wait for dc_lock before accessing dmub.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+ if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- } else {
- /* prepare work for psp to read ROI/CRC and send to I2C */
- schedule_work(&secure_display_ctx->notify_ta_work);
+ if (acrtc->dm_irq_params.window_param[i].update_win) {
+ crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
+ crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
+ crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
+ crc_window.windowa_y_start;
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to dmub to update ROI */
+ forward_roi_change = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* update ROI via dm */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ reset_crc_frame_count[i] = true;
+
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the following
+ * before sending the ROI to dmub:
+ * 1. We defer the work by using system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ } else {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
+ &crc_r[i], &crc_g[i], &crc_b[i]))
+ DRM_ERROR("Secure Display: failed to get CRC from engine %d\n", i);
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to psp to read ROI/CRC and output via I2C */
+ notify_ta = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* Keep overwriting so the ROI window doesn't get changed. */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ /* crc ready for psp to read out */
+ crtc_ctx->crc_info.crc[i].crc_ready = true;
+ }
}
-cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+
+ if (forward_roi_change)
+ schedule_work(&crtc_ctx->forward_roi_work);
+
+ if (notify_ta)
+ schedule_work(&crtc_ctx->notify_ta_work);
+
+ spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
+ crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
+ crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
+
+ if (!crtc_ctx->roi[i].enable) {
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ continue;
+ }
+
+ if (!crtc_ctx->crc_info.crc[i].crc_ready)
+ all_crc_ready = false;
+
+ if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
+ /* Reset the reference frame count after the user updates the ROI
+ * or it reaches the maximum value.
+ */
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ else
+ crtc_ctx->crc_info.crc[i].frame_count += 1;
+ }
+ spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
+
+ if (all_crc_ready)
+ complete_all(&crtc_ctx->crc_info.completion);
}
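Once every enabled window has a fresh CRC, the handler signals crc_info.completion; a reader is expected to wait on it and snapshot the per-window data under crc_info.lock, mirroring how the handler publishes it. A sketch of such a consumer (not part of this patch; the 100 ms timeout is an arbitrary choice):

	/* in-kernel sketch; needs <linux/completion.h> and <linux/spinlock.h> */
	static int read_window_crcs(struct secure_display_crtc_context *crtc_ctx,
				    struct crc_data out[MAX_CRC_WINDOW_NUM])
	{
		unsigned long flags;

		if (!wait_for_completion_timeout(&crtc_ctx->crc_info.completion,
						 msecs_to_jiffies(100)))
			return -ETIMEDOUT;

		spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags);
		memcpy(out, crtc_ctx->crc_info.crc, sizeof(crtc_ctx->crc_info.crc));
		spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags);

		return 0;
	}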
-struct secure_display_context *
-amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct secure_display_context *secure_display_ctxs = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
int i;
- secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
- sizeof(struct secure_display_context),
+ crtc_ctx = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_crtc_context),
GFP_KERNEL);
- if (!secure_display_ctxs)
- return NULL;
+ if (!crtc_ctx) {
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
+ return;
+ }
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
- INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
- secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
+ spin_lock_init(&crtc_ctx[i].crc_info.lock);
}
- return secure_display_ctxs;
+ adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
+
+ adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 748e80ef40d0..3da056c8d20b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -40,20 +40,53 @@ enum amdgpu_dm_pipe_crc_source {
};
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#define MAX_CRTC 6
+
+enum secure_display_mode {
+ /* via dmub + psp */
+ LEGACY_MODE = 0,
+ /* driver directly */
+ DISPLAY_CRC_MODE,
+ SECURE_DISPLAY_MODE_MAX,
+};
+
+struct phy_id_mapping {
+ bool assigned;
+ bool is_mst;
+ uint8_t enc_hw_inst;
+ u8 lct;
+ u8 port_num;
+ u8 rad[8];
+};
+
+struct crc_data {
+ uint32_t crc_R;
+ uint32_t crc_G;
+ uint32_t crc_B;
+ uint32_t frame_count;
+ bool crc_ready;
+};
+
+struct crc_info {
+ struct crc_data crc[MAX_CRC_WINDOW_NUM];
+ struct completion completion;
+ spinlock_t lock;
+};
+
struct crc_window_param {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is enabled or not */
- bool activated;
+ bool enable;
/* Update crc window during vertical blank or not */
bool update_win;
/* skip reading/writing for a few frames */
int skip_frame_cnt;
};
-struct secure_display_context {
+struct secure_display_crtc_context {
/* work to notify PSP TA */
struct work_struct notify_ta_work;
@@ -63,7 +96,20 @@ struct secure_display_context {
struct drm_crtc *crtc;
/* Region of Interest (ROI) */
- struct rect rect;
+ struct crc_window roi[MAX_CRC_WINDOW_NUM];
+
+ struct crc_info crc_info;
+};
+
+struct secure_display_context {
+
+ struct secure_display_crtc_context *crtc_ctx;
+ /* Whether dmub supports multiple ROI settings */
+ bool support_mul_roi;
+ enum secure_display_mode op_mode;
+ bool phy_mapping_updated;
+ int phy_id_mapping_cnt;
+ struct phy_id_mapping phy_id_mapping[MAX_CRTC];
};
#endif
@@ -95,8 +141,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
- struct amdgpu_device *adev);
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..049046c60462 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
+#include <media/cec-notifier.h>
#include "dc.h"
#include "amdgpu.h"
@@ -258,7 +259,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -389,7 +390,7 @@ static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -613,7 +614,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
uint32_t wr_buf_size = 40;
long param[3];
bool use_prefer_link_setting;
- struct link_training_settings link_lane_settings;
+ struct link_training_settings link_lane_settings = {0};
int max_param_num = 3;
uint8_t param_nums = 0;
int r = 0;
@@ -768,7 +769,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
- struct link_training_settings link_training_settings;
+ struct link_training_settings link_training_settings = {0};
int i;
if (size == 0)
@@ -902,9 +903,10 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_fw_meta_info *fw_meta_info = NULL;
struct dmub_debugfs_trace_entry *entries;
uint8_t *tbuf_base;
- uint32_t tbuf_size, max_entries, num_entries, i;
+ uint32_t tbuf_size, max_entries, num_entries, first_entry, i;
if (!fb_info)
return 0;
@@ -913,20 +915,42 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
if (!tbuf_base)
return 0;
- tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ if (adev->dm.dmub_srv)
+ fw_meta_info = &adev->dm.dmub_srv->meta_info;
+
+ tbuf_size = fw_meta_info ? fw_meta_info->trace_buffer_size :
+ DMUB_TRACE_BUFFER_SIZE;
max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
sizeof(struct dmub_debugfs_trace_entry);
num_entries =
((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+ /* DMCUB tracebuffer is a ring. If it rolled over, print a hint that
+ * entries are being overwritten.
+ */
+ if (num_entries > max_entries)
+ seq_printf(m, "...\n");
+
+ first_entry = num_entries % max_entries;
num_entries = min(num_entries, max_entries);
entries = (struct dmub_debugfs_trace_entry
*)(tbuf_base +
sizeof(struct dmub_debugfs_trace_header));
- for (i = 0; i < num_entries; ++i) {
+ /* To print entries chronologically, walk from the first entry to the end
+ * of the buffer, then from the base of the buffer to the first entry.
+ */
+ for (i = first_entry; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+ for (i = 0; i < first_entry; ++i) {
struct dmub_debugfs_trace_entry *entry = &entries[i];
seq_printf(m,
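The wrap-around walk above is the standard way to dump a ring chronologically when only a monotonically increasing write count is kept. A condensed sketch of the same logic, with print_entry() standing in for the seq_printf() calls:

	static void dump_ring(const struct dmub_debugfs_trace_entry *ring,
			      uint32_t max_entries, uint32_t write_count)
	{
		uint32_t first = write_count % max_entries; /* oldest surviving entry */
		uint32_t valid = min(write_count, max_entries);
		uint32_t i;

		for (i = first; i < valid; i++)	/* oldest .. end of buffer */
			print_entry(&ring[i]);
		for (i = 0; i < first; i++)	/* base .. newest entry */
			print_entry(&ring[i]);
	}

When the buffer has not wrapped yet, first == write_count and the first loop does nothing, so entries still come out in insertion order.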
@@ -2825,6 +2849,67 @@ static int is_dpia_link_show(struct seq_file *m, void *data)
return 0;
}
+/**
+ * hdmi_cec_state_show - Read out the HDMI-CEC feature status
+ * @m: sequence file.
+ * @data: unused.
+ *
+ * Return: 0 on success
+ */
+static int hdmi_cec_state_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "%s:%d\n", connector->name, connector->base.id);
+ seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0);
+
+ return 0;
+}
+
+/**
+ * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side
+ * @f: file structure.
+ * @buf: userspace buffer; write '1' to enable, '0' to disable the CEC feature.
+ * @size: size of buffer from userspace.
+ * @pos: unused.
+ *
+ * Return: size on success, error code on failure
+ */
+static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int ret;
+ bool enable;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_device *ddev = aconnector->base.dev;
+
+ if (size == 0)
+ return -EINVAL;
+
+ ret = kstrtobool_from_user(buf, size, &enable);
+ if (ret) {
+ drm_dbg_driver(ddev, "invalid user data!\n");
+ return ret;
+ }
+
+ if (enable) {
+ if (aconnector->notifier)
+ return -EINVAL;
+ ret = amdgpu_dm_initialize_hdmi_connector(aconnector);
+ if (ret)
+ return ret;
+ hdmi_cec_set_edid(aconnector);
+ } else {
+ if (!aconnector->notifier)
+ return -EINVAL;
+ cec_notifier_conn_unregister(aconnector->notifier);
+ aconnector->notifier = NULL;
+ }
+
+ return size;
+}
+
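From userspace the toggle is a plain write to the connector's debugfs entry. A sketch of driving it (the DRI card index and connector name in the path are assumptions that vary per system):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* '1' registers the CEC notifier, '0' unregisters it */
		int fd = open("/sys/kernel/debug/dri/0/HDMI-A-1/hdmi_cec_state",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}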
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2837,6 +2922,7 @@ DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
+DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2972,7 +3058,8 @@ static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
- {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+ {"hdmi_cec_state", &hdmi_cec_state_fops}
};
/*
@@ -3457,8 +3544,8 @@ static int crc_win_x_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3474,7 +3561,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_start;
+ *val = acrtc->dm_irq_params.window_param[0].x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3494,8 +3581,8 @@ static int crc_win_y_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3511,7 +3598,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_start;
+ *val = acrtc->dm_irq_params.window_param[0].y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3530,8 +3617,8 @@ static int crc_win_x_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3547,7 +3634,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_end;
+ *val = acrtc->dm_irq_params.window_param[0].x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3566,8 +3653,8 @@ static int crc_win_y_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3583,7 +3670,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_end;
+ *val = acrtc->dm_irq_params.window_param[0].y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3606,13 +3693,14 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- acrtc->dm_irq_params.window_param.activated = true;
- acrtc->dm_irq_params.window_param.update_win = true;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param[0].enable = true;
+ acrtc->dm_irq_params.window_param[0].update_win = true;
+ acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0;
+ acrtc->dm_irq_params.crc_window_activated = true;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..c0dc23244049 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6cbbb71d752b..fbd80d8545a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -885,6 +885,12 @@ bool dm_helpers_dp_write_dsc_enable(
return ret;
}
+bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
bool dp_sink_present;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..a215234151ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,6 +894,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
+ int i;
+
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -907,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fall back to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed to get HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -935,6 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -948,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed to put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6a7ecc1e4602..6c9de834455b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -39,7 +39,9 @@ struct dm_irq_params {
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- struct crc_window_param window_param;
+ struct crc_window_param window_param[MAX_CRC_WINDOW_NUM];
+ /* Whether at least one CRC window is activated */
+ bool crc_window_activated;
#endif
#endif
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..07e744da7bf4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -504,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -590,11 +597,12 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
- if (drm_connector_init(
+ if (drm_connector_dynamic_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort)) {
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL)) {
kfree(aconnector);
return NULL;
}
@@ -1688,16 +1696,16 @@ clean_exit:
return ret;
}
-static unsigned int kbps_from_pbn(unsigned int pbn)
+static uint32_t kbps_from_pbn(unsigned int pbn)
{
- unsigned int kbps = pbn;
+ uint64_t kbps = (uint64_t)pbn;
kbps *= (1000000 / PEAK_FACTOR_X1000);
kbps *= 8;
kbps *= 54;
kbps /= 64;
- return kbps;
+ return (uint32_t)kbps;
}
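The widening matters because the old 32-bit intermediate overflows for large PBN values. With PEAK_FACTOR_X1000 = 1006 (the value used in this file), the scale factor is (1000000 / 1006) * 8 * 54 / 64 in integer steps, and pbn * 994 * 8 * 54 wraps once pbn exceeds roughly 10,000 (about 67 Gbps). A worked example:

	uint32_t pbn = 12000;	/* plausible full_pbn on a fast MST link */

	/* 12000 * 994 * 8 * 54 = 5,152,896,000 > UINT32_MAX: wraps in 32 bits */
	uint32_t bad  = pbn * 994u * 8u * 54u / 64u;		/* 13,405,136 kbps (wrong) */

	/* widened intermediate keeps the full product */
	uint64_t good = (uint64_t)pbn * 994 * 8 * 54 / 64;	/* 80,514,000 kbps */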
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
@@ -1819,9 +1827,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+ /* Get last DP link BW capability; the mode shall be supported by a legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1835,11 +1852,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For the LCT 1 topology case - only one mstb */
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 495e3cd70426..92472109f84a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include "drm/drm_framebuffer.h"
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -176,7 +177,7 @@ static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier
return AMD_FMT_MOD_GET(TILE, modifier);
}
-static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
@@ -189,6 +190,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ tiling_info->gfxversion = DcGfxVersion8;
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
@@ -209,7 +211,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -230,7 +232,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgp
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
@@ -260,7 +262,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amd
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union dc_tiling_info *tiling_info,
+ const struct dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
@@ -275,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
@@ -307,18 +312,18 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxVersion9;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
@@ -358,10 +363,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
@@ -370,8 +374,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxAddr3;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
dcc->enable = 1;
@@ -835,12 +840,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -900,16 +904,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else if (adev->family >= AMDGPU_FAMILY_AI) {
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else {
@@ -1000,14 +1002,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state =
dm_plane_state_new->dc_state;
- bool force_disable_dcc = !plane_state->dcc.enable;
amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
&plane_state->dcc, &plane_state->address,
- afb->tmz_surface, force_disable_dcc);
+ afb->tmz_surface);
}
return 0;
@@ -1421,6 +1422,20 @@ static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
+static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct dc_plane_state *dc_plane_state;
+
+ if (!dm_plane_state || !dm_plane_state->dc_state)
+ return;
+
+ dc_plane_state = dm_plane_state->dc_state;
+
+ dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
@@ -1429,6 +1444,16 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};
+static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
+ .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
+ .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
+ .atomic_check = amdgpu_dm_plane_atomic_check,
+ .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
+ .atomic_async_update = amdgpu_dm_plane_atomic_async_update,
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = amdgpu_dm_plane_panic_flush,
+};
+
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -1855,7 +1880,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
- drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 6498359bff6f..615d2ab2b803 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -47,12 +47,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc);
+ bool tmz_surface);
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..e140b7a04d72 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
@@ -201,14 +202,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +251,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+ for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index c9a6de110b74..a62f6c51301c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3088,11 +3088,12 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
- if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
- DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ if ((info->ext_disp_conn_info.path[i].caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
- info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
- DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ info->ext_disp_conn_info.path[i].caps &= ~AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ info->ext_disp_conn_info.path[i].caps |= AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
}
}
// Log the Checksum and Voltage Swing
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index ab1132bc896a..d9955c5d2e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -174,7 +174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
###############################################################################
# DCN35
###############################################################################
-CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
+CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o
AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 0e243f4344d0..4c3e58c730b1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
+ if (ctx->dce_version == DCN_VERSION_3_51)
+ dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ else
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 7920f6f1aa62..76c612ecfe3c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -34,8 +34,8 @@
#include "dm_services.h"
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "clk/clk_11_0_1_offset.h"
#include "clk/clk_11_0_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
new file mode 100644
index 000000000000..6a6ae618650b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35_clk_mgr.h"
+
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK4_CURRENT_CNT 0x16F00
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F01
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B04B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
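FbMult is a fixed-point feedback multiplier: 9 integer bits at bit 0 and 16 fractional bits at bit 16. A sketch of recovering the VCO frequency from CLK1_CLK_PLL_REQ, assuming the reference clock is already known in kHz (read_clk_reg() is a hypothetical register-read helper):

	uint32_t pll_req = read_clk_reg(mmCLK1_CLK_PLL_REQ);	/* hypothetical */
	uint32_t fb_int  = (pll_req & CLK1_CLK_PLL_REQ__FbMult_int_MASK) >>
			   CLK1_CLK_PLL_REQ__FbMult_int__SHIFT;
	uint32_t fb_frac = (pll_req & CLK1_CLK_PLL_REQ__FbMult_frac_MASK) >>
			   CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT;

	/* VCO = refclk * (FbMult_int + FbMult_frac / 2^16) */
	uint64_t vco_khz = (uint64_t)ref_clk_khz * fb_int +
			   (((uint64_t)ref_clk_khz * fb_frac) >> 16);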
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
+
+static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+void dcn351_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+	/* DCN3.51 uses different register offsets; override them before the shared construct */
+ clk_mgr->base.regs = &clk_mgr_regs_dcn351;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351;
+
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+}
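For readers unfamiliar with DC's register plumbing: the SR()/CLK_SR_DCN35() macros above fill a clk_mgr_registers table that REG() dereferences at runtime. A hypothetical expansion, assuming CLK_REG_LIST_DCN35() is a list of CLK_SR_DCN35() entries (the list itself is not shown in this patch):

    /* illustrative expansion only */
    static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
            .CLK1_CLK_PLL_REQ   = mmCLK1_CLK_PLL_REQ,   /* 0x16E37 */
            .CLK1_CLK0_DFS_CNTL = mmCLK1_CLK0_DFS_CNTL, /* 0x16E69 */
    };

    /* REG(CLK1_CLK_PLL_REQ) then resolves to clk_mgr->regs->CLK1_CLK_PLL_REQ,
     * letting DCN3.5 and DCN3.51 share code while binding different offsets. */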
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b77333817f18..1648226586e2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -36,15 +36,11 @@
#include "dcn20/dcn20_clk_mgr.h"
-
-
#include "reg_helper.h"
#include "core_types.h"
#include "dcn35_smu.h"
#include "dm_helpers.h"
-/* TODO: remove this include once we ported over remaining clk mgr functions*/
-#include "dcn30/dcn30_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
@@ -55,35 +51,102 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F00
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B24B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
-#define regCLK1_CLK_PLL_REQ 0x0237
-#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
-#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define REG(reg) \
+ (clk_mgr->regs->reg)
-#define regCLK1_CLK2_BYPASS_CNTL 0x029c
-#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define BASE(seg) BASE_INNER(seg)
-#define regCLK5_0_CLK5_spll_field_8 0x464b
-#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
-#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
-#define REG(reg_name) \
- (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
#define TO_CLK_MGR_DCN35(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn35, base)
@@ -338,6 +401,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
dcn35_smu_set_dtbclk(clk_mgr, false);
+
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -355,11 +419,17 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, true);
- clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ int actual_dtbclk = 0;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
- clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+
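+	/* read the DTBCLK counter back; only commit the new state once HW reports a running clock */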
+ actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ if (actual_dtbclk) {
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
}
/* check that we're not already in D0 */
@@ -452,7 +522,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
- struct dc_context *ctx = clk_mgr->base.ctx;
/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
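As a reference for the conversion described above, a minimal sketch of the 8.16 fixed-point decode (illustrative only; it assumes the 48 MHz DFS reference that dcn35_clk_mgr_construct programs via dfs_ref_freq_khz):

    uint32_t pll_req_reg = REG_READ(CLK1_CLK_PLL_REQ);
    uint32_t fb_int  = (pll_req_reg & CLK1_CLK_PLL_REQ__FbMult_int_MASK)
                       >> CLK1_CLK_PLL_REQ__FbMult_int__SHIFT;
    uint32_t fb_frac = (pll_req_reg & CLK1_CLK_PLL_REQ__FbMult_frac_MASK)
                       >> CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT;

    /* VCO = ref * (int + frac / 2^16), with ref = 48000 kHz */
    uint32_t vco_khz = 48000 * fb_int +
                       (uint32_t)(((uint64_t)48000 * fb_frac) >> 16);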
@@ -512,22 +581,20 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_context *ctx = clk_mgr->base.ctx;
+
uint32_t ssc_enable;
- REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
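+	/* the masked value keeps spll_ssc_en in place (0x2000 when set), hence the nonzero test */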
- return ssc_enable == 1;
+ return ssc_enable != 0;
}
static void init_clk_states(struct clk_mgr *clk_mgr)
{
- struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
- clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -538,6 +605,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
init_clk_states(clk_mgr);
	// adjust the dp_dto reference clock if SSC is enabled, otherwise apply dprefclk
@@ -632,6 +700,7 @@ static struct wm_table lpddr5_wm_table = {
};
static DpmClocks_t_dcn35 dummy_clocks;
+static DpmClocks_t_dcn351 dummy_clocks_dcn351;
static struct dcn35_watermarks dummy_wms = { 0 };
@@ -642,10 +711,10 @@ static struct dcn35_ss_info_table ss_info_table = {
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
- struct dc_context *ctx = clk_mgr->base.ctx;
- uint32_t clock_source;
+ uint32_t clock_source = 0;
+
+ clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;
- REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
// If it's DFS mode, clock_source is 0.
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
@@ -755,6 +824,22 @@ static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static void dcn351_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn351_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+ memset(table, 0, sizeof(*table));
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -1093,6 +1178,57 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
+static void translate_to_DpmClocks_t_dcn35(struct dcn351_smu_dpm_clks *smu_dpm_clks_a,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks_b)
+{
+	/* translate between the two structures, copying only the clock tables we need */
+ uint8_t i;
+
+ if (smu_dpm_clks_a == NULL || smu_dpm_clks_b == NULL ||
+ smu_dpm_clks_a->dpm_clks == NULL || smu_dpm_clks_b->dpm_clks == NULL)
+ return;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DcfClocks[i] = smu_dpm_clks_a->dpm_clks->DcfClocks[i];
+
+ for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DispClocks[i] = smu_dpm_clks_a->dpm_clks->DispClocks[i];
+
+ for (i = 0; i < NUM_DPPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DppClocks[i] = smu_dpm_clks_a->dpm_clks->DppClocks[i];
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Freq[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Freq[i];
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Voltage[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Voltage[i];
+ }
+ for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].MemClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].MemClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].UClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].UClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].Voltage =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].Voltage;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].WckRatio =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].WckRatio;
+ }
+ smu_dpm_clks_b->dpm_clks->MaxGfxClk = smu_dpm_clks_a->dpm_clks->MaxGfxClk;
+ smu_dpm_clks_b->dpm_clks->MinGfxClk = smu_dpm_clks_a->dpm_clks->MinGfxClk;
+ smu_dpm_clks_b->dpm_clks->NumDcfClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDcfClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumDispClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDispClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumFclkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumFclkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumMemPstatesEnabled =
+ smu_dpm_clks_a->dpm_clks->NumMemPstatesEnabled;
+ smu_dpm_clks_b->dpm_clks->NumSocClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumSocClkLevelsEnabled;
+
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->SocClocks[i] = smu_dpm_clks_a->dpm_clks->SocClocks[i];
+ smu_dpm_clks_b->dpm_clks->SocVoltage[i] = smu_dpm_clks_a->dpm_clks->SocVoltage[i];
+ }
+}
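Design note: translating the DCN3.51 table into DpmClocks_t_dcn35, rather than teaching every consumer about both layouts, keeps a single internal DPM format; only the fetch path needs to know which ASIC it is running on.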
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@@ -1100,6 +1236,7 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct dcn351_smu_dpm_clks smu_dpm_clks_dcn351 = { 0 };
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1112,6 +1249,12 @@ void dcn35_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
+ if (ctx->dce_version == DCN_VERSION_3_5) {
+ clk_mgr->base.regs = &clk_mgr_regs_dcn35;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
+ }
+
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
@@ -1130,14 +1273,24 @@ void dcn35_clk_mgr_construct(
DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
-
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
-
ASSERT(smu_dpm_clks.dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ smu_dpm_clks_dcn351.dpm_clks = (DpmClocks_t_dcn351 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(DpmClocks_t_dcn351),
+ &smu_dpm_clks_dcn351.mc_address.quad_part);
+ if (smu_dpm_clks_dcn351.dpm_clks == NULL) {
+ smu_dpm_clks_dcn351.dpm_clks = &dummy_clocks_dcn351;
+ smu_dpm_clks_dcn351.mc_address.quad_part = 0;
+ }
+ }
+
clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver)
@@ -1166,7 +1319,11 @@ void dcn35_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
int i;
- dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ dcn351_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks_dcn351);
+ translate_to_DpmClocks_t_dcn35(&smu_dpm_clks_dcn351, &smu_dpm_clks);
+	} else {
+		dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+	}
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
@@ -1227,6 +1384,10 @@ void dcn35_clk_mgr_construct(
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
+ if (smu_dpm_clks_dcn351.dpm_clks && smu_dpm_clks_dcn351.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
+ smu_dpm_clks_dcn351.dpm_clks);
+
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
bool ips_support = false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
index 1203dc605b12..a12a9bf90806 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
@@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx,
void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+void dcn351_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
#endif //__DCN35_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 3fae13c73934..ab9d21ba0c43 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -126,18 +126,31 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t_dcn35;
-
-// Throttler Status Bitmask
-
-
-
-
-
-
-
-
-
-
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; // Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; // Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; // Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_dcn351;
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
@@ -163,6 +176,10 @@ struct dcn35_smu_dpm_clks {
union large_integer mc_address;
};
+struct dcn351_smu_dpm_clks {
+ DpmClocks_t_dcn351 *dpm_clks;
+ union large_integer mc_address;
+};
/* TODO: taken from vgh, may not be correct */
struct display_idle_optimization {
unsigned int df_request_disabled : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
index dbfdd3487da5..2e0d34fd7512 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
@@ -43,7 +43,9 @@
#define DALSMC_MSG_ActiveUclkFclk 0x18
#define DALSMC_MSG_IdleUclkFclk 0x19
#define DALSMC_MSG_SetUclkPstateAllow 0x1A
-#define DALSMC_Message_Count 0x1B
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#define DALSMC_Message_Count 0x1D
typedef enum {
FCLK_SWITCH_DISALLOW,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 8cfc5f435937..8082bb877611 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -141,6 +141,20 @@ static bool dcn401_is_ppclk_idle_dpm_enabled(struct clk_mgr_internal *clk_mgr, P
return ppclk_idle_dpm_enabled;
}
+static bool dcn401_is_df_throttle_opt_enabled(struct clk_mgr_internal *clk_mgr)
+{
+ bool is_df_throttle_opt_enabled = false;
+
+ if (ASICREV_IS_GC_12_0_1_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) &&
+ clk_mgr->smu_ver >= 0x663500) {
+ is_df_throttle_opt_enabled = !clk_mgr->base.ctx->dc->debug.force_subvp_df_throttle;
+ }
+
+ is_df_throttle_opt_enabled &= clk_mgr->smu_present;
+
+ return is_df_throttle_opt_enabled;
+}
+
/* Query SMU for all clock states for a particular clock */
static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0,
unsigned int *num_levels)
@@ -614,207 +628,6 @@ static void dcn401_update_clocks_update_dentist(
}
-static void dcn401_update_clocks_legacy(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
- struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
- bool update_dppclk = false;
- bool update_dispclk = false;
- bool enter_display_off = false;
- bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- bool force_reset = false;
- bool update_uclk = false, update_fclk = false;
- bool p_state_change_support;
- bool fclk_p_state_change_support;
- int total_plane_count;
-
- if (dc->work_arounds.skip_clock_update)
- return;
-
- if (clk_mgr_base->clks.dispclk_khz == 0 ||
- (dc->debug.force_clock_mode & 0x1)) {
- /* This is from resume or boot up, if forced_clock cfg option used,
- * we bypass program dispclk and DPPCLK, but need set them for S3.
- */
- force_reset = true;
-
- dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
-
- /* Force_clock_mode 0x1: force reset the clock even it is the same clock
- * as long as it is in Passive level.
- */
- }
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
-
- if (clk_mgr->smu_present) {
- if (enter_display_off == safe_to_lower)
- dcn401_smu_set_num_of_displays(clk_mgr, display_count);
-
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
-
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
-
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
-
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, true);
- }
- }
-
- if (dc->debug.force_min_dcfclk_mhz > 0)
- new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
- new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
- clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
- clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
- /* We don't actually care about socclk, don't notify SMU of hard min */
- clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
-
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
-
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = p_state_change_support;
- clk_mgr_base->clks.fw_based_mclk_switching = p_state_change_support && new_clocks->fw_based_mclk_switching;
-
- /* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
- if (!clk_mgr_base->clks.fclk_p_state_change_support &&
- update_fclk &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_FCLK)) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, false);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported */
- if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
- clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
- update_uclk = true;
- }
-
- /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
- if (clk_mgr_base->clks.p_state_change_support &&
- (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
- if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
- dpp_clock_lowered = true;
-
- clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
- clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz;
-
- if (clk_mgr->smu_present && !dpp_clock_lowered && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK))
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK, clk_mgr_base->clks.dppclk_khz);
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK))
- clk_mgr_base->clks.actual_dispclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz);
-
- update_dispclk = true;
- }
-
- if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- }
-
- /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
- if (!dc->debug.disable_dtb_ref_clk_switch &&
- should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- /* DCCG requires KHz precision for DTBCLK */
- clk_mgr_base->clks.ref_dtbclk_khz =
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
-
- dcn401_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
- }
-
- if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
- if (dpp_clock_lowered) {
- /* if clock is being lowered, increase DTO before lowering refclk */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK)) {
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK,
- clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower,
- clk_mgr_base->clks.actual_dppclk_khz);
- }
-
- } else {
- /* if clock is being raised, increase refclk before lowering DTO */
- if (update_dppclk || update_dispclk)
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
- * that we do not lower dto when it is not safe to lower. We do not need to
- * compare the current and new dppclk before calling this function.
- */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.actual_dppclk_khz);
- }
- }
-
- if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
- /*update dmcu for wait_loop count*/
- dmcu->funcs->set_psr_wait_loop(dmcu,
- clk_mgr_base->clks.dispclk_khz / 1000 / 7);
-}
-
static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned int num_steps)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -869,6 +682,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned
params->update_idle_hardmin_params.uclk_mhz,
params->update_idle_hardmin_params.fclk_mhz);
break;
+ case CLK_MGR401_UPDATE_SUBVP_HARDMINS:
+ dcn401_smu_set_subvp_uclk_fclk_hardmin(
+ clk_mgr_internal,
+ params->update_idle_hardmin_params.uclk_mhz,
+ params->update_idle_hardmin_params.fclk_mhz);
+ break;
case CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK:
dcn401_smu_set_min_deep_sleep_dcef_clk(
clk_mgr_internal,
@@ -945,15 +764,21 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
bool update_active_uclk = false;
bool update_idle_fclk = false;
bool update_idle_uclk = false;
+ bool update_subvp_prefetch_dramclk = false;
+ bool update_subvp_prefetch_fclk = false;
bool is_idle_dpm_enabled = dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_FCLK);
+ bool is_df_throttle_opt_enabled = is_idle_dpm_enabled &&
+ dcn401_is_df_throttle_opt_enabled(clk_mgr_internal);
int total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
int active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz);
int active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz);
int idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz);
int idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz);
+ int subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ int subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
unsigned int num_steps = 0;
@@ -982,15 +807,15 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
update_active_fclk = true;
update_idle_fclk = true;
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = true;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
- }
+	/* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW (message not supported on DCN401) */
+ // if (clk_mgr_base->clks.fclk_p_state_change_support) {
+ // /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ // if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = true;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
+ // }
}
if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
@@ -1109,6 +934,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_dramclk_khz, clk_mgr_base->clks.subvp_prefetch_dramclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_dramclk_khz = new_clocks->subvp_prefetch_dramclk_khz;
+ update_subvp_prefetch_dramclk = true;
+ subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ }
+
/* FCLK */
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
@@ -1129,6 +960,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_fclk_khz, clk_mgr_base->clks.subvp_prefetch_fclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_fclk_khz = new_clocks->subvp_prefetch_fclk_khz;
+ update_subvp_prefetch_fclk = true;
+ subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
+ }
+
/* When idle DPM is enabled, need to send active and idle hardmins separately */
/* CLK_MGR401_UPDATE_ACTIVE_HARDMINS */
if ((update_active_uclk || update_active_fclk) && is_idle_dpm_enabled) {
@@ -1146,6 +983,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
num_steps++;
}
+ /* CLK_MGR401_UPDATE_SUBVP_HARDMINS */
+ if ((update_subvp_prefetch_dramclk || update_subvp_prefetch_fclk) && is_df_throttle_opt_enabled) {
+ block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = subvp_prefetch_dramclk_mhz;
+ block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = subvp_prefetch_fclk_mhz;
+ block_sequence[num_steps].func = CLK_MGR401_UPDATE_SUBVP_HARDMINS;
+ num_steps++;
+ }
+
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (update_active_uclk || update_idle_uclk) {
if (!is_idle_dpm_enabled) {
@@ -1178,14 +1023,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
// (*num_steps)++;
// }
- /* disable FCLK P-State support if needed */
- if (!fclk_p_state_change_support &&
- should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = false;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
+	/* disable FCLK P-State support if needed (message not supported on DCN401) */
+ // if (!fclk_p_state_change_support &&
+ // should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
+ // dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = false;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
}
if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching &&
@@ -1366,11 +1211,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->debug.enable_legacy_clock_update) {
- dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
- return;
- }
-
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
@@ -1505,6 +1345,20 @@ static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool curren
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
+static int dcn401_get_hard_min_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+}
+
+static int dcn401_get_hard_min_fclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz;
+}
+
/* Get current memclk states, update bounding box */
static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -1549,6 +1403,15 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
if (clk_mgr->dpm_present && !num_levels)
clk_mgr->dpm_present = false;
+ clk_mgr_base->bw_params->num_channels = dcn401_smu_get_num_of_umc_channels(clk_mgr);
+ if (clk_mgr_base->ctx->dc_bios) {
+ /* use BIOS values if none provided by PMFW */
+ if (clk_mgr_base->bw_params->num_channels == 0) {
+ clk_mgr_base->bw_params->num_channels = clk_mgr_base->ctx->dc_bios->vram_info.num_chans;
+ }
+ clk_mgr_base->bw_params->dram_channel_width_bytes = clk_mgr_base->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ }
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
@@ -1638,6 +1501,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.enable_pme_wa = dcn401_enable_pme_wa,
.is_smu_present = dcn401_is_smu_present,
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
+ .get_hard_min_memclk = dcn401_get_hard_min_memclk,
+ .get_hard_min_fclk = dcn401_get_hard_min_fclk,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 8b0461992b22..6c9ae5ca2c7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -90,6 +90,7 @@ enum dcn401_clk_mgr_block_sequence_func {
CLK_MGR401_UPDATE_DTBCLK_DTO,
CLK_MGR401_UPDATE_DENTIST,
CLK_MGR401_UPDATE_PSR_WAIT_LOOP,
+ CLK_MGR401_UPDATE_SUBVP_HARDMINS,
};
struct dcn401_clk_mgr_block_sequence {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 7700477d019b..21c35528f61f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -21,6 +21,14 @@
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+/* temporary define */
+#ifndef DALSMC_MSG_SubvpUclkFclk
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#endif
+#ifndef DALSMC_MSG_GetNumUmcChannels
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#endif
+
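These fallback defines mirror the message IDs added to dalsmc.h earlier in this patch; they keep the file building against a dalsmc.h that predates the SubVP and UMC-channel messages.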
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
@@ -296,6 +304,24 @@ bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
return success;
}
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz)
+{
+ uint32_t response = 0;
+ bool success;
+
+	/* 15:0 for uclk, 31:16 for fclk */
+ uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz;
+
+	smu_print("SMU Set SubVP hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz);
+
+ success = dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SubvpUclkFclk, param, &response);
+
+ return success;
+}
+
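A worked packing example with illustrative values:

    uint16_t uclk_freq_mhz = 1000; /* 0x03E8 */
    uint16_t fclk_freq_mhz = 1200; /* 0x04B0 */
    uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz; /* 0x04B003E8 */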
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
{
smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz);
@@ -311,3 +337,14 @@ void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t n
dcn401_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
+
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr)
+{
+ unsigned int response = 0;
+
+ dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_GetNumUmcChannels, 0, &response);
+
+ smu_print("SMU Get Num UMC Channels: num_umc_channels = %d\n", response);
+
+ return response;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index 651fb8d62864..e02eb1294b37 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -23,7 +23,11 @@ bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t uclk_freq_mhz,
uint16_t fclk_freq_mhz);
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz);
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 49fe7dcf9372..f84e795e35f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -579,7 +579,7 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
- struct rect *rect, bool is_stop)
+ struct rect *rect, uint8_t phy_id, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
@@ -598,7 +598,7 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
if (i == MAX_PIPES)
return false;
- mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+ mux_mapping.phy_output_num = phy_id;
mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
dmcu = dc->res_pool->dmcu;
@@ -615,6 +615,68 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
return true;
}
+
+static void
+dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
+{
+ int i;
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;
+
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+
+ if (stop) {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
+ }
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+bool
+dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window, uint8_t phy_id, bool stop)
+{
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = phy_id;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmub_srv = dc->ctx->dmub_srv;
+
+	/* forward to DMUB only; no DMCU support */
+ if (dmub_srv)
+ dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
+ else
+ return false;
+
+ return true;
+}
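A hypothetical caller sketch; the names stream and phy_id are assumed, and the field usage mirrors the packing loop in dc_stream_forward_dmub_multiple_crc_window() above:

    struct crc_window windows[MAX_CRC_WINDOW_NUM] = {0};

    /* program one 256x256 ROI at the top-left corner; the rest stay disabled */
    windows[0].rect.x = 0;
    windows[0].rect.y = 0;
    windows[0].rect.width = 256;
    windows[0].rect.height = 256;
    windows[0].enable = true;

    if (!dc_stream_forward_multiple_crc_window(stream, windows, phy_id, false))
            DC_LOG_WARNING("CRC window forward failed (stream not found or no DMUB)\n");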
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
@@ -625,15 +687,17 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
* @enable: Enable CRC if true, disable otherwise.
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
+ * @idx: Index of the CRC engine instance to capture on
+ * @reset: Reset the CRC engine before configuring it
*
- * By default, only CRC0 is configured, and the entire frame is used to
- * calculate the CRC.
+ * By default, the entire frame is used to calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
- struct crc_params *crc_window, bool enable, bool continuous)
+ struct crc_params *crc_window, bool enable, bool continuous,
+ uint8_t idx, bool reset)
{
struct pipe_ctx *pipe;
struct crc_params param;
@@ -677,6 +741,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.continuous_mode = continuous;
param.enable = enable;
+ param.crc_eng_inst = idx;
+ param.reset = reset;
+
tg = pipe->stream_res.tg;
/* Only call if supported */
@@ -691,6 +758,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
*
* @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
+ * @idx: Index of the CRC engine to read the CRC from
* @r_cr: CRC value for the red component.
* @g_y: CRC value for the green component.
* @b_cb: CRC value for the blue component.
@@ -700,7 +768,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* Return:
* %false if stream is not found, or if CRCs are not enabled.
*/
-bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
int i;
@@ -721,7 +789,7 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
tg = pipe->stream_res.tg;
if (tg->funcs->get_crc)
- return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
DC_LOG_WARNING("CRC capture not supported.");
return false;
}
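A hedged sketch of the new multi-instance flow, assuming a NULL crc_window selects the default full-frame window as the kernel-doc above suggests:

    uint32_t r_cr = 0, g_y = 0, b_cb = 0;

    /* one-shot capture on CRC engine 1, resetting the engine first */
    if (dc_stream_configure_crc(dc, stream, NULL, true, false, 1, true) &&
        dc_stream_get_crc(dc, stream, 1, &r_cr, &g_y, &b_cb))
            DC_LOG_DEBUG("CRC1: r_cr=%u g_y=%u b_cb=%u\n", r_cr, g_y, b_cb);

In practice a caller would wait for at least one frame between configuring the engine and reading the CRC back.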
@@ -1173,6 +1241,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
+ get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -2063,7 +2133,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);
@@ -2153,6 +2223,11 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ /* revalidate streams */
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+
dc_stream_log(dc, stream);
set[i].stream = stream;
@@ -2487,7 +2562,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(union dc_tiling_info)) != 0) {
+ sizeof(struct dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
@@ -2982,6 +3057,10 @@ static void copy_surface_update_to_plane(
if (srf_update->cursor_csc_color_matrix)
surface->cursor_csc_color_matrix =
*srf_update->cursor_csc_color_matrix;
+
+ if (srf_update->bias_and_scale.bias_and_scale_valid)
+ surface->bias_and_scale =
+ srf_update->bias_and_scale;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -4510,7 +4589,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_state *old_current_state = dc->current_state;
- struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
int surface_count;
/*
@@ -5307,11 +5386,9 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
-
break;
default:
ASSERT(dc->current_state->stream_count == 0);
-
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
dc_state_destruct(dc->current_state);
@@ -5435,6 +5512,11 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
+ int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
+ enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
+ struct pipe_ctx *pipe = NULL;
+ struct dc_state *context = dc->current_state;
+
if (dc->debug.disable_idle_power_optimizations) {
DC_LOG_DEBUG("%s: disabled\n", __func__);
return;
@@ -5459,6 +5541,23 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
dc->idle_optimizations_allowed = allow;
DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
}
+
+	// log idle clocks and SubVP pipe types at idle optimization time
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
+ idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
+ idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ }
+
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
+ subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
+
}
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
@@ -6056,7 +6155,7 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
bool subvp_sw_cursor_req = false;
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
subvp_sw_cursor_req = true;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 252af83e34a5..6eb9bae3af91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -425,6 +425,44 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for VABC */
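+/* mapping: PWM backlight shows red, AMD AUX green, VESA AUX blue; black when no eDP link is found */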
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ struct dc_link *edp_link = NULL;
+
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link) {
+ if (pipe_ctx->stream->link->connector_signal == SIGNAL_TYPE_EDP)
+ edp_link = pipe_ctx->stream->link;
+ }
+
+ if (edp_link) {
+ switch (edp_link->backlight_control_type) {
+ case BACKLIGHT_CONTROL_PWM:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_AMD_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_VESA_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ }
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 457d60eeb486..c1b79b379447 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -125,6 +125,14 @@ uint32_t dc_link_bandwidth_kbps(
return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ return link->dc->link_srv->dp_required_hblank_size_bytes(link,
+ audio_params);
+}
+
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
dc->link_srv->get_cur_res_map(dc, map);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 626f75b6ad00..298668e9729c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
- if (!plane_state->dst_rect.width ||
+ if (!plane_state ||
+ !plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
@@ -3388,10 +3389,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
@@ -4478,7 +4482,7 @@ static void set_hfvs_info_packet(
static void adaptive_sync_override_dp_info_packets_sdp_line_num(
const struct dc_crtc_timing *timing,
struct enc_sdp_line_num *sdp_line_num,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
uint32_t asic_blank_start = 0;
uint32_t asic_blank_end = 0;
@@ -4493,8 +4497,8 @@ static void adaptive_sync_override_dp_info_packets_sdp_line_num(
asic_blank_end = (asic_blank_start - tg->v_border_bottom -
tg->v_addressable - tg->v_border_top);
- if (pipe_dlg_param->vstartup_start > asic_blank_end) {
- v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ if (vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (vstartup_start - asic_blank_end));
sdp_line_num->adaptive_sync_line_num_valid = true;
sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
} else {
@@ -4507,7 +4511,7 @@ static void set_adaptive_sync_info_packet(
struct dc_info_packet *info_packet,
const struct dc_stream_state *stream,
struct encoder_info_frame *info_frame,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
if (!stream->adaptive_sync_infopacket.valid)
return;
@@ -4515,7 +4519,7 @@ static void set_adaptive_sync_info_packet(
adaptive_sync_override_dp_info_packets_sdp_line_num(
&stream->timing,
&info_frame->sdp_line_num,
- pipe_dlg_param);
+ vstartup_start);
*info_packet = stream->adaptive_sync_infopacket;
}
@@ -4548,6 +4552,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ unsigned int vstartup_start = 0;
/* default all packets to invalid */
info->avi.valid = false;
@@ -4561,6 +4566,9 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
+ if (pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe)
+ vstartup_start = pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
+
/* HDMI and DP have different info packets */
if (dc_is_hdmi_signal(signal)) {
set_avi_info_frame(&info->avi, pipe_ctx);
@@ -4582,7 +4590,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_adaptive_sync_info_packet(&info->adaptive_sync,
pipe_ctx->stream,
info,
- &pipe_ctx->pipe_dlg_param);
+ vstartup_start);
}
patch_gamut_packet_checksum(&info->gamut);
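
The two hunks above remove the last DML type from the info-packet path: instead of dereferencing a `_vcs_dpi_display_pipe_dest_params_st`, callers pass a plain `vstartup_start`, obtained through a new optional `res_pool->funcs->get_vstartup_for_pipe` hook (guarded with a NULL check as shown). A sketch of what an implementation might look like, assuming a pool that keeps the legacy `pipe_dlg_param` layout on `pipe_ctx`:

    /* Illustrative hook implementation; real pools derive vstartup
     * from their own DML bookkeeping. */
    static unsigned int example_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
    {
        return pipe_ctx->pipe_dlg_param.vstartup_start;
    }
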
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e006f816ff2f..1b2cce127981 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -483,9 +483,9 @@ bool dc_state_add_plane(
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ } else if (stream_status->plane_count == MAX_SURFACES) {
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
+ plane_state, MAX_SURFACES);
goto out;
} else if (!otg_master_pipe) {
goto out;
@@ -600,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == stream) {
@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == phantom_stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 55dc482d9b36..e8134c47fe0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -37,6 +37,8 @@
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+#ifndef MAX
#define MAX(x, y) ((x > y) ? x : y)
#endif
@@ -605,17 +607,6 @@ bool dc_stream_remove_writeback(struct dc *dc,
return true;
}
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info)
-{
- dc_exit_ips_for_hw_access(dc);
-
- if (dc->hwss.mmhubbub_warmup)
- return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
- else
- return false;
-}
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ccbb15f1638c..f3471d45b312 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -83,13 +83,6 @@ uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane
/*******************************************************************************
* Public functions
******************************************************************************/
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id)
-{
- plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
- /*register_flip_interrupt(surface);*/
-}
-
struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
@@ -277,4 +270,50 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
kref_get(&lut->refcount);
}
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct dc *dc;
+ int i;
+
+ if (!plane_state)
+ return;
+
+ dc = plane_state->ctx->dc;
+ if (!dc || !dc->current_state)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx)
+ continue;
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX) { /* DCN and newer: HUBP path */
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && hubp->funcs->hubp_clear_tiling)
+ hubp->funcs->hubp_clear_tiling(hubp);
+
+ /* force page flip to see the new content of the framebuffer */
+ hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
+ &plane_state->address,
+ true);
+ } else {
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ if (!mi)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && mi->funcs->mem_input_clear_tiling)
+ mi->funcs->mem_input_clear_tiling(mi);
+
+ /* force page flip to see the new content of the framebuffer */
+ mi->funcs->mem_input_program_surface_flip_and_addr(mi,
+ &plane_state->address,
+ true);
+ }
+ }
+}
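
The new helper is meant for panic paths: it walks every pipe in the current state and, for each active plane resource, optionally forces the surface back to linear and reprograms the flip address so a freshly drawn panic image becomes visible. A hedged usage sketch; the caller name and flow are hypothetical:

    /* Hypothetical panic-path caller: the panic screen has been drawn
     * into the memory behind plane_state->address, written linearly,
     * so tiling must be cleared before the forced flip. */
    static void example_panic_flush(struct dc_plane_state *plane_state)
    {
        dc_plane_force_update_for_panic(plane_state, true /* clear_tiling */);
    }
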
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e9b9126c0401..053481ab69ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,9 +55,9 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.310"
+#define DC_VER "3.2.316"
-#define MAX_SURFACES 3
+#define MAX_SURFACES 4
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
@@ -463,6 +463,7 @@ struct dc_config {
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
bool allow_0_dtb_clk;
bool use_assr_psp_message;
@@ -471,6 +472,7 @@ struct dc_config {
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
bool set_pipe_unlock_order;
+ bool enable_dpia_pre_training;
};
enum visual_confirm {
@@ -487,6 +489,7 @@ enum visual_confirm {
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
+ VISUAL_CONFIRM_VABC = 21,
};
enum dc_psr_power_opts {
@@ -628,6 +631,8 @@ struct dc_clocks {
int bw_dispclk_khz;
int idle_dramclk_khz;
int idle_fclk_khz;
+ int subvp_prefetch_dramclk_khz;
+ int subvp_prefetch_fclk_khz;
};
struct dc_bw_validation_profile {
@@ -772,7 +777,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_dpia_pre_training:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -1055,8 +1061,8 @@ struct dc_debug_options {
bool dml21_force_pstate_method;
uint32_t dml21_force_pstate_method_values[MAX_PIPES];
uint32_t dml21_disable_pstate_method_mask;
+ union fw_assisted_mclk_switch_version fams_version;
union dmub_fams2_global_feature_config fams2_config;
- bool enable_legacy_clock_update;
unsigned int force_cositing;
unsigned int disable_spl;
unsigned int force_easf;
@@ -1070,6 +1076,7 @@ struct dc_debug_options {
bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
+ bool force_subvp_df_throttle;
};
@@ -1300,7 +1307,7 @@ struct dc_plane_state {
struct rect clip_rect;
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
@@ -1371,7 +1378,7 @@ struct dc_plane_state {
struct dc_plane_info {
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -1398,7 +1405,7 @@ struct dc_scratch_space {
* store current value in plane states so we can still recover
* a valid current state during dc update.
*/
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state plane_states[MAX_SURFACES];
struct dc_stream_state stream_state;
};
@@ -1526,6 +1533,7 @@ struct dc_surface_update {
const struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
+ struct dc_bias_and_scale bias_and_scale;
};
/*
@@ -2019,6 +2027,24 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
+struct dp_audio_bandwidth_params {
+ const struct dc_crtc_timing *crtc_timing;
+ enum dp_link_encoding link_encoding;
+ uint32_t channel_count;
+ uint32_t sample_rate_hz;
+};
+
+/* The function calculates the minimum size of hblank (in bytes) needed to
+ * support the specified channel count and sample rate combination, given the
+ * link encoding and timing to be used. This calculation is not supported
+ * for 8b/10b SST.
+ *
+ * return - min hblank size in bytes, 0 if 8b/10b SST.
+ */
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -2378,6 +2404,13 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_hblank_expansion_caps {
+ // 'true' if these are virtual DPCD's HBlank expansion caps (immediately upstream of sink in MST topology),
+ // 'false' if they are sink's HBlank expansion caps
+ bool is_virtual_dpcd_hblank_expansion;
+ struct hblank_expansion_dpcd_caps dpcd_caps;
+};
+
struct dc_sink_fec_caps {
bool is_rx_fec_supported;
bool is_topology_fec_supported;
@@ -2404,6 +2437,7 @@ struct dc_sink {
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
+ struct dc_sink_hblank_expansion_caps hblank_expansion_caps;
bool is_vsc_sdp_colorimetry_supported;
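
Note the bookkeeping in `union dpia_debug_options` above: every new debug bit must shrink `reserved` by one (here 25 -> 24 for bit 7) so the bitfield view keeps packing into the 32-bit `raw` member. An illustrative compile-time guard, not part of the patch (kernel code would pull static_assert from <linux/build_bug.h>):

    static_assert(sizeof(union dpia_debug_options) == sizeof(uint32_t),
                  "dpia_debug_options bits must keep totalling 32");
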
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f90fc154549a..44ff9abe2880 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1245,7 +1245,7 @@ static int count_active_streams(const struct dc *dc)
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
- if (stream && !stream->dpms_off)
+ if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
count += 1;
}
@@ -1694,10 +1694,10 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
{
uint8_t num_cmds = 1;
uint32_t i;
- union dmub_rb_cmd cmd[MAX_STREAMS + 1];
+ union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
- memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
+ memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
@@ -1714,17 +1714,26 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* construct per-stream configs */
for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
- struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;
/* configure command header */
- stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
- stream_cmd->header.multi_cmd_pending = 1;
- /* copy stream static state */
- memcpy(&stream_cmd->config.stream,
- &context->bw_ctx.bw.dcn.fams2_stream_params[i],
- sizeof(struct dmub_fams2_stream_static_state));
+ stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.multi_cmd_pending = 1;
+ stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.multi_cmd_pending = 1;
+ /* copy stream static base state */
+ memcpy(&stream_base_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(union dmub_cmd_fams2_config));
+ /* copy stream static sub state */
+ memcpy(&stream_sub_state_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
+ sizeof(union dmub_cmd_fams2_config));
}
}
@@ -1735,8 +1744,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
/* set multi pending for global, and unset for last stream cmd */
global_cmd->header.multi_cmd_pending = 1;
- cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
- num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
+ cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
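
The rework above splits each stream's FAMS2 state into a base command and a sub-state command, so the list sent to DMUB is laid out as: cmd[0] = global config, cmd[1..N] = per-stream base state, cmd[N+1..2N] = per-stream sub state, with N = num_streams. Every command keeps `multi_cmd_pending = 1` except the last (index 2N), which closes the transaction. The index map, restated as a sketch:

    /* Illustrative index arithmetic for the FAMS2 command list. */
    uint32_t N = context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
    uint32_t i;

    for (i = 0; i < N; i++) {
        struct dmub_rb_cmd_fams2 *base = &cmd[1 + i].fams2_config;
        struct dmub_rb_cmd_fams2 *sub  = &cmd[1 + N + i].fams2_config;
        /* filled as in the hunk above */
    }
    /* cmd[2 * N] is the only entry with multi_cmd_pending == 0 */
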
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 8dd6eb044829..94ce8fe74481 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -969,6 +969,21 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dp_receive_port0_cap {
+ struct {
+ uint8_t RESERVED :1;
+ uint8_t LOCAL_EDID_PRESENT :1;
+ uint8_t ASSOCIATED_TO_PRECEDING_PORT:1;
+ uint8_t HBLANK_EXPANSION_CAPABLE :1;
+ uint8_t BUFFER_SIZE_UNIT :1;
+ uint8_t BUFFER_SIZE_PER_PORT :1;
+ uint8_t HBLANK_REDUCTION_CAPABLE :1;
+ uint8_t RESERVED2:1;
+ uint8_t BUFFER_SIZE:8;
+ } bits;
+ uint8_t raw[2];
+};
+
union dpcd_max_uncompressed_pixel_rate_cap {
struct {
uint16_t max_uncompressed_pixel_rate_cap :15;
@@ -1193,6 +1208,7 @@ struct dpcd_caps {
struct replay_info pr_info;
uint16_t edp_oled_emission_rate;
+ union dp_receive_port0_cap receive_port0_cap;
};
union dpcd_sink_ext_caps {
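
The two-byte `dp_receive_port0_cap` union mirrors the DPCD RECEIVE_PORT0 capability pair: `raw[0]` carries the capability bits, `raw[1]` the BUFFER_SIZE field. A parsing sketch; the read helper and register offset are assumptions for illustration, and the size decode follows the "(value + 1) * 32" encoding documented with `hblank_expansion_dpcd_caps` below in dc_types.h:

    /* Sketch: fill the union from a raw DPCD read (offset assumed). */
    union dp_receive_port0_cap cap = { .raw = { 0 } };

    core_link_read_dpcd(link, DP_RECEIVE_PORT_0_CAP_0,
                        cap.raw, sizeof(cap.raw));

    if (cap.bits.HBLANK_EXPANSION_CAPABLE) {
        /* units (bytes vs pixels) and scope (port vs lane) come from
         * BUFFER_SIZE_UNIT and BUFFER_SIZE_PER_PORT respectively */
        uint32_t buffer_size = (cap.bits.BUFFER_SIZE + 1) * 32;
    }
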
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9014c2409817..9d18f1c08079 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -94,6 +94,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps);
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing);
+
/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
* and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
* Hardware/specs limitation should not be writable by DM.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c10567ec1c81..5ac55601a6da 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -341,89 +341,101 @@ enum swizzle_mode_addr3_values {
DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX
};
-union dc_tiling_info {
-
- struct {
- /* Specifies the number of memory banks for tiling
- * purposes.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 2,4,8,16
- */
- unsigned int num_banks;
- /* Specifies the number of tiles in the x direction
- * to be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_width;
- unsigned int bank_width_c;
- /* Specifies the number of tiles in the y direction to
- * be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_height;
- unsigned int bank_height_c;
- /* Specifies the macro tile aspect ratio. Only applies
- * to 2D and 3D tiling modes.
- */
- unsigned int tile_aspect;
- unsigned int tile_aspect_c;
- /* Specifies the number of bytes that will be stored
- * contiguously for each tile.
- * If the tile data requires more storage than this
- * amount, it is split into multiple slices.
- * This field must not be larger than
- * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
- * Only applies to 2D and 3D tiling modes.
- * For color render targets, TILE_SPLIT >= 256B.
- */
- enum tile_split_values tile_split;
- enum tile_split_values tile_split_c;
- /* Specifies the addressing within a tile.
- * 0x0 - DISPLAY_MICRO_TILING
- * 0x1 - THIN_MICRO_TILING
- * 0x2 - DEPTH_MICRO_TILING
- * 0x3 - ROTATED_MICRO_TILING
- */
- enum tile_mode_values tile_mode;
- enum tile_mode_values tile_mode_c;
- /* Specifies the number of pipes and how they are
- * interleaved in the surface.
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- unsigned int pipe_config;
- /* Specifies the tiling mode of the surface.
- * THIN tiles use an 8x8x1 tile size.
- * THICK tiles use an 8x8x4 tile size.
- * 2D tiling modes rotate banks for successive Z slices
- * 3D tiling modes rotate pipes and banks for Z slices
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- enum array_mode_values array_mode;
- } gfx8;
+enum dc_gfxversion {
+ DcGfxVersion7 = 0,
+ DcGfxVersion8,
+ DcGfxVersion9,
+ DcGfxVersion10,
+ DcGfxVersion11,
+ DcGfxAddr3,
+ DcGfxVersionUnknown
+};
+
+ struct dc_tiling_info {
+ unsigned int gfxversion; // Specifies which part of the union to use. Must use the dc_gfxversion enum values (DcGfxVersion*)
+ union {
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
- struct {
- enum swizzle_mode_values swizzle;
- unsigned int num_pipes;
- unsigned int max_compressed_frags;
- unsigned int pipe_interleave;
-
- unsigned int num_banks;
- unsigned int num_shader_engines;
- unsigned int num_rb_per_se;
- bool shaderEnable;
-
- bool meta_linear;
- bool rb_aligned;
- bool pipe_aligned;
- unsigned int num_pkrs;
- } gfx9;/*gfx9, gfx10 and above*/
- struct {
- enum swizzle_mode_addr3_values swizzle;
- } gfx_addr3;/*gfx with addr3 and above*/
+ struct {
+ enum swizzle_mode_values swizzle;
+ unsigned int num_pipes;
+ unsigned int max_compressed_frags;
+ unsigned int pipe_interleave;
+
+ unsigned int num_banks;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ bool shaderEnable;
+
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ unsigned int num_pkrs;
+ } gfx9; /* gfx9, gfx10 and above */
+ struct {
+ enum swizzle_mode_addr3_values swizzle;
+ } gfx_addr3; /* gfx with addr3 and above */
+ };
};
/* Rotation angle */
@@ -975,6 +987,9 @@ struct dc_crtc_timing {
struct dc_crtc_timing_flags flags;
uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */
struct dc_dsc_config dsc_cfg;
+
+ /* The number of pixels that HBlank has been expanded by from the original EDID timing. */
+ uint32_t expanded_hblank;
};
enum trigger_delay {
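
`dc_tiling_info` is now a tagged union: the new `gfxversion` field selects which member of the anonymous union is valid, so producers must set the discriminator before filling `gfx8`, `gfx9`, or `gfx_addr3`, and consumers must switch on it before reading. An initialization sketch for a gfx9-class surface (field values illustrative):

    struct dc_tiling_info tiling = { 0 };

    tiling.gfxversion = DcGfxVersion9;      /* discriminator first */
    tiling.gfx9.swizzle = DC_SW_64KB_S;     /* illustrative swizzle */
    tiling.gfx9.num_pipes = 4;              /* from GB_ADDR_CONFIG */
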
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index bd37ec82b42d..fabcefeda288 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -34,4 +34,7 @@ const struct dc_plane_status *dc_plane_get_status(
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 0e310fd48b5c..3518eb1b8cd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -64,6 +64,13 @@ static void populate_inits_from_splinits(struct scl_inits *inits,
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
+static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format, const enum pixel_format pixel_format)
+{
+ if (pixel_format < PIXEL_FORMAT_INVALID)
+ *spl_pixel_format = (enum spl_pixel_format)pixel_format;
+ else
+ *spl_pixel_format = SPL_PIXEL_FORMAT_INVALID;
+}
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
/// @param spl_in
@@ -89,7 +96,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->callbacks = dcn2_spl_callbacks;
}
// Make format field from spl_in point to plane_res scl_data format
- spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format;
+ populate_splformat_from_format(&spl_in->basic_in.format, pipe_ctx->plane_res.scl_data.format);
// Make view_format from basic_out point to view_format from stream
spl_in->basic_out.view_format = (enum spl_view_3d)stream->view_format;
// Populate spl input basic input clip rect from plane state clip rect
@@ -108,12 +115,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_in.horizontal_mirror = plane_state->horizontal_mirror;
// Calculate horizontal splits and split index
- spl_in->basic_in.mpc_combine_h = resource_get_mpc_slice_count(pipe_ctx);
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned = false;
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices =
+ resource_get_mpc_slice_count(pipe_ctx);
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- spl_in->basic_in.mpc_combine_v = 0;
+ spl_in->basic_in.mpc_h_slice_index = 0;
else
- spl_in->basic_in.mpc_combine_v = resource_get_mpc_slice_index(pipe_ctx);
+ spl_in->basic_in.mpc_h_slice_index = resource_get_mpc_slice_index(pipe_ctx);
populate_splrect_from_rect(&spl_in->basic_out.odm_slice_rect, &odm_slice_src);
spl_in->basic_out.odm_combine_factor = 0;
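
`populate_splformat_from_format()` replaces a bare enum cast with a range check, so a `pixel_format` value outside the known range maps to `SPL_PIXEL_FORMAT_INVALID` instead of leaking an out-of-range value into SPL. The cast itself is only value-preserving while the two enums stay in lockstep; an illustrative compile-time pin (enumerator names assumed to mirror each other) would catch drift:

    static_assert((int)SPL_PIXEL_FORMAT_ARGB8888 == (int)PIXEL_FORMAT_ARGB8888,
                  "spl_pixel_format must mirror pixel_format numbering");
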
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 413970588a26..3e303c7808fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,7 +56,7 @@ struct dc_stream_status {
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state *plane_states[MAX_SURFACES];
bool is_abm_supported;
struct mall_stream_config mall_stream_config;
bool fpo_in_use;
@@ -447,10 +447,6 @@ enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info);
-
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -541,17 +537,26 @@ bool dc_stream_get_crtc_position(struct dc *dc,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
+ uint8_t phy_id,
bool is_stop);
+
+bool dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window,
+ uint8_t phy_id,
+ bool stop);
#endif
bool dc_stream_configure_crc(struct dc *dc,
struct dc_stream_state *stream,
struct crc_params *crc_window,
bool enable,
- bool continuous);
+ bool continuous,
+ uint8_t idx,
+ bool reset);
bool dc_stream_get_crc(struct dc *dc,
struct dc_stream_state *stream,
+ uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
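
The header changes above widen the CRC API for secure display: `idx` selects one of the per-CRTC CRC engines (two with this patch, see MAX_CRC_WINDOW_NUM below in dc_types.h) and `reset` forces the engine to be disabled and reprogrammed. A hedged caller sketch; window setup is omitted and names are illustrative:

    /* Hypothetical secure-display caller for engine 0. */
    struct crc_params params = { 0 };     /* windows configured elsewhere */
    uint32_t r_cr, g_y, b_cb;
    uint8_t idx = 0;                      /* CRC engine instance */

    if (dc_stream_configure_crc(dc, stream, &params,
                                true /* enable */, true /* continuous */,
                                idx, false /* reset */) &&
        dc_stream_get_crc(dc, stream, idx, &r_cr, &g_y, &b_cb)) {
        /* consume the per-engine CRC triple */
    }
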
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index edf4df1d03b5..0c2aa91f0a11 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -76,7 +76,6 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
@@ -875,6 +874,14 @@ struct dsc_dec_dpcd_caps {
bool is_dp; /* Decoded format */
};
+struct hblank_expansion_dpcd_caps {
+ bool expansion_supported;
+ bool reduction_supported;
+ bool buffer_unit_bytes; /* True: buffer size in bytes. False: buffer size in pixels */
+ bool buffer_per_port; /* True: buffer size per port. False: buffer size per lane */
+ uint32_t buffer_size; /* Add 1 to value and multiply by 32 */
+};
+
struct dc_golden_table {
uint16_t dc_golden_table_ver;
uint32_t aux_dphy_rx_control0_val;
@@ -932,10 +939,17 @@ enum backlight_control_type {
};
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#define MAX_CRC_WINDOW_NUM 2
+
struct otg_phy_mux {
uint8_t phy_output_num;
uint8_t otg_output_num;
};
+
+struct crc_window {
+ struct rect rect;
+ bool enable;
+};
#endif
enum dc_detect_reason {
@@ -1052,10 +1066,13 @@ enum replay_FW_Message_type {
union replay_error_status {
struct {
- unsigned char STATE_TRANSITION_ERROR :1;
- unsigned char LINK_CRC_ERROR :1;
- unsigned char DESYNC_ERROR :1;
- unsigned char RESERVED :5;
+ unsigned int STATE_TRANSITION_ERROR :1;
+ unsigned int LINK_CRC_ERROR :1;
+ unsigned int DESYNC_ERROR :1;
+ unsigned int RESERVED_3 :1;
+ unsigned int LOW_RR_INCORRECT_VTOTAL :1;
+ unsigned int NO_DOUBLED_RR :1;
+ unsigned int RESERVED_6_7 :2;
} bits;
unsigned char raw;
};
@@ -1102,6 +1119,8 @@ struct replay_config {
union replay_error_status replay_error_status;
/* Replay Low Hz enable Options */
union replay_low_refresh_rate_enable_options low_rr_enable_options;
+ /* Replay coasting vtotal is within low refresh rate range. */
+ bool low_rr_activated;
};
/* Replay feature flags*/
@@ -1126,10 +1145,12 @@ struct replay_settings {
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
- /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
- uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay pseudo vtotal for low refresh rate */
+ uint16_t low_rr_full_screen_video_pseudo_vtotal;
/* Replay last pseudo vtotal set to DMUB */
uint16_t last_pseudo_vtotal;
+ /* Replay desync error */
+ uint32_t replay_desync_error_fail_count;
};
/* To split out "global" and "per-panel" config settings.
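
`hblank_expansion_dpcd_caps.buffer_size` keeps the raw DPCD encoding, so consumers have to decode it: effective size = (buffer_size + 1) * 32, with the unit (bytes vs pixels) and scope (per port vs per lane) given by the two bool flags. A decode sketch (helper name hypothetical):

    static uint32_t example_hblank_buffer_bytes(
            const struct hblank_expansion_dpcd_caps *caps,
            uint32_t bytes_per_pixel, uint32_t lane_count)
    {
        uint32_t size = (caps->buffer_size + 1) * 32;

        if (!caps->buffer_unit_bytes)
            size *= bytes_per_pixel;   /* reported in pixels */
        if (!caps->buffer_per_port)
            size *= lane_count;        /* reported per lane */
        return size;
    }
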
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index b700608e4240..077337698e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1105,6 +1105,9 @@ static bool dcn401_program_pix_clk(
&dto_params);
} else {
+ if (pll_settings->actual_pix_clk_100hz > 6000000UL)
+ return false;
+
/* disables DP DTO when provided with TMDS signal type */
clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto(
clock_source->ctx->dc->res_pool->dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index f5e1d9caee4c..1c2009e38aa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -98,7 +98,7 @@ static enum mi_bits_per_pixel get_mi_bpp(
}
static enum mi_tiling_format get_mi_tiling(
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
switch (tiling_info->gfx8.array_mode) {
case DC_ARRAY_1D_TILED_THIN1:
@@ -133,7 +133,7 @@ static bool is_vert_scan(enum dc_rotation_angle rotation)
static void dce_mi_program_pte_vm(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
@@ -430,7 +430,7 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
}
static void program_tiling(
- struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+ struct dce_mem_input *dce_mi, const struct dc_tiling_info *info)
{
if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
REG_UPDATE_6(GRPH_CONTROL,
@@ -481,7 +481,6 @@ static void program_tiling(
}
}
-
static void program_size_and_rotation(
struct dce_mem_input *dce_mi,
enum dc_rotation_angle rotation,
@@ -627,10 +626,31 @@ static void program_grph_pixel_format(
GRPH_PRESCALE_B_SIGN, sign);
}
+static void dce_mi_clear_tiling(
+ struct mem_input *mi)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_SW_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but reuses gfx8 struct */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+}
+
static void dce_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -650,7 +670,7 @@ static void dce_mi_program_surface_config(
static void dce60_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation, /* not used in DCE6 */
struct dc_plane_dcc_param *dcc,
@@ -884,7 +904,8 @@ static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -897,7 +918,8 @@ static const struct mem_input_funcs dce60_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce60_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#endif
@@ -910,7 +932,8 @@ static const struct mem_input_funcs dce112_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
static const struct mem_input_funcs dce120_mi_funcs = {
@@ -922,7 +945,8 @@ static const struct mem_input_funcs dce120_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
void dce_mem_input_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..6e2fce329d73 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -69,5 +69,16 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(link->dc, edp_links, &edp_num);
+
+ if (edp_num == 1)
+ return true;
+ }
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index cae18f8c1c9a..88c75c243bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -390,8 +390,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- else
- link->psr_settings.force_ffu_mode = 0;
+
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 8a3fbf95c48f..2c43c2422638 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -162,7 +162,7 @@ static void enable(struct dce_mem_input *mem_input110)
static void program_tiling(
struct dce_mem_input *mem_input110,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
uint32_t value = 0;
@@ -523,7 +523,7 @@ static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
/* Helper to get table entry from surface info */
static const unsigned int *get_dvmm_hw_setting(
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum surface_pixel_format format,
bool chroma)
{
@@ -563,7 +563,7 @@ static const unsigned int *get_dvmm_hw_setting(
static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
@@ -636,7 +636,7 @@ static void dce_mem_input_v_program_pte_vm(
static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index fa422a8cbced..61b0807693fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2127,70 +2127,131 @@ bool dce110_configure_crc(struct timing_generator *tg,
cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
- /* First, disable CRC before we configure it. */
- dm_write_reg(tg->ctx, cntl_addr, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg(tg->ctx, cntl_addr, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
- set_reg_field_value(value, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START);
- set_reg_field_value(value, params->windowa_x_end,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window A y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
- set_reg_field_value(value, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START);
- set_reg_field_value(value, params->windowa_y_end,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
- set_reg_field_value(value, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START);
- set_reg_field_value(value, params->windowb_x_end,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
- set_reg_field_value(value, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START);
- set_reg_field_value(value, params->windowb_y_end,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- value = 0;
- set_reg_field_value(value, params->continuous_mode ? 1 : 0,
- CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
- set_reg_field_value(value, params->selection,
- CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
- set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
- dm_write_reg(tg->ctx, cntl_addr, value);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable.*/
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable.*/
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC1_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ default:
+ return false;
+ }
return true;
}
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t addr = 0;
@@ -2206,14 +2267,30 @@ bool dce110_get_crc(struct timing_generator *tg,
if (!field)
return false;
- addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
- value = dm_read_reg(tg->ctx, addr);
- *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
- addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
- value = dm_read_reg(tg->ctx, addr);
- *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
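
With the CRC1 register pair wired up, each DCE11x timing generator exposes two independent CRC engines, addressed by the new `idx` argument. Reading both is a loop over the instance index; a sketch:

    /* Illustrative: sample both CRC engines on one timing generator. */
    uint32_t r_cr, g_y, b_cb;
    uint8_t idx;

    for (idx = 0; idx < 2; idx++) {       /* MAX_CRC_WINDOW_NUM engines */
        if (tg->funcs->get_crc(tg, idx, &r_cr, &g_y, &b_cb))
            ; /* engine idx is enabled; consume its CRC */
    }
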
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index ee4de740aceb..e4f5cad64f32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -286,7 +286,7 @@ bool dce110_arm_vert_intr(
bool dce110_configure_crc(struct timing_generator *tg,
const struct crc_params *params);
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
bool dce110_is_two_pixels_per_container(const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index fcf59348eb62..31c4f44ceaac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1100,45 +1100,79 @@ static bool dce120_configure_crc(struct timing_generator *tg,
if (!dce120_is_tg_enabled(tg))
return false;
- /* First, disable CRC before we configure it. */
- dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
- tg110->offsets.crtc, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
- CRTC_CRC_EN, params->continuous_mode ? 1 : 0,
- CRTC_CRC0_SELECT, params->selection,
- CRTC_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC0_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC1_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
-static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
- uint32_t *g_y, uint32_t *b_cb)
+static bool dce120_get_crc(struct timing_generator *tg, uint8_t idx,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value, field;
@@ -1151,14 +1185,30 @@ static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
if (!field)
return false;
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
- tg110->offsets.crtc);
- *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
- tg110->offsets.crtc);
- *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 573898984726..f9961a6446f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -168,31 +168,33 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
- uint8_t pwrseq_inst = 0xF;
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- switch (init_data->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
- ASSERT(false);
- break;
- }
-
- if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) {
//If supported, power sequencer mapping shall follow the DIG instance
+ uint8_t pwrseq_inst = 0xF;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
- else
+ } else {
/* If not supported, pwrseq will be assigned in order,
* so first pwrseq will be assigned to first panel instance (legacy behavior)
*/
dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index b2cea59ba5d4..9a92f73d5b7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -653,8 +653,9 @@ void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_lin
if (!query_dp_alt_from_dmub(enc, &cmd))
return;
- if (cmd.query_dp_alt.data.is_usb &&
- cmd.query_dp_alt.data.is_dp4 == 0)
+ if (cmd.query_dp_alt.data.is_dp_alt_disable == 0 &&
+ cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index d4a3e811aa39..ea0c9a9d0bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -28,6 +28,7 @@
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn35_dio_link_encoder.h"
+#include "dc_dmub_srv.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
@@ -159,6 +160,8 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
+ .enable_dpia_output = dcn35_link_encoder_enable_dpia_output,
+ .disable_dpia_output = dcn35_link_encoder_disable_dpia_output,
};
void dcn35_link_encoder_construct(
@@ -265,3 +268,80 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
+
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT is programmed to 5 (hpdsel - 1) to indicate the HPD pin is unused by DPIA. */
+ dpia_control.hpdsel = 6;
+ dpia_control.dpia_id = dpia_id;
+ dpia_control.fec_rdy = fec_rdy;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+}
+
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
+ return;
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.dpia_id = dpia_id;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index d546a3676304..f9d4221f4b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -144,4 +144,22 @@ bool dcn35_is_dig_enabled(struct link_encoder *enc);
enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
+/*
+ * Enable the DP transmitter and its encoder for the DPIA port.
+ */
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+
+/*
+ * Disable the DP transmitter and its encoder for the DPIA port.
+ */
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
+
#endif /* __DC_LINK_ENCODER__DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 2e4a46f1b499..5efddd48d5c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -158,6 +158,11 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable
);
+
+bool dm_helpers_dp_write_hblank_reduction(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 46f9c05de16e..e1d500633dfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_limit := 3072
+ else
+ frame_warn_limit := 2048
+ endif
+
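+  # Only raise the per-file frame-size limit when the global CONFIG_FRAME_WARN
+  # is below it; test-lt evaluates to y when arg1 is numerically less than arg2.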
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 39525721c976..f1235bf9a596 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1312,138 +1312,6 @@ bool dcn_validate_bandwidth(
return false;
}
-static unsigned int dcn_find_normalized_clock_vdd_Level(
- const struct dc *dc,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz)
-{
- int vdd_level = dcn_bw_v_min0p65;
-
- if (clocks_in_khz == 0)/*todo some clock not in the considerations*/
- return vdd_level;
-
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_DPPCLK:
- if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- {
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
-
- if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- }
- break;
-
- case DM_PP_CLOCK_TYPE_DCFCLK:
- if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- default:
- break;
- }
- return vdd_level;
-}
-
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks)
-{
- unsigned vdd_level, vdd_level_temp;
- unsigned dcf_clk;
-
- /*find a common supported voltage level*/
- vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
-
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
-
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
-
- /*find that level conresponding dcfclk*/
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- if (vdd_level == dcn_bw_v_max0p91) {
- BREAK_TO_DEBUGGER();
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- } else if (vdd_level == dcn_bw_v_max0p9)
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- else if (vdd_level == dcn_bw_v_nom0p8)
- dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
- else if (vdd_level == dcn_bw_v_mid0p72)
- dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
- else
- dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
-
- DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
- return dcf_clk;
-}
-
void dcn_bw_update_from_pplib_fclks(
struct dc *dc,
struct dm_pp_clock_levels_with_voltage *fclks)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 76d3bb3c9155..8d4873f80df0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1562,6 +1562,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int)(dst_y_per_row_vblank * (double)htotal
* ref_freq_to_pix_freq / (double)dpte_groups_per_row_ub_l);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 86ac7d59fd32..0748ef36a16a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,6 +1595,7 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
+ unsigned int NonDSCBPP3 = BPP_INVALID;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1603,6 +1604,7 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
+ NonDSCBPP3 = 18;
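+		// 18 bpp == 6 bpc RGB; only returned for DP outputs in the fallback below.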
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
@@ -1667,6 +1669,8 @@ double dml32_TruncToValidBPP(
return NonDSCBPP1;
else if (MaxLinkBPP >= NonDSCBPP0)
return 16.0;
+ else if ((Output == dm_dp2p0 || Output == dm_dp) && NonDSCBPP3 != BPP_INVALID && MaxLinkBPP >= NonDSCBPP3)
+ return NonDSCBPP3; // Special case to allow 6bpc RGB for DP connections.
else
return BPP_INVALID;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 072bd0539605..6b2ab4ec2b5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
static inline double dml_ceil(double a, double granularity)
{
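+	/*
+	 * dcn_bw_ceil2()/dcn_bw_floor2() divide by the granularity, so a zero
+	 * granularity is treated as "no quantization" and 0 is returned rather
+	 * than dividing by zero; the helpers below get the same guard.
+	 */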
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(a, granularity);
}
static inline double dml_floor(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(a, granularity);
}
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
static inline double dml_ceil_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(x, granularity);
}
static inline double dml_floor_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(x, granularity);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index c4378e620cbf..21fd466dba26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -28,11 +28,19 @@ dml2_ccflags := $(CC_FLAGS_FPU)
dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2048
+ endif
+
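+  # Only raise the per-file frame-size limit when the global CONFIG_FRAME_WARN
+  # is below it; test-lt evaluates to y when arg1 is numerically less than arg2.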
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
@@ -73,9 +81,8 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
@@ -94,9 +101,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
@@ -113,9 +119,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_r
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
-DML21 := src/dml2_top/dml_top.o
-DML21 += src/dml2_top/dml_top_mcache.o
-DML21 += src/dml2_top/dml2_top_optimization.o
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 8dabb1ac0b68..84a2de9a76d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -1736,7 +1736,7 @@ static void CalculateBytePerPixelAndBlockSizes(
#endif
} // CalculateBytePerPixelAndBlockSizes
-static dml_float_t CalculateTWait(
+static noinline_for_stack dml_float_t CalculateTWait(
dml_uint_t PrefetchMode,
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange,
dml_bool_t SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
@@ -4458,7 +4458,7 @@ static void CalculateSwathWidth(
}
} // CalculateSwathWidth
-static dml_float_t CalculateExtraLatency(
+static noinline_for_stack dml_float_t CalculateExtraLatency(
dml_uint_t RoundTripPingLatencyCycles,
dml_uint_t ReorderingBytes,
dml_float_t DCFCLK,
@@ -5915,7 +5915,7 @@ static dml_uint_t DSCDelayRequirement(
return DSCDelayRequirement_val;
}
-static dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
+static noinline_for_stack dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_bool_t NotUrgentLatencyHiding[],
dml_float_t ReadBandwidthLuma[],
@@ -6019,7 +6019,7 @@ static void CalculatePrefetchBandwithSupport(
#endif
}
-static dml_float_t CalculateBandwidthAvailableForImmediateFlip(
+static noinline_for_stack dml_float_t CalculateBandwidthAvailableForImmediateFlip(
dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_float_t ReadBandwidthLuma[],
@@ -6213,7 +6213,7 @@ static dml_uint_t CalculateMaxVStartup(
return max_vstartup_lines;
}
-static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
+static noinline_for_stack void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params,
dml_uint_t j,
dml_uint_t k)
@@ -6265,7 +6265,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
}
-static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
{
struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals;
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
@@ -6301,9 +6301,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.meta_row_bandwidth_this_state,
mode_lib->ms.dpte_row_bandwidth_this_state,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor);
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j]);
s->VMDataOnlyReturnBWPerState = dml_get_return_bw_mbps_vm_only(
&mode_lib->ms.soc,
@@ -6434,7 +6434,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
/* Output */
&mode_lib->ms.UrgentBurstFactorCursorPre[k],
&mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
&mode_lib->ms.NotUrgentLatencyHidingPre[k]);
mode_lib->ms.cursor_bw_pre[k] = mode_lib->ms.cache_display_cfg.plane.NumberOfCursors[k] * mode_lib->ms.cache_display_cfg.plane.CursorWidth[k] *
@@ -6458,9 +6458,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -6517,9 +6517,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw,
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre);
@@ -6586,9 +6586,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPP[j], // VBA_ERROR DPPPerSurface is not assigned at this point, should use NoOfDpp here
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -7809,9 +7809,9 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
mode_lib->ms.DETBufferSizeYThisState[k],
mode_lib->ms.DETBufferSizeCThisState[k],
/* Output */
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &mode_lib->ms.UrgentBurstFactorLuma[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorCursor[j][k],
+ &mode_lib->ms.UrgentBurstFactorLuma[j][k],
+ &mode_lib->ms.UrgentBurstFactorChroma[j][k],
&mode_lib->ms.NotUrgentLatencyHiding[k]);
}
@@ -8318,7 +8318,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dcfclk_option != dml_use_override_freq)
locals->Dcfclk = mode_lib->ms.DCFCLK;
else
- locals->Dcfclk = clk_cfg->dcfclk_freq_mhz;
+ locals->Dcfclk = clk_cfg->dcfclk_mhz;
#ifdef __DML_VBA_DEBUG__
dml_print_dml_policy(&mode_lib->ms.policy);
@@ -8371,7 +8371,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dispclk_option == dml_use_required_freq)
locals->Dispclk = locals->Dispclk_calculated;
else if (clk_cfg->dispclk_option == dml_use_override_freq)
- locals->Dispclk = clk_cfg->dispclk_freq_mhz;
+ locals->Dispclk = clk_cfg->dispclk_mhz;
else
locals->Dispclk = mode_lib->ms.state.dispclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -8412,7 +8412,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dppclk_option[k] == dml_use_required_freq)
locals->Dppclk[k] = locals->Dppclk_calculated[k];
else if (clk_cfg->dppclk_option[k] == dml_use_override_freq)
- locals->Dppclk[k] = clk_cfg->dppclk_freq_mhz[k];
+ locals->Dppclk[k] = clk_cfg->dppclk_mhz[k];
else
locals->Dppclk[k] = mode_lib->ms.state.dppclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -9190,6 +9190,8 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
&locals->FractionOfUrgentBandwidth,
&s->dummy_boolean[0]); // dml_bool_t *PrefetchBandwidthSupport
+
+
if (s->VRatioPrefetchMoreThanMax != false || s->DestinationLineTimesForPrefetchLessThan2 != false) {
dml_print("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
dml_print("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
@@ -9204,6 +9206,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
}
}
+
if (locals->PrefetchModeSupported == true && mode_lib->ms.support.ImmediateFlipSupport == true) {
locals->BandwidthAvailableForImmediateFlip = CalculateBandwidthAvailableForImmediateFlip(
mode_lib->ms.num_active_planes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index f951936bb579..dd3f43181a6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -28,6 +28,7 @@
#define __DISPLAY_MODE_CORE_STRUCT_H__
#include "display_mode_lib_defines.h"
+#include "dml_top_display_cfg_types.h"
enum dml_project_id {
dml_project_invalid = 0,
@@ -49,7 +50,9 @@ enum dml_use_mall_for_pstate_change_mode {
dml_use_mall_pstate_change_disable = 0,
dml_use_mall_pstate_change_full_frame = 1,
dml_use_mall_pstate_change_sub_viewport = 2,
- dml_use_mall_pstate_change_phantom_pipe = 3
+ dml_use_mall_pstate_change_phantom_pipe = 3,
+ dml_use_mall_pstate_change_phantom_pipe_no_data_return = 4,
+ dml_use_mall_pstate_change_imall = 5
};
enum dml_use_mall_for_static_screen_mode {
dml_use_mall_static_screen_disable = 0,
@@ -171,7 +174,11 @@ enum dml_swizzle_mode {
dml_sw_256kb_z_x = 28,
dml_sw_256kb_s_x = 29,
dml_sw_256kb_d_x = 30,
- dml_sw_256kb_r_x = 31
+ dml_sw_256kb_r_x = 31,
+ dml_sw_256b_2d = 32,
+ dml_sw_4kb_2d = 33,
+ dml_sw_64kb_2d = 34,
+ dml_sw_256kb_2d = 35
};
enum dml_lb_depth {
dml_lb_6 = 0,
@@ -223,24 +230,28 @@ enum dml_mpc_use_policy {
dml_mpc_disabled = 0,
dml_mpc_as_possible = 1,
dml_mpc_as_needed_for_voltage = 2,
- dml_mpc_as_needed_for_pstate_and_voltage = 3
+ dml_mpc_as_needed_for_pstate_and_voltage = 3,
+ dml_mpc_as_needed = 4,
+ dml_mpc_2to1 = 5
};
enum dml_odm_use_policy {
dml_odm_use_policy_bypass = 0,
dml_odm_use_policy_combine_as_needed = 1,
dml_odm_use_policy_combine_2to1 = 2,
- dml_odm_use_policy_combine_4to1 = 3,
- dml_odm_use_policy_split_1to2 = 4,
- dml_odm_use_policy_mso_1to2 = 5,
- dml_odm_use_policy_mso_1to4 = 6
+ dml_odm_use_policy_combine_3to1 = 3,
+ dml_odm_use_policy_combine_4to1 = 4,
+ dml_odm_use_policy_split_1to2 = 5,
+ dml_odm_use_policy_mso_1to2 = 6,
+ dml_odm_use_policy_mso_1to4 = 7
};
enum dml_odm_mode {
dml_odm_mode_bypass = 0,
dml_odm_mode_combine_2to1 = 1,
- dml_odm_mode_combine_4to1 = 2,
- dml_odm_mode_split_1to2 = 3,
- dml_odm_mode_mso_1to2 = 4,
- dml_odm_mode_mso_1to4 = 5
+ dml_odm_mode_combine_3to1 = 2,
+ dml_odm_mode_combine_4to1 = 3,
+ dml_odm_mode_split_1to2 = 4,
+ dml_odm_mode_mso_1to2 = 5,
+ dml_odm_mode_mso_1to4 = 6
};
enum dml_writeback_configuration {
dml_whole_buffer_for_single_stream_no_interleave = 0,
@@ -289,6 +300,17 @@ struct soc_state_bounding_box_st {
dml_float_t fclk_change_latency_us;
dml_float_t usr_retraining_latency_us;
dml_bool_t use_ideal_dram_bw_strobe;
+ dml_float_t g6_temp_read_blackout_us;
+
+ struct {
+ dml_uint_t urgent_ramp_uclk_cycles;
+ dml_uint_t trip_to_memory_uclk_cycles;
+ dml_uint_t meta_trip_to_memory_uclk_cycles;
+ dml_uint_t maximum_latency_when_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_urgent_uclk_cycles;
+ dml_uint_t maximum_latency_when_non_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_non_urgent_uclk_cycles;
+ } dml_dcn401_uclk_dpm_dependent_soc_qos_params;
};
struct soc_bounding_box_st {
@@ -297,7 +319,7 @@ struct soc_bounding_box_st {
dml_float_t pcierefclk_mhz;
dml_float_t refclk_mhz;
dml_float_t amclk_mhz;
- dml_float_t max_outstanding_reqs;
+ dml_uint_t max_outstanding_reqs;
dml_float_t pct_ideal_sdp_bw_after_urgent;
dml_float_t pct_ideal_fabric_bw_after_urgent;
dml_float_t pct_ideal_dram_bw_after_urgent_pixel_only;
@@ -308,6 +330,16 @@ struct soc_bounding_box_st {
dml_float_t max_avg_fabric_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_strobe_percent;
+
+ dml_float_t svp_prefetch_pct_ideal_sdp_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_fabric_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_only;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_and_vm;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_vm_only;
+ dml_float_t svp_prefetch_max_avg_sdp_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_fabric_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_dram_bw_use_normal_percent;
+
dml_uint_t round_trip_ping_latency_dcfclk_cycles;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_only_bytes;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
@@ -324,6 +356,26 @@ struct soc_bounding_box_st {
dml_uint_t mall_allocated_for_dcn_mbytes;
dml_float_t dispclk_dppclk_vco_speed_mhz;
dml_bool_t do_urgent_latency_adjustment;
+
+ dml_uint_t mem_word_bytes;
+ dml_uint_t num_dcc_mcaches;
+ dml_uint_t mcache_size_bytes;
+ dml_uint_t mcache_line_size_bytes;
+
+ struct {
+ dml_bool_t UseNewDCN401SOCParameters;
+ dml_uint_t df_qos_response_time_fclk_cycles;
+ dml_uint_t max_round_trip_to_furthest_cs_fclk_cycles;
+ dml_uint_t mall_overhead_fclk_cycles;
+ dml_uint_t meta_trip_adder_fclk_cycles;
+ dml_uint_t average_transport_distance_fclk_cycles;
+ dml_float_t umc_urgent_ramp_latency_margin;
+ dml_float_t umc_max_latency_margin;
+ dml_float_t umc_average_latency_margin;
+ dml_float_t fabric_max_transport_latency_margin;
+ dml_float_t fabric_average_transport_latency_margin;
+ } dml_dcn401_soc_qos_params;
+
};
struct ip_params_st {
@@ -515,6 +567,10 @@ struct dml_plane_cfg_st {
dml_uint_t CursorWidth[__DML_NUM_PLANES__];
dml_uint_t CursorBPP[__DML_NUM_PLANES__];
+ dml_bool_t setup_for_tdlut[__DML_NUM_PLANES__];
+ enum dml2_tdlut_addressing_mode tdlut_addressing_mode[__DML_NUM_PLANES__];
+ enum dml2_tdlut_width_mode tdlut_width_mode[__DML_NUM_PLANES__];
+
enum dml_use_mall_for_static_screen_mode UseMALLForStaticScreen[__DML_NUM_PLANES__];
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange[__DML_NUM_PLANES__];
@@ -604,6 +660,17 @@ struct dml_hw_resource_st {
dml_float_t DLGRefClkFreqMHz; /// <brief DLG Global Reference timer
};
+/// @brief To control the clk usage for model programming
+struct dml_clk_cfg_st {
+	enum dml_clk_cfg_policy dcfclk_option; ///< brief Used for mode_program; selects between the minimum required clock as calculated by DML and a test-specific frequency
+	enum dml_clk_cfg_policy dispclk_option; ///< brief Used for mode_program; selects between the minimum required clock as calculated by DML and a test-specific frequency
+ enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
+
+ dml_float_t dcfclk_mhz;
+ dml_float_t dispclk_mhz;
+ dml_float_t dppclk_mhz[__DML_NUM_PLANES__];
+}; // dml_clk_cfg_st
+
/// @brief DML display configuration.
/// Describes how to display a surface in a multi-plane setup, outputting to different outputs and writeback using the specified timing
struct dml_display_cfg_st {
@@ -616,19 +683,9 @@ struct dml_display_cfg_st {
unsigned int num_timings;
struct dml_hw_resource_st hw; //< brief for mode programming
+ struct dml_clk_cfg_st clk_overrides; //< brief for mode programming clk override
}; // dml_display_cfg_st
-/// @brief To control the clk usage for model programming
-struct dml_clk_cfg_st {
- enum dml_clk_cfg_policy dcfclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dispclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
-
- dml_float_t dcfclk_freq_mhz;
- dml_float_t dispclk_freq_mhz;
- dml_float_t dppclk_freq_mhz[__DML_NUM_PLANES__];
-}; // dml_clk_cfg_st
-
/// @brief DML mode evaluation and programming policy
/// Those knobs that affect mode support and mode programming
struct dml_mode_eval_policy_st {
@@ -884,11 +941,11 @@ struct mode_support_st {
dml_uint_t meta_row_height[__DML_NUM_PLANES__];
dml_uint_t meta_row_height_chroma[__DML_NUM_PLANES__];
dml_float_t UrgLatency;
- dml_float_t UrgentBurstFactorCursor[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorCursor[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorCursorPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorLuma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorLuma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorLumaPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorChroma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorChroma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorChromaPre[__DML_NUM_PLANES__];
dml_float_t MaximumSwathWidthInLineBufferLuma;
dml_float_t MaximumSwathWidthInLineBufferChroma;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
index c247aee89caf..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
@@ -690,12 +690,12 @@ __DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg)
dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option);
dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);
- dml_print("DML: clk_cfg: dcfclk_freq_mhz = %f\n", clk_cfg->dcfclk_freq_mhz);
- dml_print("DML: clk_cfg: dispclk_freq_mhz = %f\n", clk_cfg->dispclk_freq_mhz);
+ dml_print("DML: clk_cfg: dcfclk_mhz = %f\n", clk_cfg->dcfclk_mhz);
+ dml_print("DML: clk_cfg: dispclk_mhz = %f\n", clk_cfg->dispclk_mhz);
for (dml_uint_t i = 0; i < DCN_DML__NUM_PLANE; i++) {
dml_print("DML: clk_cfg: i=%d, dppclk_option = %d\n", i, clk_cfg->dppclk_option[i]);
- dml_print("DML: clk_cfg: i=%d, dppclk_freq_mhz = %f\n", i, clk_cfg->dppclk_freq_mhz[i]);
+ dml_print("DML: clk_cfg: i=%d, dppclk_mhz = %f\n", i, clk_cfg->dppclk_mhz[i]);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index c6a5a8614679..0c8ec30ea672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -10,7 +10,6 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-#include "bounding_boxes/dcn3_soc_bb.h"
static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
@@ -20,10 +19,6 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
const struct dml2_soc_qos_parameters *qos_params;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- soc_bb = &dml2_socbb_dcn31;
- qos_params = &dml_dcn31_soc_qos_params;
- break;
case DCN_VERSION_4_01:
default:
if (config->bb_from_dmub)
@@ -60,9 +55,6 @@ static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_ini
const struct dml2_ip_capabilities *ip_caps;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- ip_caps = &dml2_dcn31_max_ip_caps;
- break;
case DCN_VERSION_4_01:
default:
ip_caps = &dml2_dcn401_max_ip_caps;
@@ -302,12 +294,17 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
(in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
- if (in_dc->ctx->dc_bios->vram_info.num_chans) {
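+	/*
+	 * Prefer DRAM channel info from bw_params when it has been populated
+	 * (e.g. by the clock manager) and fall back to VBIOS vram_info.
+	 */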
+ if (dc_bw_params->num_channels) {
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
+ } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
}
- if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
+ if (dc_bw_params->dram_channel_width_bytes) {
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+ } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
@@ -721,11 +718,21 @@ static void populate_dml21_surface_config_from_plane_state(
surface->dcc.informative.fraction_of_zero_size_request_plane1 = plane_state->dcc.independent_64b_blks_c;
surface->dcc.plane0.pitch = plane_state->dcc.meta_pitch;
surface->dcc.plane1.pitch = plane_state->dcc.meta_pitch_c;
- if (in_dc->ctx->dce_version < DCN_VERSION_4_01) {
- /* needed for N-1 testing */
+
+	// Update swizzle / array mode based on the GFX version
+ switch (plane_state->tiling_info.gfxversion) {
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ // Placeholder for programming the array_mode
+ break;
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
surface->tiling = gfx9_to_dml2_swizzle_mode(plane_state->tiling_info.gfx9.swizzle);
- } else {
+ break;
+ case DcGfxAddr3:
surface->tiling = gfx_addr3_to_dml2_swizzle_mode(plane_state->tiling_info.gfx_addr3.swizzle);
+ break;
}
}
@@ -1010,7 +1017,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
@@ -1035,7 +1042,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
@@ -1077,28 +1084,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
-}
-
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
- if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
- /* invalid register set index */
- return;
- }
-
- /* convert to legacy format (time in ns) */
- watermark->urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->pte_meta_urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_enter_plus_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_enter / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_exit / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].uclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->urgent_latency_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.fclk_pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].fclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->frac_urg_bw_flip = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_flip;
- watermark->frac_urg_bw_nom = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_nom;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -1144,53 +1131,6 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
}
}
-
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming)
-{
- unsigned int hactive, vactive, hblank_start, vblank_start, hblank_end, vblank_end;
- struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
-
- hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow;
- vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
- hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
- vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
-
- hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow;
- vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
-
- if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- /* phantom has its own global sync */
- global_sync = &stream_programming->phantom_stream.global_sync;
- }
-
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
- pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
-
- pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
-
- pipe_ctx->pipe_dlg_param.hactive = hactive;
- pipe_ctx->pipe_dlg_param.vactive = vactive;
- pipe_ctx->pipe_dlg_param.htotal = pipe_ctx->stream->timing.h_total;
- pipe_ctx->pipe_dlg_param.vtotal = pipe_ctx->stream->timing.v_total;
- pipe_ctx->pipe_dlg_param.hblank_end = hblank_end;
- pipe_ctx->pipe_dlg_param.vblank_end = vblank_end;
- pipe_ctx->pipe_dlg_param.hblank_start = hblank_start;
- pipe_ctx->pipe_dlg_param.vblank_start = vblank_start;
- pipe_ctx->pipe_dlg_param.vfront_porch = pipe_ctx->stream->timing.v_front_porch;
- pipe_ctx->pipe_dlg_param.pixel_rate_mhz = pipe_ctx->stream->timing.pix_clk_100hz / 10000.00;
- pipe_ctx->pipe_dlg_param.refresh_rate = ((timing->pix_clk_100hz * 100) / timing->h_total) / timing->v_total;
- pipe_ctx->pipe_dlg_param.vtotal_max = pipe_ctx->stream->adjust.v_total_max;
- pipe_ctx->pipe_dlg_param.vtotal_min = pipe_ctx->stream->adjust.v_total_min;
- pipe_ctx->pipe_dlg_param.recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.recout_width = pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->pipe_dlg_param.full_recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.full_recout_width = pipe_ctx->plane_res.scl_data.recout.width;
-}
-
void dml21_map_hw_resources(struct dml2_context *dml_ctx)
{
unsigned int i = 0;
@@ -1226,22 +1166,22 @@ void dml21_set_dc_p_state_type(
bool sub_vp_enabled)
{
switch (stream_programming->uclk_pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
else
pipe_ctx->p_state_type = P_STATE_V_BLANK;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
pipe_ctx->p_state_type = P_STATE_SUB_VP;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 476a7f6e4875..069b939c672a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -21,8 +21,6 @@ void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_
void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming);
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 51d491bffa32..1e56d995cd0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -142,108 +142,21 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
return num_pipes;
}
-
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out)
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming)
{
- memset(&out->rq_regs, 0, sizeof(out->rq_regs));
- out->rq_regs.rq_regs_l.chunk_size = rq_regs->rq_regs_l.chunk_size;
- out->rq_regs.rq_regs_l.min_chunk_size = rq_regs->rq_regs_l.min_chunk_size;
- //out->rq_regs.rq_regs_l.meta_chunk_size = rq_regs->rq_regs_l.meta_chunk_size;
- //out->rq_regs.rq_regs_l.min_meta_chunk_size = rq_regs->rq_regs_l.min_meta_chunk_size;
- out->rq_regs.rq_regs_l.dpte_group_size = rq_regs->rq_regs_l.dpte_group_size;
- out->rq_regs.rq_regs_l.mpte_group_size = rq_regs->rq_regs_l.mpte_group_size;
- out->rq_regs.rq_regs_l.swath_height = rq_regs->rq_regs_l.swath_height;
- out->rq_regs.rq_regs_l.pte_row_height_linear = rq_regs->rq_regs_l.pte_row_height_linear;
-
- out->rq_regs.rq_regs_c.chunk_size = rq_regs->rq_regs_c.chunk_size;
- out->rq_regs.rq_regs_c.min_chunk_size = rq_regs->rq_regs_c.min_chunk_size;
- //out->rq_regs.rq_regs_c.meta_chunk_size = rq_regs->rq_regs_c.meta_chunk_size;
- //out->rq_regs.rq_regs_c.min_meta_chunk_size = rq_regs->rq_regs_c.min_meta_chunk_size;
- out->rq_regs.rq_regs_c.dpte_group_size = rq_regs->rq_regs_c.dpte_group_size;
- out->rq_regs.rq_regs_c.mpte_group_size = rq_regs->rq_regs_c.mpte_group_size;
- out->rq_regs.rq_regs_c.swath_height = rq_regs->rq_regs_c.swath_height;
- out->rq_regs.rq_regs_c.pte_row_height_linear = rq_regs->rq_regs_c.pte_row_height_linear;
-
- out->rq_regs.drq_expansion_mode = rq_regs->drq_expansion_mode;
- out->rq_regs.prq_expansion_mode = rq_regs->prq_expansion_mode;
- //out->rq_regs.mrq_expansion_mode = rq_regs->mrq_expansion_mode;
- out->rq_regs.crq_expansion_mode = rq_regs->crq_expansion_mode;
- out->rq_regs.plane1_base_address = rq_regs->plane1_base_address;
- out->unbounded_req = rq_regs->unbounded_request_enabled;
-
- memset(&out->dlg_regs, 0, sizeof(out->dlg_regs));
- out->dlg_regs.refcyc_h_blank_end = disp_dlg_regs->refcyc_h_blank_end;
- out->dlg_regs.dlg_vblank_end = disp_dlg_regs->dlg_vblank_end;
- out->dlg_regs.min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- out->dlg_regs.refcyc_per_htotal = disp_dlg_regs->refcyc_per_htotal;
- out->dlg_regs.refcyc_x_after_scaler = disp_dlg_regs->refcyc_x_after_scaler;
- out->dlg_regs.dst_y_after_scaler = disp_dlg_regs->dst_y_after_scaler;
- out->dlg_regs.dst_y_prefetch = disp_dlg_regs->dst_y_prefetch;
- out->dlg_regs.dst_y_per_vm_vblank = disp_dlg_regs->dst_y_per_vm_vblank;
- out->dlg_regs.dst_y_per_row_vblank = disp_dlg_regs->dst_y_per_row_vblank;
- out->dlg_regs.dst_y_per_vm_flip = disp_dlg_regs->dst_y_per_vm_flip;
- out->dlg_regs.dst_y_per_row_flip = disp_dlg_regs->dst_y_per_row_flip;
- out->dlg_regs.ref_freq_to_pix_freq = disp_dlg_regs->ref_freq_to_pix_freq;
- out->dlg_regs.vratio_prefetch = disp_dlg_regs->vratio_prefetch;
- out->dlg_regs.vratio_prefetch_c = disp_dlg_regs->vratio_prefetch_c;
- out->dlg_regs.refcyc_per_tdlut_group = disp_dlg_regs->refcyc_per_tdlut_group;
- out->dlg_regs.refcyc_per_pte_group_vblank_l = disp_dlg_regs->refcyc_per_pte_group_vblank_l;
- out->dlg_regs.refcyc_per_pte_group_vblank_c = disp_dlg_regs->refcyc_per_pte_group_vblank_c;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_l = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_c;
- out->dlg_regs.refcyc_per_pte_group_flip_l = disp_dlg_regs->refcyc_per_pte_group_flip_l;
- out->dlg_regs.refcyc_per_pte_group_flip_c = disp_dlg_regs->refcyc_per_pte_group_flip_c;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_l = disp_dlg_regs->refcyc_per_meta_chunk_flip_l;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_c = disp_dlg_regs->refcyc_per_meta_chunk_flip_c;
- out->dlg_regs.dst_y_per_pte_row_nom_l = disp_dlg_regs->dst_y_per_pte_row_nom_l;
- out->dlg_regs.dst_y_per_pte_row_nom_c = disp_dlg_regs->dst_y_per_pte_row_nom_c;
- out->dlg_regs.refcyc_per_pte_group_nom_l = disp_dlg_regs->refcyc_per_pte_group_nom_l;
- out->dlg_regs.refcyc_per_pte_group_nom_c = disp_dlg_regs->refcyc_per_pte_group_nom_c;
- //out->dlg_regs.dst_y_per_meta_row_nom_l = disp_dlg_regs->dst_y_per_meta_row_nom_l;
- //out->dlg_regs.dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_c;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_l = disp_dlg_regs->refcyc_per_meta_chunk_nom_l;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_c = disp_dlg_regs->refcyc_per_meta_chunk_nom_c;
- out->dlg_regs.refcyc_per_line_delivery_pre_l = disp_dlg_regs->refcyc_per_line_delivery_pre_l;
- out->dlg_regs.refcyc_per_line_delivery_pre_c = disp_dlg_regs->refcyc_per_line_delivery_pre_c;
- out->dlg_regs.refcyc_per_line_delivery_l = disp_dlg_regs->refcyc_per_line_delivery_l;
- out->dlg_regs.refcyc_per_line_delivery_c = disp_dlg_regs->refcyc_per_line_delivery_c;
- out->dlg_regs.refcyc_per_vm_group_vblank = disp_dlg_regs->refcyc_per_vm_group_vblank;
- out->dlg_regs.refcyc_per_vm_group_flip = disp_dlg_regs->refcyc_per_vm_group_flip;
- out->dlg_regs.refcyc_per_vm_req_vblank = disp_dlg_regs->refcyc_per_vm_req_vblank;
- out->dlg_regs.refcyc_per_vm_req_flip = disp_dlg_regs->refcyc_per_vm_req_flip;
- out->dlg_regs.dst_y_offset_cur0 = disp_dlg_regs->dst_y_offset_cur0;
- out->dlg_regs.chunk_hdl_adjust_cur0 = disp_dlg_regs->chunk_hdl_adjust_cur0;
- //out->dlg_regs.dst_y_offset_cur1 = disp_dlg_regs->dst_y_offset_cur1;
- //out->dlg_regs.chunk_hdl_adjust_cur1 = disp_dlg_regs->chunk_hdl_adjust_cur1;
- out->dlg_regs.vready_after_vcount0 = disp_dlg_regs->vready_after_vcount0;
- out->dlg_regs.dst_y_delta_drq_limit = disp_dlg_regs->dst_y_delta_drq_limit;
- out->dlg_regs.refcyc_per_vm_dmdata = disp_dlg_regs->refcyc_per_vm_dmdata;
- out->dlg_regs.dmdata_dl_delta = disp_dlg_regs->dmdata_dl_delta;
-
- memset(&out->ttu_regs, 0, sizeof(out->ttu_regs));
- out->ttu_regs.qos_level_low_wm = disp_ttu_regs->qos_level_low_wm;
- out->ttu_regs.qos_level_high_wm = disp_ttu_regs->qos_level_high_wm;
- out->ttu_regs.min_ttu_vblank = disp_ttu_regs->min_ttu_vblank;
- out->ttu_regs.qos_level_flip = disp_ttu_regs->qos_level_flip;
- out->ttu_regs.refcyc_per_req_delivery_l = disp_ttu_regs->refcyc_per_req_delivery_l;
- out->ttu_regs.refcyc_per_req_delivery_c = disp_ttu_regs->refcyc_per_req_delivery_c;
- out->ttu_regs.refcyc_per_req_delivery_cur0 = disp_ttu_regs->refcyc_per_req_delivery_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_cur1 = disp_ttu_regs->refcyc_per_req_delivery_cur1;
- out->ttu_regs.refcyc_per_req_delivery_pre_l = disp_ttu_regs->refcyc_per_req_delivery_pre_l;
- out->ttu_regs.refcyc_per_req_delivery_pre_c = disp_ttu_regs->refcyc_per_req_delivery_pre_c;
- out->ttu_regs.refcyc_per_req_delivery_pre_cur0 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_pre_cur1 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur1;
- out->ttu_regs.qos_level_fixed_l = disp_ttu_regs->qos_level_fixed_l;
- out->ttu_regs.qos_level_fixed_c = disp_ttu_regs->qos_level_fixed_c;
- out->ttu_regs.qos_level_fixed_cur0 = disp_ttu_regs->qos_level_fixed_cur0;
- //out->ttu_regs.qos_level_fixed_cur1 = disp_ttu_regs->qos_level_fixed_cur1;
- out->ttu_regs.qos_ramp_disable_l = disp_ttu_regs->qos_ramp_disable_l;
- out->ttu_regs.qos_ramp_disable_c = disp_ttu_regs->qos_ramp_disable_c;
- out->ttu_regs.qos_ramp_disable_cur0 = disp_ttu_regs->qos_ramp_disable_cur0;
- //out->ttu_regs.qos_ramp_disable_cur1 = disp_ttu_regs->qos_ramp_disable_cur1;
+ union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
+
+ if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ /* phantom has its own global sync */
+ global_sync = &stream_programming->phantom_stream.global_sync;
+ }
+
+ memcpy(&pipe_ctx->global_sync,
+ global_sync,
+ sizeof(union dml2_global_sync_programming));
}
void dml21_populate_mall_allocation_size(struct dc_state *context,
@@ -301,28 +214,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
{
unsigned int pipe_reg_index = 0;
- dml21_populate_pipe_ctx_dlg_params(dml_ctx, context, pipe_ctx, stream_prog);
+ dml21_pipe_populate_global_sync(dml_ctx, context, pipe_ctx, stream_prog);
find_pipe_regs_idx(dml_ctx, pipe_ctx, &pipe_reg_index);
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
memcpy(&pipe_ctx->hubp_regs, pln_prog->phantom_plane.pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = false;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = 0;
} else {
memcpy(&pipe_ctx->hubp_regs, pln_prog->pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = pln_prog->pipe_regs[pipe_reg_index]->rq_regs.unbounded_request_enabled;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
@@ -482,7 +383,8 @@ void dml21_build_fams2_programming(const struct dc *dc,
unsigned int num_fams2_streams = 0;
/* reset fams2 data */
- memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -490,8 +392,10 @@ void dml21_build_fams2_programming(const struct dc *dc,
int dml_stream_idx;
struct dc_stream_state *phantom_stream;
struct dc_stream_status *phantom_status;
+ enum fams2_stream_type type = 0;
- struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams];
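+ /* FAMS2 static state is split into a base config block and a per-method sub-state block */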
+ union dmub_cmd_fams2_config *static_base_state = &context->bw_ctx.bw.dcn.fams2_stream_base_params[num_fams2_streams];
+ union dmub_cmd_fams2_config *static_sub_state = &context->bw_ctx.bw.dcn.fams2_stream_sub_params[num_fams2_streams];
struct dc_stream_state *stream = context->streams[i];
@@ -508,28 +412,38 @@ void dml21_build_fams2_programming(const struct dc *dc,
}
/* copy static state from PMO */
- memcpy(static_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
- sizeof(struct dmub_fams2_stream_static_state));
-
- /* get information from context */
- static_state->num_planes = context->stream_status[i].plane_count;
- static_state->otg_inst = context->stream_status[i].primary_otg_inst;
-
- /* populate pipe masks for planes */
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
- static_state->pipe_mask |= (1 << k);
- static_state->plane_pipe_masks[j] |= (1 << k);
+ memcpy(static_base_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
+ sizeof(union dmub_cmd_fams2_config));
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+
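+ /* dispatch on the FAMS2 interface minor version; v1 is the only layout defined here, so it also serves as the default */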
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ type = static_base_state->stream_v1.base.type;
+
+ /* get information from context */
+ static_base_state->stream_v1.base.num_planes = context->stream_status[i].plane_count;
+ static_base_state->stream_v1.base.otg_inst = context->stream_status[i].primary_otg_inst;
+
+ /* populate pipe masks for planes */
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
+ static_base_state->stream_v1.base.pipe_mask |= (1 << k);
+ static_base_state->stream_v1.base.plane_pipe_masks[j] |= (1 << k);
+ }
}
}
}
+
/* get per method programming */
- switch (static_state->type) {
+ switch (type) {
case FAMS2_STREAM_TYPE_VBLANK:
case FAMS2_STREAM_TYPE_VACTIVE:
case FAMS2_STREAM_TYPE_DRR:
@@ -543,16 +457,27 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* phantom status should always be present */
ASSERT(phantom_status);
- static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
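+ /* guard against a missing phantom status rather than dereference it below */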
+ if (!phantom_status)
+ break;
- /* populate pipe masks for phantom planes */
- for (j = 0; j < phantom_status->plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
- static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
- static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+
+ /* populate pipe masks for phantom planes */
+ for (j = 0; j < phantom_status->plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_pipe_mask |= (1 << k);
+ static_sub_state->stream_v1.sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ }
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
index d5153fbac921..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
@@ -18,10 +18,10 @@ struct dml2_display_ttu_regs;
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out);
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming);
void dml21_populate_mall_allocation_size(struct dc_state *context,
struct dml2_context *in_ctx,
struct dml2_per_plane_programming *pln_prog,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index bbc28b9a15a3..fb80ba9287b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -75,7 +75,6 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
{
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
(*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
@@ -233,13 +232,6 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
dml21_copy_clocks_to_dc_state(dml_ctx, context);
dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
- if (in_dc->ctx->dce_version == DCN_VERSION_3_2) {
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.a, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.b, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.c, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.d, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- }
-
dml21_build_fams2_programming(in_dc, context, dml_ctx);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
deleted file mode 100644
index d82c681a5402..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML_DML_DCN3_SOC_BB__
-#define __DML_DML_DCN3_SOC_BB__
-
-#include "dml_top_soc_parameter_types.h"
-
-static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 12,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 257,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- .minimum_uclk_khz = 97,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 61,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 118,
- },
- {
- .minimum_uclk_khz = 435,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 90,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 135,
- },
- {
- .minimum_uclk_khz = 731,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 124,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 155,
- },
- {
- .minimum_uclk_khz = 1187,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 160,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 177,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
-};
-
-static const struct dml2_soc_bb dml2_socbb_dcn31 = {
- .clk_table = {
- .uclk = {
- .clk_values_khz = {97000, 435000, 731000, 1187000},
- .num_clk_values = 4,
- },
- .fclk = {
- .clk_values_khz = {300000, 2500000},
- .num_clk_values = 2,
- },
- .dcfclk = {
- .clk_values_khz = {200000, 1800000},
- .num_clk_values = 2,
- },
- .dispclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dppclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dtbclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .phyclk = {
- .clk_values_khz = {810000, 810000},
- .num_clk_values = 2,
- },
- .socclk = {
- .clk_values_khz = {300000, 1600000},
- .num_clk_values = 2,
- },
- .dscclk = {
- .clk_values_khz = {666667, 666667},
- .num_clk_values = 2,
- },
- .phyclk_d18 = {
- .clk_values_khz = {625000, 625000},
- .num_clk_values = 2,
- },
- .phyclk_d32 = {
- .clk_values_khz = {2000000, 2000000},
- .num_clk_values = 2,
- },
- .dram_config = {
- .channel_width_bytes = 2,
- .channel_count = 16,
- .transactions_per_clock = 16,
- },
- },
-
- .qos_parameters = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 0,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 260,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- // State 1
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 72,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 117,
- },
- {
- // State 2
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 146,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 133,
- },
- {
- // State 3
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 564,
- .trip_to_memory_uclk_cycles = 853,
- .meta_trip_to_memory_uclk_cycles = 853,
- .maximum_latency_when_urgent_uclk_cycles = 164,
- .average_latency_when_urgent_uclk_cycles = 164,
- .maximum_latency_when_non_urgent_uclk_cycles = 853,
- .average_latency_when_non_urgent_uclk_cycles = 136,
- },
- {
- // State 4
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 613,
- .trip_to_memory_uclk_cycles = 869,
- .meta_trip_to_memory_uclk_cycles = 869,
- .maximum_latency_when_urgent_uclk_cycles = 213,
- .average_latency_when_urgent_uclk_cycles = 213,
- .maximum_latency_when_non_urgent_uclk_cycles = 869,
- .average_latency_when_non_urgent_uclk_cycles = 149,
- },
- {
- // State 5
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 232,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 153,
- },
- {
- // State 6
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 665,
- .trip_to_memory_uclk_cycles = 885,
- .meta_trip_to_memory_uclk_cycles = 885,
- .maximum_latency_when_urgent_uclk_cycles = 265,
- .average_latency_when_urgent_uclk_cycles = 265,
- .maximum_latency_when_non_urgent_uclk_cycles = 885,
- .average_latency_when_non_urgent_uclk_cycles = 161,
- },
- {
- // State 7
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 689,
- .trip_to_memory_uclk_cycles = 895,
- .meta_trip_to_memory_uclk_cycles = 895,
- .maximum_latency_when_urgent_uclk_cycles = 289,
- .average_latency_when_urgent_uclk_cycles = 289,
- .maximum_latency_when_non_urgent_uclk_cycles = 895,
- .average_latency_when_non_urgent_uclk_cycles = 167,
- },
- {
- // State 8
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 316,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 174,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
- },
-
- .power_management_parameters = {
- .dram_clk_change_blackout_us = 400,
- .fclk_change_blackout_us = 0,
- .g7_ppt_blackout_us = 0,
- .stutter_enter_plus_exit_latency_us = 50,
- .stutter_exit_latency_us = 43,
- .z8_stutter_enter_plus_exit_latency_us = 0,
- .z8_stutter_exit_latency_us = 0,
- },
-
- .vmin_limit = {
- .dispclk_khz = 600 * 1000,
- },
-
- .dprefclk_mhz = 700,
- .xtalclk_mhz = 100,
- .pcie_refclk_mhz = 100,
- .dchub_refclk_mhz = 50,
- .mall_allocated_for_dcn_mbytes = 64,
- .max_outstanding_reqs = 512,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .return_bus_width_bytes = 64,
- .hostvm_min_page_size_kbytes = 0,
- .gpuvm_min_page_size_kbytes = 256,
- .phy_downspread_percent = 0,
- .dcn_downspread_percent = 0,
- .dispclk_dppclk_vco_speed_mhz = 4500,
- .do_urgent_latency_adjustment = 0,
- .mem_word_bytes = 32,
- .num_dcc_mcaches = 8,
- .mcache_size_bytes = 2048,
- .mcache_line_size_bytes = 32,
- .max_fclk_for_uclk_dpm_khz = 1250 * 1000,
-};
-
-static const struct dml2_ip_capabilities dml2_dcn31_max_ip_caps = {
- .pipe_count = 4,
- .otg_count = 4,
- .num_dsc = 4,
- .max_num_dp2p0_streams = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1152,
- .meta_fifo_size_in_kentries = 22,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .subvp_drr_scheduling_margin_us = 100,
- .subvp_prefetch_end_to_mall_start_us = 15,
- .subvp_fw_processing_delay = 15,
-
- .fams2 = {
- .max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
- .allow_programming_delay_us = 18,
- .min_allow_width_us = 20,
- .subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
- .subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
- },
-};
-
-#endif /* __DML_DML_DCN3_SOC_BB__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 8ef7977841de..793e1c038efd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -344,6 +344,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.compressed_buffer_segment_size_in_kbytes = 64,
+ .cursor_buffer_size = 24,
.max_flip_time_us = 80,
.max_flip_time_lines = 32,
.hostvm_mode = 0,
@@ -354,7 +355,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.fams2 = {
.max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 125,
+ .scheduling_delay_us = 550,
.vertical_interrupt_ack_delay_us = 40,
.allow_programming_delay_us = 18,
.min_allow_width_us = 20,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index b132f676a68d..5e1ab6d97640 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -10,9 +10,10 @@
#define DML2_MAX_PLANES 8
#define DML2_MAX_DCN_PIPES 8
#define DML2_MAX_MCACHES 8 // assume plane is going to be supported by a max of 8 mcaches
+#define DML2_MAX_WRITEBACK 3
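+// maximum writeback streams described per dml2_writeback_cfg (sizes writeback_stream[] below)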
enum dml2_swizzle_mode {
- dml2_sw_linear,
+ dml2_sw_linear, // SW_LINEAR accepts a 256-byte-aligned pitch, or a 128-byte-aligned pitch when DCC is not enabled
dml2_sw_256b_2d,
dml2_sw_4kb_2d,
dml2_sw_64kb_2d,
@@ -24,7 +25,8 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_64kb_d_x,
dml2_gfx11_sw_64kb_r_x,
dml2_gfx11_sw_256kb_d_x,
- dml2_gfx11_sw_256kb_r_x
+ dml2_gfx11_sw_256kb_r_x,
};
enum dml2_source_format_class {
@@ -38,7 +40,13 @@ enum dml2_source_format_class {
dml2_rgbe_alpha = 9,
dml2_rgbe = 10,
dml2_mono_8 = 11,
- dml2_mono_16 = 12
+ dml2_mono_16 = 12,
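+ // 4:2:2 planar and packed source formats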
+ dml2_422_planar_8 = 13,
+ dml2_422_planar_10 = 14,
+ dml2_422_planar_12 = 15,
+ dml2_422_packed_8 = 16,
+ dml2_422_packed_10 = 17,
+ dml2_422_packed_12 = 18
};
enum dml2_rotation_angle {
@@ -121,15 +129,6 @@ enum dml2_dsc_enable_option {
dml2_dsc_enable_if_necessary = 2
};
-enum dml2_pstate_support_method {
- dml2_pstate_method_uninitialized,
- dml2_pstate_method_not_supported,
- dml2_pstate_method_vactive,
- dml2_pstate_method_vblank,
- dml2_pstate_method_svp,
- dml2_pstate_method_drr
-};
-
enum dml2_tdlut_addressing_mode {
dml2_tdlut_sw_linear = 0,
dml2_tdlut_simple_linear = 1
@@ -287,22 +286,23 @@ struct dml2_link_output_cfg {
bool validate_output; // Do not validate the link configuration for this display stream.
};
-struct dml2_writeback_cfg {
- bool enable;
+struct dml2_writeback_info {
enum dml2_source_format_class pixel_format;
- unsigned int active_writebacks_per_surface;
+ unsigned long input_width;
+ unsigned long input_height;
+ unsigned long output_width;
+ unsigned long output_height;
+ unsigned long v_taps;
+ unsigned long h_taps;
+ unsigned long v_taps_chroma;
+ unsigned long h_taps_chroma;
+ double h_ratio;
+ double v_ratio;
+};
- struct {
- bool enabled;
- unsigned long input_width;
- unsigned long input_height;
- unsigned long output_width;
- unsigned long output_height;
- unsigned long v_taps;
- unsigned long h_taps;
- double h_ratio;
- double v_ratio;
- } scaling_info;
+struct dml2_writeback_cfg {
+ unsigned int active_writebacks_per_stream;
+ struct dml2_writeback_info writeback_stream[DML2_MAX_WRITEBACK];
};
struct dml2_plane_parameters {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index ebd8abe894a9..5f0bc42d1d2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -167,11 +167,13 @@ struct dml2_ip_capabilities {
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
unsigned int max_num_dp2p0_outputs;
+ unsigned int max_num_wb;
unsigned int rob_buffer_size_kbytes;
unsigned int config_return_buffer_size_in_kbytes;
unsigned int config_return_buffer_segment_size_in_kbytes;
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
+ unsigned int cursor_buffer_size;
unsigned int max_flip_time_us;
unsigned int max_flip_time_lines;
unsigned int hostvm_mode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index eeb96c455658..d2d053f2354d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -26,20 +26,14 @@ enum dml2_project_id {
dml2_project_dcn4x_stage2_auto_drr_svp = 3,
};
-enum dml2_dram_clock_change_support {
- dml2_dram_clock_change_vactive = 0,
- dml2_dram_clock_change_vblank = 1,
- dml2_dram_clock_change_vblank_and_vactive = 2,
- dml2_dram_clock_change_drr = 3,
- dml2_dram_clock_change_mall_svp = 4,
- dml2_dram_clock_change_mall_full_frame = 6,
- dml2_dram_clock_change_unsupported = 7
-};
-
-enum dml2_fclock_change_support {
- dml2_fclock_change_vactive = 0,
- dml2_fclock_change_vblank = 1,
- dml2_fclock_change_unsupported = 2
+enum dml2_pstate_change_support {
+ dml2_pstate_change_vactive = 0,
+ dml2_pstate_change_vblank = 1,
+ dml2_pstate_change_vblank_and_vactive = 2,
+ dml2_pstate_change_drr = 3,
+ dml2_pstate_change_mall_svp = 4,
+ dml2_pstate_change_mall_full_frame = 6,
+ dml2_pstate_change_unsupported = 7
};
enum dml2_output_type_and_rate__type {
@@ -202,24 +196,23 @@ struct dml2_mcache_surface_allocation {
} informative;
};
-enum dml2_uclk_pstate_support_method {
- dml2_uclk_pstate_support_method_not_supported = 0,
- /* hw */
- dml2_uclk_pstate_support_method_vactive = 1,
- dml2_uclk_pstate_support_method_vblank = 2,
- dml2_uclk_pstate_support_method_reserved_hw = 5,
- /* fw */
- dml2_uclk_pstate_support_method_fw_subvp_phantom = 6,
- dml2_uclk_pstate_support_method_reserved_fw = 10,
- /* fw w/drr */
- dml2_uclk_pstate_support_method_fw_vactive_drr = 11,
- dml2_uclk_pstate_support_method_fw_vblank_drr = 12,
- dml2_uclk_pstate_support_method_fw_subvp_phantom_drr = 13,
- dml2_uclk_pstate_support_method_reserved_fw_drr_fixed = 20,
- dml2_uclk_pstate_support_method_fw_drr = 21,
- dml2_uclk_pstate_support_method_reserved_fw_drr_var = 22,
-
- dml2_uclk_pstate_support_method_count
+enum dml2_pstate_method {
+ dml2_pstate_method_na = 0,
+ /* hw exclusive modes */
+ dml2_pstate_method_vactive = 1,
+ dml2_pstate_method_vblank = 2,
+ dml2_pstate_method_reserved_hw = 5,
+ /* fw assisted exclusive modes */
+ dml2_pstate_method_fw_svp = 6,
+ dml2_pstate_method_reserved_fw = 10,
+ /* fw assisted modes requiring drr modulation */
+ dml2_pstate_method_fw_vactive_drr = 11,
+ dml2_pstate_method_fw_vblank_drr = 12,
+ dml2_pstate_method_fw_svp_drr = 13,
+ dml2_pstate_method_reserved_fw_drr_clamped = 20,
+ dml2_pstate_method_fw_drr = 21,
+ dml2_pstate_method_reserved_fw_drr_var = 22,
+ dml2_pstate_method_count
};
struct dml2_per_plane_programming {
@@ -241,7 +234,7 @@ struct dml2_per_plane_programming {
// If a stream is using odm split, then this value is always 1
unsigned int num_dpps_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_support_method;
+ enum dml2_pstate_method uclk_pstate_support_method;
// MALL size requirements for MALL SS and SubVP
unsigned int surface_size_mall_bytes;
@@ -281,7 +274,7 @@ struct dml2_per_stream_programming {
unsigned int num_odms_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_method;
+ enum dml2_pstate_method uclk_pstate_method;
struct {
bool enabled;
@@ -289,7 +282,8 @@ struct dml2_per_stream_programming {
union dml2_global_sync_programming global_sync;
} phantom_stream;
- struct dmub_fams2_stream_static_state fams2_params;
+ union dmub_cmd_fams2_config fams2_base_params;
+ union dmub_cmd_fams2_config fams2_sub_params;
};
//-----------------
@@ -339,7 +333,7 @@ struct dml2_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
bool TotalVerticalActiveBandwidthSupport;
bool VActiveBandwidthSupport;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool USRRetrainingSupport;
bool PrefetchSupported;
bool DynamicMetadataSupported;
@@ -361,6 +355,7 @@ struct dml2_mode_support_info {
unsigned int AlignedYPitch[DML2_MAX_PLANES];
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -392,6 +387,11 @@ struct dml2_display_cfg_programming {
unsigned long fclk_khz;
unsigned long dcfclk_khz;
} svp_prefetch;
+ struct {
+ unsigned long uclk_khz;
+ unsigned long fclk_khz;
+ unsigned long dcfclk_khz;
+ } svp_prefetch_no_throttle;
unsigned long deepsleep_dcfclk_khz;
unsigned long dispclk_khz;
@@ -444,7 +444,7 @@ struct dml2_display_cfg_programming {
double pstate_change_us;
double fclk_pstate_change_us;
double usr_retraining_us;
- double g6_temp_read_watermark_us;
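+ /* generalized from g6_temp_read_watermark_us to also cover ppt */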
+ double temp_read_or_ppt_watermark_us;
} watermarks;
struct {
@@ -653,6 +653,7 @@ struct dml2_display_cfg_programming {
double DisplayPipeLineDeliveryTimeLumaPrefetch[DML2_MAX_PLANES];
double DisplayPipeLineDeliveryTimeChromaPrefetch[DML2_MAX_PLANES];
+ double WritebackRequiredBandwidth;
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double DSCCLK_calculated[DML2_MAX_PLANES];
@@ -662,6 +663,7 @@ struct dml2_display_cfg_programming {
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
+ double LowestPrefetchMargin;
} misc;
struct dml2_mode_support_info mode_support_info;
@@ -675,6 +677,7 @@ struct dml2_display_cfg_programming {
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_map_watermarks;
} informative;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 3d41ffde91c1..d68b4567e218 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -9,7 +9,7 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
-static const struct dml2_core_ip_params core_dcn4_ip_caps_base = {
+struct dml2_core_ip_params core_dcn4_ip_caps_base = {
// Hardcoded values for DCN3x
.vblank_nom_default_us = 668,
.remote_iommu_outstanding_translations = 256,
@@ -90,6 +90,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i
ip_caps->config_return_buffer_segment_size_in_kbytes = ip_params->config_return_buffer_segment_size_in_kbytes;
ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;
+ ip_caps->cursor_buffer_size = ip_params->cursor_buffer_size;
ip_caps->max_flip_time_us = ip_params->max_flip_time_us;
ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines;
ip_caps->hostvm_mode = ip_params->hostvm_mode;
@@ -114,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params,
ip_params->config_return_buffer_segment_size_in_kbytes = ip_caps->config_return_buffer_segment_size_in_kbytes;
ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
+ ip_params->cursor_buffer_size = ip_caps->cursor_buffer_size;
ip_params->max_flip_time_us = ip_caps->max_flip_time_us;
ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines;
ip_params->hostvm_mode = ip_caps->hostvm_mode;
@@ -316,28 +318,9 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
// Setup the appropriate p-state strategy
if (display_cfg->stage3.performed && display_cfg->stage3.success) {
- switch (display_cfg->stage3.pstate_switch_modes[plane_index]) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_drr:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
- programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
- break;
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
- default:
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
- break;
- }
+ programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
} else {
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
@@ -360,7 +343,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
/* unconditionally populate fams2 params */
dml2_core_calcs_get_stream_fams2_programming(&core->clean_me_up.mode_lib,
display_cfg,
- &programming->stream_programming[main_plane->stream_index].fams2_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_base_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_sub_params,
programming->stream_programming[main_plane->stream_index].uclk_pstate_method,
plane_index);
@@ -572,18 +556,18 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];
if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else {
if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vactive;
else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vblank;
else
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 601320b1be81..8ed49a9df378 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -11,6 +11,9 @@
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define DML_MAX_NUM_OF_SLICES_PER_DSC 4
+#define DML_MAX_COMPRESSION_RATIO 4
+//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+//#define DML_GLOBAL_PREFETCH_CHECK
#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
@@ -132,9 +135,9 @@ static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_su
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -315,12 +318,11 @@ dml_get_var_func(meta_trip_memory_us, double, mode_lib->mp.MetaTripToMemory);
dml_get_var_func(wm_fclk_change, double, mode_lib->mp.Watermark.FCLKChangeWatermark);
dml_get_var_func(wm_usr_retraining, double, mode_lib->mp.Watermark.USRRetrainingWatermark);
-dml_get_var_func(wm_g6_temp_read, double, mode_lib->mp.Watermark.g6_temp_read_watermark_us);
+dml_get_var_func(wm_temp_read_or_ppt, double, mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us);
dml_get_var_func(wm_dram_clock_change, double, mode_lib->mp.Watermark.DRAMClockChangeWatermark);
dml_get_var_func(fraction_of_urgent_bandwidth, double, mode_lib->mp.FractionOfUrgentBandwidth);
dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, double, mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip);
dml_get_var_func(fraction_of_urgent_bandwidth_mall, double, mode_lib->mp.FractionOfUrgentBandwidthMALL);
-dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(wm_writeback_dram_clock_change, double, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
dml_get_var_func(wm_writeback_fclk_change, double, mode_lib->mp.Watermark.WritebackFCLKChangeWatermark);
dml_get_var_func(stutter_efficiency, double, mode_lib->mp.StutterEfficiency);
@@ -355,7 +357,9 @@ dml_get_var_func(svp_prefetch_urg_bw_available_sdp, double, mode_lib->mp.urg_ban
dml_get_var_func(svp_prefetch_urg_bw_available_dram, double, mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
dml_get_var_func(svp_prefetch_urg_bw_available_dram_vm_only, double, mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_svp_prefetch]);
+dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(max_urgent_latency_us, double, mode_lib->ms.support.max_urgent_latency_us);
+dml_get_var_func(max_non_urgent_latency_us, double, mode_lib->ms.support.max_non_urgent_latency_us);
dml_get_var_func(avg_non_urgent_latency_us, double, mode_lib->ms.support.avg_non_urgent_latency_us);
dml_get_var_func(avg_urgent_latency_us, double, mode_lib->ms.support.avg_urgent_latency_us);
@@ -466,6 +470,24 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
case dml2_420_12:
val = 1;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
case dml2_rgbe_alpha:
val = 0;
break;
@@ -487,32 +509,31 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
}
@@ -579,8 +600,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
@@ -820,7 +841,7 @@ static void CalculateSwathWidth(
// Output
unsigned int req_per_swath_ub_l[],
unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
+ unsigned int SwathWidthSingleDPPY[], // post-rotated plane width
unsigned int SwathWidthSingleDPPC[],
unsigned int SwathWidthY[], // per-pipe
unsigned int SwathWidthC[], // per-pipe
@@ -1403,7 +1424,6 @@ static unsigned int dscceComputeDelay(
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, padding_pixels, ssm_group_priming_delay, ssm_pipeline_delay, obsm_pipeline_delay, slice_padded_pixels, ixd_plus_padding, ixd_plus_padding_groups, cycles_per_group, group_delay, pipeline_delay, pixels, additional_group_delay, lines_to_reach_ixd, groups_to_reach_ixd, slice_width_groups, initial_xmit_delay, number_of_lines_to_reach_ixd, slice_width_modified;
-
if (pixelFormat == dml2_420)
pixelsPerClock = 2;
// all other modes operate at 1 pixel per clock
@@ -1428,7 +1448,6 @@ static unsigned int dscceComputeDelay(
}
}
-
//sub-stream multiplexer balance fifo priming delay in groups as per dsc standard
if (bpc == 8)
ssm_group_priming_delay = 83;
@@ -1447,9 +1466,6 @@ static unsigned int dscceComputeDelay(
//determine number of padded pixels in the last group of a slice line, computed as
slice_padded_pixels = 3 * slice_width_groups - slice_width_modified;
-
-
-
//determine integer number of complete slice lines required to reach initial transmit delay without ssm delay considered
number_of_lines_to_reach_ixd = initial_xmit_delay / slice_width_modified;
@@ -1463,7 +1479,6 @@ static unsigned int dscceComputeDelay(
//number of groups required for a slice to reach initial transmit delay is the sum of the padded initial transmit delay plus the ssm group priming delay
groups_to_reach_ixd = ixd_plus_padding_groups + ssm_group_priming_delay;
-
//number of lines required to reach padded initial transmit delay in groups in slices to the left of the last horizontal slice
//needs to be rounded up as complete slice lines are buffered prior to initial transmit delay being reached in the last horizontal slice
lines_to_reach_ixd = (groups_to_reach_ixd + slice_width_groups - 1) / slice_width_groups; //round up lines to reach ixd to next
@@ -1506,7 +1521,6 @@ static unsigned int dscceComputeDelay(
return pixels;
}
-
//updated in dcn4
static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output)
{
@@ -2090,7 +2104,6 @@ static void CalculateDCCConfiguration(
yuv420 = 1;
else
yuv420 = 0;
-
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2561,8 +2574,7 @@ static void calculate_mcache_setting(
if (*p->num_mcaches_l) {
l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
}
-
- if (l->is_dual_plane && *p->num_mcaches_c) {
+ if (l->is_dual_plane) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2682,12 +2694,12 @@ static double dml_get_return_bandwidth_available(
bool is_avg_bw,
bool is_hvm_en,
bool is_hvm_only,
- double dcflk_mhz,
+ double dcfclk_mhz,
double fclk_mhz,
double dram_bw_mbps)
{
double return_bw_mbps = 0.;
- double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcflk_mhz;
+ double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcfclk_mhz;
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
@@ -2753,7 +2765,7 @@ static double dml_get_return_bandwidth_available(
dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcflk_mhz = %f\n", __func__, dcflk_mhz);
+ dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
@@ -2766,7 +2778,7 @@ static double dml_get_return_bandwidth_available(
return return_bw_mbps;
}
-static void calculate_bandwidth_available(
+static noinline_for_stack void calculate_bandwidth_available(
double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
@@ -3516,10 +3528,9 @@ static void CalculateUrgentBurstFactor(
dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
-
}
-static void CalculateDCFCLKDeepSleep(
+static void CalculateDCFCLKDeepSleepTdlut(
const struct dml2_display_cfg *display_cfg,
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
@@ -3534,6 +3545,10 @@ static void CalculateDCFCLKDeepSleep(
double ReadBandwidthChroma[],
unsigned int ReturnBusWidth,
+ double dispclk,
+ unsigned int tdlut_bytes_to_deliver[],
+ double prefetch_swath_time_us[],
+
// Output
double *DCFClkDeepSleep)
{
@@ -3568,6 +3583,22 @@ static void CalculateDCFCLKDeepSleep(
}
DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
+ // adjust for 3dlut delivery time
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
+ double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
+
+ dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+
+ // increase the deepsleep dcfclk to match the original dispclk throughput rate
+ if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], tdlut_required_deepsleep_dcfclk);
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], dispclk / 4.0);
+ }
+ }
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
@@ -3590,9 +3621,56 @@ static void CalculateDCFCLKDeepSleep(
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
+
dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
+static noinline_for_stack void CalculateDCFCLKDeepSleep(
+ const struct dml2_display_cfg *display_cfg,
+ unsigned int NumberOfActiveSurfaces,
+ unsigned int BytePerPixelY[],
+ unsigned int BytePerPixelC[],
+ unsigned int SwathWidthY[],
+ unsigned int SwathWidthC[],
+ unsigned int DPPPerSurface[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double Dppclk[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ unsigned int ReturnBusWidth,
+
+ // Output
+ double *DCFClkDeepSleep)
+{
+ double zero_double[DML2_MAX_PLANES];
+ unsigned int zero_integer[DML2_MAX_PLANES];
+
+ memset(zero_double, 0, DML2_MAX_PLANES * sizeof(double));
+ memset(zero_integer, 0, DML2_MAX_PLANES * sizeof(unsigned int));
+
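+ /* zeroed tdlut byte counts make the tdlut-based deepsleep dcfclk adjustment a no-op */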
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ NumberOfActiveSurfaces,
+ BytePerPixelY,
+ BytePerPixelC,
+ SwathWidthY,
+ SwathWidthC,
+ DPPPerSurface,
+ PSCL_THROUGHPUT,
+ PSCL_THROUGHPUT_CHROMA,
+ Dppclk,
+ ReadBandwidthLuma,
+ ReadBandwidthChroma,
+ ReturnBusWidth,
+ 0,
+ zero_integer, //tdlut_bytes_to_deliver,
+ zero_double, //prefetch_swath_time_us,
+
+ // Output
+ DCFClkDeepSleep);
+}
+
static double CalculateWriteBackDelay(
enum dml2_source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -3816,8 +3894,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -4064,7 +4142,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
return true;
}
-static void CalculateODMMode(
+static noinline_for_stack void CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum dml2_output_format_class OutFormat,
@@ -4161,7 +4239,7 @@ static void CalculateODMMode(
#endif
}
-static void CalculateOutputLink(
+static noinline_for_stack void CalculateOutputLink(
struct dml2_core_internal_scratch *s,
double PHYCLK,
double PHYCLKD18,
@@ -4592,6 +4670,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = 0;
*p->tdlut_opt_time = 0;
*p->tdlut_drain_time = 0;
+ *p->tdlut_bytes_to_deliver = 0;
*p->tdlut_bytes_per_group = 0;
*p->tdlut_pte_bytes_per_frame = 0;
*p->tdlut_bytes_per_frame = 0;
@@ -4660,6 +4739,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
+ *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
#ifdef __DML_VBA_DEBUG__
@@ -4680,6 +4760,7 @@ static void calculate_tdlut_setting(
dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
@@ -5069,20 +5150,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
- s->Tvm_no_trip_oto = 0.0;
- s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
s->Lsw_oto = 0.0;
- s->Tpre_rounded = 0.0;
+ *p->Tpre_rounded = 0.0;
s->prefetch_bw_equ = 0.0;
s->Tvm_equ = 0.0;
s->Tr0_equ = 0.0;
s->Tdmbf = 0.0;
s->Tdmec = 0.0;
s->Tdmsks = 0.0;
- s->prefetch_sw_bytes = 0.0;
+ *p->prefetch_sw_bytes = 0.0;
s->prefetch_bw_pr = 0.0;
s->bytes_pp = 0.0;
s->dep_bytes = 0.0;
@@ -5207,6 +5286,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5277,23 +5357,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC;
}
- s->prefetch_bw_pr = s->bytes_pp * p->myPipe->PixelClock / (double)p->myPipe->DPPPerSurface;
- if (p->myPipe->VRatio < 1.0)
- s->prefetch_bw_pr = p->myPipe->VRatio * s->prefetch_bw_pr;
- s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
-
- s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
- s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
-
- s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
-
- s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ *p->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
+ *p->prefetch_sw_bytes = *p->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
@@ -5302,57 +5367,103 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
+
+ s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
+
+ // use vactive swath bw for prefetch oto and also cap prefetch_bw_oto to max_vratio_oto
+ // Note: in prefetch calculation, accounting is done mostly per-pipe.
+ // vactive swath bw represents the per-surface (aka per dml plane) bw to move vratio_l/c lines of bytes_l/c per line time
+ s->per_pipe_vactive_sw_bw = p->vactive_sw_bw_l / (double)p->myPipe->DPPPerSurface;
+
+ // one-to-one prefetch bw as one line of bytes per line time (as per vratio_pre_l/c = 1)
+ s->prefetch_bw_oto = (p->swath_width_luma_ub * p->myPipe->BytePerPixelY) / s->LineTime;
+
+ if (p->myPipe->BytePerPixelC > 0) {
+ s->per_pipe_vactive_sw_bw += p->vactive_sw_bw_c / (double)p->myPipe->DPPPerSurface;
+ s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
+ }
+
+ s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
+
+ s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
+
+ s->Lsw_oto = math_ceil2(4.0 * *p->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, 1.0) / 4.0;
+
s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
- s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+#endif
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_no_trip_oto = math_max2(
+ s->Tvm_oto = math_max3(
+ *p->Tvm_trips,
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tvm_oto = math_max2(
- *p->Tvm_trips,
- s->Tvm_no_trip_oto);
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
- s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_no_trip_oto = math_max2(
+ s->Tr0_oto = math_max3(
+ *p->Tr0_trips,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tr0_oto = math_max2(
- *p->Tr0_trips,
- s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else {
- s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
- s->Tr0_oto = s->Tr0_no_trip_oto;
- }
+ } else
+ s->Tr0_oto = s->LineTime / 4.0;
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ if (p->impacted_dst_y_pre > 0) {
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ }
+#endif
+ *p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
+
//To (time for delay after scaler) in line time
Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
+ s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
//Tpre_equ in line time
if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable)
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo;
else
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo;
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ s->dst_y_prefetch_equ_impacted = math_max2(p->impacted_dst_y_pre, s->dst_y_prefetch_equ);
+
+ s->dst_y_prefetch_equ_impacted = math_min2(s->dst_y_prefetch_equ_impacted, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
+
+ if (s->dst_y_prefetch_equ_impacted > s->dst_y_prefetch_equ)
+ s->dst_y_prefetch_equ -= s->dst_y_prefetch_equ_impacted - s->dst_y_prefetch_equ;
+#endif
+
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
@@ -5370,7 +5481,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, s->prefetch_sw_bytes);
+ dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
@@ -5394,7 +5505,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
#endif
double Tpre = s->dst_y_prefetch_equ * s->LineTime;
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
+ *p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
@@ -5420,7 +5531,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
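The floor(4 * (x + 0.125)) / 4 step above snaps dst_y_prefetch to the quarter-line granularity of the U6.2 DST_Y_PREFETCH register field before Tpre_rounded is derived from it. A minimal standalone sketch of that rounding, with made-up sample values (the helper name and samples are illustrative, not part of the patch):

#include <math.h>
#include <stdio.h>

/* Illustrative only: mirrors the floor(4 * (x + 0.125)) / 4 rounding used
 * for DST_Y_PREFETCH, then clamps to the U6.2 register maximum of 63.75. */
static double round_to_quarter_line(double lines)
{
	double r = floor(4.0 * (lines + 0.125)) / 4.0;

	return r > 63.75 ? 63.75 : r;
}

int main(void)
{
	const double samples[] = { 3.10, 3.13, 21.04, 63.80 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%6.2f -> %6.2f\n", samples[i], round_to_quarter_line(samples[i]));
	return 0;
}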
@@ -5434,78 +5545,85 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// Tpre_rounded is Tpre rounding to 2-bit fraction
// Tvm_trips_rounded is Tvm_trips ceiling to 1/4 line time
// Tr0_trips_rounded is Tr0_trips ceiling to 1/4 line time
- // So that means prefetch bw calculated can be higher since the total time availabe for prefetch is less
- bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ // So that means prefetch bw calculated can be higher since the total time available for prefetch is less
+ bool min_Lsw_equ_ok = *p->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ bool tpre_gt_req_latency = true;
+#if 0
+ // Check that Tpre_rounded is big enough if all of the stages of the prefetch are time constrained.
+ // The terms Tvm_trips_rounded and Tr0_trips_rounded represent the min time constraints for the VM and row stages.
+ // Normally, these terms cover the overall time constraint for Tpre >= (Tex + max{Ttrip, Turg}), but if these terms are at their minimum, an explicit check is necessary.
+ tpre_gt_req_latency = *p->Tpre_rounded > (math_max2(p->Turg, s->trip_to_mem) + p->ExtraLatencyPrefetch);
+#endif
- if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) {
+ if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok && tpre_gt_req_latency) {
s->prefetch_bw1 = 0.;
s->prefetch_bw2 = 0.;
s->prefetch_bw3 = 0.;
s->prefetch_bw4 = 0.;
// prefetch_bw1: VM + 2*R0 + SW
- if (s->Tpre_rounded - *p->Tno_bw > 0) {
+ if (*p->Tpre_rounded - *p->Tno_bw > 0) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor
+ 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)
- + s->prefetch_sw_bytes)
- / (s->Tpre_rounded - *p->Tno_bw);
- s->Tsw_est1 = s->prefetch_sw_bytes / s->prefetch_bw1;
+ + *p->prefetch_sw_bytes)
+ / (*p->Tpre_rounded - *p->Tno_bw);
+ s->Tsw_est1 = *p->prefetch_sw_bytes / s->prefetch_bw1;
} else
s->prefetch_bw1 = 0;
dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
+ if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
+ (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded);
+ dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
// prefetch_bw2: VM + SW
- if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
- s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
- s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2;
+ if (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
+ s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
+ s->Tsw_est2 = *p->prefetch_sw_bytes / s->prefetch_bw2;
} else
s->prefetch_bw2 = 0;
dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
- if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
- s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
+ if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
+ s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - s->Tvm_trips_rounded);
- s->Tsw_est3 = s->prefetch_sw_bytes / s->prefetch_bw3;
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded > 0) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - s->Tvm_trips_rounded);
+ s->Tsw_est3 = *p->prefetch_sw_bytes / s->prefetch_bw3;
} else
s->prefetch_bw3 = 0;
dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
+ if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw4 = s->prefetch_sw_bytes / (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
+ s->prefetch_bw4 = *p->prefetch_sw_bytes / (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
else
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
@@ -5617,9 +5735,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
-
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5628,31 +5743,33 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
+
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
+
+ if (s->dst_y_prefetch_equ < s->dst_y_prefetch_equ_impacted)
+ *p->dst_y_prefetch = s->dst_y_prefetch_equ_impacted;
+
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
- s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
- s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
+ *p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
@@ -5663,6 +5780,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
@@ -5749,8 +5867,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
- __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ __func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
+ __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5769,13 +5889,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
+ } else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -5787,8 +5907,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (s->dst_y_per_row_no_trip_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
+ } else if (*p->dst_y_per_row_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -5828,6 +5948,171 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
return s->NoTimeToPrefetch;
}
+static unsigned int get_num_lb_source_lines(unsigned int max_line_buffer_lines,
+ unsigned int line_buffer_size_bits,
+ unsigned int num_pipes,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ double h_ratio,
+ enum dml2_rotation_angle rotation_angle)
+{
+ unsigned int num_lb_source_lines = 0;
+ double lb_bit_per_pixel = 57.0;
+ unsigned int recin_width = vp_width/num_pipes;
+
+ if (dml_is_vertical_rotation(rotation_angle))
+ recin_width = vp_height/num_pipes;
+
+ num_lb_source_lines = (unsigned int) math_min2((double) max_line_buffer_lines,
+ math_floor2(line_buffer_size_bits / lb_bit_per_pixel / (recin_width / math_max2(h_ratio, 1.0)), 1.0));
+
+ return num_lb_source_lines;
+}
+
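A hedged standalone check of get_num_lb_source_lines() with made-up IP parameters (the buffer size and line cap below are illustrative, not real DCN values):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double lb_bit_per_pixel = 57.0;        /* matches the constant above */
	const double max_line_buffer_lines = 32.0;   /* made-up cap                */
	const double line_buffer_size_bits = 986880; /* made-up IP parameter       */
	const double recin_width = 3840.0 / 1.0;     /* vp_width / num_pipes       */
	const double h_ratio = 1.5;

	double lines = floor(line_buffer_size_bits / lb_bit_per_pixel
			     / (recin_width / fmax(h_ratio, 1.0)));

	printf("num_lb_source_lines = %u\n",
	       (unsigned int)fmin(max_line_buffer_lines, lines)); /* prints 6 */
	return 0;
}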
+static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned int num_planes, unsigned int Trpd_dcfclk_cycles[])
+{
+ int max_value = -1;
+ int max_idx = -1;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != this_plane_idx && (int) Trpd_dcfclk_cycles[i] > max_value) {
+ max_value = Trpd_dcfclk_cycles[i];
+ max_idx = i;
+ }
+ }
+ if (max_idx < 0) {
+ /* no other plane to compare against (single-plane config) */
+ max_idx = this_plane_idx;
+ }
+
+ return max_idx;
+}
+
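To make the selection concrete, a simplified restatement of find_max_impact_plane() driven by a hypothetical cycle-count array (names and values invented):

#include <stdio.h>

/* simplified copy of the selection loop above, illustration only */
static unsigned int max_other_plane(unsigned int self, unsigned int n,
				    const unsigned int cycles[])
{
	int best = -1, idx = (int)self;

	for (unsigned int i = 0; i < n; i++)
		if (i != self && (int)cycles[i] > best) {
			best = (int)cycles[i];
			idx = (int)i;
		}
	return (unsigned int)idx;
}

int main(void)
{
	const unsigned int trpd_cycles[3] = { 100, 400, 250 };

	/* plane 1 has the largest return-path delay, so it impacts plane 0 most */
	printf("max impact on plane 0: plane %u\n", max_other_plane(0, 3, trpd_cycles));
	return 0;
}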
+static double calculate_impacted_Tsw(unsigned int exclude_plane_idx, unsigned int num_planes, double *prefetch_swath_bytes, double bw_mbps)
+{
+ double sum = 0.;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != exclude_plane_idx) {
+ sum += prefetch_swath_bytes[i];
+ }
+ }
+ return sum / bw_mbps;
+}
+
+// a global check against the aggregate effect of the per-plane prefetch schedules
+static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *p)
+{
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals *s = &scratch->CheckGlobalPrefetchAdmissibility_locals;
+ unsigned int i, k;
+
+ memset(s, 0, sizeof(struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals));
+
+ *p->recalc_prefetch_schedule = 0;
+ s->prefetch_global_check_passed = 1;
+ // worst case: the rob and cdb are fully occupied
+ s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+#endif
+
+ // calculate the return impact from each plane, request is 256B per dcfclk
+ for (i = 0; i < p->num_active_planes; i++) {
+ s->src_detile_buf_size_bytes_l[i] = p->detile_buffer_size_bytes_l[i];
+ s->src_detile_buf_size_bytes_c[i] = p->detile_buffer_size_bytes_c[i];
+ s->src_swath_bytes_l[i] = p->full_swath_bytes_l[i];
+ s->src_swath_bytes_c[i] = p->full_swath_bytes_c[i];
+
+ if (p->pixel_format[i] == dml2_420_10) {
+ s->src_detile_buf_size_bytes_l[i] = (unsigned int) (s->src_detile_buf_size_bytes_l[i] * 1.5);
+ s->src_detile_buf_size_bytes_c[i] = (unsigned int) (s->src_detile_buf_size_bytes_c[i] * 1.5);
+ s->src_swath_bytes_l[i] = (unsigned int) (s->src_swath_bytes_l[i] * 1.5);
+ s->src_swath_bytes_c[i] = (unsigned int) (s->src_swath_bytes_c[i] * 1.5);
+ }
+
+ s->burst_bytes_to_fill_det = (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_l[i] / p->chunk_bytes_l, 1) * p->chunk_bytes_l);
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+#endif
+
+ if (s->src_swath_bytes_c[i] > 0) { // dual_plane
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_c[i] / p->chunk_bytes_c, 1) * p->chunk_bytes_c);
+
+ if (p->pixel_format[i] == dml2_422_planar_8 || p->pixel_format[i] == dml2_422_planar_10 || p->pixel_format[i] == dml2_422_planar_12) {
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_c[i] / p->swath_height_c[i], 1) * s->src_swath_bytes_c[i]);
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+#endif
+ }
+
+ s->time_to_fill_det_us = (double) s->burst_bytes_to_fill_det / (256 * p->estimated_dcfclk_mhz); // fill time assumes a full burst at the request rate
+ s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+#endif
+ // clamp to the worst-case delay, i.e. a burst occupying the full rob+cdb
+ if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
+ s->accumulated_return_path_dcfclk_cycles[i] = s->max_Trpd_dcfclk_cycles;
+ }
+
+ // Figure out the impacted prefetch time for each plane
+ // if impacted_Tpre > the equ bw Tpre, the prefetch schedule must fail: a higher state is needed to support the bw
+ for (i = 0; i < p->num_active_planes; i++) {
+ k = find_max_impact_plane(i, p->num_active_planes, s->accumulated_return_path_dcfclk_cycles); // plane k causes most impact to plane i
+ // the rest of the planes (except for k) compete for bw
+ p->impacted_dst_y_pre[i] = s->accumulated_return_path_dcfclk_cycles[k]/p->estimated_dcfclk_mhz;
+ p->impacted_dst_y_pre[i] += calculate_impacted_Tsw(k, p->num_active_planes, p->prefetch_sw_bytes, p->estimated_urg_bandwidth_required_mbps);
+ p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+#endif
+ }
+
+ if (p->Tpre_rounded != NULL && p->Tpre_oto != NULL) {
+ for (i = 0; i < p->num_active_planes; i++) {
+ if (p->impacted_dst_y_pre[i] > p->dst_y_prefetch[i]) {
+ s->prefetch_global_check_passed = 0;
+ *p->recalc_prefetch_schedule = 1;
+ }
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+#endif
+ }
+ } else {
+ // likely a mode programming call; assume support and skip the recalc - not used anyway
+ s->prefetch_global_check_passed = 1;
+ *p->recalc_prefetch_schedule = 0;
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+#endif
+
+ return s->prefetch_global_check_passed;
+}
+
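A toy two-plane model of the admissibility rule implemented above, with invented numbers: plane i's impacted Tpre is the dominating plane k's return-path delay plus the time the remaining planes' swath bytes take at the urgent bandwidth, quantized to quarter lines (MB/s treated as bytes per microsecond):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double dcfclk_mhz = 1200.0, urg_bw_mbps = 48000.0, line_time_us = 8.0;
	const double trpd_cycles[2] = { 30000.0, 52000.0 };  /* invented           */
	const double sw_bytes[2] = { 6.0e6, 9.0e6 };         /* prefetch_sw_bytes  */
	const double dst_y_prefetch[2] = { 20.0, 24.0 };     /* schedule budget    */

	for (int i = 0; i < 2; i++) {
		int k = 1 - i; /* with two planes, the other one dominates */
		double t_us = trpd_cycles[k] / dcfclk_mhz    /* return-path delay  */
			    + sw_bytes[i] / urg_bw_mbps;     /* everyone except k  */
		double lines = ceil(4.0 * t_us / line_time_us) / 4.0;

		printf("plane %d: impacted %.2f lines, budget %.2f -> %s\n",
		       i, lines, dst_y_prefetch[i],
		       lines > dst_y_prefetch[i] ? "recalc" : "ok");
	}
	return 0;
}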
static void calculate_peak_bandwidth_required(
struct dml2_core_internal_scratch *s,
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *p)
@@ -6046,7 +6331,7 @@ static void check_urgent_bandwidth_support(
double *frac_urg_bandwidth_nom,
double *frac_urg_bandwidth_mall,
bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
+ bool *bandwidth_support_ok,// max of vm, prefetch, vactive all ok
unsigned int mall_allocated_for_dcn_mbytes,
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
@@ -6116,7 +6401,6 @@ static void check_urgent_bandwidth_support(
}
}
#endif
-
}
static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal_soc_state_type eval_state,
@@ -6438,7 +6722,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
}
- p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
+ p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
@@ -6454,12 +6738,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: g6_temp_read_watermark_us = %f\n", __func__, p->Watermark->g6_temp_read_watermark_us);
+ dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
s->TotalActiveWriteback = s->TotalActiveWriteback + 1;
}
}
@@ -6522,7 +6806,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines= %u\n", __func__, k, p->MaxLineBufferLines);
+ dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
@@ -6563,7 +6847,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->ActiveDRAMClockChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->DRAMClockChangeWatermark;
s->ActiveFCLKChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->FCLKChangeWatermark;
s->USRRetrainingLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->USRRetrainingWatermark;
- s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->g6_temp_read_watermark_us;
+ s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->temp_read_or_ppt_watermark_us;
if (p->VActiveLatencyHidingMargin)
p->VActiveLatencyHidingMargin[k] = s->ActiveDRAMClockChangeLatencyMargin[k];
@@ -6571,9 +6855,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (p->VActiveLatencyHidingUs)
p->VActiveLatencyHidingUs[k] = s->ActiveClockChangeLatencyHiding;
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0 / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height * (double)h_total / pixel_clock_mhz) * 4.0);
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height * (double)h_total / pixel_clock_mhz) * 4.0);
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
s->WritebackLatencyHiding = s->WritebackLatencyHiding / 2;
}
s->WritebackDRAMClockChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackDRAMClockChangeWatermark;
@@ -6588,36 +6875,36 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
uclk_pstate_change_strategy = p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy;
reserved_vblank_time_us = (double)p->display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns / 1000;
- p->FCLKChangeSupport[k] = dml2_fclock_change_unsupported;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_unsupported;
if (s->ActiveFCLKChangeLatencyMargin[k] > 0)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vactive;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.FCLKChangeLatency)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vblank;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vblank;
- if (p->FCLKChangeSupport[k] == dml2_fclock_change_unsupported)
+ if (p->FCLKChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_fclk_change_supported = false;
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_unsupported;
if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
if (p->display_cfg->overrides.all_streams_blanked ||
(s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency))
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank_and_vactive;
else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
} else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vactive && s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vblank && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_drr)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_drr;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_drr;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_svp)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_svp;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_svp;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_full_frame;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_full_frame;
- if (p->DRAMClockChangeSupport[k] == dml2_dram_clock_change_unsupported)
+ if (p->DRAMClockChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_dram_clock_change_supported = false;
s->dst_y_pstate = (unsigned int)(math_ceil2((p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.UrgentLatency) / (h_total / pixel_clock_mhz), 1));
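A condensed restatement of the auto-strategy ladder above (the forced strategies map one-to-one onto the new dml2_pstate_change_* values and are omitted; names are simplified for illustration):

#include <stdio.h>

enum ps { UNSUP, VACT, VBLANK, VBLANK_AND_VACT };

/* simplified sketch of the dml2_uclk_pstate_change_strategy_auto branch */
static enum ps dram_pstate(int all_streams_blanked, double margin_us,
			   double reserved_vblank_us, double latency_us)
{
	if (all_streams_blanked || (margin_us > 0 && reserved_vblank_us >= latency_us))
		return VBLANK_AND_VACT;
	if (margin_us > 0)
		return VACT;
	if (reserved_vblank_us >= latency_us)
		return VBLANK;
	return UNSUP;
}

int main(void)
{
	/* positive vactive margin but no reserved vblank time -> VACT (1) */
	printf("%d\n", dram_pstate(0, 12.5, 0.0, 350.0));
	return 0;
}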
@@ -6725,7 +7012,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
}
}
-static void calculate_vactive_det_fill_latency(
+static noinline_for_stack void calculate_vactive_det_fill_latency(
const struct dml2_display_cfg *display_cfg,
unsigned int num_active_planes,
unsigned int bytes_required_l[],
@@ -6915,8 +7202,7 @@ struct dml2_core_internal_g6_temp_read_blackouts_table {
} entries[DML_MAX_CLK_TABLE_SIZE];
};
-static const struct dml2_core_internal_g6_temp_read_blackouts_table
- core_dcn4_g6_temp_read_blackout_table = {
+struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = {
.entries = {
{
.uclk_khz = 96000,
@@ -7036,6 +7322,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
@@ -7083,12 +7372,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-
- // dml2_printf_dml_policy(&mode_lib->ms.policy);
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, mode_lib->ms.num_active_planes);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7183,8 +7466,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.SurfaceReadBandwidthLuma[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->ms.SurfaceReadBandwidthChroma[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ mode_lib->ms.vactive_sw_bw_l[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->ms.vactive_sw_bw_c[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
mode_lib->ms.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width *
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
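The vactive_sw_bw_l term above is the swath width times the rounded bytes-per-pixel over the line time, scaled by the vertical ratio. A worked instance with an invented 4K mode (bytes per microsecond read as MB/s):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* vactive_sw_bw_l = swath_width * ceil(Bpp) / line_time * v_ratio */
	const double swath_w = 3840.0, bpp_y = 4.0, v_ratio = 1.0;
	const double h_total = 4400.0, pclk_mhz = 594.0;   /* made-up timing */

	double line_time_us = h_total / pclk_mhz;          /* px / (px/us)   */
	double bw = swath_w * ceil(bpp_y) / line_time_us * v_ratio;

	printf("vactive_sw_bw_l = %.1f MB/s\n", bw);       /* ~2073.6        */
	return 0;
}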
@@ -7194,35 +7477,35 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
// Writeback bandwidth
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8.0;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4.0;
} else {
- mode_lib->ms.WriteBandwidth[k] = 0.0;
+ mode_lib->ms.WriteBandwidth[k][0] = 0.0;
}
}
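For scale, the 32bpp writeback branch above works out as follows with an invented 1080p mode (bytes per microsecond read as MB/s):

#include <stdio.h>

int main(void)
{
	const double out_h = 1080, out_w = 1920, in_h = 1080; /* made-up mode */
	const double h_total = 2200, pclk_mhz = 148.5, bytes_pp = 4.0;

	double frame_time_us = in_h * h_total / pclk_mhz;   /* input scan time */
	double wb_bw = out_h * out_w / frame_time_us * bytes_pp;

	printf("writeback bw = %.1f MB/s\n", wb_bw);        /* ~518.4 */
	return 0;
}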
/*Writeback Latency support check*/
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 &&
+ (mode_lib->ms.WriteBandwidth[k][0] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -7231,19 +7514,19 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
/* Writeback Scale Ratio and Taps Support Check */
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > mode_lib->ip.writeback_max_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > mode_lib->ip.writeback_max_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio < mode_lib->ip.writeback_min_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio < mode_lib->ip.writeback_min_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps
- || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps % 2) == 1))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > mode_lib->ip.writeback_max_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > mode_lib->ip.writeback_max_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio < mode_lib->ip.writeback_min_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio < mode_lib->ip.writeback_min_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps
+ || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps % 2) == 1))) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
- if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
+ if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
}
@@ -7423,8 +7706,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration_params->nomDETInKByte = mode_lib->ms.NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = mode_lib->ms.MaximumSwathWidthLuma;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = mode_lib->ms.MaximumSwathWidthChroma;
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->ms.Read256BlockHeightY;
@@ -7671,16 +7954,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
//DISPCLK/DPPCLK
mode_lib->ms.WritebackRequiredDISPCLK = 0;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackRequiredDISPCLK = math_max2(mode_lib->ms.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
+ CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ip.writeback_line_buffer_buffer_size));
}
@@ -7712,7 +7995,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!s->stream_visited[display_cfg->plane_descriptors[k].stream_index]) {
s->stream_visited[display_cfg->plane_descriptors[k].stream_index] = 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true)
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0)
s->TotalNumberOfActiveWriteback = s->TotalNumberOfActiveWriteback + 1;
s->TotalNumberOfActiveOTG = s->TotalNumberOfActiveOTG + 1;
@@ -8256,23 +8539,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.PSCL_FACTOR,
mode_lib->ms.PSCL_FACTOR_CHROMA,
mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->soc.return_bus_width_bytes,
/* Output */
&mode_lib->ms.dcfclk_deepsleep);
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackDelayTime[k] = mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK;
} else {
mode_lib->ms.WritebackDelayTime[k] = 0.0;
@@ -8349,7 +8632,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolerance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8367,6 +8650,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
* (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
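+ /*
+  * Worst-case (non-urgent) latency: UMC max latency in uclk cycles
+  * converted to us and padded by the UMC margin, plus MALL overhead in
+  * fclk cycles, plus the max round trip to the furthest CS in fclk
+  * cycles padded by the fabric max transport margin (the margin applies
+  * only to the round-trip term).
+  */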
+ mode_lib->ms.support.max_non_urgent_latency_us
+ = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->ms.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
@@ -8408,7 +8698,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
+ if (mode_lib->soc.mcache_size_bytes == 0 || mode_lib->ip.dcn_mrq_present) {
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.mall_prefetch_sdp_overhead_factor[k] = 1.0;
mode_lib->ms.mall_prefetch_dram_overhead_factor[k] = 1.0;
@@ -8515,8 +8805,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->hostvm_enable,
mode_lib->ms.MaxDCFCLK,
mode_lib->ms.MaxFabricClock,
+#ifdef DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+ mode_lib->ms.dram_bw_mbps);
+#else
mode_lib->ms.max_dram_bw_mbps);
-
+#endif
// Average BW support check
calculate_avg_bandwidth_required(
@@ -8524,8 +8817,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
// input
display_cfg,
mode_lib->ms.num_active_planes,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.cursor_bw,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
@@ -8595,6 +8888,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
@@ -8638,9 +8932,32 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&mode_lib->ms.ExtraLatency_sr,
&mode_lib->ms.ExtraLatencyPrefetch);
- {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
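+ // Prefetch schedule loop: after the first pass, the global prefetch
+ // admissibility check (when compiled in) fills impacted_dst_y_pre per
+ // plane, and the loop reruns the per-plane schedules once with those
+ // values.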
+ do {
mode_lib->ms.support.PrefetchSupported = true;
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
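+ // Line time in us: h_total (pixels) divided by the pixel clock in MHz.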
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
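+ // Line-buffer source lines per plane, consumed by the global prefetch
+ // admissibility check below.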
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
mode_lib->ms.TWait[k] = CalculateTWait(
@@ -8730,6 +9047,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
@@ -8758,6 +9078,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
@@ -8766,6 +9090,27 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
} // for k num_planes
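+ // Recompute DCFCLK deep-sleep now that per-plane tdlut bytes to
+ // deliver and prefetch swath times are known; this overwrites the
+ // earlier CalculateDCFCLKDeepSleep result.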
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.dst_y_prefetch[k] < 2.0
|| mode_lib->ms.LinesForVM[k] >= 32.0
@@ -8789,7 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
mode_lib->ms.support.VRatioInPrefetchSupported = false;
@@ -8799,10 +9144,14 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
 // Only do the urgent-vs-prefetch bandwidth check, flip schedule check, and power-saving feature support check if the prefetch schedule check passes
if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
// Calculate Urgent burst factor for prefetch
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
@@ -8815,7 +9164,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.swath_width_chroma_ub[k],
mode_lib->ms.SwathHeightY[k],
mode_lib->ms.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->ms.UrgLatency,
mode_lib->ms.VRatioPreY[k],
mode_lib->ms.VRatioPreC[k],
@@ -8852,8 +9201,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
@@ -8899,127 +9248,164 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
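+ // First pass only, and only with more than one active plane: check
+ // that all planes' prefetches are jointly admissible. The check fills
+ // impacted_dst_y_pre per plane and the schedule loop is then rerun
+ // once with those values.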
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
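+ // Floor the urgent bandwidth estimate at 10 GB/s (10 * 1024 MB/s)
+ // before deriving the estimated DCFCLK from it.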
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+ // If the check sets recalc_prefetch_schedule, the schedule is recalculated with the new impacted_Tpre; prefetch should then be possible.
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true && mode_lib->ms.support.VRatioInPrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
- }
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
+ }
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
- } else { // if prefetch not support, assume iflip is not supported too
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
mode_lib->ms.support.ImmediateFlipSupport = false;
- }
- } // prefetch schedule
+ }
+
+ } else { // if prefetch is not supported, assume iflip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
}
s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
@@ -9116,8 +9502,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->pstate_bytes_required_c,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
@@ -9187,12 +9573,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
mode_lib->ms.support.DPPPerSurface[k] = mode_lib->ms.NoOfDPP[k];
}
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.ODMMode[k] = mode_lib->ms.ODMMode[k];
mode_lib->ms.support.DSCEnabled[k] = mode_lib->ms.RequiresDSC[k];
mode_lib->ms.support.FECEnabled[k] = mode_lib->ms.RequiresFEC[k];
@@ -9229,7 +9615,7 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
@@ -9882,7 +10268,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
if (!l->stream_visited[p->display_cfg->plane_descriptors[k].stream_index]) {
- if (p->display_cfg->stream_descriptors[k].writeback.enable)
+ if (p->display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0)
l->TotalActiveWriteback = l->TotalActiveWriteback + 1;
if (TotalNumberOfActiveOTG == 0) { // first otg
@@ -9984,6 +10370,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params *CalculateStutterEfficiency_params = &mode_lib->scratch.CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_shared_CalculateMetaAndPTETimes_params *CalculateMetaAndPTETimes_params = &mode_lib->scratch.CalculateMetaAndPTETimes_params;
@@ -10075,12 +10462,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_assert(s->SOCCLK > 0);
#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
@@ -10198,10 +10579,10 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width * display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
- mode_lib->mp.SurfaceReadBandwidthLuma[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->mp.SurfaceReadBandwidthChroma[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -10217,8 +10598,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateSwathAndDETConfiguration_params->nomDETInKByte = s->NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = s->dummy_single_array[0];
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = s->dummy_single_array[1];
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->mp.Read256BlockHeightY;
@@ -10539,8 +10920,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
@@ -10583,17 +10964,17 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.TCalc = 24.0 / mode_lib->mp.DCFCLKDeepSleep;
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackDelay[k] =
mode_lib->soc.qos_parameters.writeback.base_latency_us
+ CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk;
} else
mode_lib->mp.WritebackDelay[k] = 0;
@@ -10679,10 +11060,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us = 0.0;
-
- line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
display_cfg->plane_descriptors[k].cursor.cursor_width,
@@ -10699,7 +11095,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
display_cfg->plane_descriptors[k].cursor.cursor_width,
s->cursor_bytes_per_chunk[k],
s->cursor_lines_per_chunk[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
// output
@@ -10714,7 +11110,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.swath_width_chroma_ub[k],
mode_lib->mp.SwathHeightY[k],
mode_lib->mp.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
@@ -10752,6 +11148,35 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
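+ // Mode programming runs the global admissibility check only to obtain
+ // impacted_dst_y_pre for the prefetch schedule below; the pass/fail
+ // result and the recalc flag are discarded.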
+ if (s->num_active_planes > 1) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = s->num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->mp.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->mp.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->mp.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->mp.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->mp.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = mode_lib->mp.Dcfclk;
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->mp.dst_y_prefetch;
+
+ // the recalc flag is not used in mode programming, so point it at dummy storage
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->dummy_boolean[0];
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params); // don't care about the check output for mode programming
+ }
+
{
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
@@ -10763,11 +11188,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->mp.UrgentLatency,
+ mode_lib->mp.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
@@ -10848,6 +11273,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->mp.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->mp.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->mp.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &mode_lib->mp.DSTXAfterScaler[k];
@@ -10876,9 +11304,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VUpdateWidthPix = &mode_lib->mp.VUpdateWidthPix[k];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &mode_lib->mp.VReadyOffsetPix[k];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->mp.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->dummy_single[0];
mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
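+ // Prefetch margin in us against the impacted requirement:
+ // (dst_y_prefetch - impacted_dst_y_pre) lines times the line time.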
+ if (s->impacted_dst_y_pre[k] > 0)
+ mode_lib->mp.impacted_prefetch_margin_us[k] = (mode_lib->mp.dst_y_prefetch[k] - s->impacted_dst_y_pre[k]) * s->line_times[k];
+ else
+ mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
@@ -10956,8 +11393,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceLuma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceChroma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
@@ -10988,8 +11425,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11120,8 +11557,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11238,8 +11675,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->mmSOCParameters.USRRetrainingLatency = 0;
s->mmSOCParameters.SMNLatency = 0;
s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->mp.uclk_freq_mhz, mode_lib->mp.FabricClock, in_out_params->min_clk_index);
+ s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->mp.FabricClock;
s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
@@ -11289,7 +11726,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
@@ -11475,25 +11912,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
//Maximum Bandwidth Used
s->TotalWRBandwidth = 0;
- s->WRBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_32) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8;
+ for (k = 0; k < display_cfg->num_streams; ++k) {
+ s->WRBandwidth = 0;
+ if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
+ s->WRBandwidth = display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_width /
+ (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
+ / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
+ * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
+ s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
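The rewritten loop iterates streams rather than planes, and the bandwidth term reduces to: output pixels divided by the time the source takes to scan the writeback input, scaled by bytes per pixel (4 for dml2_444_32, 8 otherwise). A standalone sketch of the same arithmetic, with an illustrative helper name:

	/* Bandwidth in MB/s (bytes per microsecond): out_w * out_h pixels are
	 * produced over the time needed to scan in_h input lines of h_total
	 * pixels at the given pixel clock. */
	static double wr_bandwidth_mbps(unsigned int out_w, unsigned int out_h,
					unsigned int in_h, unsigned int h_total,
					double pixel_clock_khz, bool is_444_32)
	{
		double pixel_clock_mhz = pixel_clock_khz / 1000.0;
		double scan_time_us = h_total * in_h / pixel_clock_mhz;

		return (double)out_w * out_h / scan_time_us *
		       (is_444_32 ? 4.0 : 8.0);
	}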
mode_lib->mp.TotalDataReadBandwidth = 0;
for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.SurfaceReadBandwidthLuma[k] + mode_lib->mp.SurfaceReadBandwidthChroma[k];
+ mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -11530,8 +11967,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateStutterEfficiency_params->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC;
CalculateStutterEfficiency_params->DCCYMaxUncompressedBlock = mode_lib->mp.DCCYMaxUncompressedBlock;
CalculateStutterEfficiency_params->DCCCMaxUncompressedBlock = mode_lib->mp.DCCCMaxUncompressedBlock;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateStutterEfficiency_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
CalculateStutterEfficiency_params->meta_row_bw = mode_lib->mp.meta_row_bw;
CalculateStutterEfficiency_params->rob_alloc_compressed = mode_lib->ip.dcn_mrq_present;
@@ -11742,7 +12179,7 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
wm_regs->usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
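Every register write here follows one conversion: a watermark in microseconds multiplied by the reference clock in MHz gives REFCLK cycles, truncated to an unsigned register value. A minimal sketch (helper name is illustrative):

	/* microseconds * (cycles per microsecond) -> REFCLK cycles */
	static unsigned int wm_us_to_refclk(double watermark_us, double refclk_mhz)
	{
		return (unsigned int)(watermark_us * refclk_mhz);
	}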
@@ -12321,14 +12758,18 @@ void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_interna
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
const struct display_configuation_with_meta *display_cfg,
- struct dmub_fams2_stream_static_state *fams2_programming,
- enum dml2_uclk_pstate_support_method pstate_method,
+ union dmub_cmd_fams2_config *fams2_base_programming,
+ union dmub_cmd_fams2_config *fams2_sub_programming,
+ enum dml2_pstate_method pstate_method,
int plane_index)
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
+ union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
+
unsigned int i;
if (display_cfg->display_config.overrides.all_streams_blanked) {
@@ -12337,110 +12778,110 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
}
/* from display configuration */
- fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
- fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- fams2_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
+ base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
+ base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- fams2_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
- fams2_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
+ base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
- fams2_programming->otg_vline_time_ns =
+ base_programming->otg_vline_time_ns =
(unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- fams2_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- fams2_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- fams2_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- fams2_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- fams2_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- fams2_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
/* from core */
- fams2_programming->config.bits.min_ttu_vblank_usable = true;
+ base_programming->config.bits.min_ttu_vblank_usable = true;
for (i = 0; i < display_cfg->display_config.num_planes; i++) {
/* check if all planes support p-state in blank */
if (display_cfg->display_config.plane_descriptors[i].stream_index == plane_descriptor->stream_index &&
mode_lib->mp.MinTTUVBlank[i] <= mode_lib->mp.Watermark.DRAMClockChangeWatermark) {
- fams2_programming->config.bits.min_ttu_vblank_usable = false;
+ base_programming->config.bits.min_ttu_vblank_usable = false;
break;
}
}
switch (pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
/* legacy vactive */
- fams2_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
- fams2_programming->sub_state.legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
+ sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
/* legacy vblank */
- fams2_programming->type = FAMS2_STREAM_TYPE_VBLANK;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
/* drr */
- fams2_programming->type = FAMS2_STREAM_TYPE_DRR;
- fams2_programming->sub_state.drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
- fams2_programming->sub_state.drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ base_programming->type = FAMS2_STREAM_TYPE_DRR;
+ sub_programming->drr.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ sub_programming->drr.nom_stretched_vtotal =
+ (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
- fams2_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
- fams2_programming->sub_state.drr.only_stretch_if_required = true;
+ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
+ sub_programming->drr.only_stretch_if_required = true;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
/* subvp */
- fams2_programming->type = FAMS2_STREAM_TYPE_SUBVP;
- fams2_programming->sub_state.subvp.vratio_numerator =
- (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
- fams2_programming->sub_state.subvp.vratio_denominator = 1000;
- fams2_programming->sub_state.subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
- fams2_programming->sub_state.subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
- fams2_programming->sub_state.subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
- fams2_programming->sub_state.subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
- fams2_programming->sub_state.subvp.config.bits.is_multi_planar =
- plane_descriptor->surface.plane1.height > 0;
- fams2_programming->sub_state.subvp.config.bits.is_yuv420 =
- plane_descriptor->pixel_format == dml2_420_8 ||
- plane_descriptor->pixel_format == dml2_420_10 ||
- plane_descriptor->pixel_format == dml2_420_12;
-
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_SUBVP;
+ sub_programming->subvp.vratio_numerator =
+ (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
+ sub_programming->subvp.vratio_denominator = 1000;
+ sub_programming->subvp.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ sub_programming->subvp.prefetch_to_mall_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ sub_programming->subvp.phantom_vtotal =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ sub_programming->subvp.phantom_vactive =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ sub_programming->subvp.config.bits.is_multi_planar =
+ plane_descriptor->surface.plane1.height > 0;
+ sub_programming->subvp.config.bits.is_yuv420 =
+ plane_descriptor->pixel_format == dml2_420_8 ||
+ plane_descriptor->pixel_format == dml2_420_10 ||
+ plane_descriptor->pixel_format == dml2_420_12;
+
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_na:
+ case dml2_pstate_method_count:
default:
/* this should never happen */
break;
@@ -12569,6 +13010,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
out->informative.mode_support_info.NotEnoughDSCUnits = mode_lib->ms.support.NotEnoughDSCUnits;
@@ -12662,7 +13105,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.watermarks.pstate_change_us = dml_get_wm_dram_clock_change(mode_lib);
out->informative.watermarks.fclk_pstate_change_us = dml_get_wm_fclk_change(mode_lib);
out->informative.watermarks.usr_retraining_us = dml_get_wm_usr_retraining(mode_lib);
- out->informative.watermarks.g6_temp_read_watermark_us = dml_get_wm_g6_temp_read(mode_lib);
+ out->informative.watermarks.temp_read_or_ppt_watermark_us = dml_get_wm_temp_read_or_ppt(mode_lib);
out->informative.mall.total_surface_size_in_mall_bytes = 0;
for (k = 0; k < out->display_config.num_planes; ++k)
@@ -12745,6 +13188,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
+ out->informative.misc.LowestPrefetchMargin = 10 * 1000 * 1000;
+
for (k = 0; k < out->display_config.num_planes; k++) {
if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us)
@@ -12824,6 +13269,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
@@ -12831,6 +13277,9 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.PTE_BUFFER_MODE[k] = mode_lib->mp.PTE_BUFFER_MODE[k];
out->informative.misc.DSCDelay[k] = mode_lib->mp.DSCDelay[k];
out->informative.misc.MaxActiveDRAMClockChangeLatencySupported[k] = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported[k];
+
+ if (mode_lib->mp.impacted_prefetch_margin_us[k] < out->informative.misc.LowestPrefetchMargin)
+ out->informative.misc.LowestPrefetchMargin = mode_lib->mp.impacted_prefetch_margin_us[k];
}
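The two hunks above cooperate: LowestPrefetchMargin is seeded with a large sentinel (10 * 1000 * 1000 us) before the per-plane loop, then lowered to the smallest impacted prefetch margin seen. The pattern in isolation (names illustrative):

	double lowest_margin_us = 10 * 1000 * 1000;	/* sentinel above any real margin */
	for (k = 0; k < num_planes; k++) {
		if (margin_us[k] < lowest_margin_us)
			lowest_margin_us = margin_us[k];
	}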
// For this DV informative layer, all pipes in the same planes will just use the same id
@@ -12853,16 +13302,11 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane1[n] = k;
}
}
-
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = dml_get_max_non_urgent_latency_us(mode_lib);
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
+ / mode_lib->ms.support.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
} else {
out->informative.misc.ROBUrgencyAvoidance = false;
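The condition compares how long the ROB (less one pixel chunk) can absorb SDP return traffic against the maximum non-urgent latency: if the buffer covers that latency, urgent escalation can be avoided. A worked sketch under assumed numbers (all values illustrative; the bandwidth here is taken as MB/s, i.e. bytes per microsecond):

	unsigned int rob_kbytes = 128, chunk_kbytes = 8;
	double non_urg_bw_mbps = 10000.0;
	double max_non_urgent_latency_us = 10.0;

	/* (128 - 8) * 1024 / 10000 = 12.3 us >= 10 us -> avoidance possible */
	bool rob_urgency_avoidance =
		(rob_kbytes - chunk_kbytes) * 1024 / non_urg_bw_mbps >=
		max_non_urgent_latency_us;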
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index df2d1550a14b..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
@@ -28,7 +28,7 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
-void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
+void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, union dmub_cmd_fams2_config *fams2_base_programming, union dmub_cmd_fams2_config *fams2_sub_programming, enum dml2_pstate_method pstate_method, int plane_index);
void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
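A hedged sketch of a caller adapting to the new prototype, passing separate unions for the base and sub state; mode_lib, display_cfg, pstate_method and plane_index are assumed to be in scope, and the union/member names come from the hunk above:

	union dmub_cmd_fams2_config base_cfg = {0};
	union dmub_cmd_fams2_config sub_cfg = {0};

	dml2_core_calcs_get_stream_fams2_programming(mode_lib, display_cfg,
						     &base_cfg, &sub_cfg,
						     pstate_method, plane_index);
	/* base_cfg.stream_v1.base and sub_cfg.stream_v1.sub_state now hold
	 * the static stream programming for the DMUB command payload. */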
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index cbdfbd5a0bde..23c0fca5515f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,7 +201,7 @@ struct dml2_core_internal_watermarks {
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
- double g6_temp_read_watermark_us;
+ double temp_read_or_ppt_watermark_us;
};
struct dml2_core_internal_mode_support_info {
@@ -252,8 +252,8 @@ struct dml2_core_internal_mode_support_info {
bool PTEBufferSizeNotExceeded;
bool DCCMetaBufferSizeNotExceeded;
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
bool USRRetrainingSupport;
@@ -318,12 +318,15 @@ struct dml2_core_internal_mode_support_info {
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double max_urgent_latency_us;
+ double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
double avg_urgent_latency_us;
+ double df_response_time_us;
bool incorrect_imall_usage;
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
};
@@ -378,8 +381,8 @@ struct dml2_core_internal_mode_support {
unsigned int DETBufferSizeC[DML2_MAX_PLANES];
unsigned int SwathHeightY[DML2_MAX_PLANES];
unsigned int SwathHeightC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
// ----------------------------------
// Intermediates/Informational
@@ -476,9 +479,9 @@ struct dml2_core_internal_mode_support {
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES]; // no dcc overhead, for the plane
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
- double WriteBandwidth[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES]; // no dcc overhead, for the plane
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
+ double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
@@ -539,7 +542,7 @@ struct dml2_core_internal_mode_program {
unsigned int qos_param_index; // to access the uclk dependent dpm table
unsigned int active_min_uclk_dpm_index; // to access the min_clk table
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
- double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
+ //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
@@ -562,14 +565,14 @@ struct dml2_core_internal_mode_program {
double BytePerPixelInDETC[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPY[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPC[DML2_MAX_PLANES];
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES];
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES];
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
double excess_vactive_fill_bw_l[DML2_MAX_PLANES];
double excess_vactive_fill_bw_c[DML2_MAX_PLANES];
@@ -797,8 +800,9 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ bool temp_read_or_ppt_support;
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
@@ -846,6 +850,8 @@ struct dml2_core_internal_mode_program {
bool mall_comb_mcache_l[DML2_MAX_PLANES];
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+
+ double impacted_prefetch_margin_us[DML2_MAX_PLANES];
};
struct dml2_core_internal_SOCParametersList {
@@ -862,6 +868,7 @@ struct dml2_core_internal_SOCParametersList {
double USRRetrainingLatency;
double SMNLatency;
double g6_temp_read_blackout_us;
+ double temp_read_or_ppt_blackout_us;
double max_urgent_latency_us;
double df_response_time_us;
enum dml2_qos_param_type qos_type;
@@ -951,6 +958,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -961,6 +969,18 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ bool recalc_prefetch_done;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ double prefetch_swath_time_us[DML2_MAX_PLANES];
};
struct dml2_core_calcs_mode_programming_locals {
@@ -1024,6 +1044,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -1041,6 +1062,16 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
@@ -1048,6 +1079,7 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_local
double ActiveFCLKChangeLatencyMargin[DML2_MAX_PLANES];
double USRRetrainingLatencyMargin[DML2_MAX_PLANES];
double g6_temp_read_latency_margin[DML2_MAX_PLANES];
+ double temp_read_or_ppt_latency_margin[DML2_MAX_PLANES];
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
@@ -1185,17 +1217,14 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double LineTime;
double dst_y_prefetch_equ;
double prefetch_bw_oto;
+ double per_pipe_vactive_sw_bw;
double Tvm_oto;
double Tr0_oto;
- double Tvm_no_trip_oto;
- double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
- double dst_y_per_vm_no_trip_vblank;
- double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1203,15 +1232,12 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
- double Lsw_equ;
- double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
- double prefetch_sw_bytes;
double total_row_bytes;
double prefetch_bw_pr;
double bytes_pp;
@@ -1225,6 +1251,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw2;
double prefetch_bw3;
double prefetch_bw4;
+ double dst_y_prefetch_equ_impacted;
double TWait_p;
unsigned int cursor_prefetch_bytes;
@@ -1545,17 +1572,18 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
// Output
struct dml2_core_internal_watermarks *Watermark;
- enum dml2_dram_clock_change_support *DRAMClockChangeSupport;
+ enum dml2_pstate_change_support *DRAMClockChangeSupport;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
- enum dml2_fclock_change_support *FCLKChangeSupport;
+ enum dml2_pstate_change_support *FCLKChangeSupport;
bool *global_fclk_change_supported;
double *MaxActiveFCLKChangeLatencySupported;
bool *USRRetrainingSupport;
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
+ bool *temp_read_or_ppt_support;
};
@@ -1727,8 +1755,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double PrefetchSourceLinesC;
unsigned int VInitPreFillC;
unsigned int MaxNumSwathC;
- unsigned int swath_width_luma_ub;
- unsigned int swath_width_chroma_ub;
+ unsigned int swath_width_luma_ub; // per-pipe
+ unsigned int swath_width_chroma_ub; // per-pipe
unsigned int SwathHeightY;
unsigned int SwathHeightC;
double TWait;
@@ -1750,6 +1778,10 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int meta_row_bytes;
double mall_prefetch_sdp_overhead_factor;
+ double impacted_dst_y_pre;
+ double vactive_sw_bw_l; // per surface bw
+ double vactive_sw_bw_c; // per surface bw
+
// output
unsigned int *DSTXAfterScaler;
unsigned int *DSTYAfterScaler;
@@ -1767,6 +1799,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *Tdmdl_vm;
double *Tdmdl;
double *TSetup;
+ double *Tpre_rounded;
+ double *Tpre_oto;
double *Tvm_trips;
double *Tr0_trips;
double *Tvm_trips_flip;
@@ -1777,6 +1811,48 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int *VUpdateWidthPix;
unsigned int *VReadyOffsetPix;
double *prefetch_cursor_bw;
+ double *prefetch_sw_bytes;
+ double *prefetch_swath_time_us;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params {
+ unsigned int num_active_planes;
+ enum dml2_source_format_class *pixel_format;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int compressed_buffer_size_kbytes;
+ unsigned int chunk_bytes_l; // same for all planes
+ unsigned int chunk_bytes_c;
+ unsigned int *detile_buffer_size_bytes_l;
+ unsigned int *detile_buffer_size_bytes_c;
+ unsigned int *full_swath_bytes_l;
+ unsigned int *full_swath_bytes_c;
+ unsigned int *lb_source_lines_l;
+ unsigned int *lb_source_lines_c;
+ unsigned int *swath_height_l;
+ unsigned int *swath_height_c;
+ double *prefetch_sw_bytes;
+ double *Tpre_rounded;
+ double *Tpre_oto;
+ double estimated_dcfclk_mhz;
+ double estimated_urg_bandwidth_required_mbps;
+ double *line_time;
+ double *dst_y_prefetch;
+
+ // output
+ bool *recalc_prefetch_schedule;
+ double *impacted_dst_y_pre;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals {
+ unsigned int max_Trpd_dcfclk_cycles;
+ unsigned int burst_bytes_to_fill_det;
+ double time_to_fill_det_us;
+ unsigned int accumulated_return_path_dcfclk_cycles[DML2_MAX_PLANES];
+ bool prefetch_global_check_passed;
+ unsigned int src_swath_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_swath_bytes_c[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_c[DML2_MAX_PLANES];
};
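Reading the new params and locals blocks together: the check takes per-plane prefetch byte counts and Tpre estimates, accumulates the DCFCLK cycles the return path spends filling each plane's detile buffer, and flags planes whose prefetch window is impacted. A speculative outline inferred only from the struct fields above (the function body is outside this hunk, so the real implementation will differ in detail):

	static bool check_global_prefetch_admissibility_sketch(
			unsigned int num_active_planes,
			const double *prefetch_sw_bytes,
			const double *Tpre_rounded,
			const double *line_time,
			double estimated_dcfclk_mhz,
			double estimated_urg_bandwidth_required_mbps,
			double *impacted_dst_y_pre)
	{
		/* (bytes/us) / (cycles/us) -> bytes moved per DCFCLK cycle */
		double bytes_per_cycle = estimated_urg_bandwidth_required_mbps /
					 estimated_dcfclk_mhz;
		double accumulated_cycles = 0.0;
		bool passed = true;
		unsigned int k;

		for (k = 0; k < num_active_planes; k++) {
			accumulated_cycles += prefetch_sw_bytes[k] / bytes_per_cycle;
			double fill_time_us = accumulated_cycles / estimated_dcfclk_mhz;

			if (fill_time_us > Tpre_rounded[k]) {
				/* prefetch cannot finish in time; stretch dst_y_pre */
				impacted_dst_y_pre[k] = fill_time_us / line_time[k];
				passed = false;
			}
		}
		return passed;	/* false -> caller sets recalc_prefetch_schedule */
	}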
struct dml2_core_calcs_calculate_mcache_row_bytes_params {
@@ -1921,6 +1997,7 @@ struct dml2_core_calcs_calculate_tdlut_setting_params {
unsigned int *tdlut_groups_per_2row_ub;
double *tdlut_opt_time;
double *tdlut_drain_time;
+ unsigned int *tdlut_bytes_to_deliver;
unsigned int *tdlut_bytes_per_group;
};
@@ -2004,6 +2081,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals;
struct dml2_core_calcs_CalculateVMRowAndSwath_locals CalculateVMRowAndSwath_locals;
struct dml2_core_calcs_CalculatePrefetchSchedule_locals CalculatePrefetchSchedule_locals;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals CheckGlobalPrefetchAdmissibility_locals;
struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals CalculateSwathAndDETConfiguration_locals;
struct dml2_core_shared_TruncToValidBPP_locals TruncToValidBPP_locals;
struct dml2_core_shared_CalculateDETBufferSize_locals CalculateDETBufferSize_locals;
@@ -2019,6 +2097,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params calculate_tdlut_setting_params;
struct dml2_core_shared_calculate_vm_and_row_bytes_params calculate_vm_and_row_bytes_params;
@@ -2038,7 +2117,6 @@ struct dml2_core_internal_display_mode_lib {
// Used to hold input; intermediate and output of the calculations
struct dml2_core_internal_mode_support ms; // struct for mode support
struct dml2_core_internal_mode_program mp; // struct for mode programming
-
// Available overridable calculators for core_shared.
// if null, core_shared will use default calculators.
struct dml2_core_shared_calculation_funcs funcs;
@@ -2051,7 +2129,6 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2064,9 +2141,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
-
struct dml2_display_cfg_programming *programming;
-
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 714b5c39b7e6..456b3f8a6d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -63,6 +63,150 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
case dml2_mono_16:
val = 0;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 1;
+ break;
+ case dml2_422_planar_10:
+ val = 1;
+ break;
+ case dml2_422_planar_12:
+ val = 1;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 1;
+ break;
+ case dml2_422_packed_10:
+ val = 1;
+ break;
+ case dml2_422_packed_12:
+ val = 1;
+ break;
default:
DML2_ASSERT(0);
break;
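The exhaustive switches above follow the file's existing is_420() pattern, with a DML2_ASSERT default catching unexpected enum values at runtime. Where that runtime check is not needed, each predicate collapses to three comparisons; a compact equivalent (illustrative only, not the patch's style):

	static bool is_422_planar(enum dml2_source_format_class f)
	{
		return f == dml2_422_planar_8 || f == dml2_422_planar_10 ||
		       f == dml2_422_planar_12;
	}

	static bool is_422_packed(enum dml2_source_format_class f)
	{
		return f == dml2_422_packed_8 || f == dml2_422_packed_10 ||
		       f == dml2_422_packed_12;
	}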
@@ -154,9 +298,9 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -280,39 +424,49 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
return is_phantom;
}
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
-{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
	}
}
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+ return (byte_per_pixel != 2);
+}
+
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
+{
+ return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+}
+
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
{
@@ -325,7 +479,6 @@ bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
return is_vert;
}
-
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
{
int unsigned version = 0;
@@ -334,17 +487,17 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_sw_256b_2d ||
sw_mode == dml2_sw_4kb_2d ||
sw_mode == dml2_sw_64kb_2d ||
- sw_mode == dml2_sw_256kb_2d) {
+ sw_mode == dml2_sw_256kb_2d)
version = 12;
- } else if (sw_mode == dml2_gfx11_sw_linear ||
+ else if (sw_mode == dml2_gfx11_sw_linear ||
sw_mode == dml2_gfx11_sw_64kb_d ||
sw_mode == dml2_gfx11_sw_64kb_d_t ||
sw_mode == dml2_gfx11_sw_64kb_d_x ||
sw_mode == dml2_gfx11_sw_64kb_r_x ||
sw_mode == dml2_gfx11_sw_256kb_d_x ||
- sw_mode == dml2_gfx11_sw_256kb_r_x) {
+ sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
- } else {
+ else {
dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML2_ASSERT(0);
}
@@ -403,7 +556,7 @@ bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
bool ret_val = 0;
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
+ if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
ret_val = 1;
return ret_val;
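With this change, 4:2:2 planar formats are treated as dual-plane alongside 4:2:0 and rgbe_alpha. A small usage sketch (helper name is illustrative):

	/* 4:2:0, 4:2:2 planar and rgbe_alpha carry a second (chroma/alpha) plane */
	static unsigned int planes_for_format(enum dml2_source_format_class f)
	{
		return dml2_core_utils_is_dual_plane(f) ? 2 : 1;
	}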
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
index a5cc6a07167a..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -11,6 +11,8 @@
double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format);
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
@@ -18,8 +20,10 @@ unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int mu
unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 8869ea089312..fc77fb34a19a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -96,6 +96,7 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
double min_uclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
+ /* assumes DF throttling is enabled */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100);
@@ -125,6 +126,37 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+
+ /* assumes DF throttling is disabled */
+ min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100);
+
+ min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100);
+
+ min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
+
+ min_fclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100);
+
+ min_fclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_urgent = (double)min_fclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100);
+
+ min_fclk_bw = min_fclk_urgent > min_fclk_avg ? min_fclk_urgent : min_fclk_avg;
+
+ min_dcfclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100);
+
+ min_dcfclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_urgent = (double)min_dcfclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100);
+
+ min_dcfclk_bw = min_dcfclk_urgent > min_dcfclk_avg ? min_dcfclk_urgent : min_dcfclk_avg;
+
+ get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
+
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
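Each minimum above follows the same shape: convert a bandwidth requirement into a clock, then divide by the derate fraction, since a derate of 80% means only 80% of the raw clock is usable. A worked sketch with assumed numbers:

	/* Illustrative: 10,000,000 kbps urgent SDP bw, 64-byte return bus,
	 * 80% DCFCLK derate. */
	double min_dcfclk_khz = 10000000.0 / 64.0;	/* 156250 kHz raw */
	min_dcfclk_khz /= 80.0 / 100.0;			/* 195312.5 kHz required */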
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -272,6 +304,17 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk);
+ /* these clocks are optional, so they can fail to map, in which case map all to 0 */
+ if (result) {
+ if (!round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz, &state_table->dcfclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = 0;
+ }
+ }
+
return result;
}
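round_up_to_next_dpm doubles as the "did it map" predicate used above. A hypothetical sketch of what such a helper does (the real implementation lives elsewhere in dml2; the table field names are assumptions):

	static bool round_up_to_next_dpm(unsigned long *clk_khz,
					 const struct dml2_clk_table *table)
	{
		unsigned int i;

		for (i = 0; i < table->num_clk_values; i++) {
			if (table->clk_values_khz[i] >= *clk_khz) {
				*clk_khz = table->clk_values_khz[i];	/* snap up */
				return true;
			}
		}
		return false;	/* requirement exceeds the highest DPM level */
	}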
@@ -374,11 +417,11 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->num_streams; i++) {
@@ -413,10 +456,10 @@ static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *displa
static int find_smallest_idle_time_in_vblank_us(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int mask)
{
- unsigned char i;
+ unsigned int i;
int min_idle_us = 0;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
// Create a remap array to enable simple iteration through only masked stream indices
@@ -711,7 +754,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
@@ -725,7 +768,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
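Every register write above performs the same conversion: a watermark latency expressed in microseconds becomes a count of reference-clock cycles. A sketch of the arithmetic; the cast mirrors the (int unsigned) truncation used in the code above:

/* Watermark latency in microseconds times refclk in MHz yields cycles:
 * us * (cycles/us) = cycles, truncated to the register's unsigned width. */
static unsigned int wm_us_to_refclk_cycles(double watermark_us,
					   double refclk_freq_in_mhz)
{
	return (unsigned int)(watermark_us * refclk_freq_in_mhz);
}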
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index a31db5742675..e763c8e45da8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -195,11 +195,11 @@ static int count_planes_with_stream_index(const struct dml2_display_cfg *display
static bool are_timings_trivially_synchronizable(struct display_configuation_with_meta *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
 	// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->display_config.num_streams; i++) {
@@ -347,8 +347,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
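The same guard is applied to the second copy of find_highest_odm_load_stream_index() later in this series. Reduced to its essentials (the helper name is hypothetical):

/* ODM load is pixel clock divided across the ODM segments in use.
 * odms_used can be zero when mode support has not populated the stream,
 * so guard the division instead of trusting the input. */
static int stream_odm_load_khz(int pixel_clock_khz, int odms_used)
{
	return odms_used > 0 ? pixel_clock_khz / odms_used : 0;
}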
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 92269f0e50ed..a3324f7b9ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -13,32 +13,32 @@ static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -49,56 +49,56 @@ static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1
static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
// VActive only is preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VActive + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then VBlank only
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP + SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -109,32 +109,32 @@ static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2
static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -145,32 +145,32 @@ static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3
static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
 	// All VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = true,
},
*/
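The *_size constants referenced in the surrounding hunk headers come from the usual sizeof-array idiom; for reference, the existing definitions follow this pattern (a sketch of what the truncated hunk headers show, not new code):

/* Entry count of a strategy table: total bytes over bytes per entry. */
static const int base_strategy_list_4_display_size =
	sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);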
@@ -355,29 +355,30 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
return result;
}
-static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy)
+static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
{
- enum dml2_pmo_pstate_method variant_strategy = 0;
+ enum dml2_pstate_method variant_strategy = 0;
switch (base_strategy) {
- case dml2_pmo_pstate_strategy_vactive:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vactive_drr;
+ case dml2_pstate_method_vactive:
+ variant_strategy = dml2_pstate_method_fw_vactive_drr;
break;
- case dml2_pmo_pstate_strategy_vblank:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vblank_drr;
+ case dml2_pstate_method_vblank:
+ variant_strategy = dml2_pstate_method_fw_vblank_drr;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- variant_strategy = dml2_pmo_pstate_strategy_fw_svp_drr;
+ case dml2_pstate_method_fw_svp:
+ variant_strategy = dml2_pstate_method_fw_svp_drr;
break;
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
- case dml2_pmo_pstate_strategy_fw_drr:
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_fw_vactive_drr:
+ case dml2_pstate_method_fw_vblank_drr:
+ case dml2_pstate_method_fw_svp_drr:
+ case dml2_pstate_method_fw_drr:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
/* no variant for this mode */
variant_strategy = base_strategy;
@@ -419,23 +420,22 @@ static unsigned int get_num_expanded_strategies(
static void insert_strategy_into_expanded_list(
const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
- int stream_count,
- struct dml2_pmo_init_data *init_data)
+ const int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
- struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
-
- expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count);
+ if (expanded_strategy_list && num_expanded_strategies) {
+ memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
- if (expanded_strategy_list) {
- memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
-
- init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++;
+ (*num_expanded_strategies)++;
}
}
-static void expand_base_strategy(struct dml2_pmo_instance *pmo,
+static void expand_base_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool skip_to_next_stream;
bool expanded_strategy_added;
@@ -473,7 +473,7 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
if (i >= stream_count - 1) {
/* insert into strategy list */
- insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data);
+ insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
expanded_strategy_added = true;
} else {
/* skip to next stream */
@@ -512,9 +512,9 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
const struct dml2_pmo_pstate_strategy *variant_strategy,
- unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int stream_count)
+ const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int stream_count)
{
bool valid = true;
unsigned int i;
@@ -522,7 +522,7 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
/* check all restrictions are met */
for (i = 0; i < stream_count; i++) {
/* vblank + vblank_drr variants are invalid */
- if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank &&
+ if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
num_streams_per_variant_method[i] > 1)) {
valid = false;
@@ -533,9 +533,12 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
return valid;
}
-static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
+static void expand_variant_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ const bool should_permute,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool variant_found;
unsigned int i, j;
@@ -544,7 +547,7 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
- enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
+ enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
/* determine number of displays per method */
@@ -585,7 +588,13 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
- expand_base_strategy(pmo, &variant_strategy, stream_count);
+ if (should_permute) {
+ /* permutations are permitted, proceed to expand */
+ expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ } else {
+ /* no permutations allowed, so add to list now */
+ insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ }
}
/* rollback to earliest method with bases remaining */
@@ -612,18 +621,19 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
}
-static void expand_base_strategies(
- struct dml2_pmo_instance *pmo,
- const struct dml2_pmo_pstate_strategy *base_strategies_list,
- const unsigned int num_base_strategies,
- unsigned int stream_count)
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
unsigned int i;
/* expand every explicit base strategy (except all DRR) */
for (i = 0; i < num_base_strategies; i++) {
- expand_base_strategy(pmo, &base_strategies_list[i], stream_count);
- expand_variant_strategy(pmo, &base_strategies_list[i], stream_count);
+ expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
+ expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
}
}
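The refactor above replaces the pmo-instance lookup with a caller-supplied destination array and running count, which is what lets the same expansion helpers serve both the predefined per-display lists and the new override list. The contract, sketched:

/* Append one strategy to a caller-owned expanded list.  Guarding the
 * output pointers makes a misconfigured caller a no-op, not a crash. */
static void append_expanded_strategy(const struct dml2_pmo_pstate_strategy *src,
				     struct dml2_pmo_pstate_strategy *list,
				     unsigned int *count)
{
	if (!list || !count)
		return;

	memcpy(&list[*count], src, sizeof(*src));
	(*count)++;
}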
@@ -652,25 +662,45 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_1_display, base_strategy_list_1_display_size, 1);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_1_display,
+ base_strategy_list_1_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_2_display, base_strategy_list_2_display_size, 2);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_2_display,
+ base_strategy_list_2_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_3_display, base_strategy_list_3_display_size, 3);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_3_display,
+ base_strategy_list_3_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_4_display,
+ base_strategy_list_4_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
}
}
@@ -783,8 +813,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
@@ -941,11 +975,8 @@ static void build_synchronized_timing_groups(
/* find synchronizable timing groups */
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
- &display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0 &&
- display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder &&
- (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match
- display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) {
+ &display_config->display_config.stream_descriptors[j].timing,
+ sizeof(struct dml2_timing_cfg)) == 0) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
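With the encoder and HDMI-format checks dropped, membership in a synchronized timing group is decided by the timing descriptor alone. The test reduces to:

/* Two streams share a timing group iff their timing configs are
 * byte-identical (sketch of the comparison used above). */
static bool timings_match(const struct dml2_timing_cfg *a,
			  const struct dml2_timing_cfg *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}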
@@ -959,7 +990,7 @@ static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
bool valid = true;
// Create a remap array to enable simple iteration through only masked stream indicies
@@ -1008,7 +1039,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
@@ -1050,7 +1081,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
unsigned int microschedule_vlines;
- unsigned char i;
+ unsigned int i;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
@@ -1106,24 +1137,73 @@ static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *ps
scratch->pmo_dcn4.num_pstate_candidates++;
}
-static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method)
+static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
{
- unsigned char i;
- enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
+ enum dml2_pstate_method method = dml2_pstate_method_na;
- if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (method == dml2_pmo_pstate_strategy_fw_svp)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (method == dml2_pmo_pstate_strategy_fw_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+ switch (override_strategy) {
+ case dml2_uclk_pstate_change_strategy_force_vactive:
+ method = dml2_pstate_method_vactive;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_vblank:
+ method = dml2_pstate_method_vblank;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_drr:
+ method = dml2_pstate_method_fw_drr;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_svp:
+ method = dml2_pstate_method_fw_svp;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
+ case dml2_uclk_pstate_change_strategy_auto:
+ default:
+ method = dml2_pstate_method_na;
+ }
+
+ return method;
+}
+
+static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
+{
+ enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;
+
+ switch (method) {
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
+ break;
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
+ break;
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
+ break;
+ case dml2_pstate_method_fw_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+ break;
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
+ default:
+ override_strategy = dml2_uclk_pstate_change_strategy_auto;
+ }
+
+ return override_strategy;
+}
+
+static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
+{
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
- display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy)
+ display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
return false;
}
}
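all_planes_match_method() now compares each plane's override against pstate_method_to_uclk_pstate_strategy_override(method). The two converters are inverses on auto and on the four forced strategies the first converter maps to a method; force_mall_full_frame maps to na and comes back as auto. A quick check of that invariant (a sketch, not part of the patch):

/* Round-trip holds for auto and the four strategies mapped to a method;
 * force_mall_full_frame collapses to auto. */
static bool override_round_trips(enum dml2_uclk_pstate_change_strategy s)
{
	return pstate_method_to_uclk_pstate_strategy_override(
			uclk_pstate_strategy_override_to_pstate_method(s)) == s;
}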
@@ -1149,32 +1229,33 @@ static void build_method_scheduling_params(
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
- enum dml2_pmo_pstate_method stream_pstate_method,
+ enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
switch (stream_pstate_method) {
- case dml2_pmo_pstate_strategy_vactive:
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
break;
- case dml2_pmo_pstate_strategy_vblank:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
break;
- case dml2_pmo_pstate_strategy_fw_drr:
+ case dml2_pstate_method_fw_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
break;
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
stream_method_fams2_meta = NULL;
}
@@ -1215,7 +1296,7 @@ static bool is_timing_group_schedulable(
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
if (!stream_method_fams2_meta)
- return false;
+ continue;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1295,7 +1376,7 @@ static bool is_config_schedulable(
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
swapped = true;
}
}
@@ -1354,7 +1435,7 @@ static bool is_config_schedulable(
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
- s->pmo_dcn4.sorted_group_gtl_period_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
swapped = true;
}
}
@@ -1413,7 +1494,7 @@ static bool is_config_schedulable(
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_method stream_pstate_method,
+ const enum dml2_pstate_method stream_pstate_method,
unsigned int stream_index)
{
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
@@ -1468,7 +1549,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
{
struct dml2_pmo_scratch *s = &pmo->scratch;
- unsigned char stream_index = 0;
+ unsigned int stream_index = 0;
unsigned int svp_count = 0;
unsigned int svp_stream_mask = 0;
@@ -1494,19 +1575,19 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
strategy_matches_drr_requirements &=
stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
- if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
svp_count++;
set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
drr_count++;
set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
vactive_count++;
set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
vblank_count++;
set_bit_in_bitfield(&vblank_stream_mask, stream_index);
}
@@ -1532,7 +1613,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
- unsigned char i;
+ unsigned int i;
int min_vactive_margin_us = 0xFFFFFFF;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1625,7 +1706,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
/* for single stream, guarantee at least an instant of allow */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - stream_fams2_meta->min_allow_width_otg_vlines - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
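The single-stream fix clamps the allow-width term to at least one vline, so the computed fill delay always leaves an instant of allow time. In isolation, using the document's math helpers:

/* Max DET fill delay in OTG vlines for a single stream: the allow width
 * is clamped to >= 1 vline before being subtracted (sketch). */
static unsigned int max_vactive_fill_delay_vlines(double v_active,
						  double min_allow_width_vlines,
						  double blackout_vlines)
{
	return (unsigned int)math_floor(math_max2(0.0,
			v_active - math_max2(1.0, min_allow_width_vlines)
				 - blackout_vlines));
}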
@@ -1738,8 +1819,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
struct display_configuation_with_meta *display_config;
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
+ struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
unsigned int strategy_list_size = 0;
- unsigned char plane_index, stream_index, i;
+ unsigned int plane_index, stream_index, i;
+ bool build_override_strategy = true;
state->performed = true;
in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
@@ -1763,7 +1846,11 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
- state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
+
+ build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
+ override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
+ uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
}
// Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
@@ -1781,13 +1868,30 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
/* get synchronized timing groups */
build_synchronized_timing_groups(pmo, display_config);
- strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
- if (!strategy_list)
- return false;
-
- strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ if (build_override_strategy) {
+ /* build expanded override strategy list (no permutations) */
+ override_base_strategy.allow_state_increase = true;
+ s->pmo_dcn4.num_expanded_override_strategies = 0;
+ insert_strategy_into_expanded_list(&override_base_strategy,
+ display_config->display_config.num_streams,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+ expand_variant_strategy(&override_base_strategy,
+ display_config->display_config.num_streams,
+ false,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+
+ /* use override strategy list */
+ strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
+ strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
+ } else {
+ /* use predefined strategy list */
+ strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
+ strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ }
- if (strategy_list_size == 0)
+ if (!strategy_list || strategy_list_size == 0)
return false;
s->pmo_dcn4.num_pstate_candidates = 0;
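When every plane carries an explicit uclk_pstate_change_strategy, the init path above now builds a one-entry override strategy from those per-plane values and expands only its DRR variants, passing should_permute == false so methods are never shuffled across streams. Condensed into a sketch reusing the helpers from this patch:

/* Build the override list: the override itself, plus its DRR variants
 * inserted directly (no cross-stream permutation). */
static unsigned int build_override_strategy_list(
		const struct dml2_pmo_pstate_strategy *override_base,
		unsigned int num_streams,
		struct dml2_pmo_pstate_strategy *list)
{
	unsigned int num = 0;

	insert_strategy_into_expanded_list(override_base, num_streams, list, &num);
	expand_variant_strategy(override_base, num_streams, false, list, &num);

	return num;
}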
@@ -1799,7 +1903,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
}
if (s->pmo_dcn4.num_pstate_candidates > 0) {
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true;
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates-1].allow_state_increase = true;
s->pmo_dcn4.cur_pstate_candidate = -1;
return true;
} else {
@@ -1832,7 +1936,7 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
// Reset strategy to auto
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
}
}
@@ -1840,7 +1944,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1849,7 +1953,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
}
}
@@ -1861,13 +1965,13 @@ static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
}
}
@@ -1884,13 +1988,13 @@ static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_me
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
}
}
@@ -1905,7 +2009,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1915,7 +2019,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
plane->overrides.reserved_vblank_time_ns);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
}
}
@@ -1925,7 +2029,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1933,7 +2037,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
plane = &display_config->display_config.plane_descriptors[plane_index];
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vblank_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
}
}
}
@@ -1942,14 +2046,14 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1963,14 +2067,14 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vactive_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1992,26 +2096,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
success = false;
break;
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
fams2_required = true;
setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
fams2_required = true;
setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
fams2_required = true;
setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
fams2_required = true;
setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
fams2_required = true;
setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
}
@@ -2031,7 +2135,7 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
{
int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
+ unsigned int plane_index = 0;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
@@ -2066,34 +2170,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
- if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
+ if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 0c25bd3e9ac0..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
@@ -23,4 +23,11 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index add51d41a515..7ed0242a4b33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -72,7 +72,6 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
out->init_for_stutter = pmo_dcn4_fams2_init_for_stutter;
out->test_for_stutter = pmo_dcn4_fams2_test_for_stutter;
out->optimize_for_stutter = pmo_dcn4_fams2_optimize_for_stutter;
-
result = true;
break;
case dml2_project_invalid:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
new file mode 100644
index 000000000000..f88931ccbc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml_top.h"
+#include "dml2_internal_shared_types.h"
+#include "dml2_top_soc15.h"
+
+unsigned int dml2_get_instance_size_bytes(void)
+{
+ return sizeof(struct dml2_instance);
+}
+
+bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ switch (in_out->options.project_id) {
+ case dml2_project_dcn4x_stage1:
+ case dml2_project_dcn4x_stage2:
+ case dml2_project_dcn4x_stage2_auto_drr_svp:
+ return dml2_top_soc15_initialize_instance(in_out);
+ case dml2_project_invalid:
+ default:
+ return false;
+ }
+}
+
+bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.check_mode_supported)
+ return false;
+
+ return in_out->dml2_instance->funcs.check_mode_supported(in_out);
+}
+
+bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mode_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mode_programming(in_out);
+}
+
+bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mcache_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mcache_programming(in_out);
+}
+
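A caller drives the new top-level entry points by sizing an instance, initializing it for a project, and then dispatching through the populated hooks. A hedged usage sketch; only options.project_id and dml2_instance appear in the file above, so the dml2_instance field on the init struct and the use of kzalloc() are assumptions:

/* Usage sketch (field name 'dml2_instance' on the init struct assumed). */
static bool example_bringup(void)
{
	struct dml2_initialize_instance_in_out init = { 0 };
	struct dml2_check_mode_supported_in_out check = { 0 };

	init.dml2_instance = kzalloc(dml2_get_instance_size_bytes(), GFP_KERNEL);
	if (!init.dml2_instance)
		return false;

	init.options.project_id = dml2_project_dcn4x_stage2;
	if (!dml2_initialize_instance(&init))
		return false;

	/* each dispatcher returns false if its hook was not populated */
	check.dml2_instance = init.dml2_instance;
	return dml2_check_mode_supported(&check);
}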
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
new file mode 100644
index 000000000000..5e14d85821e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_legacy.h"
+#include "dml2_top_soc15.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "display_mode_core_structs.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
new file mode 100644
index 000000000000..14d0ae03dce6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_TOP_LEGACY_H__
+#define __DML2_TOP_LEGACY_H__
+#include "dml2_internal_shared_types.h"
+bool dml2_top_legacy_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
+#endif /* __DML2_TOP_LEGACY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
deleted file mode 100644
index d0e026d981b5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_top_optimization.h"
-#include "dml2_internal_shared_types.h"
-#include "dml_top_mcache.h"
-
-static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
-{
- memcpy(dst, src, sizeof(struct display_configuation_with_meta));
-}
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- state->performed = true;
-
- return true;
-}
-
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- return state->min_clk_index_for_latency == 0;
-}
-
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
-{
- bool result = false;
-
- if (params->display_config->stage1.min_clk_index_for_latency > 0) {
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
- params->optimized_display_config->stage1.min_clk_index_for_latency--;
- result = true;
- }
-
- return result;
-}
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
- bool mcache_success = false;
- bool result = false;
-
- memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
-
- l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
- l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
- l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
-
- result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
-
- if (result) {
- l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
-
- dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
-
- l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
- l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
- l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
-
- mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift to make mcache allocation works
-
- memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
- }
-
- return mcache_success;
-}
-
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
- bool optimize_success = false;
-
- if (params->last_candidate_supported == false)
- return false;
-
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
-
- l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
- l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
- l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
-
- optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
-
- return optimize_success;
-}
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->vmin.init_params.instance = &params->dml->pmo_instance;
- l->vmin.init_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
-}
-
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
- l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
- l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
- return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
-}
-
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- if (params->last_candidate_supported == false)
- return false;
-
- l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
- l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
- l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
- return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
-}
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- bool test_passed = false;
- bool optimize_succeeded = true;
- bool candidate_validation_passed = true;
- struct optimization_init_function_params init_params = { 0 };
- struct optimization_test_function_params test_params = { 0 };
- struct optimization_optimize_function_params optimize_params = { 0 };
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
-
- init_params.locals = &l->init_function_locals;
- init_params.dml = params->dml;
- init_params.display_config = &l->cur_candidate_display_cfg;
-
- if (params->init_function && !params->init_function(&init_params))
- return false;
-
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->cur_candidate_display_cfg;
-
- test_passed = params->test_function(&test_params);
-
- while (!test_passed && optimize_succeeded) {
- memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
-
- optimize_params.locals = &l->optimize_function_locals;
- optimize_params.dml = params->dml;
- optimize_params.display_config = &l->cur_candidate_display_cfg;
- optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
- optimize_params.last_candidate_supported = candidate_validation_passed;
-
- optimize_succeeded = params->optimize_function(&optimize_params);
-
- if (optimize_succeeded) {
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
-
- if (l->next_candidate_display_cfg.stage3.performed)
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
- else
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
-
- candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- }
-
- if (optimize_succeeded && candidate_validation_passed) {
- memset(&test_params, 0, sizeof(struct optimization_test_function_params));
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->next_candidate_display_cfg;
- test_passed = params->test_function(&test_params);
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
-
- // If optimization is not all or nothing, then store partial progress in output
- if (!params->all_or_nothing)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
- }
- }
-
- if (test_passed)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return test_passed;
-}
-
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- int highest_state, lowest_state, cur_state;
- bool supported = false;
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
- highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
- lowest_state = 0;
-
- while (highest_state > lowest_state) {
- cur_state = (highest_state + lowest_state) / 2;
-
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
- l->mode_support_params.min_clk_index = cur_state;
-
- supported = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- if (supported) {
- l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- highest_state = cur_state;
- } else {
- lowest_state = cur_state + 1;
- }
- }
- l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
-
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return true;
-}
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
-}
-
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.test_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
-}
-
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.optimize_params.base_display_config = params->display_config;
- l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
- l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
-
- return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
-}
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
- l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
- return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
deleted file mode 100644
index 9f22ab33eab1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DML2_TOP_OPTIMIZATION_H__
-#define __DML2_TOP_OPTIMIZATION_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml2_internal_shared_types.h"
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
new file mode 100644
index 000000000000..a8f58f8448e4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_soc15.h"
+#include "dml2_mcg_factory.h"
+#include "dml2_dpmm_factory.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "lib_float_math.h"
+#include "dml2_debug.h"
+static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1;
+}
+
+static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = 0;
+}
+
+static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
+{
+ memcpy(dst, src, sizeof(struct display_configuation_with_meta));
+}
+
+static bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ state->performed = true;
+
+ return true;
+}
+
+static bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ return state->min_clk_index_for_latency == 0;
+}
+
+static bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
+{
+ bool result = false;
+
+ if (params->display_config->stage1.min_clk_index_for_latency > 0) {
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+ params->optimized_display_config->stage1.min_clk_index_for_latency--;
+ result = true;
+ }
+
+ return result;
+}
+
+static bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+ bool mcache_success = false;
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
+
+ l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
+ l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
+ l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+
+ result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
+
+ if (result) {
+ l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
+
+ dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
+
+ l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
+ l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
+ l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
+
+ mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also finds the shift needed to make the mcache allocation work
+
+ memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
+ }
+
+ return mcache_success;
+}
+
+static bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+ bool optimize_success = false;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+
+ l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
+ l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
+ l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
+
+ optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
+
+ return optimize_success;
+}
+
+static bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->vmin.init_params.instance = &params->dml->pmo_instance;
+ l->vmin.init_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
+}
+
+static bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
+ l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
+ l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
+ return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
+}
+
+static bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
+ l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
+ l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
+ return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
+}
+
+static bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.init_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
+}
+
+static bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.test_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
+}
+
+static bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.optimize_params.base_display_config = params->display_config;
+ l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
+ l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
+
+ return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
+}
+
+static bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
+ l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
+ return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
+}
+
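+/*
+ * Generic greedy optimization loop: starting from the given display config,
+ * repeatedly ask the phase's optimize_function for a new candidate, validate
+ * it with core mode support, and re-run the phase's test_function until the
+ * test passes or no further candidate can be produced. With all_or_nothing
+ * set, the output config is only updated once the test finally passes.
+ */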
+static bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ bool test_passed = false;
+ bool optimize_succeeded = true;
+ bool candidate_validation_passed = true;
+ struct optimization_init_function_params init_params = { 0 };
+ struct optimization_test_function_params test_params = { 0 };
+ struct optimization_optimize_function_params optimize_params = { 0 };
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+
+ init_params.locals = &l->init_function_locals;
+ init_params.dml = params->dml;
+ init_params.display_config = &l->cur_candidate_display_cfg;
+
+ if (params->init_function && !params->init_function(&init_params))
+ return false;
+
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->cur_candidate_display_cfg;
+
+ test_passed = params->test_function(&test_params);
+
+ while (!test_passed && optimize_succeeded) {
+ memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
+
+ optimize_params.locals = &l->optimize_function_locals;
+ optimize_params.dml = params->dml;
+ optimize_params.display_config = &l->cur_candidate_display_cfg;
+ optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
+ optimize_params.last_candidate_supported = candidate_validation_passed;
+
+ optimize_succeeded = params->optimize_function(&optimize_params);
+
+ if (optimize_succeeded) {
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+
+ if (l->next_candidate_display_cfg.stage3.performed)
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
+ else
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
+ l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ }
+
+ if (optimize_succeeded && candidate_validation_passed) {
+ memset(&test_params, 0, sizeof(struct optimization_test_function_params));
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->next_candidate_display_cfg;
+ test_passed = params->test_function(&test_params);
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
+
+ // If optimization is not all or nothing, then store partial progress in output
+ if (!params->all_or_nothing)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
+ }
+ }
+
+ if (test_passed)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return test_passed;
+}
+
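+/*
+ * Phase-1 helper: binary search for the lowest min-clock table index that
+ * still passes core mode support. Example (illustrative): starting at stage1
+ * index 7 with indices 3..7 supported, the loop converges on index 3 after
+ * three mode-support evaluations.
+ */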
+static bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ int highest_state, lowest_state, cur_state;
+ bool supported = false;
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+ highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ lowest_state = 0;
+
+ while (highest_state > lowest_state) {
+ cur_state = (highest_state + lowest_state) / 2;
+
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+ l->mode_support_params.min_clk_index = cur_state;
+ supported = params->dml->core_instance.mode_support(&l->mode_support_params);
+
+ if (supported) {
+ l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ highest_state = cur_state;
+ } else {
+ lowest_state = cur_state + 1;
+ }
+ }
+ l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return true;
+}
+
+/*
+* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
+* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
+* that the horizontal viewport does not span more than 2 cache slices.
+*
+* It can also optionally apply a constant shift to all the cache boundaries.
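+*
+* Example (illustrative): boundaries {256, 512, 768} with shift 0 partition the
+* surface into slices [0,255], [256,511] and [512,...]; a viewport spanning
+* x = [200,400] touches slices 0 and 1 and is accepted (first = 0, second = 1),
+* while x = [200,600] touches three slices and is rejected.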
+*/
+static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
+
+static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
+ int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
+{
+ const int MAX_VP = 0xFFFFFF;
+ int left_cache_id;
+ int right_cache_id;
+ int range_start;
+ int range_end;
+ bool success = false;
+
+ if (num_boundaries <= 1) {
+ if (first_offset && second_offset) {
+ *first_offset = 0;
+ *second_offset = -1;
+ }
+ success = true;
+ return success;
+ } else {
+ range_start = 0;
+ for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
+ range_end = mcache_boundaries[left_cache_id] - shift - 1;
+
+ if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
+ break;
+
+ range_start = range_end + 1;
+ }
+
+ range_end = MAX_VP;
+ for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
+ if (right_cache_id >= 0)
+ range_start = mcache_boundaries[right_cache_id] - shift;
+ else
+ range_start = 0;
+
+ if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
+ break;
+ }
+ range_end = range_start - 1;
+ }
+ right_cache_id = (right_cache_id + 1) % num_boundaries;
+
+ if (right_cache_id == left_cache_id) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = -1;
+ }
+ success = true;
+ } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = right_cache_id;
+ }
+ success = true;
+ }
+ }
+
+ return success;
+}
+
+/*
+* For a given set of pipe start/end x positions, checks whether they can support the input mcache splitting.
+* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
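+*
+* Example (illustrative, assuming a shift granularity that reaches 20): with
+* boundaries {100, 200, 300}, a pipe viewport of x = [90,210] spans three
+* slices at shift 0, but only the two slices [80,179] and [180,...] once a
+* shift of 20 is applied.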
+*/
+static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
+ int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
+{
+ int max_shift = 0xFFFF;
+ unsigned int pipe_index;
+ unsigned int i, slice_width;
+ bool success = false;
+
+ for (i = 0; i < num_boundaries; i++) {
+ if (i == 0)
+ slice_width = mcache_boundaries[i];
+ else
+ slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
+
+ if (max_shift > (int)slice_width) {
+ max_shift = slice_width;
+ }
+ }
+
+ for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
+ success = true;
+ for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
+ if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
+ pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], NULL, NULL)) {
+ success = false;
+ break;
+ }
+ }
+ if (success)
+ break;
+ }
+
+ return success;
+}
+
+/*
+* Counts the number of elements inside the input array that fall within the given span
+* length. Formally: the size of the largest subset of the array in which the largest
+* and smallest elements differ by no more than the span.
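+*
+* Example (illustrative): for array = {0, 10, 30, 70} and span = 25, the
+* largest such subset is {0, 10}, so the result is 2.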
+*/
+static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
+{
+ unsigned int i;
+ unsigned int span_start_value;
+ unsigned int span_start_index;
+ unsigned int greatest_element_count;
+
+ if (array_size == 0)
+ return 1;
+
+ if (span == 0)
+ return array_size > 0 ? 1 : 0;
+
+ span_start_value = 0;
+ span_start_index = 0;
+ greatest_element_count = 0;
+
+ while (span_start_index < array_size) {
+ for (i = span_start_index; i < array_size; i++) {
+ if (array[i] - span_start_value <= span) {
+ if (i - span_start_index + 1 > greatest_element_count) {
+ greatest_element_count = i - span_start_index + 1;
+ }
+ } else
+ break;
+ }
+
+ span_start_index++;
+
+ if (span_start_index < array_size) {
+ span_start_value = array[span_start_index - 1] + 1;
+ }
+ }
+
+ return greatest_element_count;
+}
+
+static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
+ enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
+{
+ int i, slice_width;
+ const char MAX_SCL_VP_OVERLAP = 3;
+ bool success = false;
+
+ switch (scaling_transform) {
+ case dml2_scaling_transform_centered:
+ case dml2_scaling_transform_aspect_ratio:
+ case dml2_scaling_transform_fullscreen:
+ slice_width = full_vp_width / num_pipes;
+ for (i = 0; i < num_pipes; i++) {
+ pipe_vp_x_start[i] = i * slice_width;
+ pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
+
+ if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
+ pipe_vp_x_start[i] = 0;
+ else
+ pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
+
+ if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
+ pipe_vp_x_end[i] = full_vp_width - 1;
+ else
+ pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
+ }
+ success = true;
+ break;
+ case dml2_scaling_transform_explicit:
+ default:
+ success = false;
+ break;
+ }
+
+ return success;
+}
+
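+/*
+ * For each DCC-enabled plane, checks that every pipe's horizontal viewport
+ * touches at most two mcache slices given the ODM/MPC split, searching for a
+ * boundary shift when the unshifted layout fails, and applies the found
+ * shifts to the mcache x offsets on success.
+ */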
+bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
+
+ const int MAX_PIXEL_OVERLAP = 6;
+ int max_per_pipe_vp_p0 = 0;
+ int max_per_pipe_vp_p1 = 0;
+ int temp, p0shift, p1shift;
+ unsigned int plane_index = 0;
+ unsigned int i;
+ unsigned int odm_combine_factor;
+ unsigned int mpc_combine_factor;
+ unsigned int num_dpps;
+ unsigned int num_boundaries;
+ enum dml2_scaling_transform scaling_transform;
+ const struct dml2_plane_parameters *plane;
+ const struct dml2_stream_parameters *stream;
+
+ bool p0pass = false;
+ bool p1pass = false;
+ bool all_pass = true;
+
+ for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
+ if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
+ continue;
+
+ plane = &params->display_cfg->plane_descriptors[plane_index];
+ stream = &params->display_cfg->stream_descriptors[plane->stream_index];
+
+ num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
+
+ if (odm_combine_factor == 1)
+ num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
+ else
+ mpc_combine_factor = 1;
+
+ if (odm_combine_factor > 1) {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p0)
+ max_per_pipe_vp_p0 = temp;
+
+ max_per_pipe_vp_p1 = plane->surface.plane1.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p1)
+ max_per_pipe_vp_p1 = temp;
+ } else {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
+ max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
+ }
+
+ max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
+ max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
+
+ p0shift = 0;
+ p1shift = 0;
+
+ // The last element in the unshifted boundary array will always be the first pixel outside the
+ // plane, which means there's no mcache associated with it, so -1
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
+ p0pass = true;
+ }
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
+ p1pass = true;
+ }
+
+ if (!p0pass || !p1pass) {
+ if (odm_combine_factor > 1) {
+ num_dpps = odm_combine_factor;
+ scaling_transform = plane->composition.scaling_transform;
+ } else {
+ num_dpps = mpc_combine_factor;
+ scaling_transform = dml2_scaling_transform_fullscreen;
+ }
+
+ if (!p0pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
+ p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ params->mcache_allocations[plane_index].num_mcaches_plane0,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
+ }
+ }
+ if (!p1pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index]);
+ p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ params->mcache_allocations[plane_index].num_mcaches_plane1,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
+ }
+ }
+ }
+
+ if (p0pass && p1pass) {
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
+ }
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
+ }
+ }
+
+ params->per_plane_status[plane_index] = p0pass && p1pass;
+ all_pass &= p0pass && p1pass;
+ }
+
+ return all_pass;
+}
+
+static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
+{
+ // Initialize all entries to the special "unassigned" mcache ID and "undefined" split-location sentinels (both valid register values)
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+}
+
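+/*
+ * Example (illustrative): a plane with two p0 and two p1 mcache slices gets
+ * global IDs {0, 1} for p0 and {2, 3} for p1; if plane0 and plane1 share
+ * their last slice, p1's last ID is then remapped to p0's last ID (1).
+ */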
+void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
+{
+ int i;
+ unsigned int j;
+ int next_unused_cache_id = 0;
+
+ for (i = 0; i < params->num_allocations; i++) {
+ if (!params->allocations[i].valid)
+ continue;
+
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_plane0[0];
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_plane1[0];
+
+ // If we need dedicated caches for mall requesting, then we assign them here.
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_mall_plane0[0];
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_mall_plane1[0];
+ }
+
+ // If P0 and P1 share their last cache slice, the largest mcache IDs for p0 and p1 can be the same.
+ // Since mcache IDs are always assigned in ascending order, the largest mcache ID of p1 is therefore
+ // set to the largest mcache ID of p0.
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+
+ // If we need dedicated caches handle last slice sharing
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+ // If mall_comb_mcache_l is set then it means that largest mcache ID for MALL p0 can be same as regular read p0
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+ // If mall_comb_mcache_c is set then it means that largest mcache ID for MALL p1 can be same as regular
+ // read p1 (which can be same as regular read p0 if plane0_plane1 is also set)
+ if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
+ }
+ }
+
+ // If you don't need dedicated mall mcaches, the mall mcache assignments are identical to the normal requesting
+ if (!params->allocations[i].requires_dedicated_mall_mcache) {
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
+ }
+ }
+}
+
+bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
+
+ unsigned int total_mcaches_required;
+ unsigned int i;
+ bool result = false;
+
+ if (dml->soc_bbox.num_dcc_mcaches == 0) {
+ return true;
+ }
+
+ total_mcaches_required = 0;
+ l->calc_mcache_params.instance = &dml->core_instance;
+ for (i = 0; i < params->display_config->num_planes; i++) {
+ if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
+ memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
+ continue;
+ }
+
+ l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
+ l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
+ l->calc_mcache_params.plane_index = i;
+
+ if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params)) {
+ result = false;
+ break;
+ }
+
+ if (params->mcache_allocations[i].valid) {
+ total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
+ if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
+ total_mcaches_required--;
+ }
+ }
+ dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+
+ if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
+ result = false;
+ } else {
+ result = true;
+ }
+
+ return result;
+}
+
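+/*
+ * Mode-support entry point: runs core mode support from the unoptimized
+ * (maximum clock) seed, then requires mcache admissibility (via the generic
+ * optimization loop) and a successful DPMM minimum-clock mapping before
+ * reporting the mode as supported.
+ */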
+static bool dml2_top_soc15_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
+ struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
+
+ bool result = false;
+ bool mcache_success = false;
+
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (result) {
+ struct optimization_phase_params mcache_phase = {
+ .dml = dml,
+ .display_config = &l->base_display_config_with_meta,
+ .test_function = dml2_top_optimization_test_function_mcache,
+ .optimize_function = dml2_top_optimization_optimize_function_mcache,
+ .optimized_display_config = &l->optimized_display_config_with_meta,
+ .all_or_nothing = false,
+ };
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
+ }
+
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = dpmm_programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ }
+
+ in_out->is_supported = mcache_success;
+ result = result && in_out->is_supported;
+
+ return result;
+}
+
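+/*
+ * Build-mode-programming pipeline, as implemented below: try mode support
+ * with the speculative (minimum clock) seed; on failure fall back to the
+ * unoptimized seed plus the phase-1 minimum-clock search; then run the
+ * mcache, uclk p-state, vmin and stutter optimization phases; finally map
+ * clocks and watermarks through DPMM and emit core mode programming.
+ */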
+static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;
+
+ bool result = false;
+ bool mcache_success = false;
+ bool uclk_pstate_success = false;
+ bool vmin_success = false;
+ bool stutter_success = false;
+ unsigned int i;
+
+ memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
+ memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));
+
+ setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (!result) {
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (!result) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return false;
+ }
+
+ /*
+ * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
+ */
+ memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
+ l->min_clock_for_latency_phase.dml = dml;
+ l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
+ l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->min_clock_for_latency_phase.all_or_nothing = false;
+
+ dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ }
+
+ /*
+ * Phase 2: Satisfy DCC mcache requirements
+ */
+ memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
+ l->mcache_phase.dml = dml;
+ l->mcache_phase.display_config = &l->base_display_config_with_meta;
+ l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
+ l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
+ l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->mcache_phase.all_or_nothing = true;
+
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
+
+ if (!mcache_success) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ in_out->programming->informative.failed_mcache_validation = true;
+ return false;
+ }
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+
+ /*
+ * Phase 3: Optimize for Pstate
+ */
+ memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
+ l->uclk_pstate_phase.dml = dml;
+ l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
+ l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
+ l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
+ l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
+ l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->uclk_pstate_phase.all_or_nothing = true;
+
+ uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
+
+ if (uclk_pstate_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage3.success = true;
+ }
+
+ /*
+ * Phase 4: Optimize for Vmin
+ */
+ memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
+ l->vmin_phase.dml = dml;
+ l->vmin_phase.display_config = &l->base_display_config_with_meta;
+ l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
+ l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
+ l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
+ l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->vmin_phase.all_or_nothing = false;
+
+ vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
+
+ if (l->optimized_display_config_with_meta.stage4.performed) {
+ /*
+ * When performed is true, the optimization has been applied to
+ * optimized_display_config_with_meta and it has passed mode
+ * support. However, it may or may not have passed the test
+ * function that checks whether actual Vmin was reached. As long
+ * as the voltage is lowered there is still a power benefit even
+ * if Vmin is not reached, so in this case we still copy this
+ * optimization into the base display config.
+ */
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage4.success = vmin_success;
+ }
+
+ /*
+ * Phase 5: Optimize for Stutter
+ */
+ memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
+ l->stutter_phase.dml = dml;
+ l->stutter_phase.display_config = &l->base_display_config_with_meta;
+ l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
+ l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
+ l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
+ l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->stutter_phase.all_or_nothing = true;
+
+ stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
+
+ if (stutter_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage5.success = true;
+ }
+
+ /*
+ * Populate mcache programming
+ */
+ for (i = 0; i < in_out->display_config->num_planes; i++) {
+ in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
+ }
+
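+ /*
+ * The remaining steps are gated on result: DPMM clock mapping, core
+ * mode programming, then watermark mapping each run only if all prior
+ * steps succeeded. DPMM and mode-programming failures are flagged in
+ * programming->informative.
+ */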
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = in_out->programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ if (!result)
+ in_out->programming->informative.failed_dpmm = true;
+ }
+
+ if (result) {
+ l->mode_programming_params.instance = &dml->core_instance;
+ l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
+ l->mode_programming_params.programming = in_out->programming;
+ result = dml->core_instance.mode_programming(&l->mode_programming_params);
+ if (!result)
+ in_out->programming->informative.failed_mode_programming = true;
+ }
+
+ if (result) {
+ l->dppm_map_watermarks_params.core = &dml->core_instance;
+ l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_watermarks_params.programming = in_out->programming;
+ result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
+ }
+
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = result;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return result;
+}
+
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
+{
+ bool success = true;
+ int config_index, pipe_index;
+ int first_offset, second_offset;
+ int free_per_plane_reg_index = 0;
+
+ memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
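+
+ /*
+ * Illustrative example (hypothetical numbers): for a plane whose
+ * mcache_x_offsets are {512, 1024}, a pipe viewport spanning
+ * x = 300..700 crosses the first boundary, so
+ * calculate_first_second_splitting() returns first_offset = 0 and
+ * second_offset = 1, and the register writes below program a
+ * split_location of 511 (offsets[first_offset] - 1).
+ */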
+
+ for (config_index = 0; config_index < params->num_configurations; config_index++) {
+ for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
+ // Allocate storage for the mcache regs
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
+
+ reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
+
+ if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
+ // P0 always enabled
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+ }
+
+ // Populate P1 if enabled
+ if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+ }
+ }
+ }
+ }
+ }
+
+ return success;
+}
+
+static const struct dml2_top_funcs soc15_funcs = {
+ .check_mode_supported = dml2_top_soc15_check_mode_supported,
+ .build_mode_programming = dml2_top_soc15_build_mode_programming,
+ .build_mcache_programming = dml2_top_soc15_build_mcache_programming,
+};
+
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
+ struct dml2_core_initialize_in_out core_init_params = { 0 };
+ struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
+ struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
+ memset(dml, 0, sizeof(struct dml2_instance));
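+
+ /* l aliases dml->scratch, so the instance-wide memset above also re-clears it. */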
+
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
+
+ dml->project_id = in_out->options.project_id;
+ dml->pmo_options = in_out->options.pmo_options;
+
+ // Initialize All Components
+ result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
+
+ if (result)
+ result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
+
+ if (result)
+ result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
+
+ if (result) {
+ mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
+ mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
+ result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
+ }
+
+ if (result) {
+ core_init_params.project_id = in_out->options.project_id;
+ core_init_params.instance = &dml->core_instance;
+ core_init_params.minimum_clock_table = &dml->min_clk_table;
+ core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
+ core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
+ core_init_params.ip_caps = &in_out->ip_caps;
+ core_init_params.soc_bb = &in_out->soc_bb;
+ result = dml->core_instance.initialize(&core_init_params);
+
+ if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ }
+ }
+
+ if (result)
+ result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
+
+ if (result) {
+ pmo_init_params.instance = &dml->pmo_instance;
+ pmo_init_params.soc_bb = &dml->soc_bbox;
+ pmo_init_params.ip_caps = &dml->ip_caps;
+ pmo_init_params.mcg_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
+ pmo_init_params.options = &dml->pmo_options;
+ dml->pmo_instance.initialize(&pmo_init_params);
+ }
+ dml->funcs = soc15_funcs;
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
index 7b1f6f7143d0..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
@@ -2,22 +2,13 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#ifndef __DML_TOP_MCACHE_H__
-#define __DML_TOP_MCACHE_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml_top_display_cfg_types.h"
-#include "dml_top_types.h"
+#ifndef __DML2_TOP_SOC15_H__
+#define __DML2_TOP_SOC15_H__
#include "dml2_internal_shared_types.h"
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params);
-
void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params);
-
bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params);
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
-
-bool dml2_top_mcache_unit_test(void);
-
-#endif
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
+#endif /* __DML2_TOP_SOC15_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
deleted file mode 100644
index a342ebfbe4e7..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-#include "dml_top_mcache.h"
-#include "lib_float_math.h"
-
-#include "dml2_internal_shared_types.h"
-
-/*
-* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
-* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
-* that the horizontal viewport does not span more than 2 cache slices.
-*
-* It optionally also can apply a constant shift to all the cache boundaries.
-*/
-static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
-static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
-
-static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
- int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
-{
- const int MAX_VP = 0xFFFFFF;
- int left_cache_id;
- int right_cache_id;
- int range_start;
- int range_end;
- bool success = false;
-
- if (num_boundaries <= 1) {
- if (first_offset && second_offset) {
- *first_offset = 0;
- *second_offset = -1;
- }
- success = true;
- return success;
- } else {
- range_start = 0;
- for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
- range_end = mcache_boundaries[left_cache_id] - shift - 1;
-
- if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
- break;
-
- range_start = range_end + 1;
- }
-
- range_end = MAX_VP;
- for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
- if (right_cache_id >= 0)
- range_start = mcache_boundaries[right_cache_id] - shift;
- else
- range_start = 0;
-
- if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
- break;
- }
- range_end = range_start - 1;
- }
- right_cache_id = (right_cache_id + 1) % num_boundaries;
-
- if (right_cache_id == left_cache_id) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = -1;
- }
- success = true;
- } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = right_cache_id;
- }
- success = true;
- }
- }
-
- return success;
-}
-
-/*
-* For a given set of pipe start/end x positions, checks to see if it can support the input mcache splitting.
-* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
-*/
-static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
- int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
-{
- int max_shift = 0xFFFF;
- unsigned int pipe_index;
- unsigned int i, slice_width;
- bool success = false;
-
- for (i = 0; i < num_boundaries; i++) {
- if (i == 0)
- slice_width = mcache_boundaries[i];
- else
- slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
-
- if (max_shift > (int)slice_width) {
- max_shift = slice_width;
- }
- }
-
- for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
- success = true;
- for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
- if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
- pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], 0, 0)) {
- success = false;
- break;
- }
- }
- if (success)
- break;
- }
-
- return success;
-}
-
-/*
-* Counts the number of elements of the input array that fall within the given span length.
-* Formally: the size of the largest subset of the array whose largest and smallest elements
-* differ by no more than the span.
-*/
-static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
-{
- unsigned int i;
- unsigned int span_start_value;
- unsigned int span_start_index;
- unsigned int greatest_element_count;
-
- if (array_size == 0)
- return 1;
-
- if (span == 0)
- return array_size > 0 ? 1 : 0;
-
- span_start_value = 0;
- span_start_index = 0;
- greatest_element_count = 0;
-
- while (span_start_index < array_size) {
- for (i = span_start_index; i < array_size; i++) {
- if (array[i] - span_start_value <= span) {
- if (i - span_start_index + 1 > greatest_element_count) {
- greatest_element_count = i - span_start_index + 1;
- }
- } else
- break;
- }
-
- span_start_index++;
-
- if (span_start_index < array_size) {
- span_start_value = array[span_start_index - 1] + 1;
- }
- }
-
- return greatest_element_count;
-}
-
-static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
- enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
-{
- int i, slice_width;
- const char MAX_SCL_VP_OVERLAP = 3;
- bool success = false;
-
- switch (scaling_transform) {
- case dml2_scaling_transform_centered:
- case dml2_scaling_transform_aspect_ratio:
- case dml2_scaling_transform_fullscreen:
- slice_width = full_vp_width / num_pipes;
- for (i = 0; i < num_pipes; i++) {
- pipe_vp_x_start[i] = i * slice_width;
- pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
-
- if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
- pipe_vp_x_start[i] = 0;
- else
- pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
-
- if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
- pipe_vp_x_end[i] = full_vp_width - 1;
- else
- pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
- }
- break;
- case dml2_scaling_transform_explicit:
- default:
- success = false;
- break;
- }
-
- return success;
-}
-
-bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
-
- const int MAX_PIXEL_OVERLAP = 6;
- int max_per_pipe_vp_p0 = 0;
- int max_per_pipe_vp_p1 = 0;
- int temp, p0shift, p1shift;
- unsigned int plane_index = 0;
- unsigned int i;
- unsigned int odm_combine_factor;
- unsigned int mpc_combine_factor;
- unsigned int num_dpps;
- unsigned int num_boundaries;
- enum dml2_scaling_transform scaling_transform;
- const struct dml2_plane_parameters *plane;
- const struct dml2_stream_parameters *stream;
-
- bool p0pass = false;
- bool p1pass = false;
- bool all_pass = true;
-
- for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
- if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
- continue;
-
- plane = &params->display_cfg->plane_descriptors[plane_index];
- stream = &params->display_cfg->stream_descriptors[plane->stream_index];
-
- num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
-
- if (odm_combine_factor == 1)
- num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
- else
- mpc_combine_factor = 1;
-
- if (odm_combine_factor > 1) {
- max_per_pipe_vp_p0 = plane->surface.plane0.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p0)
- max_per_pipe_vp_p0 = temp;
-
- max_per_pipe_vp_p1 = plane->surface.plane1.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p1)
- max_per_pipe_vp_p1 = temp;
- } else {
- max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
- max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
- }
-
- max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
- max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
-
- p0shift = 0;
- p1shift = 0;
-
- // The last element in the unshifted boundary array will always be the first pixel outside the
- // plane, which means there's no mcache associated with it, hence the -1
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
- p0pass = true;
- }
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
- p1pass = true;
- }
-
- if (!p0pass || !p1pass) {
- if (odm_combine_factor > 1) {
- num_dpps = odm_combine_factor;
- scaling_transform = plane->composition.scaling_transform;
- } else {
- num_dpps = mpc_combine_factor;
- scaling_transform = dml2_scaling_transform_fullscreen;
- }
-
- if (!p0pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- params->mcache_allocations[plane_index].num_mcaches_plane0,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
- }
- }
- if (!p1pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- params->mcache_allocations[plane_index].num_mcaches_plane1,
- &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
- }
- }
- }
-
- if (p0pass && p1pass) {
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
- }
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
- }
- }
-
- params->per_plane_status[plane_index] = p0pass && p1pass;
- all_pass &= p0pass && p1pass;
- }
-
- return all_pass;
-}
-
-static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
-{
- // Initialize all entries to special valid MCache ID and special valid split coordinate
- per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-}
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
-{
- bool success = true;
- int config_index, pipe_index;
- int first_offset, second_offset;
- int free_per_plane_reg_index = 0;
-
- memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
-
- for (config_index = 0; config_index < params->num_configurations; config_index++) {
- for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
- // Allocate storage for the mcache regs
- params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
-
- reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
-
- if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
- // P0 always enabled
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
- }
-
- // Populate P1 if enabled
- if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
- }
- }
- }
- }
- }
-
- return success;
-}
-
-void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
-{
- int i;
- unsigned int j;
- int next_unused_cache_id = 0;
-
- for (i = 0; i < params->num_allocations; i++) {
- if (!params->allocations[i].valid)
- continue;
-
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_plane0[0];
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_plane1[0];
-
- // If we need dedicated caches for mall requesting, then we assign them here.
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_mall_plane0[0];
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_mall_plane1[0];
- }
-
- // If P0 and P1 share caches, the largest mcache IDs for P0 and P1 can be the same.
- // Since mcache IDs are always ascending, the largest mcache ID of P1 should then be
- // the largest mcache ID of P0.
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
-
- // If we need dedicated caches handle last slice sharing
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_l is set then it means that largest mcache ID for MALL p0 can be same as regular read p0
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_c is set then it means that largest mcache ID for MALL p1 can be same as regular
- // read p1 (which can be same as regular read p0 if plane0_plane1 is also set)
- if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
- }
- }
-
- // If you don't need dedicated mall mcaches, the mall mcache assignments are identical to the normal requesting
- if (!params->allocations[i].requires_dedicated_mall_mcache) {
- memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
- memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
- }
- }
-}
-
-bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
-
- unsigned int total_mcaches_required;
- unsigned int i;
- bool result = false;
-
- if (dml->soc_bbox.num_dcc_mcaches == 0) {
- return true;
- }
-
- total_mcaches_required = 0;
- l->calc_mcache_params.instance = &dml->core_instance;
- for (i = 0; i < params->display_config->num_planes; i++) {
- if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
- memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
- continue;
- }
-
- l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
- l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
- l->calc_mcache_params.plane_index = i;
-
- if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params)) {
- result = false;
- break;
- }
-
- if (params->mcache_allocations[i].valid) {
- total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
- if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
- total_mcaches_required--;
- }
- }
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
-
- if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
- result = false;
- } else {
- result = true;
- }
-
- return result;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index e9b8e10695ae..f95c7ff56f15 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -4,6 +4,11 @@
#include "dml2_debug.h"
+int dml2_log_internal(const char *format, ...)
+{
+ return 0;
+}
+
int dml2_printf(const char *format, ...)
{
#ifdef _DEBUG
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index d51a1b6c62f2..a27792b56f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -8,9 +8,53 @@
#ifdef _DEBUG
#define DML2_ASSERT(condition) dml2_assert(condition)
#else
-#define DML2_ASSERT(condition)
+#define DML2_ASSERT(condition) ((void)0)
+#endif
+/*
+ * DML_LOG_FATAL - fatal errors that leave DML in an unrecoverable state until a restart
+ * DML_LOG_ERROR - unexpected but recoverable failures inside DML
+ * DML_LOG_WARN - unexpected inputs or events to DML
+ * DML_LOG_INFO - high-level tracing of DML interfaces
+ * DML_LOG_DEBUG - detailed tracing of DML internal components
+ * DML_LOG_VERBOSE - detailed tracing of the DML calculation procedure
+ */
+#if !defined(DML_LOG_LEVEL)
+#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
+/* for backward compatibility with old macros */
+#define DML_LOG_LEVEL 5
+#else
+#define DML_LOG_LEVEL 0
+#endif
+#endif
+
+#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= 1
+#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 2
+#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_WARN(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 3
+#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_INFO(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 4
+#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 5
+#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
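+
+/*
+ * Example usage (illustrative): a DML component would emit
+ *   DML_LOG_VERBOSE("%s: min_clk_index = %d\n", __func__, min_clk_index);
+ * which compiles away to ((void)0) whenever DML_LOG_LEVEL < 5.
+ */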
+int dml2_log_internal(const char *format, ...);
int dml2_printf(const char *format, ...);
void dml2_assert(int condition);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index aeac9f159fa5..7fb6026bcb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -8,7 +8,6 @@
#include "dml2_external_lib_deps.h"
#include "dml_top_types.h"
#include "dml2_core_shared_types.h"
-
/*
* DML2 MCG Types and Interfaces
*/
@@ -63,7 +62,6 @@ struct dml2_mcg_build_min_clock_table_params_in_out {
*/
struct dml2_mcg_min_clock_table *min_clk_table;
};
-
struct dml2_mcg_instance {
bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool (*unit_test)(void);
@@ -81,7 +79,6 @@ struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out {
struct dml2_soc_bb *soc_bb;
struct dml2_mcg_min_clock_table *min_clk_table;
const struct display_configuation_with_meta *display_cfg;
-
struct {
bool perform_pseudo_map;
struct dml2_core_internal_soc_bb *soc_bb;
@@ -309,7 +306,7 @@ struct dml2_optimization_stage3_state {
// The pstate support mode for each plane
// The number of valid elements == display_cfg.num_planes
// The indexing of pstate_switch_modes matches plane_descriptors[]
- enum dml2_uclk_pstate_support_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Meta-data for implicit SVP generation, indexed by stream index
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
@@ -356,6 +353,10 @@ struct display_configuation_with_meta {
struct dml2_optimization_stage5_state stage5;
};
+struct dml2_pmo_pstate_strategy {
+ enum dml2_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
+ bool allow_state_increase;
+};
struct dml2_core_mode_support_in_out {
/*
* Inputs
@@ -365,7 +366,6 @@ struct dml2_core_mode_support_in_out {
struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
/*
* Outputs
*/
@@ -395,7 +395,6 @@ struct dml2_core_mode_programming_in_out {
struct dml2_core_instance *instance;
const struct display_configuation_with_meta *display_cfg;
const struct core_display_cfg_support_info *cfg_support_info;
-
/*
* Outputs (also Input the clk freq are also from programming struct)
*/
@@ -445,6 +444,7 @@ struct dml2_core_internal_state_intermediates {
struct dml2_core_mode_support_locals {
struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
struct dml2_display_cfg svp_expanded_display_cfg;
+ struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
@@ -600,34 +600,11 @@ struct dml2_pmo_optimize_for_stutter_in_out {
struct display_configuation_with_meta *optimized_display_config;
};
-enum dml2_pmo_pstate_method {
- dml2_pmo_pstate_strategy_na = 0,
- /* hw exclusive modes */
- dml2_pmo_pstate_strategy_vactive = 1,
- dml2_pmo_pstate_strategy_vblank = 2,
- dml2_pmo_pstate_strategy_reserved_hw = 5,
- /* fw assisted exclusive modes */
- dml2_pmo_pstate_strategy_fw_svp = 6,
- dml2_pmo_pstate_strategy_reserved_fw = 10,
- /* fw assisted modes requiring drr modulation */
- dml2_pmo_pstate_strategy_fw_vactive_drr = 11,
- dml2_pmo_pstate_strategy_fw_vblank_drr = 12,
- dml2_pmo_pstate_strategy_fw_svp_drr = 13,
- dml2_pmo_pstate_strategy_reserved_fw_drr_clamped = 20,
- dml2_pmo_pstate_strategy_fw_drr = 21,
- dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
-};
-
-struct dml2_pmo_pstate_strategy {
- enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
- bool allow_state_increase;
-};
-
-#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
-#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_drr)
-#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_svp + 1)) - 1) << dml2_pmo_pstate_strategy_fw_svp)
+#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw - dml2_pstate_method_na + 1)) - 1) << dml2_pstate_method_na)
+#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_clamped - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_drr + 1)) - 1) << dml2_pstate_method_fw_drr)
+#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_svp + 1)) - 1) << dml2_pstate_method_fw_svp)
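+
+/*
+ * Illustrative expansion, assuming dml2_pstate_method keeps the ordinal
+ * values of the enum removed above (na = 0, fw_svp = 6, reserved_fw = 10,
+ * reserved_fw_drr_var = 22): PMO_NO_DRR_STRATEGY_MASK evaluates to
+ * ((1 << 11) - 1) << 0 = 0x7ff and PMO_FW_STRATEGY_MASK to
+ * ((1 << 17) - 1) << 6 = 0x7fffc0, i.e. one bit per covered method.
+ */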
#define PMO_DCN4_MAX_DISPLAYS 4
#define PMO_DCN4_MAX_NUM_VARIANTS 2
@@ -645,6 +622,8 @@ struct dml2_pmo_scratch {
int stream_mask;
} pmo_dcn3;
struct {
+ struct dml2_pmo_pstate_strategy expanded_override_strategy_list[2 * 2 * 2 * 2];
+ unsigned int num_expanded_override_strategies;
struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
int num_pstate_candidates;
int cur_pstate_candidate;
@@ -706,7 +685,6 @@ struct dml2_pmo_instance {
int mpc_combine_limit;
int odm_combine_limit;
int mcg_clock_table_size;
-
union {
struct {
struct {
@@ -963,7 +941,13 @@ struct dml2_top_mcache_validate_admissability_locals {
struct dml2_top_display_cfg_support_info {
const struct dml2_display_cfg *display_config;
struct core_display_cfg_support_info core_info;
- enum dml2_pstate_support_method per_plane_pstate_method[DML2_MAX_PLANES];
+};
+
+struct dml2_top_funcs {
+ bool (*check_mode_supported)(struct dml2_check_mode_supported_in_out *in_out);
+ bool (*build_mode_programming)(struct dml2_build_mode_programming_in_out *in_out);
+ bool (*build_mcache_programming)(struct dml2_build_mcache_programming_in_out *in_out);
+ bool (*unit_test)(void);
};
struct dml2_instance {
@@ -978,8 +962,8 @@ struct dml2_instance {
struct dml2_ip_capabilities ip_caps;
struct dml2_mcg_min_clock_table min_clk_table;
-
struct dml2_pmo_options pmo_options;
+ struct dml2_top_funcs funcs;
struct {
struct dml2_initialize_instance_locals initialize_instance_locals;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 3d29169dd6bb..6b3b8803e0ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index bde4250853b1..b8a34abaf519 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -553,13 +553,53 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
}
- dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
- // Override last out_state with data from last in_state
- // This will ensure that out_state contains max fclk
- memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
- &p->in_states->state_array[p->in_states->num_states - 1],
- sizeof(struct soc_state_bounding_box_st));
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
+ max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ if (p->in_states->state_array[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = (int)p->in_states->state_array[i].dcfclk_mhz;
+ if (p->in_states->state_array[i].fabricclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = (int)p->in_states->state_array[i].fabricclk_mhz;
+ if (p->in_states->state_array[i].socclk_mhz > max_socclk_mhz)
+ max_socclk_mhz = (int)p->in_states->state_array[i].socclk_mhz;
+ if (p->in_states->state_array[i].dram_speed_mts > max_uclk_mhz)
+ max_uclk_mhz = (int)p->in_states->state_array[i].dram_speed_mts;
+ if (p->in_states->state_array[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = (int)p->in_states->state_array[i].dispclk_mhz;
+ if (p->in_states->state_array[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = (int)p->in_states->state_array[i].dppclk_mhz;
+ if (p->in_states->state_array[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = (int)p->in_states->state_array[i].phyclk_mhz;
+ if (p->in_states->state_array[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = (int)p->in_states->state_array[i].dtbclk_mhz;
+ }
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ /* Independent states - including base (unlisted) parameters from state 0. */
+ p->out_states->state_array[i] = p->in_states->state_array[0];
+
+ p->out_states->state_array[i].dispclk_mhz = max_dispclk_mhz;
+ p->out_states->state_array[i].dppclk_mhz = max_dppclk_mhz;
+ p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
+ p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
+
+ p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0;
+
+ /* Dependent states. */
+ p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
+ p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
+ p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
+ p->out_states->state_array[i].dcfclk_mhz = p->in_states->state_array[i].dcfclk_mhz;
+ }
+
+ p->out_states->num_states = p->in_states->num_states;
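+
+ /*
+ * Net effect: display-side clocks are pinned to their maxima across
+ * all input states, while dram/fabric/soc/dcf clocks keep their
+ * per-state values, so each output state remains a distinct DPM level.
+ */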
+ } else {
+ dml2_policy_build_synthetic_soc_states(s, p);
}
}
@@ -746,7 +786,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -1303,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
@@ -1343,7 +1383,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 9190c1328d5b..68b882d28195 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -531,14 +531,21 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
unsigned int result = 0;
- unsigned int min_state;
+ unsigned int min_state = 0;
int min_state_for_g6_temp_read = 0;
+
+ if (!context)
+ return false;
+
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
- ASSERT(min_state_for_g6_temp_read >= 0);
+ ASSERT(min_state_for_g6_temp_read >= 0);
+ }
if (!dml2->config.use_native_pstate_optimization) {
result = optimize_pstate_with_svp_and_drr(dml2, context);
@@ -549,14 +556,20 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state, causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
*/
- if (min_state_for_g6_temp_read >= 0)
- min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
- else
- min_state = s->mode_support_params.out_lowest_state_idx;
-
- if (result)
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ if (min_state_for_g6_temp_read >= 0)
+ min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
+ else
+ min_state = s->mode_support_params.out_lowest_state_idx;
+ }
+ if (result) {
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ } else {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
+ }
+ }
return result;
}
@@ -685,6 +698,8 @@ static bool dml2_validate_only(struct dc_state *context)
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
+ if (!dml2->config.skip_hw_state_mapping)
+ dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
@@ -732,11 +747,10 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, dml2, config);
return;
- }
+ }
// Store config options
(*dml2)->config = *config;
@@ -771,10 +785,8 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
- }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
@@ -842,8 +854,7 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
dml21_reinit(in_dc, dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
index 377ef6d01ae5..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
@@ -427,18 +427,6 @@ void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *disp_dlg_regs,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
- // hack for FPGA
- /* NOTE: We dont have getenv defined in driver and it does not make any sense in the driver */
- /*char* fpga_env = getenv("FPGA_FPDIV");
- if(fpga_env !=NULL)
- {
- if(disp_dlg_regs->vratio_prefetch >= (dml_uint_t)dml_pow(2, 22))
- {
- disp_dlg_regs->vratio_prefetch = (dml_uint_t)dml_pow(2, 22)-1;
- dml_print("FPGA msg: vratio_prefetch exceed the max value, the register field is [21:0]\n");
- }
- }*/
-
disp_dlg_regs->refcyc_per_vm_group_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_group_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (dml_uint_t)(dml_get_refcyc_per_vm_group_flip_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_req_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_req_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index e1da48b05d00..75fb77bca83b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -194,6 +194,9 @@ void dpp_reset(struct dpp *dpp_base)
dpp->filter_h = NULL;
dpp->filter_v = NULL;
+ memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
+ memset(&dpp_base->att, 0, sizeof(dpp_base->att));
+
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
}
@@ -480,10 +483,11 @@ void dpp1_set_cursor_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- REG_UPDATE(CURSOR0_CONTROL,
- CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
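
The cursor hunks in this series (here, in dcn401_dpp_cm.c, and in the hubp2/hubp401 cursor-position hunks below) share one idiom: cache the last value written to CURSOR*_CONTROL and skip the register write when it would be a no-op. A self-contained sketch, with a stand-in accessor rather than the DC REG_UPDATE macro:

#include <stdint.h>

struct cur_shadow { uint32_t cur_enable; };

static void mmio_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void set_cursor_enable(struct cur_shadow *shadow, uint32_t reg,
			      uint32_t cur_en)
{
	if (shadow->cur_enable == cur_en)
		return;			/* value already in hardware */
	mmio_write(reg, cur_en);	/* touch HW only on change */
	shadow->cur_enable = cur_en;	/* keep shadow in sync */
}

The idiom only works if the shadow is invalidated whenever the hardware can change underneath it, which is exactly why the dpp_reset() hunk above now memsets dpp_base->pos and dpp_base->att.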
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 3b6ca7974e18..1236e0f9a256 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -154,9 +154,11 @@ void dpp401_set_cursor_position(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t cur_en = pos->enable ? 1 : 0;
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
void dpp401_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d9aaebfa3a0a..11535922b5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#define DC_LOGGER \
+ dsc->ctx->logger
+
/* This module's internal functions */
/* default DSC policy target bitrate limit is 16bpp */
@@ -480,6 +483,48 @@ bool dc_dsc_compute_bandwidth_range(
return is_dsc_possible;
}
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing)
+{
+ struct dsc_enc_caps dsc_enc_caps;
+
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ DC_LOG_DSC("dsc encoder caps:");
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_enc_caps.dsc_version);
+ DC_LOG_DSC("\tslice_caps 0x%x", dsc_enc_caps.slice_caps.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_enc_caps.lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_enc_caps.is_block_pred_supported);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_enc_caps.color_formats.raw);
+ DC_LOG_DSC("\tcolor_depth 0x%x", dsc_enc_caps.color_depth.raw);
+ DC_LOG_DSC("\tmax_total_throughput_mps %d", dsc_enc_caps.max_total_throughput_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_enc_caps.max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_enc_caps.bpp_increment_div);
+}
+
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps)
+{
+ DC_LOG_DSC("dsc decoder caps:");
+ DC_LOG_DSC("\tis_dsc_supported %d", dsc_sink_caps->is_dsc_supported);
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_sink_caps->dsc_version);
+ DC_LOG_DSC("\trc_buffer_size %d", dsc_sink_caps->rc_buffer_size);
+ DC_LOG_DSC("\tslice_caps1 0x%x", dsc_sink_caps->slice_caps1.raw);
+ DC_LOG_DSC("\tslice_caps2 0x%x", dsc_sink_caps->slice_caps2.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_sink_caps->lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_sink_caps->is_block_pred_supported);
+ DC_LOG_DSC("\tedp_max_bits_per_pixel %d", dsc_sink_caps->edp_max_bits_per_pixel);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_sink_caps->color_formats.raw);
+ DC_LOG_DSC("\tthroughput_mode_0_mps %d", dsc_sink_caps->throughput_mode_0_mps);
+ DC_LOG_DSC("\tthroughput_mode_1_mps %d", dsc_sink_caps->throughput_mode_1_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_sink_caps->max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_sink_caps->bpp_increment_div);
+ DC_LOG_DSC("\tbranch_overall_throughput_0_mps %d", dsc_sink_caps->branch_overall_throughput_0_mps);
+ DC_LOG_DSC("\tbranch_overall_throughput_1_mps %d", dsc_sink_caps->branch_overall_throughput_1_mps);
+ DC_LOG_DSC("\tbranch_max_line_width %d", dsc_sink_caps->branch_max_line_width);
+ DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
+}
+
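The two dump helpers added above give symmetric visibility into both ends of a DSC link. A usage sketch; the caller-side wiring here is assumed, not part of the patch:

/* Log encoder and sink DSC capabilities before mode validation. */
static void log_dsc_link_caps(const struct display_stream_compressor *dsc,
			      const struct dc_crtc_timing *timing,
			      const struct dsc_dec_dpcd_caps *sink_caps)
{
	dc_dsc_dump_encoder_caps(dsc, timing);		/* what the HW encoder can do */
	dc_dsc_dump_decoder_caps(dsc, sink_caps);	/* what the sink advertised */
}
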
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index fae98cf52020..bc058f682438 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
@@ -270,16 +270,3 @@ void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
dwbc30->dwbc_shift = dwbc_shift;
dwbc30->dwbc_mask = dwbc_mask;
}
-
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
-{
- struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
-
- /*
- * Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no
- * idle cycles in HW pipeline (in number of clock cycles times 4)
- */
- REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
-
- DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
index 0f3f7c5fbaec..7f053f49ec6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
@@ -914,7 +914,6 @@ bool dwb3_ogam_set_input_transfer_func(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index fe741100c0f8..d347bb06577a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
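
This hunk, and the matching dcn31/dcn32/dcn35 hubbub hunks below, apply DC's safe_to_lower convention to self-refresh: a setting that relaxes hardware protection is deferred until the caller flags it safe, while the conservative direction may be programmed immediately. A reduced sketch of the guard:

#include <stdbool.h>

static void program_self_refresh(bool safe_to_lower, bool disable_stutter,
				 void (*allow_self_refresh)(bool allow))
{
	/* Allowing self-refresh is a relaxation: only on safe_to_lower.
	 * Forcing it off (disable_stutter) is conservative, always OK. */
	if (safe_to_lower || disable_stutter)
		allow_self_refresh(!disable_stutter);
}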
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 7fb5523f9722..b98505b240a7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 5264dc26cce1..32a6be543105 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 5eb3da8d5206..dce7269959ce 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 22ac2b7e49ae..9b026600b90e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -140,7 +140,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -518,6 +518,20 @@ bool hubp1_program_surface_flip_and_addr(
return true;
}
+void hubp1_clear_tiling(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp1_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -532,10 +546,16 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
+void hubp_reset(struct hubp *hubp)
+{
+ memset(&hubp->pos, 0, sizeof(hubp->pos));
+ memset(&hubp->att, 0, sizeof(hubp->att));
+}
+
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1337,8 +1357,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
void hubp1_init(struct hubp *hubp)
{
- //do nothing
+ hubp_reset(hubp);
}
+
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1351,6 +1372,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
@@ -1363,6 +1385,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
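
Each HUBP generation in this series gains a hubp_clear_tiling hook with the same contract: return the surface to linear, uncompressed scanout (the per-generation variants differ only in which DCC sub-fields exist). A register-level sketch; the struct is an illustration, not the DC register map:

struct tiling_state_sketch {
	unsigned int swath_height;	/* DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT */
	unsigned int sw_mode;		/* DCSURF_TILING_CONFIG:SW_MODE */
	unsigned int pri_dcc_en;	/* PRIMARY_SURFACE_DCC_EN */
	unsigned int sec_dcc_en;	/* SECONDARY_SURFACE_DCC_EN */
};

static void clear_tiling_sketch(struct tiling_state_sketch *t)
{
	t->swath_height = 0;
	t->sw_mode = 0;		/* DC_SW_LINEAR */
	t->pri_dcc_en = 0;	/* DCC off on both surfaces, since a linear */
	t->sec_dcc_en = 0;	/* surface must not be read as compressed */
}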
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index 69119b2fdce2..c7765e6f09e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -706,7 +706,7 @@ struct dcn10_hubp {
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -739,13 +739,15 @@ void hubp1_program_rotation(
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp1_dcc_control(struct hubp *hubp,
bool enable,
enum hubp_ind_block_size independent_64b_blks);
+void hubp_reset(struct hubp *hubp);
+
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -794,4 +796,6 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset);
void hubp1_set_flip_int(struct hubp *hubp);
+void hubp1_clear_tiling(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 0637e4c552d8..91259b896e03 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -310,7 +310,7 @@ void hubp2_setup_interdependent(
*/
static void hubp2_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
@@ -406,6 +406,20 @@ void hubp2_program_rotation(
H_MIRROR_EN, mirror);
}
+void hubp2_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp2_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -536,7 +550,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1044,11 +1058,13 @@ void hubp2_cursor_set_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
+ REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
@@ -1660,6 +1676,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -1676,6 +1693,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 18e194507e36..6968087a3605 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -382,7 +382,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -409,6 +409,8 @@ void hubp2_read_state_common(struct hubp *hubp);
void hubp2_read_state(struct hubp *hubp);
+void hubp2_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_MEM_INPUT_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
index cd2bfcc51276..ec88ee424a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
@@ -42,7 +42,7 @@
static void hubp201_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.set_cursor_position = hubp1_cursor_set_position,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
@@ -131,6 +132,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool dcn201_hubp_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e13d69a22c1c..e2740482e1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -811,6 +811,8 @@ static void hubp21_init(struct hubp *hubp)
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -823,6 +825,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp21_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
@@ -837,6 +840,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_init = hubp21_init,
.validate_dml_output = hubp21_validate_dml_output,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 60a64d290352..0da70b50e86d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -318,7 +318,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
@@ -334,6 +334,22 @@ void hubp3_program_tiling(
}
+void hubp3_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK_C, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK_C, 0);
+}
+
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size)
{
@@ -395,7 +411,7 @@ void hubp3_dmdata_set_attributes(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -483,6 +499,10 @@ void hubp3_init(struct hubp *hubp)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+
+ hubp_reset(hubp);
}
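
hubp3_init() (and hubp32_init below) now additionally clears HUBP_TTU_DISABLE and resets the cached register images. A sketch of the resulting init ordering, with stand-in helpers for the register writes:

static void apply_debug_workaround(void) { }	/* HUBPREQ_DEBUG bit 26 */
static void enable_ttu(void)             { }	/* HUBP_TTU_DISABLE = 0 */
static void reset_shadow(void)           { }	/* hubp_reset(): drop pos/att */

static void hubp_init_sketch(void)
{
	apply_debug_workaround();	/* pre-existing step */
	enable_ttu();			/* new: undo any stale TTU disable */
	reset_shadow();			/* new: never trust cached state at init */
}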
static struct hubp_funcs dcn30_hubp_funcs = {
@@ -497,6 +517,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -512,6 +533,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b010531a7fe8..b7d7adf0b58c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -264,7 +264,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -280,7 +280,7 @@ void hubp3_setup(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp3_dcc_control(struct hubp *hubp, bool enable,
@@ -297,6 +297,8 @@ void hubp3_read_state(struct hubp *hubp);
void hubp3_init(struct hubp *hubp);
+void hubp3_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 8394e8c06919..c2900c79a2d3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -96,6 +97,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index ca5b4b28a664..f3a21c623f44 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -181,6 +183,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -201,7 +204,8 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_force_cursor_pstate_disallow = hubp32_update_force_cursor_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index d1f05b82b3dd..5661d7a80d54 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -172,7 +172,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -216,6 +217,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
index 586b43aa5834..d913f80b3130 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
@@ -65,7 +65,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index b1ebf5053b4f..5ed195377a6c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -40,7 +40,7 @@
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
-static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
const struct dc_plane_address address)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -49,14 +49,14 @@ static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address.lut3d.addr.low_part);
}
-static void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, refcyc_per_3dlut_group);
}
-static void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -72,28 +72,28 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp)
return ret;
}
-static void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, addr_mode);
}
-static void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-static void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
}
-static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r)
@@ -106,21 +106,21 @@ static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
HUBP_3DLUT_CROSSBAR_SELECT_CR_R, bit_slice_cr_r);
}
-static void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, bias, HUBP0_3DLUT_FL_SCALE, scale);
}
-static void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mode);
}
-static void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -141,34 +141,48 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
void hubp401_init(struct hubp *hubp)
{
- //For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
+ hubp_reset(hubp);
}
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
- uint32_t value = 0;
+ unsigned int vstartup_lines = pipe_global_sync->dcn4x.vstartup_lines;
+ unsigned int vupdate_offset_pixels = pipe_global_sync->dcn4x.vupdate_offset_pixels;
+ unsigned int vupdate_width_pixels = pipe_global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int vready_offset_pixels = pipe_global_sync->dcn4x.vready_offset_pixels;
+ unsigned int htotal = timing->h_total;
+ unsigned int vblank_start = 0;
+ unsigned int vblank_end = 0;
+ unsigned int pixel_width = 0;
+ uint32_t reg_value = 0;
+ bool is_vready_at_or_after_vsync = false;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
/*
* if (VSTARTUP_START - (VREADY_OFFSET+VUPDATE_WIDTH+VUPDATE_OFFSET)/htotal) <= OTG_V_BLANK_END
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
* else
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if (pipe_dest->htotal != 0) {
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (htotal != 0) {
+ vblank_start = timing->v_total - timing->v_front_porch;
+ vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
+ pixel_width = vready_offset_pixels + vupdate_width_pixels + vupdate_offset_pixels;
+
+ is_vready_at_or_after_vsync = (vstartup_lines - pixel_width / htotal) <= vblank_end;
+
+ if (is_vready_at_or_after_vsync)
+ reg_value = 1;
}
- REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, reg_value);
}
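
The rewritten test derives its inputs from dc_crtc_timing and dml2_global_sync_programming instead of the legacy _vcs_dpi pipe-dest struct, but the decision itself is unchanged. A numeric sketch of the comparison (the helper is illustrative and mirrors the hunk, including its unsigned arithmetic):

#include <stdbool.h>

static bool vready_at_or_after_vsync(unsigned int vstartup_lines,
				     unsigned int vready_offset_px,
				     unsigned int vupdate_width_px,
				     unsigned int vupdate_offset_px,
				     unsigned int htotal,
				     unsigned int vblank_end)
{
	if (htotal == 0)
		return false;	/* matches the guard above: reg stays 0 */

	/* The offsets are in pixels; divide by htotal to convert to lines. */
	unsigned int px = vready_offset_px + vupdate_width_px +
			  vupdate_offset_px;

	return (vstartup_lines - px / htotal) <= vblank_end;
}

with vblank_end computed as in the hunk: (v_total - v_front_porch) - v_addressable - v_border_top - v_border_bottom.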
void hubp401_program_requestor(
struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+ struct dml2_display_rq_regs *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -196,8 +210,8 @@ void hubp401_program_requestor(
void hubp401_program_deadline(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -294,66 +308,64 @@ void hubp401_program_deadline(
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
/* otg is locked when this func is called. Registers are double buffered,
* so disabling the requestors is not needed
*/
- hubp401_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp401_program_requestor(hubp, rq_regs);
- hubp401_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
+ hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
+ hubp401_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ DST_Y_PREFETCH, pipe_regs->dlg_regs.dst_y_prefetch,
+ VRATIO_PREFETCH, pipe_regs->dlg_regs.vratio_prefetch);
REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ VRATIO_PREFETCH_C, pipe_regs->dlg_regs.vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+ DST_Y_PER_VM_VBLANK, pipe_regs->dlg_regs.dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, pipe_regs->dlg_regs.dst_y_per_row_vblank);
REG_SET_2(FLIP_PARAMETERS_0, 0,
- DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
- DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
+ DST_Y_PER_VM_FLIP, pipe_regs->dlg_regs.dst_y_per_vm_flip,
+ DST_Y_PER_ROW_FLIP, pipe_regs->dlg_regs.dst_y_per_row_flip);
REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+ REFCYC_PER_META_CHUNK_VBLANK_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+ REFCYC_PER_META_CHUNK_VBLANK_C, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_c);
REG_SET(FLIP_PARAMETERS_2, 0,
- REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
+ REFCYC_PER_META_CHUNK_FLIP_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_flip_l);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+ REFCYC_PER_LINE_DELIVERY_PRE_L, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ REFCYC_PER_REQ_DELIVERY_PRE, pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_cur0);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+ MIN_TTU_VBLANK, pipe_regs->ttu_regs.min_ttu_vblank,
+ QoS_LEVEL_FLIP, pipe_regs->ttu_regs.qos_level_flip);
}
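
hubp401_setup() and hubp401_setup_interdependent() now take one per-pipe register set instead of three loose _vcs_dpi pointers. A shape sketch of the aggregate, built only from fields visible in this hunk (the real definition lives in the DML2.1 headers and carries many more members):

struct dml2_display_dlg_regs_sketch {
	unsigned int dst_y_prefetch;
	unsigned int vratio_prefetch;
	/* ... */
};

struct dml2_display_ttu_regs_sketch {
	unsigned int min_ttu_vblank;
	unsigned int qos_level_flip;
	/* ... */
};

struct dml2_dchub_per_pipe_register_set_sketch {
	struct dml2_display_dlg_regs_sketch dlg_regs;
	struct dml2_display_ttu_regs_sketch ttu_regs;
	/* rq_regs as well, consumed by hubp401_program_requestor() */
};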
@@ -508,6 +520,18 @@ bool hubp401_program_surface_flip_and_addr(
return true;
}
+void hubp401_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_EN, 0);
+}
+
void hubp401_dcc_control(struct hubp *hubp,
struct dc_plane_dcc_param *dcc)
{
@@ -520,7 +544,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
/* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x
@@ -568,7 +592,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -718,11 +742,13 @@ void hubp401_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, x_pos,
@@ -969,11 +995,12 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_surface_flip_and_addr = hubp401_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp401_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
- .hubp_setup = hubp401_setup,
- .hubp_setup_interdependent = hubp401_setup_interdependent,
+ .hubp_setup2 = hubp401_setup,
+ .hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp401_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp401_cursor_set_position,
@@ -1004,7 +1031,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
- .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done
+ .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index e52fdb5b0cd0..6e1d4c90ddd4 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -256,29 +256,15 @@
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
-void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-
-void hubp401_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs);
-
-void hubp401_program_deadline(
- struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
-
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
bool hubp401_program_surface_flip_and_addr(
struct hubp *hubp,
@@ -290,7 +276,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp401_program_size(
@@ -302,7 +288,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -340,4 +326,42 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp);
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale);
+
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
+
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable);
+
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group);
+
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address address);
+
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format);
+
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+
+void hubp401_clear_tiling(struct hubp *hubp);
+
+void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
+void hubp401_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs);
+
+void hubp401_program_deadline(
+ struct hubp *hubp,
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr);
+
#endif /* __DC_HUBP_DCN401_H__ */
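
With the 3D LUT fast-load helpers promoted out of file scope above, the full programming sequence can now be driven from other sequencing code. An illustrative ordering; the argument values are placeholders, not validated settings:

#include <stdbool.h>

static void program_3dlut_fl_sketch(struct hubp *hubp,
				    struct dc_plane_address addr)
{
	hubp401_program_3dlut_fl_mode(hubp, 0);			/* FL mode */
	hubp401_program_3dlut_fl_format(hubp, 0);		/* data layout */
	hubp401_program_3dlut_fl_addressing_mode(hubp, 0);
	hubp401_program_3dlut_fl_width(hubp, 0);		/* LUT dimension */
	hubp401_update_3dlut_fl_bias_scale(hubp, 0, 0xFFFF);
	hubp401_program_3dlut_fl_crossbar(hubp, 0, 0, 0);	/* channel map */
	hubp401_program_3dlut_fl_addr(hubp, addr);		/* surface address */
	hubp401_enable_3dlut_fl(hubp, true);			/* arm last */
}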
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 681bb92c6069..44e405e9bc97 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1286,6 +1286,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b029ec1b26d3..a5e18ab72394 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1288,7 +1288,7 @@ static void dcn20_power_on_plane_resources(
}
}
-static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
@@ -1467,7 +1467,7 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+void dcn20_detect_pipe_changes(struct dc_state *old_state,
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe)
@@ -1655,7 +1655,7 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
}
}
-static void dcn20_update_dchubp_dpp(
+void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -1678,25 +1678,41 @@ static void dcn20_update_dchubp_dpp(
* VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
* VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
*/
+
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
- hubp->funcs->hubp_setup(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs,
- &pipe_ctx->rq_regs,
- &pipe_ctx->pipe_dlg_param);
+ if (hubp->funcs->hubp_setup2) {
+ hubp->funcs->hubp_setup2(
+ hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ } else {
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
}
if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
- if (pipe_ctx->update_flags.bits.hubp_interdependent)
- hubp->funcs->hubp_setup_interdependent(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2) {
+ hubp->funcs->hubp_setup_interdependent2(
+ hubp,
+ &pipe_ctx->hubp_regs);
+ } else {
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+ }
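The two call sites above are the canonical optional-hook pattern: the DCN4.01 function table populates hubp_setup2 / hubp_setup_interdependent2 while older tables leave them NULL, so one sequencer serves both register layouts. Reduced to its shape, with void pointers standing in for the real types:

struct hubp_ops_sketch {
	void (*setup2)(void *hubp, const void *pipe_regs);	/* new */
	void (*setup)(void *hubp, const void *dlg,
		      const void *ttu, const void *rq);		/* legacy */
};

static void hubp_do_setup(const struct hubp_ops_sketch *f, void *hubp,
			  const void *pipe_regs, const void *dlg,
			  const void *ttu, const void *rq)
{
	if (f->setup2)
		f->setup2(hubp, pipe_regs);	/* consolidated DML2.1 path */
	else
		f->setup(hubp, dlg, ttu, rq);	/* legacy _vcs_dpi path */
}
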
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
@@ -1756,10 +1772,9 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
- if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
- hubp->funcs->hubp_program_mcache_id_and_split_coordinate(
- hubp,
- &pipe_ctx->mcache_regs);
+
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, &pipe_ctx->mcache_regs);
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
@@ -1838,7 +1853,7 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->phantom_hubp_post_enable(hubp);
}
-static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+static int dcn20_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
@@ -1864,6 +1879,30 @@ static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
return vready_offset;
}
+static void dcn20_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -1874,33 +1913,17 @@ static void dcn20_program_pipe(
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.odm ||
- pipe_ctx->stream->update_flags.bits.abm_level)
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
- !pipe_ctx->plane_state ||
- !pipe_ctx->plane_state->visible);
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
- && !pipe_ctx->prev_odm_pipe) {
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout);
-
- if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
-
- if (hws->funcs.setup_vupdate_interrupt)
- hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
- }
+ && !pipe_ctx->prev_odm_pipe)
+ dcn20_program_tg(dc, pipe_ctx, context, hws);
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
@@ -1931,22 +1954,22 @@ static void dcn20_program_pipe(
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (hws->funcs.populate_mcm_luts) {
if (pipe_ctx->plane_state) {
hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a);
pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
}
}
if (pipe_ctx->plane_state &&
- (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d ||
- pipe_ctx->update_flags.bits.enable))
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1954,10 +1977,10 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1966,7 +1989,7 @@ static void dcn20_program_pipe(
* causes a different pipe to be chosen to odm combine with.
*/
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->update_flags.bits.opp_changed) {
+ || pipe_ctx->update_flags.bits.opp_changed) {
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
@@ -1996,14 +2019,14 @@ static void dcn20_program_pipe(
memset(&params, 0, sizeof(params));
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- pipe_ctx->stream_res.test_pattern_params.test_pattern,
- pipe_ctx->stream_res.test_pattern_params.color_space,
- pipe_ctx->stream_res.test_pattern_params.color_depth,
- NULL,
- pipe_ctx->stream_res.test_pattern_params.width,
- pipe_ctx->stream_res.test_pattern_params.height,
- pipe_ctx->stream_res.test_pattern_params.offset);
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
}
}
@@ -2012,11 +2035,12 @@ void dcn20_program_front_end_for_ctx(
struct dc_state *context)
{
int i;
- struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
- struct pipe_ctx *pipe;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2029,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe, pipe->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2044,30 +2068,31 @@ void dcn20_program_front_end_for_ctx(
if (prev_hubp_count == 0 && hubp_count > 0) {
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, false);
+ dc->res_pool->hubbub, true, false);
udelay(500);
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
- &context->res_ctx.pipe_ctx[i]);
+ &context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
* buffer updates properly)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
- if (dc->hwseq->funcs.blank_pixel_data) {
+ if (dc->hwseq->funcs.blank_pixel_data)
dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
- }
+
tg->funcs->enable_crtc(tg);
}
}
@@ -2075,15 +2100,15 @@ void dcn20_program_front_end_for_ctx(
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- && !context->res_ctx.pipe_ctx[i].top_pipe
- && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
- && context->res_ctx.pipe_ctx[i].stream)
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
struct hubbub *hubbub = dc->res_pool->hubbub;
/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
@@ -2093,13 +2118,18 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i])
+ == SUBVP_PHANTOM))) {
if (hubbub->funcs->program_det_size)
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
if (dc->res_pool->hubbub->funcs->program_det_segments)
- dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
}
- hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2107,9 +2137,9 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- !resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->update_flags.bits.odm &&
- hws->funcs.update_odm)
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
@@ -2127,25 +2157,28 @@ void dcn20_program_front_end_for_ctx(
else {
/* Don't program phantom pipes in the regular front end programming sequence.
* There is an MPO transition case where a pipe being used by a video plane is
- * transitioned directly to be a phantom pipe when closing the MPO video. However
- * the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
- * but the MPO still exists until the double buffered update of the main pipe so we
- * will get a frame of underflow if the phantom pipe is programmed here.
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
*/
- if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
+
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
- && pipe->stream && pipe->stream->num_wb_info > 0
- && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->stream->update_flags.raw)
- && hws->funcs.program_all_writeback_pipes_in_tree)
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
@@ -2164,7 +2197,7 @@ void dcn20_program_front_end_for_ctx(
 * buffered pending status clear and reset opp head pipe's non-double-buffered
 * registers to their initial state.
*/
-static void post_unlock_reset_opp(struct dc *dc,
+void dcn20_post_unlock_reset_opp(struct dc *dc,
struct pipe_ctx *opp_head)
{
struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
@@ -2201,16 +2234,17 @@ void dcn20_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
- int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
- !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
- post_unlock_reset_opp(dc,
- &dc->current_state->res_ctx.pipe_ctx[i]);
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dcn20_post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -2226,11 +2260,12 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
udelay(polling_interval_us);
}
}
@@ -2244,15 +2279,14 @@ void dcn20_post_unlock_program_front_end(
* before we've transitioned to 2:1 or 4:1
*/
if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
int j = 0;
struct timing_generator *tg = pipe->stream_res.tg;
-
if (tg->funcs->get_optc_double_buffer_pending) {
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
udelay(polling_interval_us);
}
}
@@ -2260,7 +2294,7 @@ void dcn20_post_unlock_program_front_end(
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, false, false);
+ dc->res_pool->hubbub, false, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2291,11 +2325,11 @@ void dcn20_post_unlock_program_front_end(
return;
/* P-State support transitions:
- * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
- * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
- * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
- * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
- * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
@@ -2310,12 +2344,11 @@ void dcn20_post_unlock_program_front_end(
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-
/* WA for stutter underflow during MPO transitions when adding 2nd plane */
if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
if (dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ context->stream_status[0].plane_count > 1) {
struct timing_generator *tg = dc->res_pool->timing_generators[0];
@@ -2463,7 +2496,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
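
Both here and in the dcn401 copy further down, the flip-pending and OPTC
double-buffer waits share one bounded-polling shape: probe a hardware
predicate at most timeout_us / polling_interval_us times with a fixed delay
between probes. A minimal userspace sketch of that shape, with usleep()
standing in for udelay() and a hypothetical predicate standing in for
hubp->funcs->hubp_is_flip_pending():

#include <stdbool.h>
#include <unistd.h>

/* Poll a pending-predicate for at most timeout_us, probing every
 * interval_us; returns true once the condition clears, false if it is
 * still pending at timeout.
 */
static bool poll_until_clear(bool (*pending)(void *), void *arg,
			     unsigned int timeout_us,
			     unsigned int interval_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us / interval_us && pending(arg); i++)
		usleep(interval_us);	/* udelay() in kernel context */

	return !pending(arg);
}
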
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index 5c874f7b0683..9d1ad3b29ca5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -154,6 +154,21 @@ void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable);
-
+void dcn20_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_post_unlock_reset_opp(
+ struct dc *dc,
+ struct pipe_ctx *opp_head);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 0e8d32e3dbae..c32764aef884 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 780ce4c064aa..dcb27cdbce73 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 5f8f45b48720..fb2ffb637931 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -89,7 +89,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -98,7 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn31_set_backlight_level,
+ .set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 9b88eb72086d..be26c925fdfa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -162,6 +162,8 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -174,6 +176,16 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
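
The new hunk above leans on a convention used throughout these files: each
hardware backend fills in only the callbacks it implements, so every call
site guards the function pointer before using it. A small sketch of that
pattern with illustrative names (the real driver hangs these hooks off
dc->res_pool->mpc->funcs):

#include <stdbool.h>

/* Illustrative ops table; a backend may leave hooks NULL. */
struct mpc_ops_model {
	void (*set_out_rate_control)(int opp_inst, bool enable);
};

static void disable_out_rate_control(const struct mpc_ops_model *ops,
				     const int *opp_inst, int opp_cnt)
{
	int i;

	if (!ops->set_out_rate_control)	/* optional hook not provided */
		return;

	for (i = 0; i < opp_cnt; i++)
		ops->set_out_rate_control(opp_inst[i], false);
}
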
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 6bdfbf22ce87..21ef03a76229 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -100,7 +99,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn31_set_backlight_level,
+ .set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index fa11f075d1f9..ee4de9ddfef4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -985,6 +985,7 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
/* for DCN401 testing only */
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
@@ -1398,12 +1399,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 5ecee7e320da..e4d149eff10f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -87,7 +87,6 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index e599cdc465bf..b907ad1acedd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
- if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -426,6 +427,8 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -438,6 +441,16 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -788,6 +801,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -944,6 +958,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
/*to do, need to support both case*/
hubp->power_gated = true;
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream = NULL;
@@ -1020,8 +1035,13 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
- if (pipe_ctx->stream_res.dsc)
+ if (pipe_ctx->stream_res.dsc) {
update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
+ if (dc->caps.sequential_ono) {
+ update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
+ update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+ }
+ }
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
@@ -1579,3 +1599,37 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return false;
}
+
+/*
+ * Set powerup to true for every pipe to match pre-OS configuration.
+ */
+static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
+{
+ int i = 0, j = 0;
+
+ memset(update_state, 0, sizeof(struct pg_block_update));
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
+ update_state->pg_pipe_res_update[j][i] = true;
+
+ update_state->pg_res_update[PG_HPO] = true;
+ update_state->pg_res_update[PG_DWB] = true;
+}
+
+/*
+ * The purpose is to power up all gated blocks, restoring the pre-OS (no power
+ * optimization) environment. Re-use the hwss funcs and the existing PG & RCG flags to decide the power-up sequence.
+ */
+void dcn35_hardware_release(struct dc *dc)
+{
+ struct pg_block_update pg_update_state;
+
+ dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);
+
+ if (dc->hwss.root_clock_control)
+ dc->hwss.root_clock_control(dc, &pg_update_state, true);
+	/* power up required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
+}
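
dcn35_calc_blocks_to_ungate_for_hw_release() above simply marks every pipe
resource for power-up and lets the shared hwss helpers execute the sequence.
A toy model of that table-driven approach, with made-up sizes standing in
for PG_HW_PIPE_RESOURCES_NUM_ELEMENT and the real pipe count:

#include <stdbool.h>
#include <string.h>

#define MODEL_RES_TYPES 6	/* stand-in for PG_HW_PIPE_RESOURCES_NUM_ELEMENT */
#define MODEL_PIPES     4	/* stand-in for res_pool->pipe_count */

struct pg_update_model {
	bool pipe_res[MODEL_RES_TYPES][MODEL_PIPES];
};

/* Clear the table, then request power-up for every block of every pipe,
 * mirroring the loops in dcn35_calc_blocks_to_ungate_for_hw_release().
 */
static void ungate_everything(struct pg_update_model *u)
{
	int i, j;

	memset(u, 0, sizeof(*u));
	for (i = 0; i < MODEL_PIPES; i++)
		for (j = 0; j < MODEL_RES_TYPES; j++)
			u->pipe_res[j][i] = true;
}

Separating "what to ungate" (the table) from "how to ungate" (the
root_clock_control / hw_block_power_up hooks) is what lets hardware_release
reuse the existing power-gating plumbing unchanged.
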
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index e27b3609020f..0b1d6f608edd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -99,4 +99,6 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn35_hardware_release(struct dc *dc);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index fd67779c27a9..c7acaf97974c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -92,7 +92,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -123,6 +122,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .hardware_release = dcn35_hardware_release,
+ .detect_pipe_changes = dcn20_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
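
The dcn35 table above now routes detect_pipe_changes and friends through
hw_sequencer_funcs rather than calling the dcn20 helpers directly, which is
exactly what allows dcn401 to substitute its own versions later in this
patch. A minimal, self-contained model of that indirection (all names
invented):

#include <stdio.h>

struct hwss_ops_model {
	void (*detect_pipe_changes)(int pipe);
};

static void dcn20_detect_model(int pipe)
{
	printf("dcn20-style detection on pipe %d\n", pipe);
}

/* One generation reuses the shared step... */
static const struct hwss_ops_model dcn35_model = {
	.detect_pipe_changes = dcn20_detect_model,
};

/* ...while shared sequence code only ever calls through the table. */
static void run_front_end_model(const struct hwss_ops_model *ops, int npipes)
{
	int i;

	for (i = 0; i < npipes; i++)
		if (ops->detect_pipe_changes)
			ops->detect_pipe_changes(i);
}

int main(void)
{
	run_front_end_model(&dcn35_model, 2);
	return 0;
}
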
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 3c275a1eff58..4f73e7f551ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 307782592789..555a9f590cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
@@ -126,91 +127,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct ips_ono_region_state state = {0, 0};
-
- switch (region) {
- case 0:
- /* dccg, dio, dcio */
- REG_GET_2(DOMAIN22_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 1:
- /* dchubbub, dchvm, dchubbubmem */
- REG_GET_2(DOMAIN23_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 2:
- /* mpc, opp, optc, dwb */
- REG_GET_2(DOMAIN24_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 3:
- /* hpo */
- REG_GET_2(DOMAIN25_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 4:
- /* dchubp0, dpp0 */
- REG_GET_2(DOMAIN0_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 5:
- /* dsc0 */
- REG_GET_2(DOMAIN16_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 6:
- /* dchubp1, dpp1 */
- REG_GET_2(DOMAIN1_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 7:
- /* dsc1 */
- REG_GET_2(DOMAIN17_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 8:
- /* dchubp2, dpp2 */
- REG_GET_2(DOMAIN2_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 9:
- /* dsc2 */
- REG_GET_2(DOMAIN18_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 10:
- /* dchubp3, dpp3 */
- REG_GET_2(DOMAIN3_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 11:
- /* dsc3 */
- REG_GET_2(DOMAIN19_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- default:
- break;
- }
-
- return state;
-}
-
void dcn401_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -435,7 +351,8 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
+ dc->debug.fams2_config.bits.enable &=
+ dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
@@ -821,7 +738,7 @@ enum dc_status dcn401_enable_stream_timing(
int opp_inst[MAX_PIPES] = {0};
struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
struct dc_crtc_timing patched_crtc_timing = stream->timing;
- bool manual_mode;
+ bool manual_mode = false;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
int odm_slice_width;
@@ -880,15 +797,15 @@ enum dc_status dcn401_enable_stream_timing(
patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
pipe_ctx->stream_res.tg->funcs->program_timing(
- pipe_ctx->stream_res.tg,
- &patched_crtc_timing,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout,
- pipe_ctx->stream->signal,
- true);
+ pipe_ctx->stream_res.tg,
+ &patched_crtc_timing,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
+ pipe_ctx->stream->signal,
+ true);
for (i = 0; i < opp_cnt; i++) {
opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -2012,3 +1929,730 @@ void dcn401_reset_hw_ctx_wrap(
}
}
}
+
+static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+ struct pipe_ctx *other_pipe;
+ unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
+
+ /* Always use the largest vready_offset of all connected pipes */
+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+
+ return vready_offset;
+}
+
+static void dcn401_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
+static void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe)
+ dcn401_program_tg(dc, pipe_ctx, context, hws);
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (hws->funcs.enable_plane)
+ hws->funcs.enable_plane(dc, pipe_ctx, context);
+ else
+ dc->hwss.enable_plane(dc, pipe_ctx, context);
+
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+
+ if (pipe_ctx->update_flags.raw ||
+ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+ pipe_ctx->stream->update_flags.raw)
+ dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (hws->funcs.populate_mcm_luts) {
+ if (pipe_ctx->plane_state) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so
+	 * only do gamma programming when powering on; an internal memcmp
+	 * avoids updating on slave planes
+ */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ dc->hwss.set_disp_pattern_generator(dc,
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+}
+
+void dcn401_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ unsigned int prev_hubp_count = 0;
+ unsigned int hubp_count = 0;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (resource_is_pipe_topology_changed(dc->current_state, context))
+ resource_log_pipe_topology_update(dc, context);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state->triplebuffer_flips)
+ BREAK_TO_DEBUGGER();
+
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ prev_hubp_count++;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ hubp_count++;
+ }
+
+ if (prev_hubp_count == 0 && hubp_count > 0) {
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, false);
+ udelay(500);
+ }
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc) {
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
+
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
+ * then we want to do the programming here (effectively it's being disabled). If we do
+ * the programming later the DET won't be updated until the OTG for the phantom pipe is
+ * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
+ * DET allocation.
+ */
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
+ SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ }
+
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
+ /*
+	 * Program all updated pipes; order matters for mpcc setup. Start with
+	 * the top pipe and program all pipes that follow in order.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else {
+ /* Don't program phantom pipes in the regular front end programming sequence.
+ * There is an MPO transition case where a pipe being used by a video plane is
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
+ */
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ dcn401_program_pipe(dc, pipe, context);
+ }
+
+ pipe = pipe->bottom_pipe;
+ }
+ }
+
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+ /* Avoid underflow by check of pipe line read when adding 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
+ }
+ }
+}
+
+void dcn401_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
+ unsigned int polling_interval_us = 1;
+ struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dc->hwss.post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ /*
+	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
+	 * part of the enable operation; otherwise, DM may request an immediate flip which
+	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable"),
+	 * which is unsupported on DCN.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Don't check flip pending on phantom pipes
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* When going from a smaller ODM slice count to a larger one, we must
+		 * ensure the double buffer update completes before we return, so we
+		 * don't reduce DISPCLK before we've transitioned to 2:1 or 4:1
+ */
+ if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ int j = 0;
+ struct timing_generator *tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+ }
+ }
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ /* Program phantom pipe here to prevent a frame of underflow in the MPO transition
+ * case (if a pipe being used for a video plane transitions to a phantom pipe, it
+ * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
+ * programming sequence).
+ */
+ while (pipe) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
+ dcn401_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (!hwseq)
+ return;
+
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq->funcs.update_force_pstate)
+ dc->hwseq->funcs.update_force_pstate(dc, context);
+
+ /* Only program the MALL registers after all the main and phantom pipes
+ * are done programming.
+ */
+ if (hwseq->funcs.program_mall_pipe_config)
+ hwseq->funcs.program_mall_pipe_config(dc, context);
+
+	/* WA to apply WM setting */
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
+
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
+ tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
+bool dcn401_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* recalculate DML parameters */
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ return false;
+
+ /* apply updated bandwidth parameters */
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* update hubp configs for all pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe == NULL) {
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ }
+
+ return true;
+}
+
+void dcn401_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
+{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
+ unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+
+ new_pipe->update_flags.raw = 0;
+
+ /* If non-phantom pipe is being transitioned to a phantom pipe,
+ * set disable and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+
+ /* For SubVP we need to unconditionally enable because any phantom pipes are
+	 * always removed and then newly added on every full update whenever SubVP is in use.
+ * The remove-add sequence of the phantom pipe always results in the pipe
+ * being blanked in enable_stream_timing (DPG).
+ */
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
+ new_pipe->update_flags.bits.enable = 1;
+
+	/* Phantom pipes are effectively disabled; if the pipe was previously phantom,
+	 * we have to enable it
+ */
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
+ new_pipe->update_flags.bits.enable = 1;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state)
+ new_pipe->update_flags.bits.plane_changed = true;
+
+ /* Detect top pipe only changes */
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
+ /* Detect global sync changes */
+ if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
+ || (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
+ || (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
+ || (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
+
+ /*
+	 * Detect opp / tg change; only set on change, not on enable.
+	 * Assume mpcc inst == pipe index; if not, this code needs to be updated
+	 * since mpcc is what is affected by these. In fact, all of our sequence
+	 * makes this assumption at the moment with how hubp reset is matched to
+	 * the same index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+	 * Detect mpcc blending changes; only dpp inst and opp matter here.
+	 * Mpccs getting removed/inserted update connected ones during their
+	 * own programming.
+ */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
+ struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;
+
+ /* Detect pipe interdependent updates */
+ if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
+ || (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
+ || (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
+ || (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
+ || (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
+ || (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
+ || (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
+ || (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
+ new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
+ || (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
+ || (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
+ old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
+ old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
+ old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
+ old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
+ old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
+ old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
+ old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
+ old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
+ old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
+ memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
+ memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
+
+ if (memcmp(&old_pipe->stream_res.test_pattern_params,
+ &new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ }
+}
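
The interdependent-update check above uses a copy-then-compare idiom: every field that only gates the hubp_interdependent flag is absorbed into the local old-register snapshot, so the trailing memcmp() fires solely on whatever else changed. A minimal standalone sketch of the same idiom, with a hypothetical two-field register struct:

#include <stdio.h>
#include <string.h>

struct regs {
	unsigned int prefetch;	/* handled by the dedicated flag */
	unsigned int qos;	/* caught by the catch-all memcmp() */
};

int main(void)
{
	struct regs old = { .prefetch = 1, .qos = 7 };
	const struct regs new = { .prefetch = 2, .qos = 7 };
	int interdependent = 0, other = 0;

	if (old.prefetch != new.prefetch) {
		old.prefetch = new.prefetch;	/* absorb the handled field */
		interdependent = 1;
	}
	if (memcmp(&old, &new, sizeof(old)))
		other = 1;

	printf("interdependent=%d other=%d\n", interdependent, other);	/* prints 1 0 */
	return 0;
}
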
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 28a513dfc005..17cea748789e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -63,8 +63,6 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
- uint8_t region);
void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
@@ -96,5 +94,12 @@ void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
-
+void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
+void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
+bool dcn401_update_bandwidth(struct dc *dc, struct dc_state *context);
+void dcn401_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 23e4f208152e..44cb376f97c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -17,9 +17,9 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_hw = dcn401_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
- .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
- .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .post_unlock_program_front_end = dcn401_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -42,7 +42,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
- .update_bandwidth = dcn20_update_bandwidth,
+ .update_bandwidth = dcn401_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn31_set_static_screen_control,
@@ -66,7 +66,6 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -100,6 +99,10 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .detect_pipe_changes = dcn401_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
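
The four entries added to dcn401_funcs (.detect_pipe_changes, .enable_plane, .update_dchubp_dpp, .post_unlock_reset_opp) are optional hooks; tables for earlier DCN generations leave them NULL, so core callers are expected to test the pointer before dispatching. A standalone sketch of that pattern (names hypothetical):

#include <stdio.h>

struct dc;
struct funcs {
	void (*detect_pipe_changes)(struct dc *dc);	/* may be NULL on older DCNs */
};

static void dcn401_detect(struct dc *dc) { (void)dc; puts("dcn401 path"); }

static void program_front_end(struct dc *dc, const struct funcs *f)
{
	if (f->detect_pipe_changes)	/* silently skip when the hook is unset */
		f->detect_pipe_changes(dc);
}

int main(void)
{
	const struct funcs dcn401 = { .detect_pipe_changes = dcn401_detect };
	const struct funcs dcn20 = { 0 };

	program_front_end(NULL, &dcn401);	/* prints "dcn401 path" */
	program_front_end(NULL, &dcn20);	/* no-op */
	return 0;
}
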
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 66fdc5805d0a..a7d66cfd93c9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -194,7 +194,6 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
-
};
struct block_sequence {
@@ -331,10 +330,6 @@ struct hw_sequencer_funcs {
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
- bool (*mmhubbub_warmup)(struct dc *dc,
- unsigned int num_dwb,
- struct dc_writeback_info *wb_info);
-
/* Clock Related */
enum dc_status (*set_clock)(struct dc *dc,
enum dc_clock_type clock_type,
@@ -462,6 +457,18 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx);
+ void (*detect_pipe_changes)(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+ void (*enable_plane)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*update_dchubp_dpp)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*post_unlock_reset_opp)(struct dc *dc,
+ struct pipe_ctx *opp_head);
};
void color_space_to_black_color(
@@ -489,11 +496,12 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2edd5b38ce4f..d558efc6e12f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -45,9 +45,6 @@
#define MAX_SVP_PHANTOM_STREAMS 2
#define MAX_SVP_PHANTOM_PLANES 2
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id);
-
#include "grph_object_id.h"
#include "link_encoder.h"
#include "stream_encoder.h"
@@ -220,6 +217,7 @@ struct resource_funcs {
*/
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
+ unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
};
struct audio_support{
@@ -468,6 +466,7 @@ struct pipe_ctx {
unsigned int surface_size_in_mall_bytes;
struct dml2_dchub_per_pipe_register_set hubp_regs;
struct dml2_hubp_pipe_mcache_regs mcache_regs;
+ union dml2_global_sync_programming global_sync;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -542,7 +541,8 @@ struct dcn_bw_output {
bool legacy_svp_drr_stream_index_valid;
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
- struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 55529c5f471c..d19a595c2be4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -624,10 +624,6 @@ bool dcn_validate_bandwidth(
struct dc_state *context,
bool fast_validate);
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks);
-
void dcn_get_soc_clks(
struct dc *dc,
int *min_fclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 2d06067ff36d..c14d64687a3d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -306,6 +306,9 @@ struct clk_mgr_funcs {
*/
void (*set_hard_min_memclk)(struct clk_mgr *clk_mgr, bool current_mode);
+ int (*get_hard_min_memclk)(struct clk_mgr *clk_mgr);
+ int (*get_hard_min_fclk)(struct clk_mgr *clk_mgr);
+
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index c2dd061892f4..7a1ca1e98059 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -166,6 +166,41 @@ enum dentist_divider_range {
CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \
CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT)
+#define CLK_REG_LIST_DCN35() \
+ CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \
+ CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \
+	CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
+ CLK_SR_DCN35(CLK5_spll_field_8), \
+	SR(DENTIST_DISPCLK_CNTL),
+
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\
@@ -236,6 +271,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_DFS_CNTL;
uint32_t CLK1_CLK3_DFS_CNTL;
uint32_t CLK1_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK5_DFS_CNTL;
uint32_t CLK2_CLK2_DFS_CNTL;
uint32_t CLK1_CLK0_CURRENT_CNT;
@@ -243,11 +279,34 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_CURRENT_CNT;
uint32_t CLK1_CLK3_CURRENT_CNT;
uint32_t CLK1_CLK4_CURRENT_CNT;
+ uint32_t CLK1_CLK5_CURRENT_CNT;
uint32_t CLK0_CLK0_DFS_CNTL;
uint32_t CLK0_CLK1_DFS_CNTL;
uint32_t CLK0_CLK3_DFS_CNTL;
uint32_t CLK0_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK0_BYPASS_CNTL;
+ uint32_t CLK1_CLK1_BYPASS_CNTL;
+ uint32_t CLK1_CLK2_BYPASS_CNTL;
+ uint32_t CLK1_CLK3_BYPASS_CNTL;
+ uint32_t CLK1_CLK4_BYPASS_CNTL;
+ uint32_t CLK1_CLK5_BYPASS_CNTL;
+
+ uint32_t CLK1_CLK0_DS_CNTL;
+ uint32_t CLK1_CLK1_DS_CNTL;
+ uint32_t CLK1_CLK2_DS_CNTL;
+ uint32_t CLK1_CLK3_DS_CNTL;
+ uint32_t CLK1_CLK4_DS_CNTL;
+ uint32_t CLK1_CLK5_DS_CNTL;
+
+ uint32_t CLK1_CLK0_ALLOW_DS;
+ uint32_t CLK1_CLK1_ALLOW_DS;
+ uint32_t CLK1_CLK2_ALLOW_DS;
+ uint32_t CLK1_CLK3_ALLOW_DS;
+ uint32_t CLK1_CLK4_ALLOW_DS;
+ uint32_t CLK1_CLK5_ALLOW_DS;
+ uint32_t CLK5_spll_field_8;
+
};
struct clk_mgr_shift {
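
CLK_REG_LIST_DCN35() is meant to be pasted into the initializer of a struct clk_mgr_registers instance, each CLK_SR_DCN35(reg) entry resolving a field name to its MMIO offset. The real expansion lives in the clk_mgr headers; here is a toy standalone version of the pattern, with invented offsets:

#include <stdio.h>

struct regs { unsigned int CLK1_CLK0_DFS_CNTL; unsigned int CLK1_CLK5_DFS_CNTL; };

/* Hypothetical stand-in for CLK_SR_DCN35(): field name -> register offset. */
#define BASE 0x16000
#define OFF_CLK1_CLK0_DFS_CNTL 0x26c
#define OFF_CLK1_CLK5_DFS_CNTL 0x280
#define CLK_SR(reg) .reg = BASE + OFF_##reg
#define CLK_REG_LIST() CLK_SR(CLK1_CLK0_DFS_CNTL), CLK_SR(CLK1_CLK5_DFS_CNTL)

static const struct regs clk_regs = { CLK_REG_LIST() };

int main(void)
{
	printf("CLK1_CLK5_DFS_CNTL at %#x\n", clk_regs.CLK1_CLK5_DFS_CNTL);
	return 0;
}
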
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 16580d624278..b610beb075d5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -42,6 +42,7 @@
#include "cursor_reg_cache.h"
#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -144,14 +145,26 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
void (*hubp_setup_interdependent)(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+ void (*hubp_setup_interdependent2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
+ void (*hubp_reset)(struct hubp *hubp);
+
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
@@ -165,7 +178,7 @@ struct hubp_funcs {
void (*hubp_program_pte_vm)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*hubp_set_vm_system_aperture_settings)(
@@ -179,7 +192,7 @@ struct hubp_funcs {
void (*hubp_program_surface_config)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -275,6 +288,7 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_clear_tiling)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index af9183f5d69b..08c16ba52a51 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -168,6 +168,14 @@ struct link_encoder_funcs {
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
+ void (*enable_dpia_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+ void (*disable_dpia_output)(struct link_encoder *link_enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index a8b44f398ce6..42fbc70f7056 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -150,7 +150,7 @@ struct mem_input_funcs {
void (*mem_input_program_pte_vm)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*mem_input_set_vm_system_aperture_settings)(
@@ -164,7 +164,7 @@ struct mem_input_funcs {
void (*mem_input_program_surface_config)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -187,6 +187,8 @@ struct mem_input_funcs {
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param);
+ void (*mem_input_clear_tiling)(
+ struct mem_input *mem_input);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 03cbcbb36f1c..6fdc9809280c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -210,7 +210,7 @@ void optc1_enable_crtc_reset(struct timing_generator *optc,
bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params);
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index b74e18cc1e66..9885cb3c310f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -141,6 +141,9 @@ struct crc_params {
bool continuous_mode;
bool enable;
+
+ uint8_t crc_eng_inst;
+ bool reset;
};
/**
@@ -291,7 +294,7 @@ struct timing_generator_funcs {
* @get_crc: Get CRCs for the given timing generator. Return false if
* CRCs are not enabled (via configure_crc).
*/
- bool (*get_crc)(struct timing_generator *tg,
+ bool (*get_crc)(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
void (*program_manual_trigger)(struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index f04292086c08..fd1f9d3db039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -148,6 +148,10 @@ struct link_service {
const struct dc_stream_state *stream,
const unsigned int num_streams);
+ uint32_t (*dp_required_hblank_size_bytes)(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
/*************************** DPMS *************************************/
void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 4fb9cd6708d5..1d61d475d36f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -30,8 +30,8 @@
#include "../dce110/irq_service_dce110.h"
#include "irq_service_dcn201.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "cyan_skillfish_ip_offset.h"
#include "soc15_hw_ip.h"
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index ff8fe1a94965..96febabf464a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -251,7 +251,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
@@ -646,7 +646,7 @@ bool dp_set_test_pattern(
if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 3e47a6735912..06faa461067b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -164,7 +164,9 @@ void disable_dio_link_output(struct dc_link *link,
{
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
- link_enc->funcs->disable_output(link_enc, signal);
+ if (link_enc != NULL)
+ link_enc->funcs->disable_output(link_enc, signal);
+
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index 348ea4cb832d..a6d1d7641ab4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -187,7 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
+ return ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
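
The pattern repeated throughout this patch replaces a raw bitwise test with a masked equality: EXT_CHIP_MASK covers a multi-bit chip field, so `caps & FLAG` can read true for an unrelated chip whose encoding merely shares a bit with FLAG. A standalone demonstration with invented encodings:

#include <stdio.h>

#define EXT_CHIP_MASK   0x0ff
#define CHIP_FIXED_VS   0x008	/* hypothetical field values */
#define CHIP_OTHER      0x00c	/* shares bit 3 with CHIP_FIXED_VS */

int main(void)
{
	unsigned int caps = CHIP_OTHER;

	/* Old test: false positive, because bit 3 is set in both encodings. */
	printf("bitwise: %d\n", !!(caps & CHIP_FIXED_VS));			/* 1 */
	/* New test: exact match of the whole field. */
	printf("masked:  %d\n", (caps & EXT_CHIP_MASK) == CHIP_FIXED_VS);	/* 0 */
	return 0;
}
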
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 6499807af72a..36adf95744fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -77,17 +77,74 @@ static void set_dio_dpia_lane_settings(struct dc_link *link,
{
}
+static void enable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->enable_dpia_output) {
+ uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->enable_dpia_output(
+ link_enc,
+ link_settings,
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
+ } else {
+ if (dc_is_dp_sst_signal(signal))
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ else
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+static void disable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->disable_dpia_output) {
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ } else
+ link_enc->funcs->disable_output(link_enc, signal);
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
- .disable_link_output = disable_dio_link_output,
+ .disable_link_output = disable_dpia_link_output,
.setup_audio_output = setup_dio_audio_output,
.enable_audio_packet = enable_dio_audio_packet,
.disable_audio_packet = disable_dio_audio_packet,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
- .enable_dp_link_output = enable_dio_dp_link_output,
+ .enable_dp_link_output = enable_dpia_link_output,
.set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
.set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..259e0f4775e1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
@@ -27,6 +27,9 @@
#include "link_hwss.h"
+#define DIG_SST_MODE 0
+#define DIG_MST_MODE 5
+
const struct link_hwss *get_dpia_link_hwss(void);
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index e026c728042a..550e1a098fa2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -829,7 +829,8 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
if (link->dc->debug.skip_detection_link_training ||
dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)) {
destrictive = false;
} else if (link_dp_get_encoding_format(&max_link_cap) ==
DP_8b_10b_ENCODING) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 5d66bfc7fe6e..ec7de9c01fab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -772,6 +772,20 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
return result;
}
+static bool dp_set_hblank_reduction_on_rx(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (dc_is_virtual_signal(stream->signal))
+ result = true;
+ else
+ result = dm_helpers_dp_write_hblank_reduction(dc->ctx, stream);
+ return result;
+}
+
+
/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
* i.e. after dp_enable_dsc_on_rx() had been called
*/
@@ -1953,11 +1967,15 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
+ if (dc_is_tmds_signal(stream->signal) && stream->phy_pix_clk > 6000000UL) {
+ ASSERT(false);
+ return;
+ }
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
eng_id = pipe_ctx->stream_res.stream_enc->id;
@@ -1968,7 +1986,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
write_i2c_default_retimer_setting(pipe_ctx,
is_vga_mode, is_over_340mhz);
}
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
}
@@ -2024,7 +2042,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int lt_attempts = LINK_TRAINING_ATTEMPTS;
// Increase retry count if attempting DP1.x on FIXED_VS link
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
lt_attempts = 10;
@@ -2039,7 +2057,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
do_fallback = true;
/*
@@ -2375,13 +2394,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
//Need to inform that sink is going to use legacy HDMI mode.
write_scdc_data(
link->ddc,
165000,//vbios only handles 165Mhz.
false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
write_i2c_retimer_setting(pipe_ctx,
@@ -2389,7 +2408,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
else
write_i2c_default_retimer_setting(pipe_ctx,
false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, false);
}
@@ -2529,6 +2548,15 @@ void link_set_dpms_on(
if (pipe_ctx->stream->dpms_off)
return;
+	/* For a DP tunneling link, a pending HPD means that we have a race condition between processing
+	 * the current link and processing the pending HPD. If we enable the link now, we may end up with a
+ * link that is not actually connected to a sink. So we skip enabling the link in this case.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending) {
+		DC_LOG_DEBUG("%s, Link%d HPD is pending, skip enabling it.\n", __func__, link->link_index);
+ return;
+ }
+
/* Have to setup DSC before DIG FE and BE are connected (which happens before the
* link training). This is to make sure the bandwidth sent to DIG BE won't be
* bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
@@ -2594,6 +2622,9 @@ void link_set_dpms_on(
}
}
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_set_hblank_reduction_on_rx(pipe_ctx);
+
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
allocate_usb4_bandwidth(pipe_ctx->stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 5e1b5ab9fbc6..a7877d57a00f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -101,6 +101,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
/* link dpms owns the programming sequence of stream's dpms state associated
@@ -698,7 +699,7 @@ static bool construct_phy(struct dc_link *link,
link->chip_caps);
}
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
link->bios_forced_drive_settings.VOLTAGE_SWING =
(bios->integrated_info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
link->bios_forced_drive_settings.PRE_EMPHASIS =
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 60f15a9ba7a5..29606fda029d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -409,3 +409,182 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ memset(output, 0, sizeof(struct dp_audio_layout_config));
+
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
+
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+/* The current calculation is only applicable to 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_overhead_hblank_bw_in_symbols(
+ uint32_t max_slice_h)
+{
+ uint32_t overhead_hblank_bw = 0; /* in stream symbols */
+
+ overhead_hblank_bw += max_slice_h * 4; /* EOC overhead */
+ overhead_hblank_bw += 12; /* Main link overhead (VBID, BS/BE) */
+
+ return overhead_hblank_bw;
+}
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+	/* Main logic from dce_audio is duplicated here, with the main
+	 * differences being:
+ * - Pre-determined lane count of 4
+ * - Assumed 16 dsc slices for worst case
+ * - Assumed SDP split disabled for worst case
+ * TODO: Unify logic from dce_audio to prevent duplicated logic.
+ */
+
+ const struct dc_crtc_timing *timing = audio_params->crtc_timing;
+ const uint32_t channel_count = audio_params->channel_count;
+ const uint32_t sample_rate_hz = audio_params->sample_rate_hz;
+ const enum dp_link_encoding link_encoding = audio_params->link_encoding;
+
+ // 8b/10b MST and 128b/132b are always 4 logical lanes.
+ const uint32_t lane_count = 4;
+ const bool is_mst = (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT);
+ // Maximum slice count is with ODM 4:1, 4 slices per DSC
+ const uint32_t max_slices_h = 16;
+
+ const uint32_t av_stream_map_lane_count = get_av_stream_map_lane_count(
+ link_encoding, lane_count, is_mst);
+ const uint32_t audio_sdp_overhead = get_audio_sdp_overhead(
+ link_encoding, lane_count, is_mst);
+ struct dp_audio_layout_config layout_config;
+
+ if (link_encoding == DP_8b_10b_ENCODING && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return 0;
+
+ get_audio_layout_config(
+ channel_count, link_encoding, &layout_config);
+
+	/* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ timing->pix_clk_100hz, (long long)timing->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+ uint32_t required_bytes_per_hblank = 0;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config.layouts_per_sample_denom);
+ // HBlank expansion usage assumes SDP split disabled to allow for worst case.
+ layouts_per_line = dc_fixpt_from_int(dc_fixpt_ceil(layouts_per_line));
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config.max_layouts_per_audio_sdp * layout_config.symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config.max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config.symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ required_symbols_per_hblank += calculate_overhead_hblank_bw_in_symbols(max_slices_h);
+
+ if (link_encoding == DP_8b_10b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank; // 8 bits per 8b/10b symbol
+ else if (link_encoding == DP_128b_132b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank * 4; // 32 bits per 128b/132b symbol
+
+ return required_bytes_per_hblank;
+}
+
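
To make the arithmetic above concrete, here is a worked example for 2-channel 48 kHz L-PCM over 8b/10b MST on an assumed 1080p60 timing (148.5 MHz pixel clock, h_total 2200). The layout constants (denominator 4, 40 symbols per layout, 16-symbol SDP overhead, 16 slices, +12 main-link overhead) come from the tables added above; the kernel path runs in fixed31_32, while this standalone sketch uses double, which is adequate at this scale (build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double h_line_khz = 1485000.0 / (2200 * 10);		/* 67.5 kHz line rate */
	const double samples_per_line = 48000.0 / 1000.0 / h_line_khz;	/* ~0.711 */
	const double layouts_per_line = ceil(samples_per_line / 4);	/* 2ch: denom 4 -> 1 */
	const int lanes = 4, sdp_overhead = 16, symbols_per_layout = 40;
	const int max_slices_h = 16;

	int sdps = (int)floor(layouts_per_line / 1);			/* max 1 layout per SDP */
	double per_sdp = (symbols_per_layout + sdp_overhead) * 1.10;	/* 1.1 safety margin */
	int symbols = sdps * (((int)ceil(per_sdp) + lanes) / lanes) * lanes;

	symbols += max_slices_h * 4 + 12;	/* EOC + VBID/BS/BE overhead */
	printf("required hblank bytes (8b/10b): %d\n", symbols);	/* prints 140 */
	return 0;
}
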
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 595fb05946e9..bf398c49c3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -37,4 +37,9 @@ uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index d6d5bbf2108c..267180e7bc48 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -505,7 +505,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((ddc->link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 9dabaf682171..44c3023a7731 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1554,7 +1554,7 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
@@ -1632,13 +1632,6 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
}
- /* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read tunneling device data failed.\n", __func__);
- }
-
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
* time before proceeding with possibly vendor specific transactions
@@ -1711,7 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ dm_error("%s: Read DPRX feature list failed.\n", __func__);
/* AdaptiveSyncCapability */
dpcd_dprx_data = 0;
@@ -1726,15 +1719,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed. Addr:%#x\n",
+ dm_error("%s: Read DPRX feature list_1 failed. Addr:%#x\n",
__func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1);
}
-
else {
link->dpcd_caps.dprx_feature.raw = 0;
}
-
/* Error condition checking...
* It is impossible for Sink to report Max Lane Count = 0.
* It is possible for Sink to report Max Link Rate = 0, if it is
@@ -1788,6 +1779,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->test_pattern_enabled = false;
link->compliance_test_state.raw = 0;
+ link->dpcd_caps.receive_port0_cap.raw[0] =
+ dpcd_data[DP_RECEIVE_PORT_0_CAP_0 - DP_DPCD_REV];
+ link->dpcd_caps.receive_port0_cap.raw[1] =
+ dpcd_data[DP_RECEIVE_PORT_0_BUFFER_SIZE - DP_DPCD_REV];
+
/* read sink count */
core_link_read_dpcd(link,
DP_SINK_COUNT,
@@ -1918,6 +1914,7 @@ static bool retrieve_link_cap(struct dc_link *link)
if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+		/* Read 128b/132b supported link rates */
core_link_read_dpcd(link,
DP_128B132B_SUPPORTED_LINK_RATES,
&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
@@ -1965,6 +1962,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ /* Read DP tunneling information. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read DP tunneling device data failed.\n", __func__);
+ }
+
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
@@ -2308,6 +2312,14 @@ bool dp_verify_link_cap_with_retries(
} else {
link->verified_link_cap = last_verified_link_cap;
}
+
+	/* For a DP tunneling link, a pending HPD means that we have a race condition between processing
+	 * the current link and processing the pending HPD. Since training failed, just break out of the
+	 * loop so that we have a chance to process the pending HPD.
+	 */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending)
+ break;
+
fsleep(10 * 1000);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 48abeaa88678..a08403c022ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -226,6 +226,8 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
bool allow_active;
+ link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -237,6 +239,9 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
+ /* Update desync error counter */
+ link->replay_settings.replay_desync_error_fail_count++;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -408,7 +413,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
link->skip_fallback_on_link_loss = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index bafa52a0165a..2c73ac87cd66 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -104,7 +104,7 @@ void dp_set_hw_lane_settings(
// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
!is_immediate_downstream(link, offset) &&
- (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ (!((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 754c895e1bfb..88d4288cde0f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -739,7 +739,7 @@ void override_training_settings(
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+	if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
@@ -1574,7 +1574,7 @@ enum link_training_result dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index fe26fde12eeb..85298b8a1b5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -110,6 +110,23 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ /* Always disable mpc out rate and flow control.
+ * MPC flow rate control is not needed for DCN30 and above.
+ */
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, 1,
+ MPC_OUT_RATE_CONTROL, 0);
+}
+
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1519,6 +1536,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
+ .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
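
For reference, REG_UPDATE_2() above performs a single read-modify-write covering both fields of the register. A standalone sketch of the mask/shift idiom it wraps (bit layout invented):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg = 0x3;	/* fake MMIO backing store, both fields dirty */

static void update_field(uint32_t mask, unsigned int shift, uint32_t val)
{
	reg = (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	update_field(0x1u << 0, 0, 1);	/* MPC_OUT_RATE_CONTROL_DISABLE = 1 */
	update_field(0x1u << 1, 1, 0);	/* MPC_OUT_RATE_CONTROL = 0 */
	printf("reg=%#x\n", reg);	/* prints 0x1 */
	return 0;
}
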
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index ce93003dae01..103f29900a2c 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1085,6 +1085,13 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control);
+
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 097d06023e64..19d5ebc6763c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -302,7 +302,6 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
-
if (optc->funcs->is_two_pixels_per_container(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
@@ -1471,37 +1470,71 @@ bool optc1_configure_crc(struct timing_generator *optc,
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+		/* Set crc mode and selection, and enable. */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+		/* Set crc mode and selection, and enable. */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
@@ -1510,6 +1543,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* optc1_get_crc - Capture CRC result per component
*
* @optc: timing_generator instance.
+ * @idx: index of crc engine to get CRC from
* @r_cr: 16-bit primary CRC signature for red data.
* @g_y: 16-bit primary CRC signature for green data.
* @b_cb: 16-bit primary CRC signature for blue data.
@@ -1521,7 +1555,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* If CRC is disabled, return false; otherwise, return true, and the CRC
* results in the parameters.
*/
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
@@ -1533,14 +1567,30 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
- /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
- REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ switch (idx) {
+ case 0:
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
- /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
- REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+ break;
+ case 1:
+ /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC1_DATA_RG,
+ CRC1_R_CR, r_cr,
+ CRC1_G_Y, g_y);
+
+ /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC1_DATA_B,
+ CRC1_B_CB, b_cb);
+ break;
+ default:
+ return false;
+ }
return true;
}
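
With the new idx argument, the two CRC engines on one OTG can be armed and read independently. A hedged usage sketch against the interfaces in this patch (the tg variable and window values are illustrative):

	struct crc_params params = { 0 };
	uint32_t r_cr, g_y, b_cb;

	params.enable = true;
	params.continuous_mode = true;
	params.crc_eng_inst = 1;	/* second engine, new in this patch */
	params.windowa_x_start = 0;
	params.windowa_x_end = 1920;
	params.windowa_y_start = 0;
	params.windowa_y_end = 1080;

	if (tg->funcs->configure_crc(tg, &params) &&
	    tg->funcs->get_crc(tg, params.crc_eng_inst, &r_cr, &g_y, &b_cb))
		DC_LOG_DEBUG("CRC1: r=%#x g=%#x b=%#x\n", r_cr, g_y, b_cb);
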
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 40757f20d73f..159172178d51 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -86,6 +86,12 @@
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC1_DATA_B, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\
SR(GSL_SOURCE_SELECT),\
SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
@@ -315,6 +321,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC1_SELECT, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
@@ -327,6 +334,17 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
@@ -482,6 +500,7 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
type OTG_CRC_CONT_EN;\
type OTG_CRC0_SELECT;\
+ type OTG_CRC1_SELECT;\
type OTG_CRC_EN;\
type CRC0_R_CR;\
type CRC0_G_Y;\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index dfa9364fe5a6..d21e82b927d0 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -183,34 +183,87 @@ static bool optc35_configure_crc(struct timing_generator *optc,
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Cannot configure crc on a CRTC that is disabled */
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
if (!params->enable)
return true;
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
- if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) {
- REG_UPDATE_4(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1,
- OTG_CRC_WINDOW_DB_EN, 1);
- } else
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+
+ /* Program frame boundaries */
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
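For illustration only: arming the second CRC engine over a full-frame window. struct crc_params and crc_eng_inst are the ones this function dereferences; the 3840x2160 timing and the remaining field values are hypothetical.

static void example_arm_crc1(struct timing_generator *tg)
{
	struct crc_params params = {
		.windowa_x_end = 3840,	/* assumed h_active */
		.windowa_y_end = 2160,	/* assumed v_active */
		.windowb_x_end = 3840,	/* Window B mirrors Window A here */
		.windowb_y_end = 2160,
		.enable = true,
		.continuous_mode = true,
		.crc_eng_inst = 1,	/* routes to OTG_CRC1_* / OTG_CRC1_SELECT */
	};

	if (!tg->funcs->configure_crc(tg, &params))
		DC_LOG_WARNING("CRC engine 1 not armed"); /* TG disabled or bad inst */
}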
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 783ca9acc762..338a0cad23a5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -315,7 +315,7 @@ void optc401_set_drr(
struct drr_params amended_params = { 0 };
bool program_manual_trigger = false;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
if (params != NULL &&
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
@@ -380,7 +380,7 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
{
struct dc *dc = optc->ctx->dc;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
/* FAMS2 */
dc_dmub_srv_fams2_drr_update(dc, optc->inst,
vtotal_min,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 770a380cc03d..e92f14d50adb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -1258,6 +1258,11 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
return NULL;
}
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
+{
+ return pipe_ctx->pipe_dlg_param.vstartup_start;
+}
+
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1272,7 +1277,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8147..7bc1be53e800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
@@ -51,6 +51,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
const struct resource_pool *pool,
struct dc_stream_state *stream);
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_RESOURCE_DCN10_H__ */
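A hedged sketch of how a caller might consume the new resource hook; the zero fallback is an assumption, only the funcs member itself is introduced by this series:

static unsigned int example_vstartup(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	/* pre-DCN4.x pools report pipe_dlg_param.vstartup_start via the
	 * dcn10 helper above; dcn401 reports global_sync.dcn4x.vstartup_lines */
	if (dc->res_pool->funcs->get_vstartup_for_pipe)
		return dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
	return 0; /* assumed default when a pool leaves the hook unset */
}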
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 7a5b9aa5292c..5c6dc710e96c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1509,60 +1509,9 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
if (prev_odm_pipe->plane_state) {
- struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
- struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
- int new_width;
-
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Calculate new vp and recout for left pipe */
- /* Need at least 16 pixels width per side */
- if (sd->recout.x + 16 >= sd->h_active)
- return false;
- new_width = sd->h_active - sd->recout.x;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
-
- /* Calculate new vp and recout for right pipe */
- sd = &next_odm_pipe->plane_res.scl_data;
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Need at least 16 pixels width per side */
- if (new_width <= 16)
- return false;
- new_width = sd->recout.width + sd->recout.x - sd->h_active;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
- sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->h_active - sd->recout.x));
- sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->h_active - sd->recout.x));
- sd->recout.x = 0;
-
- /*
- * When odm is used in YcbCr422 or 420 colour space, a split screen
- * will be seen with the previous calculations since the extra left
- * edge pixel is accounted for in fmt but not in viewport.
- *
- * Below are calculations which fix the split by fixing the calculations
- * if there is an extra left edge pixel.
- */
- if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
- && opp->funcs->opp_get_left_edge_extra_pixel_count(
- opp, next_odm_pipe->stream->timing.pixel_encoding,
- resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
- sd->h_active += 1;
- sd->recout.width += 1;
- sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
- sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ if (!resource_build_scaling_params(prev_odm_pipe) ||
+ !resource_build_scaling_params(next_odm_pipe)) {
+ return false;
}
}
@@ -2280,7 +2229,8 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index d3d67d366523..43fa2cb117f3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -59,8 +59,8 @@
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "dpcs/dpcs_2_0_3_offset.h"
#include "dpcs/dpcs_2_0_3_sh_mask.h"
@@ -1079,7 +1079,8 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn201_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 021ba8ac5c8c..2615c36d5ffe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -1378,6 +1378,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
.get_panel_config_defaults = dcn21_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index bfd0eccbed28..13202ce30d66 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2250,6 +2250,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
#define CTX ctx
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index a9816affd312..121a86a59833 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
/* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 167,
- .nv12 = 167,
- .fp16 = 167
+ .argb8888 = 358,
+ .nv12 = 358,
+ .fp16 = 358
},
64,
64
@@ -693,7 +693,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 4096,/*up to true 4K*/
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
@@ -1400,7 +1400,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn301_update_bw_bounding_box,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 7baefc910a3d..012c5fd52cb1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1151,6 +1151,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 8a57d46ad15f..a8d0b4686f9a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1096,6 +1096,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.update_bw_bounding_box = dcn303_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 54ec3d8e920c..911bd60d4fbc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1849,6 +1849,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 2794473f2aff..e3ba105034f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1778,6 +1778,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 4ee33eb3381d..14acef036b5a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1846,6 +1846,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 79eddbafe3c2..568094827212 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1720,6 +1720,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 12d247a7ec45..664302876019 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2066,6 +2066,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 06b9479c8bd3..38d76434683e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1624,6 +1624,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 89e2adcf2a28..8ee3d99ea2aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1752,6 +1752,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
return out;
}
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ plane_state->tiling_info.gfxversion = DcGfxVersion9;
+ dcn20_patch_unknown_plane_state(plane_state);
+ return DC_OK;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
@@ -1775,10 +1782,11 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn35_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index f97bb4cb3761..9d03a55d90cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -35,6 +35,7 @@
extern struct _vcs_dpi_ip_params_st dcn3_5_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc;
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state);
struct dcn35_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 263a37c1cd3a..14f7c3acdc96 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1754,10 +1754,11 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn351_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 2a3dabfe3cea..c1ebc6b1c937 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -726,6 +726,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_unbounded_requesting = false,
.enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
+ .fams_version = {
+ .minor = 1,
+ .major = 2,
+ }, //v2.1
.fams2_config = {
.bits = {
.enable = true,
@@ -733,7 +737,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_stall_recovery = true,
}
},
- .force_cositing = CHROMA_COSITING_TOPLEFT + 1,
+ .force_cositing = CHROMA_COSITING_NONE + 1,
};
static struct dce_aux *dcn401_aux_engine_create(
@@ -1293,6 +1297,29 @@ static struct hpo_dp_link_encoder *dcn401_hpo_dp_link_encoder_create(
return &hpo_dp_enc31->base;
}
+static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+{
+ unsigned int num_available_chans = 1;
+
+ /* channels for MALL must be a power of 2 */
+ while (num_chans > 1) {
+ num_available_chans = (num_available_chans << 1);
+ num_chans = (num_chans >> 1);
+ }
+
+ /* cannot be odd */
+ num_available_chans &= ~1;
+
+ /* clamp to max available channels for MALL per ASIC */
+ if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
+ } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
+ }
+
+ return num_available_chans;
+}
+
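A worked walk-through of the rounding above (the channel count is hypothetical):

/* num_chans = 12: the loop doubles num_available_chans while halving
 * num_chans (12 -> 6 -> 3 -> 1), leaving 8, i.e. the largest power of
 * two not exceeding the channel count; GC 12.0.0 / 12.0.1 then clamp
 * to at most 16 or 8 channels. Note the "cannot be odd" mask also maps
 * the single-channel case (num_available_chans == 1) to 0.
 */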
static struct dce_hwseq *dcn401_hwseq_create(
struct dc_context *ctx)
{
@@ -1588,6 +1615,14 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
+ /* re-calculate the available MALL size if required */
+ if (bw_params->num_channels > 0) {
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
+ dc, bw_params->num_channels) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+ }
+
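Hedged arithmetic for the recalculation, assuming a hypothetical mall_size_per_mem_channel of 4 (MiB) and 12 reported channels:

/*   dcn401_calc_num_avail_chans_for_mall(dc, 12) == 8
 *   max_cab_allocation_bytes = 8 * 4 * 1024 * 1024
 *                            = 33554432 bytes (32 MiB)
 */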
DC_FP_START();
dcn401_update_bw_bounding_box_fpu(dc, bw_params);
@@ -1605,6 +1640,7 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
+ plane_state->tiling_info.gfxversion = DcGfxAddr3;
plane_state->tiling_info.gfx_addr3.swizzle = DC_ADDR3_SW_64KB_2D;
return DC_OK;
}
@@ -1704,27 +1740,9 @@ static int dcn401_get_power_profile(const struct dc_state *context)
return dpm_level;
}
-static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+static unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
{
- unsigned int num_available_chans = 1;
-
- /* channels for MALL must be a power of 2 */
- while (num_chans > 1) {
- num_available_chans = (num_available_chans << 1);
- num_chans = (num_chans >> 1);
- }
-
- /* cannot be odd */
- num_available_chans &= ~1;
-
- /* clamp to max available channels for MALL per ASIC */
- if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
- } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
- num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
- }
-
- return num_available_chans;
+ return pipe_ctx->global_sync.dcn4x.vstartup_lines;
}
static struct resource_funcs dcn401_res_pool_funcs = {
@@ -1754,6 +1772,7 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 73a65913cb12..38a9a0d68058 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -11,6 +11,41 @@
#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
+static bool spl_is_yuv420(enum spl_pixel_format format)
+{
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
+ return true;
+
+ return false;
+}
+
+static bool spl_is_video_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= SPL_PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool spl_is_subsampled_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN
+ && format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END)
+ return true;
+ else
+ return false;
+}
+
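With the enum ranges this patch defines in dc_spl_types.h (further below), the helpers currently coincide for the two 4:2:0 formats; a truth table for the formats this file touches:

/*   format                     yuv420  video  subsampled  rgb8
 *   SPL_PIXEL_FORMAT_ARGB8888      no     no          no   yes
 *   SPL_PIXEL_FORMAT_420BPP8      yes    yes         yes    no
 *   SPL_PIXEL_FORMAT_420BPP10     yes    yes         yes    no
 */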
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
{
struct spl_rect rec;
@@ -137,15 +172,32 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
struct spl_in *spl_in,
struct spl_rect *plane_clip_rec)
{
- int mpc_slice_count = spl_in->basic_in.mpc_combine_h;
- int mpc_slice_idx = spl_in->basic_in.mpc_combine_v;
+ bool use_recout_width_aligned =
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned;
+ int mpc_slice_count =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices;
+ int recout_width_align =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_recout_width_align;
+ int mpc_slice_idx = spl_in->basic_in.mpc_h_slice_index;
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
- mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
- mpc_rec.height = plane_clip_rec->height;
- mpc_rec.y = plane_clip_rec->y;
+ if (use_recout_width_aligned) {
+ mpc_rec.width = recout_width_align;
+ if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
+ mpc_rec.width = plane_clip_rec->width % recout_width_align;
+ mpc_rec.x = plane_clip_rec->x + recout_width_align * mpc_slice_idx;
+ } else
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+
+ } else {
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ }
SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
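A worked example of the new recout-width-aligned path, with hypothetical numbers (clip width 1000, recout_width_align 384, three slices):

/*   slice 0: width = 384, x = clip.x
 *   slice 1: width = 384, x = clip.x + 384
 *   slice 2: 384 * 3 = 1152 > 1000, so width = 1000 % 384 = 232
 *            and x = clip.x + 384 * 2
 *   (384 + 384 + 232 == 1000, so the slices still tile the clip)
 */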
@@ -391,8 +443,7 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
- if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
spl_scratch->scl_data.ratios.horz_c.value /= 2;
spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
@@ -529,23 +580,6 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
*vp_offset = src_size - *vp_offset - *vp_size;
}
-static bool spl_is_yuv420(enum spl_pixel_format format)
-{
- if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
- (format <= SPL_PIXEL_FORMAT_420BPP10))
- return true;
-
- return false;
-}
-
-static bool spl_is_rgb8(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_ARGB8888)
- return true;
-
- return false;
-}
-
/*Calculate inits and viewport */
static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_scratch *spl_scratch)
@@ -556,8 +590,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_rect recout_clip_in_recout_dst;
struct spl_rect overlap_in_active_timing;
struct spl_rect odm_slice = calculate_odm_slice_in_timing_active(spl_in);
- int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ int vpc_div = spl_is_subsampled_format(spl_in->basic_in.format) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
@@ -585,12 +618,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
- if (orthogonal_rotation) {
- spl_swap(src.width, src.height);
- spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
- }
-
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (spl_is_subsampled_format(spl_in->basic_in.format)) {
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
@@ -598,15 +626,15 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
switch (spl_in->basic_in.cositing) {
- case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_zero;
+ case CHROMA_COSITING_TOPLEFT:
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
- case CHROMA_COSITING_NONE:
+ case CHROMA_COSITING_LEFT:
init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_zero;
break;
- case CHROMA_COSITING_TOPLEFT:
+ case CHROMA_COSITING_NONE:
default:
init_adj_h = spl_fixpt_zero;
init_adj_v = spl_fixpt_zero;
@@ -614,6 +642,12 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
}
}
+ if (orthogonal_rotation) {
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(init_adj_h, init_adj_v);
+ }
+
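A worked case for the reordering above (sign and cositing values hypothetical):

/* sign = +1, CHROMA_COSITING_TOPLEFT: init_adj_h = init_adj_v = 1/4.
 * LEFT now adjusts horizontally only (h = 1/4, v = 0), and because the
 * orthogonal-rotation swap runs after the cositing switch, a 90/270-
 * degree rotation exchanges init_adj_h/init_adj_v so the quarter-pel
 * nudge tracks the post-rotation axes.
 */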
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
@@ -678,7 +712,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* since 3d is special and needs to calculate vp as if there is no recout offset
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
- if (spl_in->basic_in.mpc_combine_v) {
+ if (spl_in->basic_in.mpc_h_slice_index) {
SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
@@ -698,24 +732,6 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
viewport->width = MIN_VIEWPORT_SIZE;
}
-static bool spl_dscl_is_420_format(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_420BPP8 ||
- format == SPL_PIXEL_FORMAT_420BPP10)
- return true;
- else
- return false;
-}
-
-static bool spl_dscl_is_video_format(enum spl_pixel_format format)
-{
- if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
- && format <= SPL_PIXEL_FORMAT_VIDEO_END)
- return true;
- else
- return false;
-}
-
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
const struct spl_scaler_data *data,
bool enable_isharp, bool enable_easf)
@@ -732,8 +748,8 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
&& !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
- if (!spl_dscl_is_420_format(pixel_format)) {
- if (spl_dscl_is_video_format(pixel_format))
+ if (!spl_is_subsampled_format(pixel_format)) {
+ if (spl_is_video_format(pixel_format))
return SCL_MODE_SCALING_444_YCBCR_ENABLE;
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
@@ -756,7 +772,7 @@ static bool spl_choose_lls_policy(enum spl_pixel_format format,
enum spl_transfer_func_predefined tf_predefined_type,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_yuv420(format)) {
+ if (spl_is_video_format(format)) {
*lls_pref = LLS_PREF_NO;
if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
(tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
@@ -815,7 +831,7 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
/* Check if video is in fullscreen mode */
static bool spl_is_video_fullscreen(struct spl_in *spl_in)
{
- if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen)
+ if (spl_is_video_format(spl_in->basic_in.format) && spl_in->is_fullscreen)
return true;
return false;
}
@@ -846,10 +862,10 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
* Apply sharpness to RGB and YUV (NV12/P010)
* surfaces based on policy setting
*/
- if (!spl_is_yuv420(spl_in->basic_in.format) &&
+ if (!spl_is_video_format(spl_in->basic_in.format) &&
(spl_in->sharpen_policy == SHARPEN_YUV))
return enable_isharp;
- else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
+ else if ((spl_is_video_format(spl_in->basic_in.format) && !fullscreen) &&
(spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
return enable_isharp;
else if (!spl_in->is_fullscreen &&
@@ -882,8 +898,8 @@ static void spl_get_taps_non_adaptive_scaler(
if (in_taps->v_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
- spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert, 2)), 8);
+ spl_scratch->scl_data.taps.v_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert), 8);
else
spl_scratch->scl_data.taps.v_taps = 4;
} else
@@ -891,8 +907,8 @@ static void spl_get_taps_non_adaptive_scaler(
if (in_taps->v_taps_c == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
- spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert_c, 2)), 8);
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert_c), 8);
else
spl_scratch->scl_data.taps.v_taps_c = 4;
} else
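Hedged arithmetic for the reworked tap clamp above, assuming a vertical scaling ratio of 2.3:

/* new: v_taps = min(2 * ceil(2.3), 8) = min(6, 8) = 6 (always even)
 * old: v_taps = min(ceil(2.3 * 2), 8) = min(5, 8) = 5 (could be odd)
 */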
@@ -932,7 +948,7 @@ static bool spl_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
- bool is_ycbcr = spl_dscl_is_video_format(spl_in->basic_in.format);
+ bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
@@ -964,7 +980,7 @@ static bool spl_get_optimal_number_of_taps(
if (skip_easf)
spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
else {
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
spl_scratch->scl_data.taps.v_taps = 6;
spl_scratch->scl_data.taps.h_taps_c = 4;
@@ -982,8 +998,7 @@ static bool spl_get_optimal_number_of_taps(
min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
- if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
- || (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10))
+ if (spl_is_yuv420(spl_in->basic_in.format))
lb_config = LB_MEMORY_CONFIG_3;
else
lb_config = LB_MEMORY_CONFIG_0;
@@ -1039,13 +1054,11 @@ static bool spl_get_optimal_number_of_taps(
if (spl_scratch->scl_data.taps.h_taps_c == 5)
spl_scratch->scl_data.taps.h_taps_c = 4;
- if (spl_is_yuv420(spl_in->basic_in.format)) {
- if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
- (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
+ if (spl_scratch->scl_data.taps.h_taps <= 4) {
*enable_easf_v = false;
*enable_easf_h = false;
- } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
- (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ } else if (spl_scratch->scl_data.taps.v_taps <= 3) {
*enable_easf_v = false;
*enable_easf_h = true;
} else {
@@ -1086,10 +1099,10 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_ycbcr)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_ycbcr)
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
*enable_easf_v = false;
@@ -1103,11 +1116,11 @@ static bool spl_get_optimal_number_of_taps(
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)))
spl_scratch->scl_data.taps.v_taps = 1;
- if ((!*enable_easf_h) && !is_ycbcr &&
+ if ((!*enable_easf_h) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
spl_scratch->scl_data.taps.h_taps_c = 1;
- if ((!*enable_easf_v) && !is_ycbcr &&
+ if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
}
@@ -1118,7 +1131,7 @@ static bool spl_get_optimal_number_of_taps(
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
- bool ycbcr = spl_dscl_is_video_format(format);
+ bool ycbcr = spl_is_video_format(format);
if (ycbcr) {
scl_black_color->offset_rgb_y = BLACK_OFFSET_RGB_Y;
scl_black_color->offset_rgb_cbcr = BLACK_OFFSET_CBCR;
@@ -1585,7 +1598,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
0x0; // fp1.5.10, C3 coefficient
}
- if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ if (spl_is_subsampled_format(format)) { /* TODO: 0 = RGB, 1 = YUV */
dscl_prog_data->easf_matrix_mode = 1;
/*
* 2-bit, BF3 chroma mode correction calculation mode
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 55d557df4aa5..467af9dd90de 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -63,13 +63,13 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
- SPL_PIXEL_FORMAT_INVALID,
- SPL_PIXEL_FORMAT_422BPP8,
- SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_END = SPL_PIXEL_FORMAT_420BPP10,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_VIDEO_END = SPL_PIXEL_FORMAT_420BPP10,
+ SPL_PIXEL_FORMAT_INVALID,
SPL_PIXEL_FORMAT_UNKNOWN
};
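The reorder relies on plain C enum auto-numbering; a hedged sketch of the resulting value space (422BPP8/422BPP10 are dropped outright):

/* ... < SPL_PIXEL_FORMAT_420BPP8 < SPL_PIXEL_FORMAT_420BPP10
 *     < SPL_PIXEL_FORMAT_INVALID < SPL_PIXEL_FORMAT_UNKNOWN
 * Range checks such as
 *   format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN &&
 *   format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END
 * therefore exclude INVALID/UNKNOWN, and future formats (e.g. the
 * dropped 422 variants) can presumably be re-added above INVALID
 * without disturbing the BEGIN/END aliases.
 */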
@@ -436,8 +436,14 @@ struct basic_in {
struct spl_rect clip_rect; // Clip rect
enum spl_rotation_angle rotation; // Rotation
bool horizontal_mirror; // Horizontal mirror
- int mpc_combine_h; // MPC Horizontal Combine Factor (split_count)
- int mpc_combine_v; // MPC Vertical Combine Factor (split_idx)
+ struct { // previously mpc_combine_h (split_count)
+ bool use_recout_width_aligned;
+ union {
+ int mpc_num_h_slices;
+ int mpc_recout_width_align;
+ } num_slices_recout_width;
+ } num_h_slices_recout_width_align;
+ int mpc_h_slice_index; // previously mpc_combine_v (split_idx)
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index b353c4ceb60d..4b3ccbca0da2 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -69,6 +69,9 @@
#define DMUB_PC_SNAPSHOT_COUNT 10
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b800a507d1e0..d0fe324cb537 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -431,7 +431,68 @@ union replay_debug_flags {
*/
uint32_t enable_ips_residency_profiling : 1;
- uint32_t reserved : 20;
+ /**
+ * 0x1000 (bit 12)
+ * @enable_coasting_vtotal_check: Enable Coasting_vtotal_check
+ */
+ uint32_t enable_coasting_vtotal_check : 1;
+ /**
+ * 0x2000 (bit 13)
+ * @enable_visual_confirm_debug: Enable Visual Confirm Debug
+ */
+ uint32_t enable_visual_confirm_debug : 1;
+
+ uint32_t reserved : 18;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
+ * Flags record error state.
+ */
+union replay_visual_confirm_error_state_flags {
+ struct {
+ /**
+ * 0x1 (bit 0) - Desync Error flag.
+ */
+ uint32_t desync_error : 1;
+
+ /**
+ * 0x2 (bit 1) - State Transition Error flag.
+ */
+ uint32_t state_transition_error : 1;
+
+ /**
+ * 0x4 (bit 2) - CRC Error flag.
+ */
+ uint32_t crc_error : 1;
+
+ /**
+ * 0x8 (bit 3) - Reserved
+ */
+ uint32_t reserved_3 : 1;
+
+ /**
+ * 0x10 (bit 4) - Incorrect coasting vtotal detected in static screen;
+ * a debug flag controls the corresponding DPCD write.
+ */
+ uint32_t incorrect_vtotal_in_static_screen : 1;
+
+ /**
+ * 0x20 (bit 5) - Refresh rate could not be doubled.
+ */
+ uint32_t no_double_rr : 1;
+
+ /**
+ * Reserved bits 6-7
+ */
+ uint32_t reserved_6_7 : 2;
+
+ /**
+ * Reserved bits 8-31
+ */
+ uint32_t reserved_8_31 : 24;
} bitfields;
uint32_t u32All;
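A quick aliasing check, assuming the usual little-endian bitfield layout:

/* desync_error (bit 0) and crc_error (bit 2) set together give
 *   u32All == 0x00000005
 * which is the form REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS (added
 * further below) would presumably carry to the firmware.
 */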
@@ -475,11 +536,23 @@ union replay_hw_flags {
* Use TPS3 signal when restore main link.
*/
uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
} bitfields;
uint32_t u32All;
};
+union fw_assisted_mclk_switch_version {
+ struct {
+ uint8_t minor : 5;
+ uint8_t major : 3;
+ };
+ uint8_t ver;
+};
+
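Sanity-checking the packing with the values dcn401 programs below (minor = 1, major = 2); bit order assumes the usual little-endian bitfield layout:

/* minor occupies bits 0-4 and major bits 5-7, so
 *   ver == (2 << 5) | 1 == 0x41
 * and the optc401 hunks above compare dc->caps.dmub_caps.fams_ver
 * against exactly this byte instead of the old ">= 2" check.
 */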
/**
* DMUB feature capabilities.
* After DMUB init, driver will query FW capabilities prior to enabling certain features.
@@ -1823,52 +1896,11 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
-/* dynamic stream state */
-struct dmub_fams2_legacy_stream_dynamic_state {
- uint8_t force_allow_at_vblank;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_subvp_stream_dynamic_state {
- uint16_t viewport_start_hubp_vline;
- uint16_t viewport_height_hubp_vlines;
- uint16_t viewport_start_c_hubp_vline;
- uint16_t viewport_height_c_hubp_vlines;
- uint16_t phantom_viewport_height_hubp_vlines;
- uint16_t phantom_viewport_height_c_hubp_vlines;
- uint16_t microschedule_start_otg_vline;
- uint16_t mall_start_otg_vline;
- uint16_t mall_start_hubp_vline;
- uint16_t mall_start_c_hubp_vline;
- uint8_t force_allow_at_vblank_only;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_drr_stream_dynamic_state {
- uint16_t stretched_vtotal;
- uint8_t use_cur_vtotal;
- uint8_t pad;
-};
-
-struct dmub_fams2_stream_dynamic_state {
- uint64_t ref_tick;
- uint32_t cur_vtotal;
- uint16_t adjusted_allow_end_otg_vline;
- uint8_t pad[2];
- struct dmub_optc_position ref_otg_pos;
- struct dmub_optc_position target_otg_pos;
- union {
- struct dmub_fams2_legacy_stream_dynamic_state legacy;
- struct dmub_fams2_subvp_stream_dynamic_state subvp;
- struct dmub_fams2_drr_stream_dynamic_state drr;
- } sub_state;
-};
-
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
uint8_t programming_delay_otg_vlines;
-};
+}; //v0
struct dmub_fams2_subvp_stream_static_state {
uint16_t vratio_numerator;
@@ -1887,14 +1919,59 @@ struct dmub_fams2_subvp_stream_static_state {
uint8_t phantom_otg_inst;
uint8_t phantom_pipe_mask;
uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
-};
+}; //v0
struct dmub_fams2_drr_stream_static_state {
uint16_t nom_stretched_vtotal;
uint8_t programming_delay_otg_vlines;
uint8_t only_stretch_if_required;
uint8_t pad[2];
-};
+}; //v0
+
+struct dmub_fams2_cmd_legacy_stream_static_state {
+ uint16_t vactive_det_fill_delay_otg_vlines;
+ uint16_t programming_delay_otg_vlines;
+}; //v1
+
+struct dmub_fams2_cmd_subvp_stream_static_state {
+ uint16_t vratio_numerator;
+ uint16_t vratio_denominator;
+ uint16_t phantom_vtotal;
+ uint16_t phantom_vactive;
+ uint16_t programming_delay_otg_vlines;
+ uint16_t prefetch_to_mall_otg_vlines;
+ union {
+ struct {
+ uint8_t is_multi_planar : 1;
+ uint8_t is_yuv420 : 1;
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t phantom_otg_inst;
+ uint8_t phantom_pipe_mask;
+ uint8_t pad0;
+ uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
+ uint8_t pad1[4 - (DMUB_MAX_PHANTOM_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_cmd_drr_stream_static_state {
+ uint16_t nom_stretched_vtotal;
+ uint16_t programming_delay_otg_vlines;
+ uint8_t only_stretch_if_required;
+ uint8_t pad[3];
+}; //v1
+
+union dmub_fams2_stream_static_sub_state {
+ struct dmub_fams2_legacy_stream_static_state legacy;
+ struct dmub_fams2_subvp_stream_static_state subvp;
+ struct dmub_fams2_drr_stream_static_state drr;
+}; //v0
+
+union dmub_fams2_cmd_stream_static_sub_state {
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy;
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp;
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+}; //v1
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
@@ -1924,13 +2001,45 @@ struct dmub_fams2_stream_static_state {
uint8_t pipe_mask; // pipe mask for the whole config
uint8_t num_planes;
uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
- uint8_t pad[DMUB_MAX_PLANES % 4];
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+ union dmub_fams2_stream_static_sub_state sub_state;
+}; //v0
+
+struct dmub_fams2_cmd_stream_static_base_state {
+ enum fams2_stream_type type;
+ uint32_t otg_vline_time_ns;
+ uint32_t otg_vline_time_ticks;
+ uint16_t htotal;
+ uint16_t vtotal; // nominal vtotal
+ uint16_t vblank_start;
+ uint16_t vblank_end;
+ uint16_t max_vtotal;
+ uint16_t allow_start_otg_vline;
+ uint16_t allow_end_otg_vline;
+ uint16_t drr_keepout_otg_vline; // after this vline, vtotal cannot be changed
+ uint16_t scheduling_delay_otg_vlines; // min time to budget for ready to microschedule start
+ uint16_t contention_delay_otg_vlines; // time to budget for contention on execution
+ uint16_t vline_int_ack_delay_otg_vlines; // min time to budget for vertical interrupt firing
+ uint16_t allow_to_target_delay_otg_vlines; // time from allow vline to target vline
union {
- struct dmub_fams2_legacy_stream_static_state legacy;
- struct dmub_fams2_subvp_stream_static_state subvp;
- struct dmub_fams2_drr_stream_static_state drr;
- } sub_state;
-};
+ struct {
+ uint8_t is_drr : 1; // stream is DRR enabled
+ uint8_t clamp_vtotal_min : 1; // clamp vtotal to min instead of nominal
+ uint8_t min_ttu_vblank_usable : 1; // if min ttu vblank is above wm, no force pstate is needed in blank
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t otg_inst;
+ uint8_t pipe_mask; // pipe mask for the whole config
+ uint8_t num_planes;
+ uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_stream_static_state_v1 {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+}; //v1
/**
* enum dmub_fams2_allow_delay_check_mode - macroscheduler mode for breaking on excessive
@@ -1970,7 +2079,11 @@ struct dmub_cmd_fams2_global_config {
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
- struct dmub_fams2_stream_static_state stream;
+ struct dmub_fams2_stream_static_state stream; //v0
+ union {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ } stream_v1; //v1
};
/**
@@ -3592,6 +3705,8 @@ enum dmub_cmd_replay_general_subtype {
*/
REPLAY_GENERAL_CMD_DISABLED_ADAPTIVE_SYNC_SDP,
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
+ REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
+ REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index a3f3ff5d49ac..6157886f4802 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -61,10 +61,6 @@
/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)
-/* Default tracebuffer size if meta is absent. */
-#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
-
-
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
@@ -708,7 +704,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 95838c7ab054..29ccd3532d13 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -996,9 +996,9 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
{
- link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+ link->replay_settings.low_rr_full_screen_video_pseudo_vtotal = vtotal;
}
void calculate_replay_link_off_frame_count(struct dc_link *link,
@@ -1039,3 +1039,8 @@ bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_back
memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
return true;
}
+
+void reset_replay_dsync_error_count(struct dc_link *link)
+{
+ link->replay_settings.replay_desync_error_fail_count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index cac302e8fa10..758a8aa31fbe 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -62,7 +62,7 @@ void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
uint32_t vtotal);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
@@ -78,4 +78,5 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
+void reset_replay_dsync_error_count(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index a1ece3eecdf5..a08611cb8041 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -49,6 +49,17 @@
| CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
+
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 0x00000001
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 0x00000002
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 0x00000004
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 0x00000008
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 0x00000010
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 0x00000020
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 0x00000040
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK 0x0000FFFF
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT 0
+
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
@@ -56,6 +67,7 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
+#define CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
/* 1/2/4/8/16 lanes */
@@ -65,4 +77,10 @@
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+#define AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
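A hedged illustration of the split (which half carries driver vs. platform caps is inferred from the surrounding comments):

/* The new ASIC flags occupy the low 16 bits, the existing flags the
 * high 16 bits, with mirrored values:
 *   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 == 0x00000020
 *   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16      == 0x00200000 == 0x20 << 16
 * so one u32 can hold both halves, and
 *   (caps & CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK) >>
 *       CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT
 * yields a value directly comparable with the new ASIC field.
 */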
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 7eefcb0f5070..6dccee403a3d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -344,6 +344,16 @@ enum DC_DEBUG_MASK {
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
+
+ /**
+ * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ */
+ DC_DISABLE_HDMI_CEC = 0x10000,
+
+ /**
+ * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
+ */
+ DC_DISABLE_SUBVP = 0x20000,
};
enum amd_dpm_forced_level;
@@ -401,9 +411,9 @@ struct amd_ip_funcs {
int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*soft_reset)(struct amdgpu_ip_block *ip_block);
int (*post_soft_reset)(struct amdgpu_ip_block *ip_block);
- int (*set_clockgating_state)(void *handle,
+ int (*set_clockgating_state)(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
- int (*set_powergating_state)(void *handle,
+ int (*set_powergating_state)(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
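
A minimal sketch of how a DC_DEBUG_MASK bit would be consulted; the feature_disabled() helper and the module-parameter source of dc_debug_mask are hypothetical, only the two flag values come from the patch.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DC_DISABLE_HDMI_CEC 0x10000
#define DC_DISABLE_SUBVP    0x20000

static bool feature_disabled(uint32_t debug_mask, uint32_t flag)
{
	return (debug_mask & flag) != 0;
}

int main(void)
{
	uint32_t dc_debug_mask = DC_DISABLE_SUBVP; /* e.g. from a module param */

	if (feature_disabled(dc_debug_mask, DC_DISABLE_HDMI_CEC))
		printf("HDMI-CEC disabled\n");
	if (feature_disabled(dc_debug_mask, DC_DISABLE_SUBVP))
		printf("Sub-Viewport disabled\n");
	return 0;
}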
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
index cae1a7e74323..73c5dd5e83d4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
@@ -19,8 +19,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_OFFSET_HEADER
-#define _dcn_2_0_3_OFFSET_HEADER
+#ifndef _dcn_2_0_1_OFFSET_HEADER
+#define _dcn_2_0_1_OFFSET_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
index ca1e1eb39256..290d807800a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
@@ -18,8 +18,8 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_SH_MASK_HEADER
-#define _dcn_2_0_3_SH_MASK_HEADER
+#ifndef _dcn_2_0_1_SH_MASK_HEADER
+#define _dcn_2_0_1_SH_MASK_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
new file mode 100644
index 000000000000..0e8f12728d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_OFFSET_HEADER
+#define _umc_8_14_0_OFFSET_HEADER
+
+#define regUMCCH0_GeccErrCntSel 0x0328
+#define regUMCCH0_GeccErrCntSel_BASE_IDX 0
+#define regUMCCH0_GeccErrCnt 0x0329
+#define regUMCCH0_GeccErrCnt_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
new file mode 100644
index 000000000000..5d723b5d9b87
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_SH_MASK_HEADER
+#define _umc_8_14_0_SH_MASK_HEADER
+
+//UMCCH0_GeccErrCntSel
+#define UMCCH0_GeccErrCntSel__GeccErrInt__SHIFT 0xc
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn__SHIFT 0xf
+#define UMCCH0_GeccErrCntSel__PoisonCntEn__SHIFT 0x10
+#define UMCCH0_GeccErrCntSel__GeccErrInt_MASK 0x00003000L
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn_MASK 0x00008000L
+#define UMCCH0_GeccErrCntSel__PoisonCntEn_MASK 0x00030000L
+//UMCCH0_GeccErrCnt
+#define UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT 0x0
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
+#define UMCCH0_GeccErrCnt__GeccErrCnt_MASK 0x0000FFFFL
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK 0xFFFF0000L
+
+#endif
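
For reference, a standalone sketch (not part of the patch) of splitting a UMCCH0_GeccErrCnt readout into its correctable/uncorrectable halves with the masks and shifts defined above; the raw register value is made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT       0x0
#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
#define UMCCH0_GeccErrCnt__GeccErrCnt_MASK         0x0000FFFFL
#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK   0xFFFF0000L

int main(void)
{
	uint32_t reg = 0x00030002; /* hypothetical readout */
	uint32_t ce = (reg & UMCCH0_GeccErrCnt__GeccErrCnt_MASK) >>
		      UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT;
	uint32_t ue = (reg & UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK) >>
		      UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT;

	printf("correctable: %u, uncorrectable: %u\n",
	       (unsigned)ce, (unsigned)ue);
	return 0;
}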
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index b0fc22383e28..0160d65f3f5e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1300,12 +1300,17 @@ struct atom_ext_display_path
//usCaps
enum ext_display_path_cap_def {
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
- EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI recoverter chip
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x01 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x02 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2 = (0x03 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x04 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x06 << 1),
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x07 << 1),
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x08 << 1), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x09 << 1), //TI retimer chip
+ EXT_DISPLAY_PATH_CAPS__AMD_INTERNAL = (0x0a << 1), //AMD internal customer chip placeholder
};
struct atom_external_display_connection_info
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
new file mode 100644
index 000000000000..64b553e7de1a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __IRQSRCS_VCN_5_0_H__
+#define __IRQSRCS_VCN_5_0_H__
+
+#define VCN_5_0__SRCID__UVD_TRAP 114 // 0x72 UVD_TRAP
+#define VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 // 0x77 Encoder General Purpose
+#define VCN_5_0__SRCID__UVD_ENC_LOW_LATENCY 120 // 0x78 Encoder Low Latency
+#define VCN_5_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 // 0x7c UVD system message interrupt
+#define VCN_5_0__SRCID__JPEG_ENCODE 151 // 0x97 JRBC Encode interrupt
+#define VCN_5_0__SRCID__JPEG_DECODE 153 // 0x99 JRBC Decode interrupt
+#define VCN_5_0__SRCID__JPEG1_DECODE 149 // 0x95 JRBC1 Decode interrupt
+#define VCN_5_0__SRCID__JPEG2_DECODE 151 // 0x97 JRBC2 Decode interrupt
+#define VCN_5_0__SRCID__JPEG3_DECODE 171 // 0xab JRBC3 Decode interrupt
+#define VCN_5_0__SRCID__JPEG4_DECODE 172 // 0xac JRBC4 Decode interrupt
+#define VCN_5_0__SRCID__JPEG5_DECODE 173 // 0xad JRBC5 Decode interrupt
+#define VCN_5_0__SRCID__JPEG6_DECODE 174 // 0xae JRBC6 Decode interrupt
+#define VCN_5_0__SRCID__JPEG7_DECODE 175 // 0xaf JRBC7 Decode interrupt
+#define VCN_5_0__SRCID__JPEG8_DECODE 177 // 0xb1 JRBC8 Decode interrupt
+#define VCN_5_0__SRCID__JPEG9_DECODE 178 // 0xb2 JRBC9 Decode interrupt
+
+#define VCN_5_0__SRCID_UVD_POISON 160
+#define VCN_5_0__SRCID_DJPEG0_POISON 161
+#define VCN_5_0__SRCID_EJPEG0_POISON 162
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index d7acdd42d80f..9189dcb65188 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -421,7 +421,9 @@ struct amd_pm_funcs {
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
int (*set_powergating_by_smu)(void *handle,
- uint32_t block_type, bool gate);
+ uint32_t block_type,
+ bool gate,
+ int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 9dc82f4d7c93..7a22aef6e59c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return ret;
}
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+ bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
- if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+ (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
@@ -88,7 +93,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
@@ -96,7 +100,12 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_VPE:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
+ (adev)->powerplay.pp_handle, block_type, gate, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
+ ret = (pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate, inst));
break;
default:
break;
@@ -566,7 +575,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
+
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -591,7 +610,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -601,7 +620,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -611,7 +630,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
enable ? "enable" : "disable", ret);
@@ -700,6 +719,21 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -953,6 +987,24 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
return level;
}
+static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
+{
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+}
+
+static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
+{
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+}
+
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level)
{
@@ -973,6 +1025,10 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
if (current_level == level)
return 0;
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
if (adev->asic_type == CHIP_RAVEN) {
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
@@ -984,35 +1040,25 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
- if (!(current_level & profile_mode_mask) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
- return -EINVAL;
-
- if (!(current_level & profile_mode_mask) &&
- (level & profile_mode_mask)) {
- /* enter UMD Pstate */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- } else if ((current_level & profile_mode_mask) &&
- !(level & profile_mode_mask)) {
- /* exit UMD Pstate */
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- }
+ if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
mutex_lock(&adev->pm.mutex);
if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
level)) {
mutex_unlock(&adev->pm.mutex);
+ /* If setting the new level failed, restore the previous UMD Pstate */
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+
return -EINVAL;
}
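
The reworked force_performance_level() above factors the UMD Pstate enter/exit into helpers and, new in this patch, rolls the transition back when the backend rejects the level. A plain-C sketch of that rollback shape, with all names illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool in_umd_state;

static void enter_umd_state(void) { in_umd_state = true;  }
static void exit_umd_state(void)  { in_umd_state = false; }

/* stand-in for the backend call; fails for negative levels */
static bool backend_set_level(int level) { return level >= 0; }

static int force_level(int cur_profile, int new_profile, int level)
{
	if (!cur_profile && new_profile)
		enter_umd_state();
	else if (cur_profile && !new_profile)
		exit_umd_state();

	if (!backend_set_level(level)) {
		/* backend failed: undo the transition we just made */
		if (!cur_profile && new_profile)
			exit_umd_state();
		else if (cur_profile && !new_profile)
			enter_umd_state();
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("ok=%d umd=%d\n", force_level(0, 1, 5), (int)in_umd_state);
	return 0;
}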
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..77b1f061bbf0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2421,6 +2421,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 363af8990aa2..1f5ac7e0230d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -397,7 +397,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
- uint32_t block_type, bool gate);
+ uint32_t block_type, bool gate, int inst);
extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
@@ -446,6 +446,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
@@ -601,5 +602,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
int policy_level);
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8908646ad620..e237ea1185a7 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
static bool kv_dpm_is_idle(void *handle)
@@ -3177,13 +3190,13 @@ static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_set_clockgating_state(void *handle,
+static int kv_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int kv_dpm_set_powergating_state(void *handle,
+static int kv_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3276,7 +3289,9 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
static int kv_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index e861355ebd75..c7518b13e787 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
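
kv_dpm.c and legacy_dpm.c above (and si_dpm.c below) now follow the same ordering: cancel the thermal work before taking pm.mutex, clear dpm_enabled under the mutex, and have the worker re-check the flag under that same mutex. A standalone pthread model of the ordering, purely illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool dpm_enabled = true;

static void *thermal_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pm_mutex);
	if (!dpm_enabled) {            /* suspend already ran: bail out */
		pthread_mutex_unlock(&pm_mutex);
		return NULL;
	}
	/* ... read temperature, recompute clocks ... */
	pthread_mutex_unlock(&pm_mutex);
	return NULL;
}

static void suspend(pthread_t worker)
{
	pthread_join(worker, NULL);    /* stands in for cancel_work_sync() */
	pthread_mutex_lock(&pm_mutex);
	dpm_enabled = false;           /* later workers see this and bail */
	/* ... disable dpm, reset the power state ... */
	pthread_mutex_unlock(&pm_mutex);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thermal_work, NULL);
	suspend(t);
	printf("suspended, dpm_enabled=%d\n", (int)dpm_enabled);
	return 0;
}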
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index ee23a0f897c5..d6dfe2599ebe 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7709,7 +7709,8 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_smc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_smc.bin", chip_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n",
err, chip_name);
@@ -7785,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7792,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7809,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
static bool si_dpm_is_idle(void *handle)
@@ -7849,13 +7864,13 @@ static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int si_dpm_set_clockgating_state(void *handle,
+static int si_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_dpm_set_powergating_state(void *handle,
+static int si_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 26624a716fc6..686345f75f26 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -244,7 +244,7 @@ static bool pp_is_idle(void *handle)
return false;
}
-static int pp_set_powergating_state(void *handle,
+static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -267,7 +267,7 @@ static int pp_resume(struct amdgpu_ip_block *ip_block)
return hwmgr_resume(hwmgr);
}
-static int pp_set_clockgating_state(void *handle,
+static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
@@ -1227,7 +1227,9 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
}
static int pp_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index fe24219c3bf4..4bd92fd782be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -992,6 +992,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
GetIndexIntoMasterTable(DATA, SMU_Info),
&size, &frev, &crev);
+ if (!psmu_info)
+ return -EINVAL;
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 3007b054c873..776d58ea63ae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
if (0 != result)
- return result;
+ goto exit_safe_mode;
vega10_didt_set_mask(hwmgr, false);
+exit_safe_mode:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
- return 0;
+ return result;
}
static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
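
The vega10 fix above is the usual goto-unwind shape: a failed register program must still drop out of RLC safe mode before returning. A minimal standalone sketch with illustrative names:

#include <stdio.h>

static void enter_safe_mode(void) { puts("enter safe mode"); }
static void exit_safe_mode(void)  { puts("exit safe mode");  }
static int  program_registers(void) { return -1; /* simulate failure */ }

static int configure(void)
{
	int result;

	enter_safe_mode();
	result = program_registers();
	if (result)
		goto out;          /* error path must not skip the unwind */

	/* ... extra work done only on success ... */
out:
	exit_safe_mode();
	return result;
}

int main(void) { return configure() ? 1 : 0; }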
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 21bd635bcdfc..ed9dac00ebfb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -238,7 +238,8 @@ static bool is_vcn_enabled(struct amdgpu_device *adev)
}
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
+ bool enable,
+ int inst)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -253,12 +254,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;
- if (atomic_read(&power_gate->vcn_gated) ^ enable)
+ if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
return 0;
- ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
+ ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
if (!ret)
- atomic_set(&power_gate->vcn_gated, !enable);
+ atomic_set(&power_gate->vcn_gated[inst], !enable);
return ret;
}
@@ -345,8 +346,9 @@ static int smu_set_mall_enable(struct smu_context *smu)
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
* @handle: smu_context pointer
- * @block_type: the IP block to power gate/ungate
- * @gate: to power gate if true, ungate otherwise
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ * @inst: the instance of the IP block to power gate/ungate
*
* This API uses no smu->mutex lock protection due to:
* 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).
@@ -357,7 +359,8 @@ static int smu_set_mall_enable(struct smu_context *smu)
*/
static int smu_dpm_set_power_gate(void *handle,
uint32_t block_type,
- bool gate)
+ bool gate,
+ int inst)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -376,10 +379,10 @@ static int smu_dpm_set_power_gate(void *handle,
*/
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
- ret = smu_dpm_set_vcn_enable(smu, !gate);
+ ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
if (ret)
- dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
- gate ? "gate" : "ungate");
+ dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
+ gate ? "gate" : "ungate", inst);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
@@ -609,7 +612,8 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- if (!smu_table->hardcode_pptable) {
+ if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
+ kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;
@@ -724,6 +728,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
smu_v13_0_6_set_ppt_funcs(smu);
/* Enable pp_od_clk_voltage node */
smu->od_enabled = true;
@@ -782,21 +787,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
- int vcn_gate, jpeg_gate;
+ int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
int ret = 0;
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- vcn_gate = atomic_read(&power_gate->vcn_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
+ }
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
- ret = smu_dpm_set_vcn_enable(smu, true);
- if (ret)
- return ret;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ ret = smu_dpm_set_vcn_enable(smu, true, i);
+ if (ret)
+ return ret;
+ }
}
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
@@ -813,8 +822,10 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- smu_dpm_set_vcn_enable(smu, !vcn_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
+ }
return ret;
}
@@ -1268,7 +1279,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
@@ -1280,7 +1291,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
- atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
@@ -1810,7 +1822,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int i, ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -1836,7 +1848,8 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
ret = smu_set_gfx_imu_enable(smu);
if (ret)
return ret;
- smu_dpm_set_vcn_enable(smu, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
@@ -2034,12 +2047,13 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- smu_dpm_set_vcn_enable(smu, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, false, i);
smu_dpm_set_jpeg_enable(smu, false);
smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
@@ -2191,13 +2205,13 @@ static int smu_display_configuration_change(void *handle,
return 0;
}
-static int smu_set_clockgating_state(void *handle,
+static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int smu_set_powergating_state(void *handle,
+static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2979,9 +2993,10 @@ static int smu_read_sensor(void *handle,
int *size_arg)
{
struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
- int ret = 0;
+ int i, ret = 0;
uint32_t *size, size_val;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -3027,7 +3042,13 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
- *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
+ *(uint32_t *)data = 0;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
+ *(uint32_t *)data = 1;
+ break;
+ }
+ }
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -3895,3 +3916,13 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
+ ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);
+
+ return ret;
+}
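
smu_dpm_set_vcn_enable() now keeps one gated flag per VCN instance and skips messages that would not change that instance's state. A standalone model of the bookkeeping, using C11 atomics in place of the kernel atomic_t array:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCN_INST 4

static atomic_int vcn_gated[MAX_VCN_INST];

static int set_vcn_enable(int inst, bool enable)
{
	/* gated==1 means powered down; XOR true means state already matches */
	if (atomic_load(&vcn_gated[inst]) ^ enable)
		return 0;

	printf("msg: %s VCN%d\n", enable ? "PowerUpVcn" : "PowerDownVcn", inst);
	atomic_store(&vcn_gated[inst], !enable);
	return 0;
}

int main(void)
{
	for (int i = 0; i < MAX_VCN_INST; i++)
		atomic_store(&vcn_gated[i], 1);    /* boot: all gated */

	set_vcn_enable(1, true);   /* powers up instance 1 only */
	set_vcn_enable(1, true);   /* no-op: already ungated */
	return 0;
}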
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3925815358ce..3630593bce61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -399,7 +399,7 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
- atomic_t vcn_gated;
+ atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
atomic_t umsch_mm_gated;
@@ -1373,6 +1373,11 @@ struct pptable_funcs {
int (*send_rma_reason)(struct smu_context *smu);
/**
+ * @reset_sdma: message SMU to soft reset sdma instance.
+ */
+ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1631,6 +1636,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 0f96b8c59a0e..274b3e1cc4fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -34,6 +34,8 @@
#define NUM_PCIE_BITRATES 4
#define NUM_XGMI_BITRATES 4
#define NUM_XGMI_WIDTHS 3
+#define NUM_SOC_P2S_TABLES 3
+#define NUM_TDP_GROUPS 4
typedef enum {
/*0*/ FEATURE_DATA_CALCULATION = 0,
@@ -80,8 +82,10 @@ typedef enum {
/*41*/ FEATURE_CXL_QOS = 41,
/*42*/ FEATURE_SOC_DC_RTC = 42,
/*43*/ FEATURE_GFX_DC_RTC = 43,
+/*44*/ FEATURE_DVM_MIN_PSM = 44,
+/*45*/ FEATURE_PRC = 45,
-/*44*/ NUM_FEATURES = 44
+/*46*/ NUM_FEATURES = 46
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -123,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xE
+#define SMU_METRICS_TABLE_VERSION 0xF
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -234,6 +238,9 @@ typedef struct __attribute__((packed, aligned(4))) {
//PCIE BW Data and error count
uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
@@ -328,13 +335,14 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t JpegBusy[32];
} MetricsTableA_t;
-#define SMU_VF_METRICS_TABLE_VERSION 0x3
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41cb681927e2..147bfb12fd75 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -93,7 +93,8 @@
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SelectPstatePolicy 0x44
-#define PPSMC_Message_Count 0x45
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_Message_Count 0x4E
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a299dc4a8071..e4cd6a0d13da 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -275,7 +275,8 @@
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
__SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
- __SMU_DUMMY_MAP(MALLPowerState),
+ __SMU_DUMMY_MAP(MALLPowerState), \
+ __SMU_DUMMY_MAP(ResetSDMA),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ae3563d71fa0..8d4a96e23326 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -107,6 +107,7 @@ struct smu_13_0_dpm_context {
struct smu_13_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
};
enum smu_13_0_power_state {
@@ -303,5 +304,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 286777ada1df..19a25fdc2f5b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1157,19 +1157,15 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- 0x10000 * i, NULL);
- if (ret)
- return ret;
- }
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ 0x10000 * inst, NULL);
}
return ret;
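
Sienna Cichlid above (and smu_v13_0 below) encode the VCN instance in the upper 16 bits of the message argument, so 0x10000 * inst and inst << 16U produce the same value. A trivial standalone check:

#include <stdint.h>
#include <stdio.h>

static uint32_t vcn_msg_param(int inst)
{
	return (uint32_t)inst << 16;   /* same value as 0x10000 * inst */
}

int main(void)
{
	for (int inst = 0; inst < 2; inst++)
		printf("inst %d -> param 0x%08x\n", inst,
		       (unsigned)vcn_msg_param(inst));
	return 0;
}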
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 480cf3cb204d..189c6a32b6bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -105,7 +105,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f6b029354327..83163d7c7f00 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1732,7 +1732,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = 0;
/* Valid power data is available only from primary die */
if (aldebaran_is_primary(smu)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 2bfea740dace..fbbdfa54f6a2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -103,7 +103,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1320,11 +1321,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
+ smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1377,12 +1378,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
@@ -2108,18 +2109,14 @@ int smu_v13_0_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
return ret;
}
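
The AC/DC change above moves the SMU acknowledgement out of the interrupt path: the handler only schedules work, and the work item performs the (potentially blocking) message exchange. A toy pthread model of that split; everything here is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;

static void irq_handler(void)          /* must not sleep */
{
	pthread_mutex_lock(&lock);
	work_pending = true;           /* schedule_work() analogue */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)         /* may sleep */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!work_pending)
		pthread_cond_wait(&cond, &lock);
	work_pending = false;
	pthread_mutex_unlock(&lock);
	puts("msg: ReenableAcDcInterrupt");   /* slow SMU round-trip */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	irq_handler();
	pthread_join(t, NULL);
	return 0;
}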
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3aa705aae4c0..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2643,11 +2643,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
@@ -3219,6 +3220,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ab3c93ddce46..da7bd9227afe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,23 +101,24 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
- return (smu->adev->flags & AMD_IS_APU) &&
- smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557600;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05550E00;
- default:
- return false;
- }
-}
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
+
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(ALL),
+};
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
@@ -193,6 +194,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
};
// clang-format on
@@ -280,6 +282,162 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps |= BIT_ULL(cap);
+}
+
+static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps &= ~BIT_ULL(cap);
+}
+
+static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ return !!(dpm_context->caps & BIT_ULL(cap));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver >= 0x05550E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver >= 0x05551000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x05550B00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver >= 0x5551200)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_12_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x00561900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+
+ if (fw_ver >= 0x00561700)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t fw_ver = smu->smc_fw_version;
+ uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x552F00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+ if (fw_ver < 0x554500)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
+
+ if (adev->flags & AMD_IS_APU) {
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+
+ if (fw_ver <= 0x4556900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
+ if (fw_ver >= 0x04556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x04556A00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ } else {
+ if (fw_ver >= 0x557600)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver < 0x00556000)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
+ if (fw_ver < 0x556300)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ if (fw_ver < 0x554800)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
+ if (fw_ver >= 0x556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver < 0x00555a00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ if (fw_ver < 0x00555600)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if (pgm == 0 && fw_ver >= 0x557900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ }
+ if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
+ ((pgm == 0) && (fw_ver >= 0x00557900)) ||
+ ((pgm == 4) && (fw_ver >= 0x4557000)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ return smu_v13_0_12_init_caps(smu);
+ case IP_VERSION(13, 0, 14):
+ return smu_v13_0_14_init_caps(smu);
+ default:
+ return smu_v13_0_6_init_caps(smu);
+ }
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+ int r;
+
+ r = smu_v13_0_check_fw_version(smu);
+ /* Initialize the caps flags once the fw version has been fetched */
+ if (!r)
+ smu_v13_0_x_init_caps(smu);
+
+ return r;
+}
+
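
The caps machinery above replaces scattered smc_fw_version comparisons with a single 64-bit word, populated once after the firmware version is known and tested at each use site. A standalone sketch of the pattern under illustrative names; the version thresholds are copied from individual init functions above but mixed here purely for demonstration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

enum caps { CAP_DPM, CAP_UNI_METRICS, CAP_SDMA_RESET };

static uint64_t caps_word;

static void cap_set(enum caps c)       { caps_word |=  (1ULL << c); }
static void cap_clear(enum caps c)     { caps_word &= ~(1ULL << c); }
static bool cap_supported(enum caps c) { return caps_word & (1ULL << c); }

static void init_caps(uint32_t fw_ver)
{
	/* defaults first, then per-version adjustments */
	cap_set(CAP_DPM);
	cap_set(CAP_UNI_METRICS);
	if (fw_ver < 0x552F00)
		cap_clear(CAP_DPM);
	if (fw_ver >= 0x5551200)
		cap_set(CAP_SDMA_RESET);
}

int main(void)
{
	init_caps(0x5551300);
	printf("dpm=%d sdma_reset=%d\n",
	       (int)cap_supported(CAP_DPM), (int)cap_supported(CAP_SDMA_RESET));
	return 0;
}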
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
@@ -304,7 +462,8 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
- ret = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
@@ -600,7 +759,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
int ret, i, retry = 100;
uint32_t table_version;
@@ -796,8 +955,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy not supported in older firmwares */
- if (!(smu->adev->flags & AMD_IS_APU) &&
- (smu->smc_fw_version < 0x00556000)) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
@@ -974,7 +1132,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -987,7 +1145,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- if (smu->smc_fw_version >= 0x552F00) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
} else {
@@ -1674,7 +1832,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
- if (smu->smc_fw_version < 0x554800)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1819,9 +1977,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
- if (amdgpu_sriov_vf(adev) &&
- ((smu->smc_fw_version < 0x556600) ||
- (adev->flags & AMD_IS_APU)))
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
@@ -2025,7 +2182,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+ if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
*feature_mask = 0;
ret = 0;
}
@@ -2318,11 +2475,10 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
- bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
MetricsTableX_t *metrics_x;
@@ -2330,6 +2486,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct amdgpu_xcp *xcp;
u16 link_width_level;
u32 inst_mask;
+ bool per_inst;
metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2356,6 +2513,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->average_umc_activity =
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
+
gpu_metrics->curr_socket_power =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
/* Energy counter reported in 15.259uJ (2^-16) units */
@@ -2400,7 +2560,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
* table for both pf & one vf for smu version 85.99.0 or higher else report only
* for pf from registers
*/
- if (smu->smc_fw_version >= 0x556300) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
gpu_metrics->pcie_link_speed =
pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2429,7 +2589,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_x->PCIeNAKSentCountAcc;
gpu_metrics->pcie_nak_rcvd_count_acc =
metrics_x->PCIeNAKReceivedCountAcc;
- if (smu_v13_0_6_is_other_end_count_available(smu))
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
metrics_x->PCIeOtherEndRecoveryAcc;
@@ -2454,17 +2614,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
- apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
- smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 6)) &&
- (smu->smc_fw_version >= 0x556F00);
- smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 14)) &&
- (smu->smc_fw_version >= 0x05550B00);
-
- per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2494,6 +2644,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS)))
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc[inst]);
idx++;
}
}
@@ -2598,7 +2754,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
- if (smu->smc_fw_version < 0x554500)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
return 0;
/* Get SOC Max operating temperature */
@@ -2700,11 +2856,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
- if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2716,6 +2871,23 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
+ return -EOPNOTSUPP;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetSDMA event with mask 0x%x\n",
+ inst_mask);
+
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3026,7 +3198,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
if (instlo != 0x03b30400)
return false;
- if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
@@ -3312,9 +3484,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3352,7 +3525,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
/* pptable related */
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
@@ -3385,6 +3558,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
+ .reset_sdma = smu_v13_0_6_reset_sdma,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
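The smu_v13_0_6 hunks above all follow one conversion: scattered comparisons against smc_fw_version and the AMD_IS_APU flag become queries of a capability mask filled in once at init. A minimal, self-contained sketch of that pattern, with illustrative names (xx_* is not the driver's actual API; the version thresholds are the ones visible in the checks being replaced):

#include <stdbool.h>
#include <stdint.h>

enum xx_cap {
	XX_CAP_DPM,
	XX_CAP_RMA_MSG,
	XX_CAP_COUNT,
};

struct xx_ctx {
	uint32_t fw_version;
	bool is_apu;
	uint64_t caps;			/* one bit per enum xx_cap */
};

static void xx_cap_set(struct xx_ctx *ctx, enum xx_cap cap)
{
	ctx->caps |= 1ULL << cap;
}

static bool xx_cap_supported(const struct xx_ctx *ctx, enum xx_cap cap)
{
	return ctx->caps & (1ULL << cap);
}

/* Evaluate every version/flag condition once, at init time... */
static void xx_init_caps(struct xx_ctx *ctx)
{
	if (ctx->fw_version >= 0x552F00)
		xx_cap_set(ctx, XX_CAP_DPM);
	if (!ctx->is_apu && ctx->fw_version >= 0x00555a00)
		xx_cap_set(ctx, XX_CAP_RMA_MSG);
}

/* ...so call sites reduce to one readable, centralized check. */
static int xx_send_rma_reason(struct xx_ctx *ctx)
{
	if (!xx_cap_supported(ctx, XX_CAP_RMA_MSG))
		return 0;
	/* send SMU_MSG_RmaDueToBadPageThreshold here */
	return 0;
}

Besides readability, this keeps per-IP-version quirks (13.0.6 vs 13.0.14 vs APU) in one table instead of replicated at every call site.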
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index aabb94796005..55ef18517b0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2797,6 +2797,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index a87040cb2f2e..ddb6444406d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -79,7 +79,8 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1511,29 +1512,24 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- if (smu->is_apu) {
- if (i == 0)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
- i << 16U, NULL);
- else if (i == 1)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
- i << 16U, NULL);
- } else {
+ if (smu->is_apu) {
+ if (inst == 0)
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- }
-
- if (ret)
- return ret;
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ inst << 16U, NULL);
+ else if (inst == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ inst << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
}
return ret;
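smu_v14_0_set_vcn_enable() above now powers one VCN instance per call instead of looping over all of them, returning early when the instance bit is set in harvest_config. A hedged sketch of the message selection it performs (the helper name is illustrative; like the code above, it assumes at most two APU instances):

static int vcn_power_msg(bool is_apu, int inst, bool enable)
{
	/* dGPUs use one message and encode the instance in the
	 * parameter (inst << 16); APUs have per-instance messages. */
	if (!is_apu)
		return enable ? SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn;
	if (inst == 0)
		return enable ? SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0;
	return enable ? SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1;
}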
@@ -1899,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
NULL);
}
-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
- int ret = 0;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
- ret = smu_v14_0_allow_ih_interrupt(smu);
-
- return ret;
-}
-
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
@@ -1920,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
if (ret)
return ret;
- return smu_v14_0_process_pending_interrupt(smu);
+ return smu_v14_0_allow_ih_interrupt(smu);
}
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..3f1fcf8c4ee8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1193,16 +1193,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1337,12 +1330,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1627,6 +1616,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed (PWM)!\n");
+ return ret;
+ }
+
+ /* Convert the PMFW output, which is in percent, to the 0-255 PWM scale */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2417,36 +2439,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2804,6 +2814,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
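The new smu_v14_0_2_get_fan_speed_pwm() above rescales the PMFW reading, reported in percent, to the 0-255 scale the hwmon pwm1 attribute expects. The conversion in isolation:

#include <stdint.h>

static uint32_t fan_percent_to_pwm(uint32_t percent)
{
	uint32_t pwm = percent * 255 / 100;

	return pwm > 255 ? 255 : pwm;	/* same clamp as the min() above */
}

fan_percent_to_pwm(100) == 255 and fan_percent_to_pwm(50) == 127; the clamp only matters if firmware ever reports more than 100 percent.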
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index d981d721e796..358c1512b087 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 1e7b1fcb2848..6ed504099188 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -63,7 +63,6 @@ static const struct drm_driver komeda_kms_driver = {
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
- .date = "20181101",
.major = 0,
.minor = 1,
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..f30b3d5eeca5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 191b806624df..c3179d74f3f5 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -233,7 +233,6 @@ static const struct drm_driver hdlcd_driver = {
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
- .date = "20151021",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index fd2be80f3bf5..e083021e9e99 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -16,9 +16,9 @@
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -570,7 +570,6 @@ static const struct drm_driver malidp_driver = {
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
- .date = "20160106",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 650e450cc19b..cae25ad66c74 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -11,8 +11,8 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
@@ -45,7 +45,6 @@ static const struct drm_driver armada_drm_driver = {
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
- .date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index b7e608ba6194..397e677a691c 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -13,8 +13,8 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -252,7 +252,6 @@ static const struct drm_driver aspeed_gfx_driver = {
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
- .date = "20180319",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 0e282b7b167c..b9eb67e3fa90 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -195,7 +195,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
- for (i = 0; i < 200; ++i) {
+ for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
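The __ast_dp_wait_enable() change above raises the poll budget from 200 to 1000 iterations of mdelay(1), roughly one second instead of 200ms, giving slow sinks more time to report the DP video-enable state. The idiom sketched standalone (read_vgacrdf() stands in for the masked register read):

static bool wait_dp_enable(u8 expected)
{
	unsigned int i;

	for (i = 0; i < 1000; ++i) {
		if (i)
			mdelay(1);	/* skip the delay on the first pass */
		if (read_vgacrdf() == expected)	/* placeholder readback */
			return true;
	}
	return false;	/* ~1s budget exhausted */
}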
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 4afe4be072ef..ff3bcdd1cff2 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -31,8 +31,8 @@
#include <linux/of.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -60,7 +60,6 @@ static const struct drm_driver ast_driver = {
.fops = &ast_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 21ce3769bf0d..6b4305ac07d4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "ast"
#define DRIVER_DESC "AST"
-#define DRIVER_DATE "20120228"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7b209af7cf45..fa8ad94e431a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -16,9 +16,9 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -846,7 +846,6 @@ static const struct drm_driver atmel_hlcdc_dc_driver = {
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
- .date = "20141504",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 8f786592143b..657bc3dd18df 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -214,7 +214,8 @@ static void audio_shutdown(struct device *dev, void *data)
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index b754947e3e00..83d711ee3a2e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -793,7 +793,7 @@ static void anx6345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx6345_id[] = {
- { "anx6345", 0 },
+ { "anx6345" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, anx6345_id);
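The anx6345 id-table change above is the first of a series of identical cleanups in this section (anx7625, chipone, ch7033, it6263, it6505, lt8912, lt9211, the lt9611 pair, the Megachips and NXP tables, sii902x, sii9234): the I2C id tables drop the unused driver_data of 0 and, where missing, gain const. The resulting shape, illustratively:

static const struct i2c_device_id example_id[] = {
	{ "example-chip" },	/* .driver_data is implicitly 0 */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, example_id);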
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index a2675b121fe4..4be34d5c7a3b 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1952,7 +1952,8 @@ static void anx7625_audio_shutdown(struct device *dev, void *data)
}
static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -2002,8 +2003,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
memset(buf, 0, len);
} else {
dev_dbg(dev, "audio copy eld\n");
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
return 0;
@@ -2137,49 +2140,6 @@ static void hdcp_check_work_func(struct work_struct *work)
drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
}
-static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
- struct drm_connector_state *state)
-{
- struct device *dev = ctx->dev;
- int cp;
-
- dev_dbg(dev, "hdcp state check\n");
- cp = state->content_protection;
-
- if (cp == ctx->hdcp_cp)
- return 0;
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- if (ctx->dp_en) {
- dev_dbg(dev, "enable HDCP\n");
- anx7625_hdcp_enable(ctx);
-
- queue_delayed_work(ctx->hdcp_workqueue,
- &ctx->hdcp_work,
- msecs_to_jiffies(2000));
- }
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- if (ctx->hdcp_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "current CP is not ENABLED\n");
- return -EINVAL;
- }
- anx7625_hdcp_disable(ctx);
- ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- drm_hdcp_update_content_protection(ctx->connector,
- ctx->hdcp_cp);
- dev_dbg(dev, "update CP to UNDESIRE\n");
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "Userspace illegal set to PROTECTION ENABLE\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int anx7625_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -2416,7 +2376,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
anx7625_bridge_mode_fixup(bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
- return anx7625_connector_atomic_check(ctx, conn_state);
+ return 0;
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
@@ -2425,6 +2385,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
dev_dbg(dev, "drm atomic enable\n");
@@ -2439,6 +2400,22 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
_anx7625_hpd_polling(ctx, 5000 * 100);
anx7625_dp_start(ctx);
+
+ conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+
+ if (WARN_ON(!conn_state))
+ return;
+
+ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (ctx->dp_en) {
+ dev_dbg(dev, "enable HDCP\n");
+ anx7625_hdcp_enable(ctx);
+
+ queue_delayed_work(ctx->hdcp_workqueue,
+ &ctx->hdcp_work,
+ msecs_to_jiffies(2000));
+ }
+ }
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -2449,6 +2426,17 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic disable\n");
+ flush_workqueue(ctx->hdcp_workqueue);
+
+ if (ctx->connector &&
+ ctx->hdcp_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ anx7625_hdcp_disable(ctx);
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_hdcp_update_content_protection(ctx->connector,
+ ctx->hdcp_cp);
+ dev_dbg(dev, "update CP to DESIRE\n");
+ }
+
ctx->connector = NULL;
anx7625_dp_stop(ctx);
@@ -2795,7 +2783,7 @@ static void anx7625_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx7625_id[] = {
- {"anx7625", 0},
+ { "anx7625" },
{}
};
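Two behavioral points in the anx7625 hunks above. First, the ELD copy now runs under connector->eld_mutex, serializing against concurrent EDID/ELD updates. Second, HDCP handling leaves .atomic_check, which is supposed to validate state without side effects; enabling moves to .atomic_enable keyed off the committed connector state, and .atomic_disable downgrades the property to DESIRED so HDCP re-arms on the next enable. A skeletal sketch of that split (names and placeholders are illustrative, not the driver's code):

static int ex_bridge_atomic_check(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	return 0;	/* validate only; no hardware writes here */
}

static void ex_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *old_state)
{
	struct drm_connector_state *conn_state;

	/* bring up the link first, then honour the HDCP request */
	conn_state = /* drm_atomic_get_new_connector_state(...) */ NULL;
	if (conn_state && conn_state->content_protection ==
			  DRM_MODE_CONTENT_PROTECTION_DESIRED)
		/* kick HDCP and queue the ~2s status re-check */;
}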
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 31832ba4017f..42248f179b69 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -500,34 +500,6 @@ static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_GENERAL,
- HDCP_GENERAL_SET_LC_128,
- 16, val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
-int
-cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
- HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
- sizeof(*val), (u8 *)val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
int ret;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
index 334c0b8b0d4f..3b6ec9c3a8d8 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
@@ -82,9 +82,6 @@ struct cdns_hdcp_tx_public_key_param {
u8 E[DLP_E];
};
-int cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val);
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val);
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type);
int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 9eecac457dcf..d47703559b0d 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -785,7 +785,7 @@ static struct mipi_dsi_driver chipone_dsi_driver = {
},
};
-static struct i2c_device_id chipone_i2c_id[] = {
+static const struct i2c_device_id chipone_i2c_id[] = {
{ "chipone,icn6211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index c83486cf6b15..da17f0978a79 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -597,7 +597,7 @@ static const struct of_device_id ch7033_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
static const struct i2c_device_id ch7033_ids[] = {
- { "ch7033", 0 },
+ { "ch7033" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cbabd4e20d3e..306b5e374b9e 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -48,6 +48,7 @@
#define REG_COL_DEP GENMASK(1, 0)
#define BIT8 FIELD_PREP(REG_COL_DEP, 1)
#define OUT_MAP BIT(4)
+#define VESA BIT(4)
#define JEIDA 0
#define REG_DESSC_ENB BIT(6)
#define DMODE BIT(7)
@@ -428,12 +429,30 @@ static inline void it6263_lvds_reset(struct it6263 *it)
fsleep(10000);
}
+static inline bool it6263_is_input_bus_fmt_valid(int input_fmt)
+{
+ switch (input_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ return true;
+ }
+ return false;
+}
+
static inline void it6263_lvds_set_interface(struct it6263 *it)
{
+ u8 fmt;
+
/* color depth */
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8);
+
+ if (it->lvds_data_mapping == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG)
+ fmt = VESA;
+ else
+ fmt = JEIDA;
+
/* output mapping */
- regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, fmt);
if (it->lvds_dual_link) {
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO);
@@ -550,15 +569,6 @@ static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static int it6263_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
-}
-
static void
it6263_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
@@ -714,14 +724,14 @@ it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
*num_input_fmts = 0;
- if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA)
+ if (!it6263_is_input_bus_fmt_valid(it->lvds_data_mapping))
return NULL;
input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
- input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ input_fmts[0] = it->lvds_data_mapping;
*num_input_fmts = 1;
return input_fmts;
@@ -793,7 +803,6 @@ static const struct drm_bridge_funcs it6263_bridge_funcs = {
.mode_valid = it6263_bridge_mode_valid,
.atomic_disable = it6263_bridge_atomic_disable,
.atomic_enable = it6263_bridge_atomic_enable,
- .atomic_check = it6263_bridge_atomic_check,
.detect = it6263_bridge_detect,
.edid_read = it6263_bridge_edid_read,
.atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts,
@@ -845,8 +854,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
@@ -878,7 +887,7 @@ static const struct of_device_id it6263_of_match[] = {
MODULE_DEVICE_TABLE(of, it6263_of_match);
static const struct i2c_device_id it6263_i2c_ids[] = {
- { "it6263", 0 },
+ { "it6263" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids);
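With the it6263 hunks above the bridge accepts both SPWG and JEIDA LVDS input mappings, advertises whichever one lvds_data_mapping selects, and programs OUT_MAP to match instead of hard-wiring JEIDA. The selection reduces to a sketch:

static u8 it6263_out_map(u32 bus_fmt)
{
	/* SPWG wiring needs VESA bit ordering on the LVDS output;
	 * JEIDA wiring keeps JEIDA ordering. */
	return bus_fmt == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ? VESA : JEIDA;
}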
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 008d86cc562a..88ef76a37fe6 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -19,6 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/bitfield.h>
#include <crypto/hash.h>
@@ -126,6 +127,7 @@
#define REG_AUX_OUT_DATA0 0x27
#define REG_AUX_CMD_REQ 0x2B
+#define M_AUX_REQ_CMD 0x0F
#define AUX_BUSY BIT(5)
#define REG_AUX_DATA_0_7 0x2C
@@ -266,6 +268,18 @@
#define REG_SSC_CTRL1 0x189
#define REG_SSC_CTRL2 0x18A
+#define REG_AUX_USER_CTRL 0x190
+#define EN_USER_AUX BIT(0)
+#define USER_AUX_DONE BIT(1)
+#define AUX_EVENT BIT(4)
+
+#define REG_AUX_USER_DATA_REC 0x191
+#define M_AUX_IN_REC 0xF0
+#define M_AUX_OUT_REC 0x0F
+
+#define REG_AUX_USER_REPLY 0x19A
+#define REG_AUX_USER_RXB(n) ((n) + 0x19B)
+
#define RBR DP_LINK_BW_1_62
#define HBR DP_LINK_BW_2_7
#define HBR2 DP_LINK_BW_5_4
@@ -296,11 +310,13 @@
#define MAX_LANE_COUNT 4
#define MAX_LINK_RATE HBR
#define AUTO_TRAIN_RETRY 3
-#define MAX_HDCP_DOWN_STREAM_COUNT 10
+#define MAX_HDCP_DOWN_STREAM_COUNT 127
#define MAX_CR_LEVEL 0x03
#define MAX_EQ_LEVEL 0x03
#define AUX_WAIT_TIMEOUT_MS 15
-#define AUX_FIFO_MAX_SIZE 32
+#define AUX_FIFO_MAX_SIZE 16
+#define AUX_I2C_MAX_SIZE 4
+#define AUX_I2C_DEFER_RETRY 4
#define PIXEL_CLK_DELAY 1
#define PIXEL_CLK_INVERSE 0
#define ADJUST_PHASE_THRESHOLD 80000
@@ -323,7 +339,15 @@
enum aux_cmd_type {
CMD_AUX_NATIVE_READ = 0x0,
CMD_AUX_NATIVE_WRITE = 0x5,
+ CMD_AUX_GI2C_ADR = 0x08,
+ CMD_AUX_GI2C_READ = 0x09,
+ CMD_AUX_GI2C_WRITE = 0x0A,
CMD_AUX_I2C_EDID_READ = 0xB,
+ CMD_AUX_I2C_READ = 0x0D,
+ CMD_AUX_I2C_WRITE = 0x0C,
+
+ /* KSV read with AUX FIFO, extended from CMD_AUX_NATIVE_READ */
+ CMD_AUX_GET_KSV_LIST = 0x10,
};
enum aux_cmd_reply {
@@ -965,7 +989,8 @@ static ssize_t it6505_aux_operation(struct it6505 *it6505,
it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE);
aux_op_start:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ /* HW AUX FIFO supports only the EDID and DPCD KSV FIFO areas */
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */
size = min_t(size_t, size, AUX_FIFO_MAX_SIZE);
/* Enable AUX FIFO read back and clear FIFO */
@@ -996,7 +1021,7 @@ aux_op_start:
size);
/* Aux Fire */
- it6505_write(it6505, REG_AUX_CMD_REQ, cmd);
+ it6505_write(it6505, REG_AUX_CMD_REQ, FIELD_GET(M_AUX_REQ_CMD, cmd));
ret = it6505_aux_wait(it6505);
if (ret < 0)
@@ -1030,7 +1055,7 @@ aux_op_start:
goto aux_op_start;
}
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
for (i = 0; i < size; i++) {
ret = it6505_read(it6505, REG_AUX_DATA_FIFO);
if (ret < 0)
@@ -1055,7 +1080,7 @@ aux_op_start:
ret = i;
aux_op_err:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* clear AUX FIFO */
it6505_set_bits(it6505, REG_AUX_CTRL,
AUX_EN_FIFO_READ | CLR_EDID_FIFO,
@@ -1076,10 +1101,14 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
size_t size, enum aux_cmd_reply *reply)
{
int i, ret_size, ret = 0, request_size;
+ int fifo_max_size = (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) ?
+ AUX_FIFO_MAX_SIZE : 4;
mutex_lock(&it6505->aux_lock);
- for (i = 0; i < size; i += 4) {
- request_size = min((int)size - i, 4);
+ i = 0;
+ do {
+ request_size = min_t(int, (int)size - i, fifo_max_size);
+
ret_size = it6505_aux_operation(it6505, cmd, address + i,
buffer + i, request_size,
reply);
@@ -1088,14 +1117,170 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
goto aux_op_err;
}
+ i += request_size;
ret += ret_size;
- }
+ } while (i < size);
aux_op_err:
mutex_unlock(&it6505->aux_lock);
return ret;
}
+static bool it6505_aux_i2c_reply_defer(u8 reply)
+{
+ if (reply == DP_AUX_NATIVE_REPLY_DEFER || reply == DP_AUX_I2C_REPLY_DEFER)
+ return true;
+ return false;
+}
+
+static bool it6505_aux_i2c_reply_nack(u8 reply)
+{
+ if (reply == DP_AUX_NATIVE_REPLY_NACK || reply == DP_AUX_I2C_REPLY_NACK)
+ return true;
+ return false;
+}
+
+static int it6505_aux_i2c_wait(struct it6505 *it6505, u8 *reply)
+{
+ int err = 0;
+ unsigned long timeout;
+ struct device *dev = it6505->dev;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ do {
+ if (it6505_read(it6505, REG_AUX_USER_CTRL) & AUX_EVENT)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Timed out waiting AUX I2C, BUSY = %X\n",
+ it6505_aux_op_finished(it6505));
+ err = -ETIMEDOUT;
+ goto end_aux_i2c_wait;
+ }
+ usleep_range(300, 800);
+ } while (!it6505_aux_op_finished(it6505));
+
+ *reply = it6505_read(it6505, REG_AUX_USER_REPLY) >> 4;
+
+ if (*reply == 0)
+ goto end_aux_i2c_wait;
+
+ if (it6505_aux_i2c_reply_defer(*reply))
+ err = -EBUSY;
+ else if (it6505_aux_i2c_reply_nack(*reply))
+ err = -ENXIO;
+
+end_aux_i2c_wait:
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, USER_AUX_DONE, USER_AUX_DONE);
+ return err;
+}
+
+static int it6505_aux_i2c_readb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int ret, i;
+ int retry;
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_READ);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+
+ for (i = 0; i < size; i++)
+ buf[i] = it6505_read(it6505, REG_AUX_USER_RXB(i));
+
+ return size;
+}
+
+static int it6505_aux_i2c_writeb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int i, ret;
+ int retry;
+
+ for (i = 0; i < size; i++)
+ it6505_write(it6505, REG_AUX_OUT_DATA0 + i, buf[i]);
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_WRITE);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+ return size;
+}
+
+static ssize_t it6505_aux_i2c_operation(struct it6505 *it6505,
+ struct drm_dp_aux_msg *msg)
+{
+ int ret;
+ ssize_t request_size, data_cnt = 0;
+ u8 *buffer = msg->buffer;
+
+ /* set AUX user mode */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_USER_MODE | AUX_NO_SEGMENT_WR, AUX_USER_MODE);
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, EN_USER_AUX);
+ /* clear AUX FIFO */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO);
+
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00);
+
+ it6505_write(it6505, REG_AUX_ADR_0_7, 0x00);
+ it6505_write(it6505, REG_AUX_ADR_8_15, msg->address << 1);
+
+ if (msg->size == 0) {
+ /* IIC Start/STOP dummy write */
+ it6505_write(it6505, REG_AUX_ADR_16_19, msg->request);
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_ADR);
+ ret = it6505_aux_i2c_wait(it6505, &msg->reply);
+ goto end_aux_i2c_transfer;
+ }
+
+ /* IIC data transfer */
+ data_cnt = 0;
+ do {
+ request_size = min_t(ssize_t, msg->size - data_cnt, AUX_I2C_MAX_SIZE);
+ it6505_write(it6505, REG_AUX_ADR_16_19,
+ msg->request | ((request_size - 1) << 4));
+ if ((msg->request & DP_AUX_I2C_READ) == DP_AUX_I2C_READ)
+ ret = it6505_aux_i2c_readb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+ else
+ ret = it6505_aux_i2c_writeb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+
+ if (ret < 0)
+ goto end_aux_i2c_transfer;
+
+ data_cnt += request_size;
+ } while (data_cnt < msg->size);
+ ret = data_cnt;
+end_aux_i2c_transfer:
+
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, 0);
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0);
+ return ret;
+}
+
+static ssize_t it6505_aux_i2c_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct it6505 *it6505 = container_of(aux, struct it6505, aux);
+
+ guard(mutex)(&it6505->aux_lock);
+ return it6505_aux_i2c_operation(it6505, msg);
+}
+
static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
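The new AUX-over-I2C path above wraps every command in a DEFER-retry loop: a sink that answers DEFER is busy, so the command is re-issued up to AUX_I2C_DEFER_RETRY (4) times, while NACK and timeout fail immediately. The skeleton of that loop, extracted as a sketch (the helper name is illustrative):

static int aux_i2c_cmd_retry(struct it6505 *it6505, u8 cmd, u8 *reply)
{
	int ret = -EBUSY;
	int retry;

	for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
		it6505_write(it6505, REG_AUX_CMD_REQ, cmd);
		ret = it6505_aux_i2c_wait(it6505, reply);
		if (it6505_aux_i2c_reply_defer(*reply))
			continue;	/* sink busy: issue it again */
		break;			/* success, NACK or timeout */
	}
	return ret;
}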
@@ -1105,9 +1290,8 @@ static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
int ret;
enum aux_cmd_reply reply;
- /* IT6505 doesn't support arbitrary I2C read / write. */
if (is_i2c)
- return -EINVAL;
+ return it6505_aux_i2c_transfer(aux, msg);
switch (msg->request) {
case DP_AUX_NATIVE_READ:
@@ -1178,6 +1362,37 @@ static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
return 0;
}
+static int it6505_get_ksvlist(struct it6505 *it6505, u8 *buf, size_t len)
+{
+ struct device *dev = it6505->dev;
+ enum aux_cmd_reply reply;
+ int request_size, ret;
+ int i = 0;
+
+ do {
+ request_size = min_t(int, (int)len - i, 15);
+
+ ret = it6505_aux_do_transfer(it6505, CMD_AUX_GET_KSV_LIST,
+ DP_AUX_HDCP_KSV_FIFO,
+ buf + i, request_size, &reply);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "request_size = %d, ret =%d", request_size, ret);
+ if (ret < 0)
+ return ret;
+
+ i += request_size;
+ } while (i < len);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv read cnt = %d down_stream_cnt=%d ", i, i / 5);
+
+ for (i = 0 ; i < len; i += 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv[%d] = %02X%02X%02X%02X%02X",
+ i / 5, buf[i], buf[i + 1], buf[i + 2], buf[i + 3], buf[i + 4]);
+ }
+
+ return len;
+}
+
static void it6505_variable_config(struct it6505 *it6505)
{
it6505->link_rate_bw_code = HBR;
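it6505_get_ksvlist() above pulls the HDCP KSV FIFO through the AUX FIFO command in chunks of at most 15 bytes (three 5-byte KSVs) until down_stream_count * 5 bytes have arrived; with MAX_HDCP_DOWN_STREAM_COUNT raised to 127, a single read can no longer cover the whole list. The chunking loop as a generic sketch (read_chunk() is a placeholder):

static int read_ksv_fifo(u8 *buf, size_t len)
{
	int i = 0, request_size, ret;

	do {
		/* 15 = 3 KSVs, the most one AUX FIFO pass returns */
		request_size = min_t(int, (int)len - i, 15);
		ret = read_chunk(buf + i, request_size);
		if (ret < 0)
			return ret;
		i += request_size;
	} while (i < len);

	return len;
}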
@@ -1959,7 +2174,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
u8 binfo[2];
- int down_stream_count, i, err, msg_count = 0;
+ int down_stream_count, err, msg_count = 0;
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo,
ARRAY_SIZE(binfo));
@@ -1984,18 +2199,11 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
down_stream_count);
return 0;
}
+ err = it6505_get_ksvlist(it6505, sha1_input, down_stream_count * 5);
+ if (err < 0)
+ return err;
- for (i = 0; i < down_stream_count; i++) {
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO +
- (i % 3) * DRM_HDCP_KSV_LEN,
- sha1_input + msg_count,
- DRM_HDCP_KSV_LEN);
-
- if (err < 0)
- return err;
-
- msg_count += 5;
- }
+ msg_count += down_stream_count * 5;
it6505->hdcp_down_stream_count = down_stream_count;
sha1_input[msg_count++] = binfo[0];
@@ -2023,7 +2231,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
u8 av[5][4], bv[5][4];
- int i, err;
+ int i, err, retry;
i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
if (i <= 0) {
@@ -2032,22 +2240,28 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
}
it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ /* 1B-05: V' check must retry 3 times */
+ for (retry = 0; retry < 3; retry++) {
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+ sizeof(bv));
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
- sizeof(bv));
+ if (err < 0) {
+ dev_err(dev, "Read V' value Fail %d", retry);
+ continue;
+ }
- if (err < 0) {
- dev_err(dev, "Read V' value Fail");
- return false;
- }
+ for (i = 0; i < 5; i++) {
+ if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+ av[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ break;
- for (i = 0; i < 5; i++)
- if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
- return false;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ return true;
+ }
+ }
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
- return true;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry);
+ return false;
}
static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
@@ -2055,12 +2269,13 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
struct it6505 *it6505 = container_of(work, struct it6505,
hdcp_wait_ksv_list);
struct device *dev = it6505->dev;
- unsigned int timeout = 5000;
- u8 bstatus = 0;
+ u8 bstatus;
bool ksv_list_check;
+ /* 1B-04: wait for the KSV list for up to 5s */
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(5000) + 1;
- timeout /= 20;
- while (timeout > 0) {
+ for (;;) {
if (!it6505_get_sink_hpd_status(it6505))
return;
@@ -2069,27 +2284,23 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
if (bstatus & DP_BSTATUS_READY)
break;
- msleep(20);
- timeout--;
- }
+ if (time_after(jiffies, timeout)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "KSV list wait timeout");
+ goto timeout;
+ }
- if (timeout == 0) {
- DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
- goto timeout;
+ msleep(20);
}
ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
ksv_list_check ? "pass" : "fail");
- if (ksv_list_check) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
+ if (ksv_list_check)
return;
- }
+
timeout:
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
+ it6505_start_hdcp(it6505);
}
static void it6505_hdcp_work(struct work_struct *work)
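The KSV-list wait above switches from counting 20ms sleeps to an absolute jiffies deadline, which stays accurate however long each DPCD poll takes, and on timeout restarts authentication via it6505_start_hdcp() rather than raising KSV_FAIL. The deadline idiom in isolation (ready() stands in for the BSTATUS poll):

static bool wait_ksv_ready(void)
{
	/* +1 jiffy guards against the tick already in progress */
	unsigned long timeout = jiffies + msecs_to_jiffies(5000) + 1;

	for (;;) {
		if (ready())
			return true;
		if (time_after(jiffies, timeout))
			return false;	/* caller restarts HDCP */
		msleep(20);
	}
}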
@@ -2312,14 +2523,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
if (dp_irq_vector & DP_CP_IRQ) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
- HDCP_TRIGGER_CPIRQ);
-
bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
if (bstatus < 0)
return bstatus;
DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
+
+ /* Check BSTATUS when receiving CP_IRQ */
+ if (bstatus & DP_BSTATUS_R0_PRIME_READY &&
+ it6505->hdcp_status == HDCP_AUTH_GOING)
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+ HDCP_TRIGGER_CPIRQ);
+ else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) &&
+ it6505->hdcp_status == HDCP_AUTH_DONE)
+ it6505_start_hdcp(it6505);
}
ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
@@ -2456,7 +2673,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
+ DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt");
+ /* 1B-01: HDCP encryption should start when R0 is ready */
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
schedule_work(&it6505->hdcp_wait_ksv_list);
}
@@ -3497,7 +3718,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id it6505_id[] = {
- { "it6505", 0 },
+ { "it6505" },
{ }
};
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 35ae3f0e8f51..23edcde6b9a7 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1450,8 +1450,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
dev_dbg(dev, "No connector present, passing empty EDID data");
memset(buf, 0, len);
} else {
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
mutex_unlock(&ctx->lock);
@@ -1464,7 +1466,6 @@ static const struct hdmi_codec_ops it66121_audio_codec_ops = {
.audio_shutdown = it66121_audio_shutdown,
.mute_stream = it66121_audio_mute,
.get_eld = it66121_audio_get_eld,
- .no_capture_mute = 1,
};
static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
@@ -1474,11 +1475,12 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.i2s = 1, /* Only i2s support for now */
.spdif = 0,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dev_dbg(dev, "%s\n", __func__);
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e265ab3c8c92..52da204f5740 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -815,8 +815,8 @@ static const struct of_device_id lt8912_dt_match[] = {
MODULE_DEVICE_TABLE(of, lt8912_dt_match);
static const struct i2c_device_id lt8912_id[] = {
- {"lt8912", 0},
- {},
+ { "lt8912" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, lt8912_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index c8881796fba4..999ddebb832d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -773,7 +773,7 @@ static void lt9211_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id lt9211_id[] = {
+static const struct i2c_device_id lt9211_id[] = {
{ "lontium,lt9211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 1b31fdebe164..e650cd83fc8d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -45,7 +45,6 @@ struct lt9611 {
struct device_node *dsi1_node;
struct mipi_dsi_device *dsi0;
struct mipi_dsi_device *dsi1;
- struct platform_device *audio_pdev;
bool ac_mode;
@@ -757,7 +756,6 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- unsigned long long rate;
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
@@ -765,17 +763,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
- return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate);
-}
-
-static int lt9611_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
+ return MODE_OK;
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
@@ -866,6 +854,10 @@ static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge,
unsigned int mask;
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ break;
+
case HDMI_INFOFRAME_TYPE_AVI:
mask = LT9611_INFOFRAME_AVI;
break;
@@ -899,6 +891,11 @@ static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge,
int i;
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ addr = 0x84b2;
+ break;
+
case HDMI_INFOFRAME_TYPE_AVI:
mask = LT9611_INFOFRAME_AVI;
addr = 0x8440;
@@ -942,6 +939,55 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
return MODE_OK;
}
+static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ regmap_write(lt9611->regmap, 0x82d7, 0x04);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x08);
+ regmap_write(lt9611->regmap, 0x8407, 0x10);
+
+ regmap_write(lt9611->regmap, 0x8434, 0xd5);
+
+ return 0;
+}
+
+static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (hparms->sample_rate == 48000)
+ regmap_write(lt9611->regmap, 0x840f, 0x2b);
+ else if (hparms->sample_rate == 96000)
+ regmap_write(lt9611->regmap, 0x840f, 0xab);
+ else
+ return -EINVAL;
+
+ regmap_write(lt9611->regmap, 0x8435, 0x00);
+ regmap_write(lt9611->regmap, 0x8436, 0x18);
+ regmap_write(lt9611->regmap, 0x8437, 0x00);
+
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
+}
+
+static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x00);
+ regmap_write(lt9611->regmap, 0x8407, 0x00);
+}
+
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
@@ -949,7 +995,6 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.edid_read = lt9611_bridge_edid_read,
.hpd_enable = lt9611_bridge_hpd_enable,
- .atomic_check = lt9611_bridge_atomic_check,
.atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
.atomic_enable = lt9611_bridge_atomic_enable,
.atomic_disable = lt9611_bridge_atomic_disable,
@@ -962,6 +1007,10 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid,
.hdmi_write_infoframe = lt9611_hdmi_write_infoframe,
.hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe,
+
+ .hdmi_audio_startup = lt9611_hdmi_audio_startup,
+ .hdmi_audio_prepare = lt9611_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = lt9611_hdmi_audio_shutdown,
};
static int lt9611_parse_dt(struct device *dev,
@@ -1015,101 +1064,6 @@ static int lt9611_read_device_rev(struct lt9611 *lt9611)
return ret;
}
-static int lt9611_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
-{
- struct lt9611 *lt9611 = data;
-
- if (hparms->sample_rate == 48000)
- regmap_write(lt9611->regmap, 0x840f, 0x2b);
- else if (hparms->sample_rate == 96000)
- regmap_write(lt9611->regmap, 0x840f, 0xab);
- else
- return -EINVAL;
-
- regmap_write(lt9611->regmap, 0x8435, 0x00);
- regmap_write(lt9611->regmap, 0x8436, 0x18);
- regmap_write(lt9611->regmap, 0x8437, 0x00);
-
- return 0;
-}
-
-static int lt9611_audio_startup(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x82d6, 0x8c);
- regmap_write(lt9611->regmap, 0x82d7, 0x04);
-
- regmap_write(lt9611->regmap, 0x8406, 0x08);
- regmap_write(lt9611->regmap, 0x8407, 0x10);
-
- regmap_write(lt9611->regmap, 0x8434, 0xd5);
-
- return 0;
-}
-
-static void lt9611_audio_shutdown(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x8406, 0x00);
- regmap_write(lt9611->regmap, 0x8407, 0x00);
-}
-
-static int lt9611_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops lt9611_codec_ops = {
- .hw_params = lt9611_hdmi_hw_params,
- .audio_shutdown = lt9611_audio_shutdown,
- .audio_startup = lt9611_audio_startup,
- .get_dai_id = lt9611_hdmi_i2s_get_dai_id,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &lt9611_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int lt9611_audio_init(struct device *dev, struct lt9611 *lt9611)
-{
- codec_data.data = lt9611;
- lt9611->audio_pdev =
- platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(lt9611->audio_pdev);
-}
-
-static void lt9611_audio_exit(struct lt9611 *lt9611)
-{
- if (lt9611->audio_pdev) {
- platform_device_unregister(lt9611->audio_pdev);
- lt9611->audio_pdev = NULL;
- }
-}
-
static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
@@ -1173,6 +1127,9 @@ static int lt9611_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt9611);
+ /* Disable Audio InfoFrame, enabled by default */
+ regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
+
lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
@@ -1181,6 +1138,9 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
+ lt9611->bridge.hdmi_audio_dev = dev;
+ lt9611->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ lt9611->bridge.hdmi_audio_dai_port = 2;
drm_bridge_add(&lt9611->bridge);
@@ -1202,10 +1162,6 @@ static int lt9611_probe(struct i2c_client *client)
lt9611_enable_hpd_interrupts(lt9611);
- ret = lt9611_audio_init(dev, lt9611);
- if (ret)
- goto err_remove_bridge;
-
return 0;
err_remove_bridge:
@@ -1226,7 +1182,6 @@ static void lt9611_remove(struct i2c_client *client)
struct lt9611 *lt9611 = i2c_get_clientdata(client);
disable_irq(client->irq);
- lt9611_audio_exit(lt9611);
drm_bridge_remove(&lt9611->bridge);
regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
@@ -1235,8 +1190,8 @@ static void lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi0_node);
}
-static struct i2c_device_id lt9611_id[] = {
- { "lontium,lt9611", 0 },
+static const struct i2c_device_id lt9611_id[] = {
+ { "lontium,lt9611" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);
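The lt9611 conversion above deletes the hand-rolled hdmi-codec platform device: the audio callbacks move into drm_bridge_funcs as hdmi_audio_* ops, and the bridge describes its audio endpoint through fields on struct drm_bridge, so the DRM HDMI connector framework registers the codec and keeps the audio infoframe in sync. The registration shape, as a sketch:

static const struct drm_bridge_funcs ex_bridge_funcs = {
	/* ...display ops elided... */
	.hdmi_audio_startup = ex_hdmi_audio_startup,
	.hdmi_audio_prepare = ex_hdmi_audio_prepare,
	.hdmi_audio_shutdown = ex_hdmi_audio_shutdown,
};

	/* at probe time: */
	bridge->hdmi_audio_dev = dev;
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_dai_port = 2;	/* OF graph sound port */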
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4d1d40e1f1b4..f4c3ff1fdc69 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -522,7 +522,8 @@ static void lt9611uxc_audio_shutdown(struct device *dev, void *data)
}
static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -913,8 +914,8 @@ static void lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi0_node);
}
-static struct i2c_device_id lt9611uxc_id[] = {
- { "lontium,lt9611uxc", 0 },
+static const struct i2c_device_id lt9611uxc_id[] = {
+ { "lontium,lt9611uxc" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 37f1acf5c0f8..a3dcee62e7a5 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -318,8 +318,8 @@ static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
- {"stdp4028_ge_fw", 0},
- {},
+ { "stdp4028_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp4028_ge_b850v3_fw_i2c_table);
@@ -365,8 +365,8 @@ static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
- {"stdp2690_ge_fw", 0},
- {},
+ { "stdp2690_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp2690_ge_b850v3_fw_i2c_table);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index e77aab965fcf..44e36ae66db4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -319,8 +319,8 @@ static void ptn3460_remove(struct i2c_client *client)
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
- {"ptn3460", 0},
- {},
+ { "ptn3460" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 9be9cc5b9025..bf2d1632b020 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -815,7 +815,8 @@ static int sii902x_audio_get_eld(struct device *dev, void *data,
}
static int sii902x_audio_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -840,7 +841,6 @@ static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
.mute_stream = sii902x_audio_mute,
.get_eld = sii902x_audio_get_eld,
.get_dai_id = sii902x_audio_get_dai_id,
- .no_capture_mute = 1,
};
static int sii902x_audio_codec_init(struct sii902x *sii902x,
@@ -863,11 +863,12 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
.i2s = 1, /* Only i2s support for now. */
.spdif = 0,
.max_i2s_channels = 0,
+ .no_capture_mute = 1,
};
u8 lanes[4];
int num_lanes, i;
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_dbg(dev, "%s: No \"#sound-dai-cells\", no audio\n",
__func__);
return 0;
@@ -1239,8 +1240,8 @@ static const struct of_device_id sii902x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
static const struct i2c_device_id sii902x_i2c_ids[] = {
- { "sii9022", 0 },
- { },
+ { "sii9022" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 0c74cdc07032..cd7837c9a6e0 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -945,8 +945,8 @@ static const struct of_device_id sii9234_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii9234_dt_match);
static const struct i2c_device_id sii9234_id[] = {
- { "SII9234", 0 },
- { },
+ { "SII9234" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 26b8d137bce0..28a2e1ee04b2 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2368,8 +2368,8 @@ static const struct of_device_id sii8620_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii8620_dt_match);
static const struct i2c_device_id sii8620_id[] = {
- { "sii8620", 0 },
- { },
+ { "sii8620" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii8620_id);
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index ca416dab156d..f3ab2f985f8c 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -59,3 +59,9 @@ config DRM_DW_MIPI_DSI
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
+
+config DRM_DW_MIPI_DSI2
+ tristate
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9869d9651ed1..9dc376d220ad 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o
obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
+obj-$(CONFIG_DRM_DW_MIPI_DSI2) += dw-mipi-dsi2.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f1c5a8d0fa90..2c903c9fe805 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -148,7 +148,8 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
}
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 181c5164b231..b281cabfe992 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -361,22 +361,6 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
return 0;
}
-static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dw_hdmi_qp *hdmi = bridge->driver_private;
- int ret;
-
- ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector,
- conn_state->state);
- if (ret)
- dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret);
-
- return ret;
-}
-
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
@@ -442,16 +426,14 @@ dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge,
}
static enum drm_mode_status
-dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+dw_hdmi_qp_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long rate)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
- unsigned long long rate;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
if (rate > HDMI14_MAX_TMDSCLK) {
- dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock);
+ dev_dbg(hdmi->dev, "Unsupported TMDS char rate: %llu\n", rate);
return MODE_CLOCK_HIGH;
}
@@ -505,12 +487,11 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_check = dw_hdmi_qp_bridge_atomic_check,
.atomic_enable = dw_hdmi_qp_bridge_atomic_enable,
.atomic_disable = dw_hdmi_qp_bridge_atomic_disable,
.detect = dw_hdmi_qp_bridge_detect,
.edid_read = dw_hdmi_qp_bridge_edid_read,
- .mode_valid = dw_hdmi_qp_bridge_mode_valid,
+ .hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
};
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
index 2115b8ef0bd6..72987e6c4689 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Algea Cao <algea.cao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
new file mode 100644
index 000000000000..d7569bf2d9c3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Modified by Heiko Stuebner <heiko.stuebner@cherry.de>
+ * This generic Synopsys DesignWare MIPI DSI2 host driver is based on the
+ * Rockchip version from rockchip/dw-mipi-dsi2.c converted to use bridge APIs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#define DSI2_PWR_UP 0x000c
+#define RESET 0
+#define POWER_UP BIT(0)
+#define CMD_TX_MODE(x) FIELD_PREP(BIT(24), x)
+#define DSI2_SOFT_RESET 0x0010
+#define SYS_RSTN BIT(2)
+#define PHY_RSTN BIT(1)
+#define IPI_RSTN BIT(0)
+#define INT_ST_MAIN 0x0014
+#define DSI2_MODE_CTRL 0x0018
+#define DSI2_MODE_STATUS 0x001c
+#define DSI2_CORE_STATUS 0x0020
+#define PRI_RD_DATA_AVAIL BIT(26)
+#define PRI_FIFOS_NOT_EMPTY BIT(25)
+#define PRI_BUSY BIT(24)
+#define CRI_RD_DATA_AVAIL BIT(18)
+#define CRT_FIFOS_NOT_EMPTY BIT(17)
+#define CRI_BUSY BIT(16)
+#define IPI_FIFOS_NOT_EMPTY BIT(9)
+#define IPI_BUSY BIT(8)
+#define CORE_FIFOS_NOT_EMPTY BIT(1)
+#define CORE_BUSY BIT(0)
+#define MANUAL_MODE_CFG 0x0024
+#define MANUAL_MODE_EN BIT(0)
+#define DSI2_TIMEOUT_HSTX_CFG 0x0048
+#define TO_HSTX(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_HSTXRDY_CFG 0x004c
+#define TO_HSTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPRX_CFG 0x0050
+#define TO_LPRXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXRDY_CFG 0x0054
+#define TO_LPTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXTRIG_CFG 0x0058
+#define TO_LPTXTRIG(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXULPS_CFG 0x005c
+#define TO_LPTXULPS(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_BTA_CFG 0x0060
+#define TO_BTA(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_PHY_MODE_CFG 0x0100
+#define PPI_WIDTH(x) FIELD_PREP(GENMASK(9, 8), x)
+#define PHY_LANES(x) FIELD_PREP(GENMASK(5, 4), (x) - 1)
+#define PHY_TYPE(x) FIELD_PREP(BIT(0), x)
+#define DSI2_PHY_CLK_CFG 0x0104
+#define PHY_LPTX_CLK_DIV(x) FIELD_PREP(GENMASK(12, 8), x)
+#define CLK_TYPE_MASK BIT(0)
+#define NON_CONTINUOUS_CLK BIT(0)
+#define CONTINUOUS_CLK 0
+#define DSI2_PHY_LP2HS_MAN_CFG 0x010c
+#define PHY_LP2HS_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_HS2LP_MAN_CFG 0x0114
+#define PHY_HS2LP_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_MAX_RD_T_MAN_CFG 0x011c
+#define PHY_MAX_RD_TIME(x) FIELD_PREP(GENMASK(26, 0), x)
+#define DSI2_PHY_ESC_CMD_T_MAN_CFG 0x0124
+#define PHY_ESC_CMD_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_ESC_BYTE_T_MAN_CFG 0x012c
+#define PHY_ESC_BYTE_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+
+#define DSI2_PHY_IPI_RATIO_MAN_CFG 0x0134
+#define PHY_IPI_RATIO(x) FIELD_PREP(GENMASK(21, 0), x)
+#define DSI2_PHY_SYS_RATIO_MAN_CFG 0x013c
+#define PHY_SYS_RATIO(x) FIELD_PREP(GENMASK(16, 0), x)
+
+#define DSI2_DSI_GENERAL_CFG 0x0200
+#define BTA_EN BIT(1)
+#define EOTP_TX_EN BIT(0)
+#define DSI2_DSI_VCID_CFG 0x0204
+#define TX_VCID(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_DSI_SCRAMBLING_CFG 0x0208
+#define SCRAMBLING_SEED(x) FIELD_PREP(GENMASK(31, 16), x)
+#define SCRAMBLING_EN BIT(0)
+#define DSI2_DSI_VID_TX_CFG 0x020c
+#define LPDT_DISPLAY_CMD_EN BIT(20)
+#define BLK_VFP_HS_EN BIT(14)
+#define BLK_VBP_HS_EN BIT(13)
+#define BLK_VSA_HS_EN BIT(12)
+#define BLK_HFP_HS_EN BIT(6)
+#define BLK_HBP_HS_EN BIT(5)
+#define BLK_HSA_HS_EN BIT(4)
+#define VID_MODE_TYPE(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_CRI_TX_HDR 0x02c0
+#define CMD_TX_MODE(x) FIELD_PREP(BIT(24), x)
+#define DSI2_CRI_TX_PLD 0x02c4
+#define DSI2_CRI_RX_HDR 0x02c8
+#define DSI2_CRI_RX_PLD 0x02cc
+
+#define DSI2_IPI_COLOR_MAN_CFG 0x0300
+#define IPI_DEPTH(x) FIELD_PREP(GENMASK(7, 4), x)
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+#define IPI_FORMAT(x) FIELD_PREP(GENMASK(3, 0), x)
+#define IPI_FORMAT_RGB 0x0
+#define IPI_FORMAT_DSC 0x0b
+#define DSI2_IPI_VID_HSA_MAN_CFG 0x0304
+#define VID_HSA_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HBP_MAN_CFG 0x030c
+#define VID_HBP_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HACT_MAN_CFG 0x0314
+#define VID_HACT_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HLINE_MAN_CFG 0x031c
+#define VID_HLINE_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_VSA_MAN_CFG 0x0324
+#define VID_VSA_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VBP_MAN_CFG 0x032c
+#define VID_VBP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VACT_MAN_CFG 0x0334
+#define VID_VACT_LINES(x) FIELD_PREP(GENMASK(13, 0), x)
+#define DSI2_IPI_VID_VFP_MAN_CFG 0x033c
+#define VID_VFP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_PIX_PKT_CFG 0x0344
+#define MAX_PIX_PKT(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_INT_ST_PHY 0x0400
+#define DSI2_INT_MASK_PHY 0x0404
+#define DSI2_INT_ST_TO 0x0410
+#define DSI2_INT_MASK_TO 0x0414
+#define DSI2_INT_ST_ACK 0x0420
+#define DSI2_INT_MASK_ACK 0x0424
+#define DSI2_INT_ST_IPI 0x0430
+#define DSI2_INT_MASK_IPI 0x0434
+#define DSI2_INT_ST_FIFO 0x0440
+#define DSI2_INT_MASK_FIFO 0x0444
+#define DSI2_INT_ST_PRI 0x0450
+#define DSI2_INT_MASK_PRI 0x0454
+#define DSI2_INT_ST_CRI 0x0460
+#define DSI2_INT_MASK_CRI 0x0464
+#define DSI2_INT_FORCE_CRI 0x0468
+#define DSI2_MAX_REGISTER DSI2_INT_FORCE_CRI
+
+#define MODE_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+enum vid_mode_type {
+ VID_MODE_TYPE_NON_BURST_SYNC_PULSES,
+ VID_MODE_TYPE_NON_BURST_SYNC_EVENTS,
+ VID_MODE_TYPE_BURST,
+};
+
+enum mode_ctrl {
+ IDLE_MODE,
+ AUTOCALC_MODE,
+ COMMAND_MODE,
+ VIDEO_MODE,
+ DATA_STREAM_MODE,
+ VIDEO_TEST_MODE,
+ DATA_STREAM_TEST_MODE,
+};
+
+enum ppi_width {
+ PPI_WIDTH_8_BITS,
+ PPI_WIDTH_16_BITS,
+ PPI_WIDTH_32_BITS,
+};
+
+struct cmd_header {
+ u8 cmd_type;
+ u8 delay;
+ u8 payload_length;
+};
+
+struct dw_mipi_dsi2 {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *pclk;
+ struct clk *sys_clk;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 channel;
+ u32 lanes;
+ u32 format;
+ unsigned long mode_flags;
+
+ struct drm_display_mode mode;
+ const struct dw_mipi_dsi2_plat_data *plat_data;
+};
+
+static inline struct dw_mipi_dsi2 *host_to_dsi2(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dw_mipi_dsi2, dsi_host);
+}
+
+static inline struct dw_mipi_dsi2 *bridge_to_dsi2(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_mipi_dsi2, bridge);
+}
+
+static int cri_fifos_wait_avail(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sts, mask;
+ int ret;
+
+ mask = CRI_BUSY | CRT_FIFOS_NOT_EMPTY;
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS, sts,
+ !(sts & mask), 0, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi2->dev, "command interface is busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_set_vid_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val = 0, mode;
+ int ret;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ val |= BLK_HFP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ val |= BLK_HBP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ val |= BLK_HSA_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= VID_MODE_TYPE_BURST;
+ else if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
+ else
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_VID_TX_CFG, val);
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, VIDEO_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & VIDEO_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter video mode\n");
+}
+
+static void dw_mipi_dsi2_set_data_stream_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, DATA_STREAM_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & DATA_STREAM_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter data stream mode\n");
+}
+
+static void dw_mipi_dsi2_set_cmd_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, COMMAND_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & COMMAND_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter data stream mode\n");
+}
+
+static void dw_mipi_dsi2_host_softrst(struct dw_mipi_dsi2 *dsi2)
+{
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET, 0x0);
+ usleep_range(50, 100);
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET,
+ SYS_RSTN | PHY_RSTN | IPI_RSTN);
+}
+
+static void dw_mipi_dsi2_phy_clk_mode_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sys_clk, esc_clk_div;
+ u32 val = 0;
+
+ /*
+ * clk_type should be set to NON_CONTINUOUS_CLK before the
+ * initial deskew calibration is sent.
+ */
+ val |= NON_CONTINUOUS_CLK;
+
+ /* The maximum escape clock frequency is 20 MHz */
+ sys_clk = clk_get_rate(dsi2->sys_clk) / USEC_PER_SEC;
+ esc_clk_div = DIV_ROUND_UP(sys_clk, 20 * 2);
+ val |= PHY_LPTX_CLK_DIV(esc_clk_div);
+
+ regmap_write(dsi2->regmap, DSI2_PHY_CLK_CFG, val);
+}
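
The divider above yields an escape clock of sys_clk / (2 * esc_clk_div), which must stay at or below the 20 MHz cap. A minimal standalone sketch of the same arithmetic, assuming a hypothetical 351 MHz sys_clk (not a rate taken from this driver):

#include <stdio.h>

/* same rounding helper as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int sys_clk_mhz = 351; /* hypothetical sys_clk in MHz */
        unsigned int esc_clk_div = DIV_ROUND_UP(sys_clk_mhz, 20 * 2);

        /* the resulting escape clock must stay at or below 20 MHz */
        printf("div=%u esc_clk=%.2f MHz\n", esc_clk_div,
               sys_clk_mhz / (2.0 * esc_clk_div));
        return 0;
}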
+
+static void dw_mipi_dsi2_phy_ratio_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u64 sys_clk = clk_get_rate(dsi2->sys_clk);
+ u64 pixel_clk, ipi_clk, phy_hsclk;
+ u64 tmp;
+
+ /*
+ * In D-PHY mode, phy_hstx_clk is exactly 1/16 of the lane high-speed
+ * data rate; in C-PHY mode, it is exactly 1/7 of the trio high-speed
+ * symbol rate.
+ */
+ phy_hsclk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ /* IPI_RATIO_MAN_CFG = PHY_HSTX_CLK / IPI_CLK */
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+ ipi_clk = pixel_clk / 4;
+
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, ipi_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_IPI_RATIO_MAN_CFG,
+ PHY_IPI_RATIO(tmp));
+
+ /*
+ * SYS_RATIO_MAN_CFG = MIPI_DCPHY_HSCLK_Freq / SYS_CLK_Freq
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, sys_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_SYS_RATIO_MAN_CFG,
+ PHY_SYS_RATIO(tmp));
+}
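
Both ratio registers hold 16.16 fixed-point values, hence the << 16 before the division. A self-contained sketch of the IPI ratio computation, assuming an invented 1000 Mbps lane rate and 148.5 MHz pixel clock (illustrative numbers only, not taken from this driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lane_mbps = 1000;                      /* hypothetical */
        uint64_t phy_hsclk = lane_mbps * 1000000 / 16;  /* D-PHY: 1/16 data rate */
        uint64_t pixel_clk = 148500000;                 /* hypothetical 148.5 MHz */
        uint64_t ipi_clk = pixel_clk / 4;
        /* rounded 16.16 fixed-point division, like DIV_ROUND_CLOSEST_ULL() */
        uint64_t ratio = ((phy_hsclk << 16) + ipi_clk / 2) / ipi_clk;

        printf("PHY_IPI_RATIO=0x%llx (%.4f)\n",
               (unsigned long long)ratio, ratio / 65536.0);
        return 0;
}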
+
+static void dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_timing timing;
+ int ret;
+
+ ret = phy_ops->get_timing(dsi2->plat_data->priv_data,
+ dsi2->lane_mbps, &timing);
+ if (ret)
+ dev_err(dsi2->dev, "Retrieving phy timings failed\n");
+
+ regmap_write(dsi2->regmap, DSI2_PHY_LP2HS_MAN_CFG, PHY_LP2HS_TIME(timing.data_lp2hs));
+ regmap_write(dsi2->regmap, DSI2_PHY_HS2LP_MAN_CFG, PHY_HS2LP_TIME(timing.data_hs2lp));
+}
+
+static void dw_mipi_dsi2_phy_init(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_iface iface;
+ u32 val = 0;
+
+ phy_ops->get_interface(dsi2->plat_data->priv_data, &iface);
+
+ switch (iface.ppi_width) {
+ case 8:
+ val |= PPI_WIDTH(PPI_WIDTH_8_BITS);
+ break;
+ case 16:
+ val |= PPI_WIDTH(PPI_WIDTH_16_BITS);
+ break;
+ case 32:
+ val |= PPI_WIDTH(PPI_WIDTH_32_BITS);
+ break;
+ default:
+ /* Caught in probe */
+ break;
+ }
+
+ val |= PHY_LANES(dsi2->lanes);
+ val |= PHY_TYPE(DW_MIPI_DSI2_DPHY);
+ regmap_write(dsi2->regmap, DSI2_PHY_MODE_CFG, val);
+
+ dw_mipi_dsi2_phy_clk_mode_cfg(dsi2);
+ dw_mipi_dsi2_phy_ratio_cfg(dsi2);
+ dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(dsi2);
+
+ /* phy configuration 8 - 10 */
+}
+
+static void dw_mipi_dsi2_tx_option_set(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val;
+
+ val = BTA_EN | EOTP_TX_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_GENERAL_CFG, val);
+ regmap_write(dsi2->regmap, DSI2_DSI_VCID_CFG, TX_VCID(dsi2->channel));
+}
+
+static void dw_mipi_dsi2_ipi_color_coding_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val, color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ }
+
+ val = IPI_DEPTH(color_depth) |
+ IPI_FORMAT(IPI_FORMAT_RGB);
+ regmap_write(dsi2->regmap, DSI2_IPI_COLOR_MAN_CFG, val);
+}
+
+static void dw_mipi_dsi2_vertical_timing_config(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *mode)
+{
+ u32 vactive, vsa, vfp, vbp;
+
+ vactive = mode->vdisplay;
+ vsa = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VSA_MAN_CFG, VID_VSA_LINES(vsa));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VBP_MAN_CFG, VID_VBP_LINES(vbp));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VACT_MAN_CFG, VID_VACT_LINES(vactive));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VFP_MAN_CFG, VID_VFP_LINES(vfp));
+}
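
The porch arithmetic follows directly from the DRM mode layout, where vdisplay <= vsync_start <= vsync_end <= vtotal. A quick standalone check using the well-known CEA 1920x1080@60 vertical timings (standard published numbers, not values read from hardware):

#include <stdio.h>

int main(void)
{
        /* CEA-861 1080p60 vertical timings */
        int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

        printf("vsa=%d vfp=%d vbp=%d\n",
               vsync_end - vsync_start, /* 5 sync lines */
               vsync_start - vdisplay,  /* 4 front-porch lines */
               vtotal - vsync_end);     /* 36 back-porch lines */
        return 0;
}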
+
+static void dw_mipi_dsi2_ipi_set(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u32 hline, hsa, hbp, hact;
+ u64 hline_time, hsa_time, hbp_time, hact_time, tmp;
+ u64 pixel_clk, phy_hs_clk;
+ u16 val;
+
+ val = mode->hdisplay;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, MAX_PIX_PKT(val));
+
+ dw_mipi_dsi2_ipi_color_coding_cfg(dsi2);
+
+ /*
+ * if the controller is intended to operate in data stream mode,
+ * no more steps are required.
+ */
+ if (!(dsi2->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ hact = mode->hdisplay;
+ hsa = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+ hline = mode->htotal;
+
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+
+ phy_hs_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ tmp = hsa * phy_hs_clk;
+ hsa_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HSA_MAN_CFG, VID_HSA_TIME(hsa_time));
+
+ tmp = hbp * phy_hs_clk;
+ hbp_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HBP_MAN_CFG, VID_HBP_TIME(hbp_time));
+
+ tmp = hact * phy_hs_clk;
+ hact_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HACT_MAN_CFG, VID_HACT_TIME(hact_time));
+
+ tmp = hline * phy_hs_clk;
+ hline_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HLINE_MAN_CFG, VID_HLINE_TIME(hline_time));
+
+ dw_mipi_dsi2_vertical_timing_config(dsi2, mode);
+}
+
+static void
+dw_mipi_dsi2_work_mode(struct dw_mipi_dsi2 *dsi2, u32 mode)
+{
+ /*
+ * Select whether the controller works in manual mode
+ * (MANUAL_MODE_EN) or automatic mode (0).
+ */
+ regmap_write(dsi2->regmap, MANUAL_MODE_CFG, mode);
+}
+
+static int dw_mipi_dsi2_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ struct drm_bridge *bridge;
+ int ret;
+
+ if (device->lanes > dsi2->plat_data->max_data_lanes) {
+ dev_err(dsi2->dev, "the number of data lanes(%u) is too many\n",
+ device->lanes);
+ return -EINVAL;
+ }
+
+ dsi2->lanes = device->lanes;
+ dsi2->channel = device->channel;
+ dsi2->format = device->format;
+ dsi2->mode_flags = device->mode_flags;
+
+ bridge = devm_drm_of_get_bridge(dsi2->dev, dsi2->dev->of_node, 1, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ bridge->pre_enable_prev_first = true;
+ dsi2->panel_bridge = bridge;
+
+ drm_bridge_add(&dsi2->bridge);
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ int ret;
+
+ if (pdata->host_ops && pdata->host_ops->detach) {
+ ret = pdata->host_ops->detach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_bridge_remove(&dsi2->bridge);
+
+ drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_gen_pkt_hdr_write(struct dw_mipi_dsi2 *dsi2,
+ u32 hdr_val, bool lpm)
+{
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_HDR, hdr_val | CMD_TX_MODE(lpm));
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to write command header\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_write(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_packet *packet, bool lpm)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32);
+ __le32 word;
+
+ /* Send payload */
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return dw_mipi_dsi2_gen_pkt_hdr_write(dsi2, le32_to_cpu(word), lpm);
+}
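
The CRI payload FIFO takes 32-bit words with the first payload byte in the lowest byte; a short tail is zero-padded, which is what the memcpy() into a zeroed word achieves. A small host-side sketch of the same packing for an arbitrary 6-byte payload (the driver itself goes through __le32/le32_to_cpu() so the register value is byte-order independent; this sketch assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint8_t payload[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
        size_t len = sizeof(payload), off = 0;

        while (off < len) {
                uint32_t word = 0;
                size_t n = len - off < 4 ? len - off : 4;

                memcpy(&word, payload + off, n);        /* zero-padded tail */
                /* prints 0x44332211 then 0x00006655 on a LE host */
                printf("fifo word: 0x%08x\n", (unsigned int)word);
                off += n;
        }
        return 0;
}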
+
+static int dw_mipi_dsi2_read(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *payload = msg->rx_buf;
+ int i, j, ret, len = msg->rx_len;
+ u8 data_type;
+ u16 wc;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS,
+ val, val & CRI_RD_DATA_AVAIL,
+ 100, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(dsi2->dev, "CRI has no available read data\n");
+ return ret;
+ }
+
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_HDR, &val);
+ data_type = val & 0x3f;
+
+ if (mipi_dsi_packet_format_is_short(data_type)) {
+ for (i = 0; i < len && i < 2; i++)
+ payload[i] = (val >> (8 * (i + 1))) & 0xff;
+
+ return 0;
+ }
+
+ wc = (val >> 8) & 0xffff;
+ /* Receive payload */
+ for (i = 0; i < len && i < wc; i += 4) {
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_PLD, &val);
+ for (j = 0; j < 4 && j + i < len && j + i < wc; j++)
+ payload[i + j] = val >> (8 * j);
+ }
+
+ return 0;
+}
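
The read path relies on the standard DSI packet header layout: the data type sits in bits [5:0] and, for long packets, the word count in bits [23:8]. A standalone decode of a made-up header word (0x1c is the DCS long read response type; the word count of 4 is invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hdr = 0x0000041c;      /* hypothetical CRI RX header */
        uint8_t data_type = hdr & 0x3f;
        uint16_t wc = (hdr >> 8) & 0xffff;

        printf("data_type=0x%02x word_count=%u\n", data_type, wc);
        return 0;
}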
+
+static ssize_t dw_mipi_dsi2_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ regmap_update_bits(dsi2->regmap, DSI2_DSI_VID_TX_CFG,
+ LPDT_DISPLAY_CMD_EN,
+ lpm ? LPDT_DISPLAY_CMD_EN : 0);
+
+ /* create a packet following the DSI protocol */
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret)
+ return ret;
+
+ ret = dw_mipi_dsi2_write(dsi2, &packet, lpm);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = dw_mipi_dsi2_read(dsi2, msg);
+ if (ret < 0)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static const struct mipi_dsi_host_ops dw_mipi_dsi2_host_ops = {
+ .attach = dw_mipi_dsi2_host_attach,
+ .detach = dw_mipi_dsi2_host_detach,
+ .transfer = dw_mipi_dsi2_host_transfer,
+};
+
+static u32 *
+dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ u32 *input_fmts;
+
+ if (pdata->get_input_bus_fmts)
+ return pdata->get_input_bus_fmts(pdata->priv_data,
+ bridge, bridge_state,
+ crtc_state, conn_state,
+ output_fmt, num_input_fmts);
+
+ /* Fall back to MEDIA_BUS_FMT_FIXED as the only input format. */
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ input_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ bool ret;
+
+ bridge_state->input_bus_cfg.flags =
+ DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ if (pdata->mode_fixup) {
+ ret = pdata->mode_fixup(pdata->priv_data, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_DRIVER("failed to fixup mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->mode));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, 0);
+
+ /*
+ * Switch to command mode before panel-bridge post_disable &
+ * panel unprepare.
+ * Note: panel-bridge disable & panel disable have already been
+ * called by the drm framework.
+ */
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi2->plat_data->priv_data);
+
+ clk_disable_unprepare(dsi2->sys_clk);
+ clk_disable_unprepare(dsi2->pclk);
+ pm_runtime_put(dsi2->dev);
+}
+
+static unsigned int dw_mipi_dsi2_get_lanes(struct dw_mipi_dsi2 *dsi2)
+{
+ /* single-dsi, so no other instance to consider */
+ return dsi2->lanes;
+}
+
+static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ void *priv_data = dsi2->plat_data->priv_data;
+ u32 lanes = dw_mipi_dsi2_get_lanes(dsi2);
+ int ret;
+
+ clk_prepare_enable(dsi2->pclk);
+ clk_prepare_enable(dsi2->sys_clk);
+
+ ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi2->mode_flags,
+ lanes, dsi2->format, &dsi2->lane_mbps);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
+
+ pm_runtime_get_sync(dsi2->dev);
+
+ dw_mipi_dsi2_host_softrst(dsi2);
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ dw_mipi_dsi2_work_mode(dsi2, MANUAL_MODE_EN);
+ dw_mipi_dsi2_phy_init(dsi2);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi2->plat_data->priv_data);
+
+ dw_mipi_dsi2_tx_option_set(dsi2);
+
+ /*
+ * The initial deskew calibration is sent after phy_power_on;
+ * only then can clk_type be configured.
+ */
+
+ regmap_update_bits(dsi2->regmap, DSI2_PHY_CLK_CFG, CLK_TYPE_MASK,
+ dsi2->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS ? NON_CONTINUOUS_CLK :
+ CONTINUOUS_CLK);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, POWER_UP);
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ dw_mipi_dsi2_ipi_set(dsi2);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Power up the DSI controller into command mode */
+ dw_mipi_dsi2_mode_set(dsi2, &dsi2->mode);
+}
+
+static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Store the display mode for later use in pre_enable callback */
+ drm_mode_copy(&dsi2->mode, adjusted_mode);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Switch to video mode for panel-bridge enable & panel enable */
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO)
+ dw_mipi_dsi2_set_vid_mode(dsi2);
+ else
+ dw_mipi_dsi2_set_data_stream_mode(dsi2);
+}
+
+static enum drm_mode_status
+dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ enum drm_mode_status mode_status = MODE_OK;
+
+ if (pdata->mode_valid)
+ mode_status = pdata->mode_valid(pdata->priv_data, mode,
+ dsi2->mode_flags,
+ dw_mipi_dsi2_get_lanes(dsi2),
+ dsi2->format);
+
+ return mode_status;
+}
+
+static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Set the encoder type as the caller does not know it */
+ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs dw_mipi_dsi2_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts,
+ .atomic_check = dw_mipi_dsi2_bridge_atomic_check,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = dw_mipi_dsi2_bridge_atomic_pre_enable,
+ .atomic_enable = dw_mipi_dsi2_bridge_atomic_enable,
+ .atomic_post_disable = dw_mipi_dsi2_bridge_post_atomic_disable,
+ .mode_set = dw_mipi_dsi2_bridge_mode_set,
+ .mode_valid = dw_mipi_dsi2_bridge_mode_valid,
+ .attach = dw_mipi_dsi2_bridge_attach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static struct dw_mipi_dsi2 *
+__dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *apb_rst;
+ struct dw_mipi_dsi2 *dsi2;
+ int ret;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return ERR_PTR(-ENOMEM);
+
+ dsi2->dev = dev;
+ dsi2->plat_data = plat_data;
+
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+ !plat_data->phy_ops->get_timing)
+ return dev_err_ptr_probe(dev, -ENODEV, "Phy not properly configured\n");
+
+ if (!plat_data->regmap) {
+ void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
+
+ if (IS_ERR(base))
+ return dev_err_cast_probe(dev, base, "failed to registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base,
+ &dw_mipi_dsi2_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_cast_probe(dev, dsi2->regmap, "failed to init regmap\n");
+ } else {
+ dsi2->regmap = plat_data->regmap;
+ }
+
+ dsi2->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dsi2->pclk))
+ return dev_err_cast_probe(dev, dsi2->pclk, "Unable to get pclk\n");
+
+ dsi2->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(dsi2->sys_clk))
+ return dev_err_cast_probe(dev, dsi2->sys_clk, "Unable to get sys_clk\n");
+
+ /*
+ * Note that the reset was not defined in the initial device tree, so
+ * we have to be prepared for it not being found.
+ */
+ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
+ if (IS_ERR(apb_rst))
+ return dev_err_cast_probe(dev, apb_rst, "Unable to get reset control\n");
+
+ if (apb_rst) {
+ ret = clk_prepare_enable(dsi2->pclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ reset_control_assert(apb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(apb_rst);
+
+ clk_disable_unprepare(dsi2->pclk);
+ }
+
+ devm_pm_runtime_enable(dev);
+
+ dsi2->dsi_host.ops = &dw_mipi_dsi2_host_ops;
+ dsi2->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi2->dsi_host);
+ if (ret) {
+ dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
+ return ERR_PTR(ret);
+ }
+
+ dsi2->bridge.driver_private = dsi2;
+ dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
+ dsi2->bridge.of_node = pdev->dev.of_node;
+
+ return dsi2;
+}
+
+static void __dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ mipi_dsi_host_unregister(&dsi2->dsi_host);
+}
+
+/*
+ * Probe/remove API, used to create the bridge instance.
+ */
+struct dw_mipi_dsi2 *
+dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ return __dw_mipi_dsi2_probe(pdev, plat_data);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_probe);
+
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ __dw_mipi_dsi2_remove(dsi2);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_remove);
+
+/*
+ * Bind/unbind API, used from platforms based on the component framework
+ * to attach the bridge to an encoder.
+ */
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder)
+{
+ return drm_bridge_attach(encoder, &dsi2->bridge, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_bind);
+
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2)
+{
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_unbind);
+
+MODULE_AUTHOR("Guochun Huang <hero.huang@rock-chips.com>");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@cherry.de>");
+MODULE_DESCRIPTION("DW MIPI DSI2 host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dw-mipi-dsi2");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 7275e66faefc..4637bf6ea7a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2587,7 +2587,7 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
- { "tc358767", 0 },
+ { "tc358767" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 2cb748bbefcd..ec79b0dd0e2c 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1244,8 +1244,8 @@ static const struct regmap_config tc358768_regmap_config = {
};
static const struct i2c_device_id tc358768_i2c_ids[] = {
- { "tc358768", 0 },
- { "tc358778", 0 },
+ { "tc358768" },
+ { "tc358778" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index a0a1b5dd794e..eaec70fa42b6 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -389,7 +389,7 @@ static void dlpc3433_remove(struct i2c_client *client)
}
static const struct i2c_device_id dlpc3433_id[] = {
- { "ti,dlpc3433", 0 },
+ { "ti,dlpc3433" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, dlpc3433_id);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 57a7ed13f996..336380114eea 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -132,6 +132,16 @@
#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
+enum sn65dsi83_channel {
+ CHANNEL_A,
+ CHANNEL_B
+};
+
+enum sn65dsi83_lvds_term {
+ OHM_100,
+ OHM_200
+};
+
enum sn65dsi83_model {
MODEL_SN65DSI83,
MODEL_SN65DSI84,
@@ -147,6 +157,8 @@ struct sn65dsi83 {
struct regulator *vcc;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
+ int lvds_vod_swing_conf[2];
+ int lvds_term_conf[2];
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -237,6 +249,36 @@ static const struct regmap_config sn65dsi83_regmap_config = {
.max_register = REG_IRQ_STAT,
};
+static const int lvds_vod_swing_data_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 180000, 313000 },
+ { 215000, 372000 },
+ { 250000, 430000 },
+ { 290000, 488000 },
+ },
+ { /* 200 Ohm */
+ { 150000, 261000 },
+ { 200000, 346000 },
+ { 250000, 428000 },
+ { 300000, 511000 },
+ },
+};
+
+static const int lvds_vod_swing_clock_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 140000, 244000 },
+ { 168000, 290000 },
+ { 195000, 335000 },
+ { 226000, 381000 },
+ },
+ { /* 200 Ohm */
+ { 117000, 204000 },
+ { 156000, 270000 },
+ { 195000, 334000 },
+ { 234000, 399000 },
+ },
+};
+
static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
{
return container_of(bridge, struct sn65dsi83, bridge);
@@ -435,12 +477,16 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
val |= REG_LVDS_FMT_LVDS_LINK_CFG;
regmap_write(ctx->regmap, REG_LVDS_FMT, val);
- regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+ regmap_write(ctx->regmap, REG_LVDS_VCOM,
+ REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_A]) |
+ REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_B]));
regmap_write(ctx->regmap, REG_LVDS_LANE,
(ctx->lvds_dual_link_even_odd_swap ?
REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
- REG_LVDS_LANE_CHA_LVDS_TERM |
- REG_LVDS_LANE_CHB_LVDS_TERM);
+ (ctx->lvds_term_conf[CHANNEL_A] ?
+ REG_LVDS_LANE_CHA_LVDS_TERM : 0) |
+ (ctx->lvds_term_conf[CHANNEL_B] ?
+ REG_LVDS_LANE_CHB_LVDS_TERM : 0));
regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
le16val = cpu_to_le16(mode->hdisplay);
@@ -576,10 +622,103 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
.atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts,
};
+static int sn65dsi83_select_lvds_vod_swing(struct device *dev,
+ u32 lvds_vod_swing_data[2], u32 lvds_vod_swing_clk[2], u8 lvds_term)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++) {
+ if (lvds_vod_swing_data_table[lvds_term][i][0] >= lvds_vod_swing_data[0] &&
+ lvds_vod_swing_data_table[lvds_term][i][1] <= lvds_vod_swing_data[1] &&
+ lvds_vod_swing_clock_table[lvds_term][i][0] >= lvds_vod_swing_clk[0] &&
+ lvds_vod_swing_clock_table[lvds_term][i][1] <= lvds_vod_swing_clk[1])
+ return i;
+ }
+
+ dev_err(dev, "failed to find appropriate LVDS_VOD_SWING configuration\n");
+ return -EINVAL;
+}
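
The helper walks the table from the lowest swing upwards and returns the first entry whose limits fit entirely inside the requested window; the real function checks the data and clock tables together. A standalone sketch using a copy of the 100 Ohm data rows and an invented DT window (both the window and the expected index are for illustration only):

#include <stdio.h>

/* copy of the 100 Ohm rows of lvds_vod_swing_data_table, in microvolts */
static const int data_100[4][2] = {
        { 180000, 313000 },
        { 215000, 372000 },
        { 250000, 430000 },
        { 290000, 488000 },
};

int main(void)
{
        int win_min = 200000, win_max = 450000; /* hypothetical DT window */
        int i;

        for (i = 0; i < 4; i++) {
                if (data_100[i][0] >= win_min && data_100[i][1] <= win_max) {
                        printf("selected index %d\n", i);       /* prints 1 */
                        return 0;
                }
        }
        printf("no match\n");
        return 1;
}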
+
+static int sn65dsi83_parse_lvds_endpoint(struct sn65dsi83 *ctx, int channel)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *endpoint;
+ int endpoint_reg;
+ /* Defaults wide enough that any entry can be selected if not defined */
+ u32 lvds_vod_swing_data[2] = { 0, 1000000 };
+ u32 lvds_vod_swing_clk[2] = { 0, 1000000 };
+ /* Set default near-end termination to 200 Ohm */
+ u32 lvds_term = 200;
+ int lvds_vod_swing_conf;
+ int ret = 0;
+ int ret_data;
+ int ret_clock;
+
+ if (channel == CHANNEL_A)
+ endpoint_reg = 2;
+ else
+ endpoint_reg = 3;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, endpoint_reg, -1);
+
+ of_property_read_u32(endpoint, "ti,lvds-termination-ohms", &lvds_term);
+ if (lvds_term == 100) {
+ ctx->lvds_term_conf[channel] = OHM_100;
+ } else if (lvds_term == 200) {
+ ctx->lvds_term_conf[channel] = OHM_200;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret_data = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-data-microvolt",
+ lvds_vod_swing_data, ARRAY_SIZE(lvds_vod_swing_data));
+ if (ret_data != 0 && ret_data != -EINVAL) {
+ ret = ret_data;
+ goto exit;
+ }
+
+ ret_clock = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-clock-microvolt",
+ lvds_vod_swing_clk, ARRAY_SIZE(lvds_vod_swing_clk));
+ if (ret_clock != 0 && ret_clock != -EINVAL) {
+ ret = ret_clock;
+ goto exit;
+ }
+
+ /* Use default value if both properties are NOT defined. */
+ if (ret_data == -EINVAL && ret_clock == -EINVAL)
+ lvds_vod_swing_conf = 0x1;
+
+ /* Use lookup table if any of the two properties is defined. */
+ if (!ret_data || !ret_clock) {
+ lvds_vod_swing_conf = sn65dsi83_select_lvds_vod_swing(dev, lvds_vod_swing_data,
+ lvds_vod_swing_clk, ctx->lvds_term_conf[channel]);
+ if (lvds_vod_swing_conf < 0) {
+ ret = lvds_vod_swing_conf;
+ goto exit;
+ }
+ }
+
+ ctx->lvds_vod_swing_conf[channel] = lvds_vod_swing_conf;
+ ret = 0;
+exit:
+ of_node_put(endpoint);
+ return ret;
+}
+
static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
+ int ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_A);
+ if (ret < 0)
+ return ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_B);
+ if (ret < 0)
+ return ret;
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -606,7 +745,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ return dev_err_probe(dev, PTR_ERR(panel_bridge), "Failed to get panel bridge\n");
ctx->panel_bridge = panel_bridge;
@@ -732,7 +871,7 @@ static void sn65dsi83_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id sn65dsi83_id[] = {
+static const struct i2c_device_id sn65dsi83_id[] = {
{ "ti,sn65dsi83", MODEL_SN65DSI83 },
{ "ti,sn65dsi84", MODEL_SN65DSI84 },
{},
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9e31f750fd88..e4d9006b59f1 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1970,9 +1970,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ti_sn65dsi86_add_aux_device(pdata, &pdata->aux_aux, "aux");
}
-static struct i2c_device_id ti_sn65dsi86_id[] = {
- { "ti,sn65dsi86", 0},
- {},
+static const struct i2c_device_id ti_sn65dsi86_id[] = {
+ { "ti,sn65dsi86" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 107a2c0b96c9..79ab5da827e1 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -435,7 +435,7 @@ static void tfp410_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
- { "tfp410", 0 },
+ { "tfp410" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
diff --git a/drivers/gpu/drm/clients/Kconfig b/drivers/gpu/drm/clients/Kconfig
new file mode 100644
index 000000000000..6096c623d9d5
--- /dev/null
+++ b/drivers/gpu/drm/clients/Kconfig
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_CLIENT_LIB
+ tristate
+ depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ help
+ This option enables the DRM client library and selects all
+ modules and components according to the enabled clients.
+
+config DRM_CLIENT_SELECTION
+ tristate
+ depends on DRM
+ select DRM_CLIENT_LIB if DRM_CLIENT_LOG
+ select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
+ help
+ Drivers that support in-kernel DRM clients have to select this
+ option.
+
+config DRM_CLIENT_SETUP
+ bool
+ depends on DRM_CLIENT_SELECTION
+ help
+ Enables the DRM client selection. DRM drivers that support the
+ default clients should select DRM_CLIENT_SELECTION instead.
+
+menu "Supported DRM clients"
+ depends on DRM_CLIENT_SELECTION
+
+config DRM_FBDEV_EMULATION
+ bool "Enable legacy fbdev support for your modesetting driver"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ default FB
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the linux console
+ support on top of your modesetting driver.
+
+ If in doubt, say "Y".
+
+config DRM_FBDEV_OVERALLOC
+ int "Overallocation of the fbdev buffer"
+ depends on DRM_FBDEV_EMULATION
+ default 100
+ help
+ Defines the fbdev buffer overallocation in percent. Default
+ is 100. Typical values for double buffering will be 200,
+ triple buffering 300.
+
+config DRM_FBDEV_LEAK_PHYS_SMEM
+ bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+ depends on DRM_FBDEV_EMULATION && EXPERT
+ default n
+ help
+ In order to keep user-space compatibility, we want in certain
+ use-cases to keep leaking the fbdev physical address to the
+ user-space program handling the fbdev buffer.
+ This affects, among others, Amlogic, Allwinner and Rockchip
+ devices with ARM Mali GPUs using a userspace blob.
+ This option is not supported by upstream developers and should be
+ removed as soon as possible; it is considered broken and legacy
+ behaviour from a modern fbdev device driver's point of view.
+
+ Please send any bug reports when using this to your proprietary
+ software vendor that requires this.
+
+ If in doubt, say "N" or spread the word to your closed source
+ library vendor.
+
+config DRM_CLIENT_LOG
+ bool "Print the kernel boot message on the screen"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select DRM_DRAW
+ select FONT_SUPPORT
+ help
+ This enables a drm logger that will print the kernel messages to
+ the screen until userspace is ready to take over.
+
+ If you only need logs, but no terminal, or if you prefer a
+ userspace terminal, say "Y".
+
+choice
+ prompt "Default DRM Client"
+ depends on DRM_CLIENT_SELECTION
+ depends on DRM_FBDEV_EMULATION || DRM_CLIENT_LOG
+ default DRM_CLIENT_DEFAULT_FBDEV
+ help
+ Selects the default drm client.
+
+ The selection made here can be overridden by using the kernel
+ command line 'drm_client_lib.active=fbdev' option.
+
+config DRM_CLIENT_DEFAULT_FBDEV
+ bool "fbdev"
+ depends on DRM_FBDEV_EMULATION
+ help
+ Use fbdev emulation as default drm client. This is needed to have
+ fbcon on top of a drm driver.
+
+config DRM_CLIENT_DEFAULT_LOG
+ bool "log"
+ depends on DRM_CLIENT_LOG
+ help
+ Use drm log as default drm client. This will display boot logs on the
+ screen, but doesn't implement a full terminal. For that you will need
+ a userspace terminal using drm/kms.
+
+endchoice
+
+config DRM_CLIENT_DEFAULT
+ string
+ depends on DRM_CLIENT
+ default "fbdev" if DRM_CLIENT_DEFAULT_FBDEV
+ default "log" if DRM_CLIENT_DEFAULT_LOG
+ default ""
+
+endmenu
diff --git a/drivers/gpu/drm/clients/Makefile b/drivers/gpu/drm/clients/Makefile
new file mode 100644
index 000000000000..c16addbc327f
--- /dev/null
+++ b/drivers/gpu/drm/clients/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+subdir-ccflags-y += -I$(src)/..
+
+drm_client_lib-y := drm_client_setup.o
+drm_client_lib-$(CONFIG_DRM_CLIENT_LOG) += drm_log.o
+drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
+obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
diff --git a/drivers/gpu/drm/clients/drm_client_internal.h b/drivers/gpu/drm/clients/drm_client_internal.h
new file mode 100644
index 000000000000..6dc078bf6503
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_client_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_CLIENT_INTERNAL_H
+#define DRM_CLIENT_INTERNAL_H
+
+struct drm_device;
+struct drm_format_info;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
+#else
+static inline int drm_fbdev_client_setup(struct drm_device *dev,
+ const struct drm_format_info *format)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+void drm_log_register(struct drm_device *dev);
+#else
+static inline void drm_log_register(struct drm_device *dev) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index c14221ca5a0d..e17265039ca8 100644
--- a/drivers/gpu/drm/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,11 +1,18 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
-#include <drm/drm_fbdev_client.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include "drm_client_internal.h"
+
+static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
+module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
+MODULE_PARM_DESC(active,
+ "Choose which drm client to start, default is"
+ CONFIG_DRM_CLIENT_DEFAULT "]");
+
/**
* drm_client_setup() - Setup in-kernel DRM clients
* @dev: DRM device
@@ -24,11 +31,26 @@
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
- int ret;
- ret = drm_fbdev_client_setup(dev, format);
- if (ret)
- drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (!strcmp(drm_client_default, "fbdev")) {
+ int ret;
+
+ ret = drm_fbdev_client_setup(dev, format);
+ if (ret)
+ drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+ if (!strcmp(drm_client_default, "log")) {
+ drm_log_register(dev);
+ return;
+ }
+#endif
+ if (strcmp(drm_client_default, ""))
+ drm_warn(dev, "Unknown DRM client %s\n", drm_client_default);
}
EXPORT_SYMBOL(drm_client_setup);
diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
index 246fb63ab250..f894ba52bdb5 100644
--- a/drivers/gpu/drm/drm_fbdev_client.c
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -3,11 +3,12 @@
#include <drm/drm_client.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include "drm_client_internal.h"
+
/*
* struct drm_client_funcs
*/
@@ -164,4 +165,3 @@ err_drm_client_init:
kfree(fb_helper);
return ret;
}
-EXPORT_SYMBOL(drm_fbdev_client_setup);
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
new file mode 100644
index 000000000000..379850c83e51
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (c) 2024 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/console.h>
+#include <linux/font.h>
+#include <linux/init.h>
+#include <linux/iosys-map.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+
+#include "drm_client_internal.h"
+#include "drm_draw_internal.h"
+#include "drm_internal.h"
+
+MODULE_AUTHOR("Jocelyn Falempe");
+MODULE_DESCRIPTION("DRM boot logger");
+MODULE_LICENSE("GPL");
+
+static unsigned int scale = 1;
+module_param(scale, uint, 0444);
+MODULE_PARM_DESC(scale, "Integer scaling factor for drm_log, default is 1");
+
+/**
+ * DOC: overview
+ *
+ * This is a simple graphical logger that prints the kernel messages on
+ * screen until a userspace application is able to take over.
+ * It is intended for debugging purposes only.
+ */
+
+struct drm_log_scanout {
+ struct drm_client_buffer *buffer;
+ const struct font_desc *font;
+ u32 rows;
+ u32 columns;
+ u32 scaled_font_h;
+ u32 scaled_font_w;
+ u32 line;
+ u32 format;
+ u32 px_width;
+ u32 front_color;
+ u32 prefix_color;
+};
+
+struct drm_log {
+ struct mutex lock;
+ struct drm_client_dev client;
+ struct console con;
+ bool probed;
+ u32 n_scanout;
+ struct drm_log_scanout *scanout;
+};
+
+static struct drm_log *client_to_drm_log(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_log, client);
+}
+
+static struct drm_log *console_to_drm_log(struct console *con)
+{
+ return container_of(con, struct drm_log, con);
+}
+
+static void drm_log_blit(struct iosys_map *dst, unsigned int dst_pitch,
+ const u8 *src, unsigned int src_pitch,
+ u32 height, u32 width, u32 px_width, u32 color)
+{
+ switch (px_width) {
+ case 2:
+ drm_draw_blit16(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 3:
+ drm_draw_blit24(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 4:
+ drm_draw_blit32(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", px_width);
+ }
+}
+
+static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ unsigned long height = scanout->scaled_font_h;
+ struct iosys_map map;
+ struct drm_rect r = DRM_RECT_INIT(0, line * height, fb->width, height);
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+ iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
+static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
+ unsigned int len, unsigned int prefix_len)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ struct iosys_map map;
+ const struct font_desc *font = scanout->font;
+ size_t font_pitch = DIV_ROUND_UP(font->width, 8);
+ const u8 *src;
+ u32 px_width = fb->format->cpp[0];
+ struct drm_rect r = DRM_RECT_INIT(0, scanout->line * scanout->scaled_font_h,
+ fb->width, (scanout->line + 1) * scanout->scaled_font_h);
+ u32 i;
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+
+ iosys_map_incr(&map, r.y1 * fb->pitches[0]);
+ for (i = 0; i < len && i < scanout->columns; i++) {
+ u32 color = (i < prefix_len) ? scanout->prefix_color : scanout->front_color;
+ src = drm_draw_get_char_bitmap(font, s[i], font_pitch);
+ drm_log_blit(&map, fb->pitches[0], src, font_pitch,
+ scanout->scaled_font_h, scanout->scaled_font_w,
+ px_width, color);
+ iosys_map_incr(&map, scanout->scaled_font_w * px_width);
+ }
+
+ scanout->line++;
+ if (scanout->line >= scanout->rows)
+ scanout->line = 0;
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
+static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len, unsigned int prefix_len)
+{
+ if (scanout->line == 0) {
+ drm_log_clear_line(scanout, 0);
+ drm_log_clear_line(scanout, 1);
+ drm_log_clear_line(scanout, 2);
+ } else if (scanout->line + 2 < scanout->rows)
+ drm_log_clear_line(scanout, scanout->line + 2);
+
+ drm_log_draw_line(scanout, s, len, prefix_len);
+}
+
+/*
+ * Depends on print_time() in printk.c.
+ * The timestamp is printed as "[%5lu.%06lu]", so the prefix, including the
+ * closing bracket, is TS_PREFIX_LEN + 1 characters long.
+ */
+#define TS_PREFIX_LEN 13
+
+static void drm_log_draw_kmsg_record(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len)
+{
+ u32 prefix_len = 0;
+
+ if (len > TS_PREFIX_LEN && s[0] == '[' && s[6] == '.' && s[TS_PREFIX_LEN] == ']')
+ prefix_len = TS_PREFIX_LEN + 1;
+
+ /* do not print the ending \n character */
+ if (s[len - 1] == '\n')
+ len--;
+
+ while (len > scanout->columns) {
+ drm_log_draw_new_line(scanout, s, scanout->columns, prefix_len);
+ s += scanout->columns;
+ len -= scanout->columns;
+ prefix_len = 0;
+ }
+ if (len)
+ drm_log_draw_new_line(scanout, s, len, prefix_len);
+}
+
+static u32 drm_log_find_usable_format(struct drm_plane *plane)
+{
+ int i;
+
+ for (i = 0; i < plane->format_count; i++)
+ if (drm_draw_color_from_xrgb8888(0xffffff, plane->format_types[i]) != 0)
+ return plane->format_types[i];
+ return DRM_FORMAT_INVALID;
+}
+
+static int drm_log_setup_modeset(struct drm_client_dev *client,
+ struct drm_mode_set *mode_set,
+ struct drm_log_scanout *scanout)
+{
+ struct drm_crtc *crtc = mode_set->crtc;
+ u32 width = mode_set->mode->hdisplay;
+ u32 height = mode_set->mode->vdisplay;
+ u32 format;
+
+ scanout->font = get_default_font(width, height, NULL, NULL);
+ if (!scanout->font)
+ return -ENOENT;
+
+ format = drm_log_find_usable_format(crtc->primary);
+ if (format == DRM_FORMAT_INVALID)
+ return -EINVAL;
+
+ scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
+ if (IS_ERR(scanout->buffer)) {
+ drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
+ width, height, &format);
+ return -ENOMEM;
+ }
+ mode_set->fb = scanout->buffer->fb;
+ scanout->scaled_font_h = scanout->font->height * scale;
+ scanout->scaled_font_w = scanout->font->width * scale;
+ scanout->rows = height / scanout->scaled_font_h;
+ scanout->columns = width / scanout->scaled_font_w;
+ scanout->front_color = drm_draw_color_from_xrgb8888(0xffffff, format);
+ scanout->prefix_color = drm_draw_color_from_xrgb8888(0x4e9a06, format);
+ return 0;
+}
+
+static int drm_log_count_modeset(struct drm_client_dev *client)
+{
+ struct drm_mode_set *mode_set;
+ int count = 0;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client)
+ count++;
+ mutex_unlock(&client->modeset_mutex);
+ return count;
+}
+
+static void drm_log_init_client(struct drm_log *dlog)
+{
+ struct drm_client_dev *client = &dlog->client;
+ struct drm_mode_set *mode_set;
+ int i, max_modeset;
+ int n_modeset = 0;
+
+ dlog->probed = true;
+
+ if (drm_client_modeset_probe(client, 0, 0))
+ return;
+
+ max_modeset = drm_log_count_modeset(client);
+ if (!max_modeset)
+ return;
+
+ dlog->scanout = kcalloc(max_modeset, sizeof(*dlog->scanout), GFP_KERNEL);
+ if (!dlog->scanout)
+ return;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client) {
+ if (!mode_set->mode)
+ continue;
+ if (drm_log_setup_modeset(client, mode_set, &dlog->scanout[n_modeset]))
+ continue;
+ n_modeset++;
+ }
+ mutex_unlock(&client->modeset_mutex);
+ if (n_modeset == 0)
+ goto err_nomodeset;
+
+ if (drm_client_modeset_commit(client))
+ goto err_failed_commit;
+
+ dlog->n_scanout = n_modeset;
+ return;
+
+err_failed_commit:
+ for (i = 0; i < n_modeset; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+
+err_nomodeset:
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+}
+
+static void drm_log_free_scanout(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ int i;
+
+ if (dlog->n_scanout) {
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ dlog->n_scanout = 0;
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+ }
+}
+
+static void drm_log_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ struct drm_device *dev = client->dev;
+
+ unregister_console(&dlog->con);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ drm_client_release(client);
+ mutex_unlock(&dlog->lock);
+ kfree(dlog);
+ drm_dbg(dev, "Unregistered with drm log\n");
+}
+
+static int drm_log_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ dlog->probed = false;
+ mutex_unlock(&dlog->lock);
+ return 0;
+}
+
+static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_stop(&dlog->con);
+
+ return 0;
+}
+
+static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_start(&dlog->con);
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_log_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_log_client_unregister,
+ .hotplug = drm_log_client_hotplug,
+ .suspend = drm_log_client_suspend,
+ .resume = drm_log_client_resume,
+};
+
+static void drm_log_write_thread(struct console *con, struct nbcon_write_context *wctxt)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+ int i;
+
+ if (!dlog->probed)
+ drm_log_init_client(dlog);
+
+ /* Check that we are still the master before drawing */
+ if (drm_master_internal_acquire(dlog->client.dev)) {
+ drm_master_internal_release(dlog->client.dev);
+
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_log_draw_kmsg_record(&dlog->scanout[i], wctxt->outbuf, wctxt->len);
+ }
+}
+
+static void drm_log_lock(struct console *con, unsigned long *flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ mutex_lock(&dlog->lock);
+ migrate_disable();
+}
+
+static void drm_log_unlock(struct console *con, unsigned long flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ migrate_enable();
+ mutex_unlock(&dlog->lock);
+}
+
+static void drm_log_register_console(struct console *con)
+{
+ strscpy(con->name, "drm_log");
+ con->write_thread = drm_log_write_thread;
+ con->device_lock = drm_log_lock;
+ con->device_unlock = drm_log_unlock;
+ con->flags = CON_PRINTBUFFER | CON_NBCON;
+ con->index = -1;
+
+ register_console(con);
+}
+
+/**
+ * drm_log_register() - Register a drm device to drm_log
+ * @dev: the drm device to register.
+ */
+void drm_log_register(struct drm_device *dev)
+{
+ struct drm_log *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto err_warn;
+
+ mutex_init(&new->lock);
+ if (drm_client_init(dev, &new->client, "drm_log", &drm_log_client_funcs))
+ goto err_free;
+
+ drm_client_register(&new->client);
+
+ drm_log_register_console(&new->con);
+
+ drm_dbg(dev, "Registered with drm log as %s\n", new->con.name);
+ return;
+
+err_free:
+ kfree(new);
+err_warn:
+ drm_warn(dev, "Failed to register with drm log\n");
+}
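The console half above is an ordinary NBCON console: write_thread() draws the record, while device_lock()/device_unlock() serialize drawing against hotplug through the client mutex. Since the scale parameter is read-only at runtime (0444), it has to be set at boot, e.g. with drm_log.scale=2 on the kernel command line. A minimal sketch of the same registration pattern, with hypothetical my_* names:

#include <linux/console.h>

static void my_write_thread(struct console *con, struct nbcon_write_context *wctxt)
{
	/* Draw wctxt->outbuf (wctxt->len bytes) to the scanout. */
}

static void my_device_lock(struct console *con, unsigned long *flags)
{
	/* Take a sleepable lock; write_thread() runs in task context. */
}

static void my_device_unlock(struct console *con, unsigned long flags)
{
	/* Release the lock taken in my_device_lock(). */
}

static struct console my_con = {
	.name		= "my_log",
	.write_thread	= my_write_thread,
	.device_lock	= my_device_lock,
	.device_unlock	= my_device_unlock,
	.flags		= CON_PRINTBUFFER | CON_NBCON,
	.index		= -1,
};

/* register_console(&my_con) from the driver's registration path. */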
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 6a4e892afcf8..8d22b7627d41 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -15,6 +15,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -75,6 +76,12 @@ config DRM_DISPLAY_HDCP_HELPER
help
DRM display helpers for HDCP.
+config DRM_DISPLAY_HDMI_AUDIO_HELPER
+ bool
+ help
+ DRM display helpers for HDMI Audio functionality (generic HDMI Codec
+ implementation).
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
@@ -82,6 +89,7 @@ config DRM_DISPLAY_HDMI_HELPER
config DRM_DISPLAY_HDMI_STATE_HELPER
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
help
DRM KMS state helpers for HDMI.
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 629c834c3192..b17879b957d5 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -14,6 +14,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
+ drm_hdmi_audio_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 320c297008aa..56f977bbe62d 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -17,7 +17,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
@@ -179,11 +182,15 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *detect = bridge_connector->bridge_detect;
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
enum drm_connector_status status;
if (detect) {
status = detect->funcs->detect(detect);
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
drm_bridge_connector_hpd_notify(connector, status);
} else {
switch (connector->connector_type) {
@@ -202,6 +209,16 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+static void drm_bridge_connector_force(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
+
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_force(connector);
+}
+
static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
struct dentry *root)
{
@@ -230,6 +247,7 @@ static void drm_bridge_connector_reset(struct drm_connector *connector)
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.reset = drm_bridge_connector_reset,
.detect = drm_bridge_connector_detect,
+ .force = drm_bridge_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -276,6 +294,14 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
struct drm_bridge *bridge;
/*
+ * If there is an HDMI bridge, the EDID has already been updated as part
+ * of .detect(). Just update the modes here.
+ */
+ bridge = bridge_connector->bridge_hdmi;
+ if (bridge)
+ return drm_edid_connector_add_modes(connector);
+
+ /*
* If display exposes EDID, then we parse that in the normal way to
* build table of supported modes.
*/
@@ -299,11 +325,37 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
return 0;
}
+static enum drm_mode_status
+drm_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_hdmi_connector_mode_valid(connector, mode);
+
+ return MODE_OK;
+}
+
+static int drm_bridge_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_atomic_helper_connector_hdmi_check(connector, state);
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
- /* No need for .mode_valid(), the bridges are checked by the core. */
+ .mode_valid = drm_bridge_connector_mode_valid,
.enable_hpd = drm_bridge_connector_enable_hpd,
.disable_hpd = drm_bridge_connector_disable_hpd,
+ .atomic_check = drm_bridge_connector_atomic_check,
};
static enum drm_mode_status
@@ -354,10 +406,94 @@ static int drm_bridge_connector_write_infoframe(struct drm_connector *connector,
return bridge->funcs->hdmi_write_infoframe(bridge, type, buffer, len);
}
+static const struct drm_edid *
+drm_bridge_connector_read_edid(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_edid;
+ if (!bridge)
+ return NULL;
+
+ return drm_bridge_edid_read(bridge, connector);
+}
+
static const struct drm_connector_hdmi_funcs drm_bridge_connector_hdmi_funcs = {
.tmds_char_rate_valid = drm_bridge_connector_tmds_char_rate_valid,
.clear_infoframe = drm_bridge_connector_clear_infoframe,
.write_infoframe = drm_bridge_connector_write_infoframe,
+ .read_edid = drm_bridge_connector_read_edid,
+};
+
+static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+}
+
+static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return;
+
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
+ bool enable, int direction)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (bridge->funcs->hdmi_audio_mute_stream)
+ return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ enable, direction);
+ else
+ return -ENOTSUPP;
+}
+
+static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
+ .startup = drm_bridge_connector_audio_startup,
+ .prepare = drm_bridge_connector_audio_prepare,
+ .shutdown = drm_bridge_connector_audio_shutdown,
+ .mute_stream = drm_bridge_connector_audio_mute_stream,
};
/* -----------------------------------------------------------------------------
@@ -459,7 +595,12 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
+ bridge = bridge_connector->bridge_hdmi;
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -468,12 +609,31 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bridge->hdmi_audio_max_i2s_playback_channels ||
+ bridge->hdmi_audio_spdif_playback) {
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ ret = drm_connector_hdmi_audio_init(connector,
+ bridge->hdmi_audio_dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
- if (ret)
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ }
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
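For the audio path, an HDMI bridge opts in by filling the hdmi_audio_* fields before the connector is created; per the checks above, hdmi_audio_prepare and hdmi_audio_shutdown are mandatory while startup and mute_stream remain optional. A hedged sketch with hypothetical my_* names and illustrative values:

static void my_bridge_setup_audio(struct drm_bridge *bridge, struct device *dev)
{
	/* my_bridge_funcs must provide .hdmi_audio_prepare and
	 * .hdmi_audio_shutdown for drm_bridge_connector_init() to succeed. */
	bridge->funcs = &my_bridge_funcs;
	bridge->hdmi_audio_dev = dev;	/* parent of the HDMI codec device */
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_spdif_playback = false;
	bridge->hdmi_audio_dai_port = 1;	/* OF graph port of the sound DAI */
}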
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 6ee51003de3c..61c7c2c588c6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -22,15 +22,16 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/dynamic_debug.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
-#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -779,6 +780,128 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+static int read_payload_update_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/**
+ * drm_dp_dpcd_write_payload() - Write Virtual Channel information to payload table
+ * @aux: DisplayPort AUX channel
+ * @vcpid: Virtual Channel Payload ID
+ * @start_time_slot: Starting time slot
+ * @time_slot_count: Time slot count
+ *
+ * Write the Virtual Channel payload allocation table, checking the payload
+ * update status and retrying as necessary.
+ *
+ * Returns:
+ * 0 on success, negative error otherwise
+ */
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = vcpid;
+ payload_alloc[1] = start_time_slot;
+ payload_alloc[2] = time_slot_count;
+
+ ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ drm_dbg_kms(aux->drm_dev, "status not set after read payload table status %d\n",
+ status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write_payload);
+
+/**
+ * drm_dp_dpcd_clear_payload() - Clear the entire VC Payload ID table
+ * @aux: DisplayPort AUX channel
+ *
+ * Clear the entire VC Payload ID table.
+ *
+ * Returns: 0 on success, negative error code on errors.
+ */
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux)
+{
+ return drm_dp_dpcd_write_payload(aux, 0, 0, 0x3f);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_clear_payload);
+
+/**
+ * drm_dp_dpcd_poll_act_handled() - Poll for ACT handled status
+ * @aux: DisplayPort AUX channel
+ * @timeout_ms: Timeout in ms
+ *
+ * Try waiting for the sink to finish updating its payload table by polling for
+ * the ACT handled bit of DP_PAYLOAD_TABLE_UPDATE_STATUS for up to @timeout_ms
+ * milliseconds, defaulting to 3000 ms if 0.
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
+ */
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms)
+{
+ int ret, status;
+
+ /* default to 3 seconds, this is arbitrary */
+ timeout_ms = timeout_ms ?: 3000;
+
+ ret = readx_poll_timeout(read_payload_update_status, aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ drm_err(aux->drm_dev, "Failed to get ACT after %d ms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ drm_dbg_kms(aux->drm_dev, "Failed to read payload table status: %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_poll_act_handled);
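Together, the three exported helpers give any DP driver the payload-table sequence the MST manager uses below: write the allocation, poll for ACT, and shrink or wipe the table on teardown. A minimal sketch, assuming vcpi, start_slot and slots come from the caller's bookkeeping:

static int my_allocate_payload(struct drm_dp_aux *aux, int vcpi,
			       u8 start_slot, u8 slots)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(aux, vcpi, start_slot, slots);
	if (ret)
		return ret;

	/* Passing 0 selects the default 3000 ms timeout. */
	return drm_dp_dpcd_poll_act_handled(aux, 0);
}

On teardown, either free the single allocation with drm_dp_dpcd_write_payload(aux, vcpi, start_slot, 0) or reset everything with drm_dp_dpcd_clear_payload(aux).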
+
static bool is_edid_digital_input_dp(const struct drm_edid *drm_edid)
{
/* FIXME: get rid of drm_edid_raw() */
@@ -2421,7 +2544,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
- switch (bpp_increment_dpcd) {
+ switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
case DP_DSC_BITS_PER_PIXEL_1_16:
return 16;
case DP_DSC_BITS_PER_PIXEL_1_8:
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index dc4446d589e7..6d09bef671da 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -29,7 +29,6 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -68,9 +67,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots);
-
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -2285,7 +2281,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
- drm_connector_register(port->connector);
+ drm_connector_dynamic_register(port->connector);
return;
error:
@@ -3267,7 +3263,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
@@ -3298,7 +3294,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_
}
if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
+ drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}
/**
@@ -3576,8 +3572,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3588,17 +3583,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
@@ -3686,7 +3676,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
/* Write reset payload */
- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
+ drm_dp_dpcd_clear_payload(mgr->aux);
drm_dp_mst_queue_probe_work(mgr);
@@ -4035,6 +4025,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4065,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4148,10 +4158,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4160,16 +4171,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4184,9 +4185,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
@@ -4747,61 +4745,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots)
-{
- u8 payload_alloc[3], status;
- int ret;
- int retries = 0;
-
- drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
- payload_alloc[1] = start_slot;
- payload_alloc[2] = num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
- drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
- goto fail;
- }
-
-retry:
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0) {
- drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
- retries++;
- if (retries < 20) {
- usleep_range(10000, 20000);
- goto retry;
- }
- drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
- status);
- ret = -EINVAL;
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int do_get_act_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
@@ -4819,28 +4762,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
- * conditions, we use a rather large timeout value.
+ * conditions, we use a rather large timeout value of 3 seconds.
*/
- const int timeout_ms = 3000;
- int ret, status;
-
- ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
- status & DP_PAYLOAD_ACT_HANDLED || status < 0,
- 200, timeout_ms * USEC_PER_MSEC);
- if (ret < 0 && status >= 0) {
- drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
- return -EINVAL;
- } else if (status < 0) {
- /*
- * Failure here isn't unexpected - the hub may have
- * just been unplugged
- */
- drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
- return status;
- }
-
- return 0;
+ return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
}
EXPORT_SYMBOL(drm_dp_check_act_status);
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
new file mode 100644
index 000000000000..05afc9f0bdd6
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+
+#include <sound/hdmi-codec.h>
+
+static int drm_connector_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->startup)
+ return funcs->startup(connector);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_prepare(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->prepare(connector, fmt, hparms);
+}
+
+static void drm_connector_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->shutdown(connector);
+}
+
+static int drm_connector_hdmi_audio_mute_stream(struct device *dev, void *data,
+ bool enable, int direction)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->mute_stream)
+ return funcs->mute_stream(connector, enable, direction);
+
+ return -ENOTSUPP;
+}
+
+static int drm_connector_hdmi_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint,
+ void *data)
+{
+ struct drm_connector *connector = data;
+ struct of_endpoint of_ep;
+ int ret;
+
+ if (connector->hdmi_audio.dai_port < 0)
+ return -ENOTSUPP;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ if (of_ep.port == connector->hdmi_audio.dai_port)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int drm_connector_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->eld_mutex);
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
+ void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.plugged_cb = fn;
+ connector->hdmi_audio.plugged_cb_dev = codec_dev;
+
+ fn(codec_dev, connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+
+ return 0;
+}
+
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged)
+{
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.last_state = plugged;
+
+ if (connector->hdmi_audio.plugged_cb &&
+ connector->hdmi_audio.plugged_cb_dev)
+ connector->hdmi_audio.plugged_cb(connector->hdmi_audio.plugged_cb_dev,
+ connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_plugged_notify);
+
+static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
+ .audio_startup = drm_connector_hdmi_audio_startup,
+ .prepare = drm_connector_hdmi_audio_prepare,
+ .audio_shutdown = drm_connector_hdmi_audio_shutdown,
+ .mute_stream = drm_connector_hdmi_audio_mute_stream,
+ .get_eld = drm_connector_hdmi_audio_get_eld,
+ .get_dai_id = drm_connector_hdmi_audio_get_dai_id,
+ .hook_plugged_cb = drm_connector_hdmi_audio_hook_plugged_cb,
+};
+
+/**
+ * drm_connector_hdmi_audio_init - Initialize HDMI Codec device for the DRM connector
+ * @connector: A pointer to the connector to allocate codec for
+ * @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
+ * @funcs: callbacks for this HDMI Codec
+ * @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @dai_port: sound DAI port, -1 if it is not enabled
+ *
+ * Create an HDMI codec device to be used with the specified connector.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ bool spdif_playback,
+ int dai_port)
+{
+ struct hdmi_codec_pdata codec_pdata = {
+ .ops = &drm_connector_hdmi_audio_ops,
+ .max_i2s_channels = max_i2s_playback_channels,
+ .i2s = !!max_i2s_playback_channels,
+ .spdif = spdif_playback,
+ .no_i2s_capture = true,
+ .no_spdif_capture = true,
+ .data = connector,
+ };
+ struct platform_device *pdev;
+
+ if (!funcs ||
+ !funcs->prepare ||
+ !funcs->shutdown)
+ return -EINVAL;
+
+ connector->hdmi_audio.funcs = funcs;
+ connector->hdmi_audio.dai_port = dai_port;
+
+ pdev = platform_device_register_data(hdmi_codec_dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_pdata, sizeof(codec_pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ connector->hdmi_audio.codec_pdev = pdev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_init);
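A connector driver wires the codec up by providing the funcs table and calling the init helper; prepare and shutdown are mandatory per the check above. A hedged sketch with hypothetical my_* callbacks and illustrative parameters:

static const struct drm_connector_hdmi_audio_funcs my_audio_funcs = {
	.prepare	= my_audio_prepare,	/* required */
	.shutdown	= my_audio_shutdown,	/* required */
	.startup	= my_audio_startup,	/* optional */
	.mute_stream	= my_audio_mute_stream,	/* optional */
};

static int my_connector_enable_audio(struct drm_connector *connector,
				     struct device *dev)
{
	return drm_connector_hdmi_audio_init(connector, dev, &my_audio_funcs,
					     8,		/* max I2S playback channels */
					     false,	/* no S/PDIF playback port */
					     1);	/* sound DAI port, -1 if unused */
}

Plug state then flows to the codec through drm_connector_hdmi_audio_plugged_notify(), which the HDMI state helper below invokes from its hotplug handling.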
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index feb7a3a75981..9b2ee2385634 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -5,6 +5,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -347,6 +348,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
int ret;
+ infoframe->set = false;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
if (ret)
return ret;
@@ -376,6 +379,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
&infoframe->data.spd;
int ret;
+ infoframe->set = false;
+
ret = hdmi_spd_infoframe_init(frame,
connector->hdmi.vendor,
connector->hdmi.product);
@@ -398,6 +403,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
&infoframe->data.drm;
int ret;
+ infoframe->set = false;
+
if (connector->max_bpc < 10)
return 0;
@@ -425,6 +432,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
&infoframe->data.vendor.hdmi;
int ret;
+ infoframe->set = false;
+
if (!info->has_hdmi_infoframe)
return 0;
@@ -494,6 +503,9 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
connector_state_get_mode(new_conn_state);
int ret;
+ if (!new_conn_state->crtc || !new_conn_state->best_encoder)
+ return 0;
+
new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
ret = hdmi_compute_config(connector, new_conn_state, mode);
@@ -521,6 +533,27 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
+/**
+ * drm_hdmi_connector_mode_valid() - Check if mode is valid for HDMI connector
+ * @connector: DRM connector to validate the mode
+ * @mode: Display mode to validate
+ *
+ * Generic .mode_valid implementation for HDMI connectors.
+ */
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ unsigned long long clock;
+
+ clock = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ if (!clock)
+ return MODE_ERROR;
+
+ return hdmi_clock_valid(connector, mode, clock);
+}
+EXPORT_SYMBOL(drm_hdmi_connector_mode_valid);
+
static int clear_device_infoframe(struct drm_connector *connector,
enum hdmi_infoframe_type type)
{
@@ -748,3 +781,61 @@ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *con
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_clear_audio_infoframe);
+
+static void
+drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ const struct drm_edid *drm_edid;
+
+ if (status == connector_status_disconnected) {
+ // TODO: also handle CEC and scrambler, HDMI sink disconnected.
+ drm_connector_hdmi_audio_plugged_notify(connector, false);
+ drm_edid_connector_update(connector, NULL);
+ return;
+ }
+
+ if (connector->hdmi.funcs->read_edid)
+ drm_edid = connector->hdmi.funcs->read_edid(connector);
+ else
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ if (status == connector_status_connected) {
+ // TODO: also handle CEC and scrambler, HDMI sink is now connected.
+ drm_connector_hdmi_audio_plugged_notify(connector, true);
+ }
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_hotplug - Handle the hotplug event for the HDMI connector
+ * @connector: A pointer to the HDMI connector
+ * @status: Connection status
+ *
+ * This function should be called as part of the .detect() / .detect_ctx()
+ * callbacks to update the connector's HDMI-specific data.
+ */
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_hotplug);
+
+/**
+ * drm_atomic_helper_connector_hdmi_force - HDMI Connector implementation of the force callback
+ * @connector: A pointer to the HDMI connector
+ *
+ * This function implements the .force() callback for HDMI connectors. It can
+ * either be used directly as the callback or be called from within the
+ * driver's .force() implementation to keep the connector's HDMI-specific
+ * data up to date.
+ */
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, connector->status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_force);
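Wired together, these helpers replace most of an HDMI connector's detect/force/mode_valid boilerplate; the drm_bridge_connector changes above use exactly this pattern. A hedged sketch with hypothetical my_* names:

static enum drm_connector_status
my_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = my_hw_detect(connector);

	/* Refreshes the EDID and notifies the audio codec. */
	drm_atomic_helper_connector_hdmi_hotplug(connector, status);
	return status;
}

static const struct drm_connector_funcs my_connector_funcs = {
	.detect	= my_detect,
	.force	= drm_atomic_helper_connector_hdmi_force,
	/* .reset, .fill_modes and the atomic state hooks as usual */
};

static const struct drm_connector_helper_funcs my_helper_funcs = {
	.mode_valid	= drm_hdmi_connector_mode_valid,
	/* .get_modes and .atomic_check as usual */
};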
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..fd36b8fd54e9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
+
+ if (connector->dpms == mode)
+ goto out;
+
connector->dpms = mode;
crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
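A 4:2:0-capable HDMI bridge therefore only declares the format; the flag is derived at add time. A hedged sketch:

static void my_hdmi_bridge_add(struct drm_bridge *bridge)
{
	bridge->ops |= DRM_BRIDGE_OP_HDMI;
	bridge->supported_formats = BIT(HDMI_COLORSPACE_RGB) |
				    BIT(HDMI_COLORSPACE_YUV420);
	drm_bridge_add(bridge);	/* sets bridge->ycbcr_420_allowed */
}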
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 251f94313717..aca442c25209 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -743,6 +743,15 @@ retry:
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
+ for (i = 0; i < count; i++) {
+ struct drm_connector *connector = connectors[i];
+
+ if (connector->has_tile)
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
+ connector->tile_h_loc, connector->tile_v_loc);
+ }
+
/*
* If the BIOS didn't enable everything it could, fall back to have the
* same user experiencing of lighting up as much as possible like the
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..48b08c9611a7 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -218,11 +219,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
-static int __drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type,
- struct i2c_adapter *ddc)
+static int drm_connector_init_only(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -273,12 +274,15 @@ static int __drm_connector_init(struct drm_device *dev,
/* provide ddc symlink in sysfs */
connector->ddc = ddc;
+ INIT_LIST_HEAD(&connector->head);
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
+ mutex_init(&connector->hdmi_audio.lock);
connector->edid_blob_ptr = NULL;
connector->epoch_counter = 0;
connector->tile_blob_ptr = NULL;
@@ -288,14 +292,6 @@ static int __drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
- /* We should add connectors at the end to avoid upsetting the connector
- * index too much.
- */
- spin_lock_irq(&config->connector_list_lock);
- list_add_tail(&connector->head, &config->connector_list);
- config->num_connector++;
- spin_unlock_irq(&config->connector_list_lock);
-
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_connector_attach_edid_property(connector);
@@ -332,6 +328,54 @@ out_put:
return ret;
}
+static void drm_connector_add(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (drm_WARN_ON(dev, !list_empty(&connector->head)))
+ return;
+
+ spin_lock_irq(&config->connector_list_lock);
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
+ spin_unlock_irq(&config->connector_list_lock);
+}
+
+static void drm_connector_remove(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ /*
+ * For dynamic connectors drm_connector_cleanup() can call this function
+ * before the connector is registered and added to the list.
+ */
+ if (list_empty(&connector->head))
+ return;
+
+ spin_lock_irq(&dev->mode_config.connector_list_lock);
+ list_del_init(&connector->head);
+ dev->mode_config.num_connector--;
+ spin_unlock_irq(&dev->mode_config.connector_list_lock);
+}
+
+static int drm_connector_init_and_add(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ int ret;
+
+ ret = drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_add(connector);
+
+ return 0;
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -361,11 +405,52 @@ int drm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, NULL);
}
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_dynamic_init - Init a preallocated dynamic connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated dynamic connector. Connectors should be
+ * subclassed as part of driver connector objects. The connector
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Drivers should call this for dynamic connectors which can be hotplugged
+ * after drm_dev_register() has already been called, e.g. DP MST connectors.
+ * For all other - static - connectors, drivers should call one of the
+ * drm_connector_init*()/drmm_connector_init*() functions.
+ *
+ * After calling this function the drivers must call
+ * drm_connector_dynamic_register().
+ *
+ * To remove the connector the driver must call drm_connector_unregister()
+ * followed by drm_connector_put(). Putting the last reference will call the
+ * driver's &drm_connector_funcs.destroy hook, which in turn must call
+ * drm_connector_cleanup() and free the connector structure.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_init);
+
+/**
* drm_connector_init_with_ddc - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
@@ -398,7 +483,7 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_init_with_ddc);
@@ -442,7 +527,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
- ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ ret = drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
@@ -507,6 +592,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
@@ -631,6 +719,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
+ platform_device_unregister(connector->hdmi_audio.codec_pdev);
+
if (connector->privacy_screen) {
drm_privacy_screen_put(connector->privacy_screen);
connector->privacy_screen = NULL;
@@ -659,16 +749,15 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
- spin_lock_irq(&dev->mode_config.connector_list_lock);
- list_del(&connector->head);
- dev->mode_config.num_connector--;
- spin_unlock_irq(&dev->mode_config.connector_list_lock);
+
+ drm_connector_remove(connector);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->hdmi_audio.lock);
mutex_destroy(&connector->hdmi.infoframes.lock);
mutex_destroy(&connector->mutex);
@@ -683,14 +772,17 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector. Only call this for connectors
- * which can be hotplugged after drm_dev_register() has been called already,
- * e.g. DP MST connectors. All other connectors will be registered automatically
- * when calling drm_dev_register().
+ * Register userspace interfaces for a connector. Drivers shouldn't call this
+ * function. Static connectors will be registered automatically by DRM core
+ * from drm_dev_register(), dynamic connectors (MST) should be registered by
+ * drivers calling drm_connector_dynamic_register().
*
* When the connector is no longer available, callers must call
* drm_connector_unregister().
*
+ * Note: Existing uses of this function in drivers should be a nop already and
+ * are scheduled to be removed.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -750,12 +842,43 @@ unlock:
EXPORT_SYMBOL(drm_connector_register);
/**
+ * drm_connector_dynamic_register - register a dynamic connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * initialized by calling drm_connector_dynamic_init(). All other connectors
+ * will be registered automatically when calling drm_dev_register().
+ *
+ * When the connector is no longer available the driver must call
+ * drm_connector_unregister().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_register(struct drm_connector *connector)
+{
+ /* Was the connector inited already? */
+ if (WARN_ON(!(connector->funcs && connector->funcs->destroy)))
+ return -EINVAL;
+
+ drm_connector_add(connector);
+
+ return drm_connector_register(connector);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_register);
+
+/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector. Only call this for
- * connectors which have been registered explicitly by calling
- * drm_connector_register().
+ * Unregister userspace interfaces for a connector. Drivers should call this
+ * for dynamic connectors (MST) only, which were registered explicitly by
+ * calling drm_connector_dynamic_register(). All other - static - connectors
+ * will be unregistered automatically by DRM core and drivers shouldn't call
+ * this function for those.
+ *
+ * Note: Existing uses of this function in drivers for static connectors
+ * should be a nop already and are scheduled to be removed.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
@@ -1304,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
+ * On atomic drivers, any DPMS setproperty ioctl where the value does not
+ * change is skipped entirely; otherwise a full atomic commit will occur.
+ * On legacy drivers the exact behavior is driver specific.
+ *
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
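Taken together, the dynamic-connector entry points above give MST drivers a symmetric lifecycle. A minimal sketch, assuming a hypothetical my_funcs whose .destroy calls drm_connector_cleanup():

static int my_mst_connector_create(struct drm_device *dev,
				   struct drm_connector *connector,
				   struct i2c_adapter *ddc)
{
	int ret;

	ret = drm_connector_dynamic_init(dev, connector, &my_funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, ddc);
	if (ret)
		return ret;

	/* Adds the connector to the list and registers userspace interfaces. */
	return drm_connector_dynamic_register(connector);
}

Removal mirrors the kernel-doc above: drm_connector_unregister() followed by drm_connector_put(), whose last reference invokes my_funcs.destroy.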
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
new file mode 100644
index 000000000000..cb2ad12bce57
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/iosys-map.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "drm_draw_internal.h"
+
+/*
+ * Conversions from xrgb8888
+ */
+
+static u16 convert_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+}
+
+static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u32 convert_xrgb8888_to_argb8888(u32 pix)
+{
+ return pix | GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+}
+
+static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00FF0000) >> 14) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x000000FF) << 22);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
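
The pix | ((pix >> 8) & 0x00300C03) step in the three 2101010 converters above expands each 8-bit channel to 10 bits by bit replication; a short worked example of why (editorial note, not part of the patch):

    /* Each 8-bit channel sits in the top 8 bits of its 10-bit field;
     * (pix >> 8) & 0x00300C03 copies each channel's two MSBs into the
     * two vacated LSBs, so full scale maps to full scale:
     *   0xff -> 0b11111111 << 2 = 0x3fc, plus replicated 0b11 = 0x3ff
     *   0x00 -> 0x000
     */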
+
+/**
+ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+ * @color: input color, in xrgb8888 format
+ * @format: output format
+ *
+ * Returns:
+ * The color in the specified format, cast to u32, or 0 if the format is not
+ * supported.
+ */
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ return convert_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+ return convert_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+ return convert_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+ return convert_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+ return convert_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+ return convert_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+ return convert_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+ return convert_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+ return convert_xrgb8888_to_argb2101010(color);
+ case DRM_FORMAT_ABGR2101010:
+ return convert_xrgb8888_to_abgr2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
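
A quick usage example (the format choice is illustrative): converting opaque white for an RGB565 scanout buffer:

    u32 fg = drm_draw_color_from_xrgb8888(0x00ffffff, DRM_FORMAT_RGB565);
    /* fg == 0xffff; a 0 return for a non-black input indicates an
     * unsupported format. */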
+
+/*
+ * Blit functions
+ */
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
+}
+EXPORT_SYMBOL(drm_draw_blit16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ u32 off = y * dpitch + x * 3;
+
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ /* write blue-green-red to output in little-endian order */
+ iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_blit24);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
+}
+EXPORT_SYMBOL(drm_draw_blit32);
+
+/*
+ * Fill functions
+ */
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+}
+EXPORT_SYMBOL(drm_draw_fill16);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ unsigned int off = y * dpitch + x * 3;
+
+ /* write blue-green-red to output in little-endian order */
+ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_fill24);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+}
+EXPORT_SYMBOL(drm_draw_fill32);
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
new file mode 100644
index 000000000000..f121ee7339dc
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#ifndef __DRM_DRAW_INTERNAL_H__
+#define __DRM_DRAW_INTERNAL_H__
+
+#include <linux/font.h>
+#include <linux/types.h>
+
+struct iosys_map;
+
+/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
+static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
+{
+ return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
+}
+
+static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
+ char c, size_t font_pitch)
+{
+ return font->data + (c * font->height) * font_pitch;
+}
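
For illustration, how the two helpers above combine to sample one pixel of a glyph in a 1-bit-per-pixel font; font is assumed to come from get_default_font(), and x/y are coordinates within the glyph:

    size_t font_pitch = DIV_ROUND_UP(font->width, 8);
    const u8 *glyph = drm_draw_get_char_bitmap(font, 'A', font_pitch);
    bool fg = drm_draw_is_pixel_fg(glyph, font_pitch, x, y);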
+
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);
+
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color);
+
+#endif /* __DRM_DRAW_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c2c172eb25df..3cf440eee8a2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,6 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
@@ -820,6 +821,37 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
+static void drmm_cg_unregister_region(struct drm_device *dev, void *arg)
+{
+ dmem_cgroup_unregister_region(arg);
+}
+
+/**
+ * drmm_cgroup_register_region - Register a region of a DRM device to cgroups
+ * @dev: device for region
+ * @region_name: Region name for registering
+ * @size: Size of region in bytes
+ *
+ * This registers the named dmem region for @dev and sets up a managed action
+ * to unregister the region automatically when the device is released.
+ *
+ * Returns:
+ * The registered region on success, NULL if dmem cgroups are disabled,
+ * or an ERR_PTR() on failure.
+ */
+struct dmem_cgroup_region *drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size)
+{
+ struct dmem_cgroup_region *region;
+ int ret;
+
+ region = dmem_cgroup_register_region(size, "drm/%s/%s", dev->unique, region_name);
+ if (IS_ERR_OR_NULL(region))
+ return region;
+
+ ret = drmm_add_action_or_reset(dev, drmm_cg_unregister_region, region);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(drmm_cgroup_register_region);
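
A usage sketch; the "vram" region name and vram_size variable are illustrative assumptions:

    struct dmem_cgroup_region *region;

    region = drmm_cgroup_register_region(drm, "vram", vram_size);
    if (IS_ERR(region))
            return PTR_ERR(region);
    /* a NULL region means dmem cgroups are disabled; no accounting */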
+
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 855beafb76ff..13bc4c290b17 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
static void clear_eld(struct drm_connector *connector)
{
+ mutex_lock(&connector->eld_mutex);
memset(connector->eld, 0, sizeof(connector->eld));
+ mutex_unlock(&connector->eld_mutex);
connector->latency_present[0] = false;
connector->latency_present[1] = false;
@@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
if (!drm_edid)
return;
+ mutex_lock(&connector->eld_mutex);
+
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
connector->base.id, connector->name,
@@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
connector->base.id, connector->name,
drm_eld_size(eld), total_sad_count);
+
+ mutex_unlock(&connector->eld_mutex);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index c9008113111b..fb3614a7ba44 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1354,14 +1354,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
+static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
{
struct drm_mode_set *mode_set;
mutex_lock(&fb_helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &fb_helper->client) {
- mode_set->x = x;
- mode_set->y = y;
+ mode_set->x += dx;
+ mode_set->y += dy;
}
mutex_unlock(&fb_helper->client.modeset_mutex);
}
@@ -1370,16 +1370,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- int ret;
+ int ret, dx, dy;
- pan_set(fb_helper, var->xoffset, var->yoffset);
+ dx = var->xoffset - info->var.xoffset;
+ dy = var->yoffset - info->var.yoffset;
+ pan_set(fb_helper, dx, dy);
ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
} else
- pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
+ pan_set(fb_helper, -dx, -dy);
return ret;
}
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b14b581c059d..02a516e77192 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+ void *shadow = info->screen_buffer;
+
+ if (!fb_helper->dev)
+ return;
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(fb_helper);
+ vfree(shadow);
- return fb_deferred_io_mmap(info, vma);
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+ FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
- .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map dst;
+
+ /*
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ dst = buffer->map;
+ drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+ mutex_unlock(&fb_helper->lock);
+
+ return 0;
+}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
+ ret = drm_fbdev_dma_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct fb_info *info = fb_helper->info;
+ struct iosys_map map = buffer->map;
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct fb_info *info = fb_helper->info;
+ size_t screen_size = buffer->gem->size;
+ void *screen_buffer;
+ int ret;
+
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus create
+ * a shadow buffer in system memory.
+ */
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer)
+ return -ENOMEM;
+
+ info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
+
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_vfree;
+
+ return 0;
+
+err_vfree:
+ vfree(screen_buffer);
+ return ret;
+}
+
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- bool use_deferred_io = false;
struct drm_client_buffer *buffer;
- struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- /*
- * Deferred I/O requires struct page for framebuffer memory,
- * which is not guaranteed for all DMA ranges. We thus only
- * install deferred I/O if we have a framebuffer that requires
- * it.
- */
- if (fb->funcs->dirty)
- use_deferred_io = true;
-
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (use_deferred_io)
- info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ if (fb->funcs->dirty)
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
- info->fbops = &drm_fbdev_dma_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB; /* system memory */
- if (dma_obj->map_noncoherent)
- info->flags |= FBINFO_READS_FAST; /* signal caching */
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->screen_buffer = map.vaddr;
- if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
- if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
- }
- info->fix.smem_len = info->screen_size;
-
- /*
- * Only set up deferred I/O if the screen buffer supports
- * it. If this disagrees with the previous test for ->dirty,
- * mmap on the /dev/fb file might not work correctly.
- */
- if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
- unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
- if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
- use_deferred_io = false;
- }
-
- /* deferred I/O */
- if (use_deferred_io) {
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
- }
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index cb5f22f5bbb6..2289e71e2fa2 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -845,6 +845,16 @@ static void print_size(struct drm_printer *p, const char *stat,
drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
+{
+ return (stats->shared == 0 &&
+ stats->private == 0 &&
+ stats->resident == 0 &&
+ stats->purgeable == 0 &&
+ stats->active == 0);
+}
+EXPORT_SYMBOL(drm_memory_stats_is_zero);
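
The intended call pattern, sketched (the region name is illustrative): fdinfo implementations can skip printing regions with no allocations at all:

    if (!drm_memory_stats_is_zero(&stats))
            drm_print_memory_stats(p, &stats, supported_status, "vram");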
+
/**
* drm_print_memory_stats - A helper to print memory stats
* @p: The printer to print output to
@@ -860,7 +870,9 @@ void drm_print_memory_stats(struct drm_printer *p,
{
print_size(p, "total", region, stats->private + stats->shared);
print_size(p, "shared", region, stats->shared);
- print_size(p, "active", region, stats->active);
+
+ if (supported_status & DRM_GEM_OBJECT_ACTIVE)
+ print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
print_size(p, "resident", region, stats->resident);
@@ -893,15 +905,13 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
- supported_status = DRM_GEM_OBJECT_RESIDENT |
- DRM_GEM_OBJECT_PURGEABLE;
+ supported_status |= s;
}
- if (drm_gem_object_is_shared_for_memory_stats(obj)) {
+ if (drm_gem_object_is_shared_for_memory_stats(obj))
status.shared += obj->size;
- } else {
+ else
status.private += obj->size;
- }
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += add_size;
@@ -914,6 +924,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += add_size;
+ supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 37d2e0a4ef4b..8642a2fb25a9 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -150,6 +150,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
drm_connector_list_iter_begin(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
+ /*
+ * FIXME: the connectors on the list may not be fully initialized yet,
+ * if the ioctl is called before the connectors are registered. (See
+ * drm_dev_register()->drm_modeset_register_all() for static and
+ * drm_connector_dynamic_register() for dynamic connectors.)
+ * The driver should only get registered after static connectors are
+ * fully initialized and dynamic connectors should be added to the
+ * connector list only after fully initializing them.
+ */
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 71573b85d924..e72f855fc495 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1282,8 +1282,7 @@ EXPORT_SYMBOL(drm_mode_set_name);
* @mode: mode
*
* Returns:
- * @modes's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
+ * @mode's vrefresh rate in Hz, rounded to the nearest integer.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 5c2abc9eca9c..5530919e0ba0 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -564,6 +564,8 @@ EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
* Gets parent DSI bus for a DSI device controlled through a bus other
* than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
*
+ * This function assumes that the device's port@0 is the DSI input.
+ *
* Returns pointer to mipi_dsi_host if successful, -EINVAL if the
* request is unsupported, -EPROBE_DEFER if the DSI host is found but
* not available, or -ENODEV otherwise.
@@ -576,7 +578,7 @@ struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
/*
* Get first endpoint child from device.
*/
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 19ab0a794add..9940e96d35e3 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -413,7 +414,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_read_bool(dev->of_node, "panel");
+ return of_property_present(dev->of_node, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
new file mode 100644
index 000000000000..c477d98ade2b
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_utils.h>
+
+struct drm_panel_min_backlight_quirk {
+ struct {
+ enum dmi_field field;
+ const char * const value;
+ } dmi_match;
+ struct drm_edid_ident ident;
+ u8 min_brightness;
+};
+
+static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+ /* 13 inch matte panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch glossy panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch 2.8k panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
+ .ident.name = "NE135A1M-NY1",
+ .min_brightness = 0,
+ },
+};
+
+static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
+ const struct drm_edid *edid)
+{
+ if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (!drm_edid_match(edid, &quirk->ident))
+ return false;
+
+ return true;
+}
+
+/**
+ * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * @edid: EDID of the panel to check
+ *
+ * This function checks for platform-specific (e.g. DMI-based) quirks
+ * providing info on the minimum backlight brightness for systems where this
+ * cannot be probed correctly from the hardware or firmware.
+ *
+ * Returns:
+ * A negative error value, or an override value in the range [0, 255]
+ * (representing 0-100%) to be scaled to the driver's target range.
+ */
+int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+{
+ const struct drm_panel_min_backlight_quirk *quirk;
+ size_t i;
+
+ if (!IS_ENABLED(CONFIG_DMI))
+ return -ENODATA;
+
+ if (!edid)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
+ quirk = &drm_panel_min_backlight_quirks[i];
+
+ if (drm_panel_min_backlight_quirk_matches(quirk, edid))
+ return quirk->min_brightness;
+ }
+
+ return -ENODATA;
+}
+EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
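
A consumer-side sketch; the caps structure and the scaling into a driver range are hypothetical driver details, not part of this patch:

    int min = drm_get_panel_min_brightness_quirk(drm_edid);

    if (min >= 0)
            caps->min_input_signal = min * caps->max_input_signal / 255;
    /* -ENODATA: no quirk for this panel, keep the probed default */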
+
+MODULE_DESCRIPTION("Quirks for panel backlight overrides");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 0a9ecc1380d2..f128d345b16d 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -31,6 +31,7 @@
#include <drm/drm_rect.h>
#include "drm_crtc_internal.h"
+#include "drm_draw_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@@ -139,181 +140,8 @@ device_initcall(drm_panic_setup_logo);
#endif
/*
- * Color conversion
+ * Blit & Fill functions
*/
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-/*
- * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
- * @color: input color, in xrgb8888 format
- * @format: output format
- *
- * Returns:
- * Color in the format specified, casted to u32.
- * Or 0 if the format is not supported.
- */
-static u32 convert_from_xrgb8888(u32 color, u32 format)
-{
- switch (format) {
- case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
- case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
- case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
- case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_XRGB8888:
- return color;
- case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
- case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
- case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
- case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
- case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
- case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
- default:
- WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
- return 0;
- }
-}
-
-/*
- * Blit & Fill
- */
-/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
-static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
-{
- return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
-}
-
-static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u16 fg16)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
-}
-
-static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- u32 off = y * dpitch + x * 3;
-
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
- }
- }
- }
-}
-
-static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
-}
-
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
const u8 *sbuf8, unsigned int spitch, unsigned int scale,
u32 fg_color)
@@ -322,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@@ -354,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
- drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
- drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
}
}
-static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u16 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
-}
-
-static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- unsigned int off = y * dpitch + x * 3;
-
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
- }
- }
-}
-
-static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
-}
-
static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
struct drm_rect *clip,
u32 color)
@@ -442,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 3:
- drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 4:
- drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
default:
WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
}
}
-static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
-{
- return font->data + (c * font->height) * font_pitch;
-}
-
static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
{
int i;
@@ -501,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
for (j = 0; j < line_len; j++) {
- src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
@@ -533,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
unsigned int msg_width, msg_height;
@@ -600,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
*/
static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
struct kmsg_dump_iter iter;
@@ -791,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
*/
static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
unsigned int max_qr_size, scale;
@@ -878,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
{
if (format->num_planes != 1)
return false;
- return convert_from_xrgb8888(0xffffff, format->format) != 0;
+ return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
}
static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index ef2d490965ba..6903e2010cb9 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -545,7 +545,7 @@ impl EncodedMsg<'_> {
}
self.push(&mut offset, (MODE_STOP, 4));
- let pad_offset = (offset + 7) / 8;
+ let pad_offset = offset.div_ceil(8);
for i in pad_offset..self.version.max_data() {
self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
}
@@ -659,7 +659,7 @@ struct QrImage<'a> {
impl QrImage<'_> {
fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
let width = em.version.width();
- let stride = (width + 7) / 8;
+ let stride = width.div_ceil(8);
let data = qrdata;
let mut qr_image = QrImage {
@@ -911,16 +911,16 @@ impl QrImage<'_> {
///
/// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-/// will be encoded as binary segment, otherwise it will be encoded
-/// efficiently as a numeric segment, and appended to the URL.
+/// will be encoded as binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than data_size.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-/// a V40 QR code. It will then be overwritten with the QR code image.
+/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-/// segments and ECC.
+/// segments and ECC.
/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-/// long for V40.
+/// long for V40.
///
/// # Safety
///
@@ -931,7 +931,7 @@ impl QrImage<'_> {
/// They must remain valid for the duration of the function call.
#[no_mangle]
pub unsafe extern "C" fn drm_panic_qr_generate(
- url: *const i8,
+ url: *const kernel::ffi::c_char,
data: *mut u8,
data_len: usize,
data_size: usize,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 08cfea04e22b..79517bd4418f 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -390,3 +390,26 @@ void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
}
}
EXPORT_SYMBOL(drm_print_regset32);
+
+/**
+ * drm_print_hex_dump - print a hex dump to a &drm_printer stream
+ * @p: The &drm_printer
+ * @prefix: Prefix for each line, may be NULL for no prefix
+ * @buf: Buffer to dump
+ * @len: Length of buffer
+ *
+ * Print hex dump to &drm_printer, with 16 space-separated hex bytes per line,
+ * optionally with a prefix on each line. No separator is added after prefix.
+ */
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 16) {
+ int bytes_per_line = min(16, len - i);
+
+ drm_printf(p, "%s%*ph\n", prefix ?: "", bytes_per_line, buf + i);
+ }
+}
+EXPORT_SYMBOL(drm_print_hex_dump);
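
Example invocation (the device and buffer are placeholders):

    struct drm_printer p = drm_info_printer(dev->dev);

    drm_print_hex_dump(&p, "raw edid: ", edid_buf, 128);
    /* emits lines like "raw edid: 00 ff ff ff ff ff ff 00 ..." */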
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 1752ffb44e1d..9cc71120246f 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
- worker = kthread_create_worker(0, "card%d-crtc%d",
+ worker = kthread_run_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 7aa5f14d0c87..3a221923f15d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -9,7 +9,6 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
@@ -100,7 +99,7 @@ retry:
mutex_unlock(&suballoc->lock);
ret = wait_event_interruptible_timeout(suballoc->free_event,
suballoc->free_space,
- msecs_to_jiffies(10 * 1000));
+ secs_to_jiffies(10));
if (!ret) {
dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a46f9e4ac09a..3e91747ed339 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,7 +488,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -498,12 +507,12 @@ static const struct drm_driver etnaviv_drm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
+ .show_fdinfo = etnaviv_show_fdinfo,
.ioctls = etnaviv_ioctls,
.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
.fops = &fops,
.name = "etnaviv",
.desc = "etnaviv DRM",
- .date = "20151214",
.major = 1,
.minor = 4,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 16473c371444..2f844e82bc46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
struct page **pages;
+ pgprot_t prot;
lockdep_assert_held(&obj->lock);
@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
if (IS_ERR(pages))
return NULL;
- return vmap(pages, obj->base.size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ switch (obj->flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_CACHED:
+ prot = PAGE_KERNEL;
+ break;
+ case ETNA_BO_UNCACHED:
+ prot = pgprot_noncached(PAGE_KERNEL);
+ break;
+ case ETNA_BO_WC:
+ default:
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
@@ -528,6 +540,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (etnaviv_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
@@ -541,6 +564,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.mmap = etnaviv_gem_mmap,
+ .status = etnaviv_gem_status,
.vm_ops = &vm_ops,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 687555aae807..e5ee82a0674c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -44,9 +44,7 @@ struct etnaviv_gem_object {
u32 flags;
struct list_head gem_node;
- struct etnaviv_gpu *gpu; /* non-null if active */
atomic_t gpu_active;
- u32 access;
struct page **pages;
struct sg_table *sgt;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 2d4c112ce033..cf0d9049bcf1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
@@ -172,6 +173,29 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
return 0;
}
+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ /*
+ * 32 core clock cycles (of the slowest clock) are required before
+ * deassertion; waiting 1 microsecond should cover all implementations
+ * without computing the exact delay.
+ */
+ usleep_range(1, 2);
+
+ ret = reset_control_deassert(gpu->rst);
+ if (ret)
+ return ret;
+
+ /*
+ * 128 core clock cycles (of the slowest clock) are required before any
+ * activity on AHB; waiting 1 microsecond should cover all implementations
+ * without computing the exact delay.
+ */
+ usleep_range(1, 2);
+
+ return 0;
+}
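
As a rule of thumb for the two delays above: a sleep of t microseconds covers N clock cycles whenever the slowest clock runs at f >= N/t MHz, so the two 1 us sleeps imply clocks of at least 32 MHz and 128 MHz respectively (an inference from the comments, not a documented hardware value):

    /* t = N / f  =>  1 us covers 32 cycles at f >= 32 MHz,
     *                128 cycles at f >= 128 MHz
     */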
+
static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
{
return gpu->identity.model == model &&
@@ -799,6 +823,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto pm_put;
}
+ ret = etnaviv_gpu_reset_deassert(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset deassert failed\n");
+ goto fail;
+ }
+
etnaviv_hw_identify(gpu);
if (gpu->identity.model == 0) {
@@ -1860,6 +1890,17 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
+
+ /* Get Reset: */
+ gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(gpu->rst))
+ return dev_err_probe(dev, PTR_ERR(gpu->rst),
+ "failed to get reset\n");
+
+ err = reset_control_assert(gpu->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to assert reset\n");
+
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 4d8a7d48ade3..5cb46c84e03a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -93,6 +93,7 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
+struct reset_control;
#define ETNA_NR_EVENTS 30
@@ -158,6 +159,7 @@ struct etnaviv_gpu {
struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
+ struct reset_control *rst;
unsigned int freq_scale;
unsigned int fe_waitcycles;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 7e065b3723cf..df5192083b20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
- if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
- iova, size, pgsize);
- return;
- }
-
while (unmapped < size) {
unmapped_page = context->global->ops->unmap(context, iova,
pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
size_t orig_size = size;
int ret = 0;
- if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
- iova, &paddr, size, pgsize);
- return -EINVAL;
- }
-
while (size) {
ret = context->global->ops->map(context, iova, paddr, pgsize,
prot);
@@ -82,11 +70,19 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
return -EINVAL;
for_each_sgtable_dma_sg(sgt, sg, i) {
- phys_addr_t pa = sg_dma_address(sg) - sg->offset;
- unsigned int da_len = sg_dma_len(sg) + sg->offset;
+ phys_addr_t pa = sg_dma_address(sg);
+ unsigned int da_len = sg_dma_len(sg);
unsigned int bytes = min_t(unsigned int, da_len, va_len);
- VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
+ VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1c44f85c5f54..f313ae7bc3a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -35,7 +35,6 @@
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
-#define DRIVER_DATE "20180330"
/*
* Interface history:
@@ -118,7 +117,6 @@ static const struct drm_driver exynos_drm_driver = {
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 466a9e514aa1..176fd8871759 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1648,7 +1648,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_connector *connector = &hdata->connector;
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
@@ -1658,7 +1660,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int hdmi_register_audio_device(struct hdmi_context *hdata)
@@ -1667,6 +1668,7 @@ static int hdmi_register_audio_device(struct hdmi_context *hdata)
.ops = &audio_codec_ops,
.max_i2s_channels = 6,
.i2s = 1,
+ .no_capture_mute = 1,
};
hdata->audio.pdev = platform_device_register_data(
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index be1ab673e49e..03b076db9381 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -18,8 +18,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -174,7 +174,6 @@ static const struct drm_driver fsl_dcu_drm_driver = {
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
- .date = "20160425",
.major = 1,
.minor = 1,
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 2c2b92324a2e..c418e8496bdf 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -6,6 +6,7 @@
*/
#include <linux/backlight.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7e76790c6a81..cba97d7db131 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c419ebbc49ec..85d3557c2eb9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,8 +19,8 @@
#include <acpi/video.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -513,7 +513,6 @@ static const struct drm_driver driver = {
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index de62cbfcdc72..7f77cb2b2751 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -26,7 +26,6 @@
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
-#define DRIVER_DATE "20140314"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 09ccdc1dc1a2..cb405771d6e2 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -13,9 +13,9 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -381,7 +381,6 @@ static const struct drm_driver gud_drm_driver = {
.name = "gud",
.desc = "Generic USB Display",
- .date = "20200422",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 80253d39664a..98d77d74999d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on DRM && PCI
depends on MMU
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index d25c75e60d3d..95a4ed599d98 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
new file mode 100644
index 000000000000..0a903cce1fa9
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
+#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
+#define HIBMC_AUX_CMD_I2C_ADDR_ONLY BIT(28)
+#define HIBMC_BYTES_IN_U32 4
+#define HIBMC_AUX_I2C_WRITE_SUCCESS 0x1
+#define HIBMC_DP_MIN_PULSE_NUM 0x9
+#define BITS_IN_U8 8
+
+static inline void hibmc_dp_aux_reset(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x0);
+ usleep_range(10, 15);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x1);
+}
+
+static void hibmc_dp_aux_read_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes read from a single register */
+ num = min(size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = readl(dp->base + HIBMC_DP_AUX_RD_DATA0 + i * HIBMC_BYTES_IN_U32);
+ /* unpack the 32-bit register value into the byte buffer */
+ for (j = 0; j < num; j++)
+ buf[i * HIBMC_BYTES_IN_U32 + j] = value >> (j * BITS_IN_U8);
+ }
+}
+
+static void hibmc_dp_aux_write_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes written to a single register */
+ num = min_t(u8, size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = 0;
+ /* pack the buffer bytes into a 32-bit register value */
+ for (j = 0; j < num; j++)
+ value |= buf[i * HIBMC_BYTES_IN_U32 + j] << (j * BITS_IN_U8);
+ /* write the packed value to the register */
+ writel(value, dp->base + HIBMC_DP_AUX_WR_DATA0 + i * HIBMC_BYTES_IN_U32);
+ }
+}
+
+static u32 hibmc_dp_aux_build_cmd(const struct drm_dp_aux_msg *msg)
+{
+ u32 aux_cmd = msg->request;
+
+ if (msg->size)
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_REQ_LEN, (msg->size - 1));
+ else
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_I2C_ADDR_ONLY, 1);
+
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_ADDR, msg->address);
+
+ return aux_cmd;
+}
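+
+/*
+ * Worked example (illustrative): a one-byte native read of DPCD address
+ * 0x202 (DP_LANE0_1_STATUS) builds aux_cmd = 0x9 (DP_AUX_NATIVE_READ) |
+ * FIELD_PREP(HIBMC_AUX_CMD_ADDR, 0x202) = 0x20209, as REQ_LEN is size - 1 = 0.
+ */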
+
+/* ret >= 0: ret is the transfer size; ret < 0: ret is an error code */
+static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_msg *msg)
+{
+ u32 buf_data_cnt;
+ u32 aux_status;
+
+ aux_status = readl(dp->base + HIBMC_DP_AUX_STATUS);
+ msg->reply = FIELD_GET(HIBMC_DP_CFG_AUX_STATUS, aux_status);
+
+ if (aux_status & HIBMC_DP_CFG_AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ /* address-only transaction */
+ if (!msg->size)
+ return 0;
+
+ if (msg->reply != DP_AUX_NATIVE_REPLY_ACK)
+ return -EIO;
+
+ buf_data_cnt = FIELD_GET(HIBMC_DP_CFG_AUX_READY_DATA_BYTE, aux_status);
+
+ switch (msg->request) {
+ case DP_AUX_NATIVE_WRITE:
+ return msg->size;
+ case DP_AUX_I2C_WRITE | DP_AUX_I2C_MOT:
+ if (buf_data_cnt == HIBMC_AUX_I2C_WRITE_SUCCESS)
+ return msg->size;
+ else
+ return FIELD_GET(HIBMC_DP_CFG_AUX, aux_status);
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
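+ /* the ready-byte count appears to include the reply byte, hence the decrement */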
+ buf_data_cnt--;
+ if (buf_data_cnt != msg->size) {
+ /* only part of the data was read successfully */
+ return -EBUSY;
+ }
+
+ /* all data was read successfully */
+ hibmc_dp_aux_read_data(dp, msg->buffer, msg->size);
+ return msg->size;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* ret >= 0: ret is the transfer size; ret < 0: ret is an error code */
+static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ u32 aux_cmd;
+ int ret;
+ u32 val; /* assigned by readl_poll_timeout() */
+
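+ /* clear the write-data registers before loading the new payload */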
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA0);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA1);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA2);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA3);
+
+ hibmc_dp_aux_write_data(dp, msg->buffer, msg->size);
+
+ aux_cmd = hibmc_dp_aux_build_cmd(msg);
+ writel(aux_cmd, dp->base + HIBMC_DP_AUX_CMD_ADDR);
+
+ /* enable aux transfer */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_REQ, 0x1);
+ ret = readl_poll_timeout(dp->base + HIBMC_DP_AUX_REQ, val,
+ !(val & HIBMC_DP_CFG_AUX_REQ), 50, 5000);
+ if (ret) {
+ hibmc_dp_aux_reset(dp);
+ return ret;
+ }
+
+ return hibmc_dp_aux_parse_xfer(dp, msg);
+}
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ HIBMC_DP_MIN_PULSE_NUM);
+
+ dp->aux.transfer = hibmc_dp_aux_xfer;
+ dp->aux.is_remote = 0;
+ drm_dp_aux_init(&dp->aux);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
new file mode 100644
index 000000000000..2c52a4476c4d
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_COMM_H
+#define DP_COMM_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <drm/display/drm_dp_helper.h>
+
+#define HIBMC_DP_LANE_NUM_MAX 2
+
+struct hibmc_link_status {
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct hibmc_link_cap {
+ u8 link_rate;
+ u8 lanes;
+};
+
+struct hibmc_dp_link {
+ struct hibmc_link_status status;
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX];
+ struct hibmc_link_cap cap;
+};
+
+struct hibmc_dp_dev {
+ struct drm_dp_aux aux;
+ struct drm_device *dev;
+ void __iomem *base;
+ struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
+ struct hibmc_dp_link link;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+};
+
+#define dp_field_modify(reg_value, mask, val) \
+ do { \
+ (reg_value) &= ~(mask); \
+ (reg_value) |= FIELD_PREP(mask, val); \
+ } while (0)
+
+#define hibmc_dp_reg_write_field(dp, offset, mask, val) \
+ do { \
+ typeof(dp) _dp = dp; \
+ typeof(_dp->base) addr = (_dp->base + (offset)); \
+ mutex_lock(&_dp->lock); \
+ u32 reg_value = readl(addr); \
+ dp_field_modify(reg_value, mask, val); \
+ writel(reg_value, addr); \
+ mutex_unlock(&_dp->lock); \
+ } while (0)
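+
+/*
+ * Usage example: hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ,
+ * HIBMC_DP_CFG_AUX_REQ, 0x1) performs a locked read-modify-write that
+ * sets only the request bit and preserves the remaining fields.
+ */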
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
new file mode 100644
index 000000000000..74dd9956144e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_CONFIG_H
+#define DP_CONFIG_H
+
+#define HIBMC_DP_BPP 24
+#define HIBMC_DP_SYMBOL_PER_FCLK 4
+#define HIBMC_DP_MSA1 0x20
+#define HIBMC_DP_MSA2 0x845c00
+#define HIBMC_DP_OFFSET 0x1e0000
+#define HIBMC_DP_HDCP 0x2
+#define HIBMC_DP_INT_RST 0xffff
+#define HIBMC_DP_DPTX_RST 0x3ff
+#define HIBMC_DP_CLK_EN 0x7
+#define HIBMC_DP_SYNC_EN_MASK 0x3
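+/* DPCD link-rate code x 27 gives the link symbol clock in MHz (e.g. 0x0a -> 270) */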
+#define HIBMC_DP_LINK_RATE_CAL 27
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
new file mode 100644
index 000000000000..a8d543881c09
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "dp_config.h"
+#include "dp_comm.h"
+#include "dp_reg.h"
+#include "dp_hw.h"
+
+static void hibmc_dp_set_tu(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 tu_symbol_frac_size;
+ u32 tu_symbol_size;
+ u32 rate_ks;
+ u8 lane_num;
+ u32 value;
+ u32 bpp;
+
+ lane_num = dp->link.cap.lanes;
+ if (lane_num == 0) {
+ drm_err(dp->dev, "set tu failed, lane num cannot be 0!\n");
+ return;
+ }
+
+ bpp = HIBMC_DP_BPP;
+ rate_ks = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+ value = (mode->clock * bpp * 5) / (61 * lane_num * rate_ks);
+
+ if (value % 10 == 9) { /* a fractional digit of 9 carries into the integer part */
+ tu_symbol_size = value / 10 + 1;
+ tu_symbol_frac_size = 0;
+ } else {
+ tu_symbol_size = value / 10;
+ tu_symbol_frac_size = value % 10 + 1;
+ }
+
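+ /* Worked example (illustrative): 1024x768@60 (clock = 65000 kHz) on two
+ * lanes at 2.7 Gbit/s (rate_ks = 270) gives value = 236, hence
+ * tu_symbol_size = 23 and tu_symbol_frac_size = 7.
+ */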
+ drm_dbg_dp(dp->dev, "tu value: %u.%u value: %u\n",
+ tu_symbol_size, tu_symbol_frac_size, value);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE, tu_symbol_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE, tu_symbol_frac_size);
+}
+
+static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 hblank_size;
+ u32 htotal_size;
+ u32 htotal_int;
+ u32 hblank_int;
+ u32 fclk; /* flink_clock */
+
+ fclk = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+
+ /* Spread spectrum may cause the value to deviate slightly;
+ * the coefficient (0.9947) compensates for that deviation.
+ */
+ htotal_int = mode->htotal * 9947 / 10000;
+ htotal_size = htotal_int * fclk / (HIBMC_DP_SYMBOL_PER_FCLK * (mode->clock / 1000));
+
+ hblank_int = mode->htotal - mode->hdisplay - mode->hdisplay * 53 / 10000;
+ hblank_size = hblank_int * fclk * 9947 /
+ (mode->clock * 10 * HIBMC_DP_SYMBOL_PER_FCLK);
+
+ drm_dbg_dp(dp->dev, "h_active %u v_active %u htotal_size %u hblank_size %u",
+ mode->hdisplay, mode->vdisplay, htotal_size, hblank_size);
+ drm_dbg_dp(dp->dev, "flink_clock %u pixel_clock %d", fclk, mode->clock / 1000);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+}
+
+static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 timing_delay;
+ u32 vblank;
+ u32 hstart;
+ u32 vstart;
+
+ vblank = mode->vtotal - mode->vdisplay;
+ timing_delay = mode->htotal - mode->hsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+ vstart = mode->vtotal - mode->vsync_start;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HACTIVE, mode->hdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG3,
+ HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HACTIVE, mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG2,
+ HIBMC_DP_CFG_STREAM_HSYNC_WIDTH,
+ mode->hsync_end - mode->hsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VSYNC_WIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_VSTART, vstart);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_HSTART, hstart);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_HSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0);
+
+ /* MSA misc 0 and 1 */
+ writel(HIBMC_DP_MSA1, dp->base + HIBMC_DP_VIDEO_MSA1);
+ writel(HIBMC_DP_MSA2, dp->base + HIBMC_DP_VIDEO_MSA2);
+
+ hibmc_dp_set_tu(dp, mode);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_RGB_ENABLE, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VIDEO_MAPPING, 0);
+
+ /* round timing_delay up to an even value */
+ if (timing_delay % 2)
+ timing_delay++;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_MODEL_CTRL,
+ HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1, timing_delay);
+
+ hibmc_dp_set_sst(dp, mode);
+}
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp)
+{
+ struct drm_device *drm_dev = dp->drm_dev;
+ struct hibmc_dp_dev *dp_dev;
+
+ dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
+ if (!dp_dev)
+ return -ENOMEM;
+
+ mutex_init(&dp_dev->lock);
+
+ dp->dp_dev = dp_dev;
+
+ dp_dev->dev = drm_dev;
+ dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
+
+ hibmc_dp_aux_init(dp_dev);
+
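+ /* initial link capabilities: 2 lanes at 2.7 Gbit/s (DP_LINK_BW_2_7) */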
+ dp_dev->link.cap.lanes = 0x2;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+
+ /* hdcp data */
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ /* interrupt init: disable all and clear stale status */
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ /* deassert the DPTX resets */
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ /* clock enable */
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+
+ return 0;
+}
+
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ if (enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ } else {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ }
+
+ msleep(50);
+}
+
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ int ret;
+
+ if (!dp_dev->link.status.channel_equalized) {
+ ret = hibmc_dp_link_training(dp_dev);
+ if (ret) {
+ drm_err(dp->drm_dev, "dp link training failed, ret: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hibmc_dp_display_en(dp, false);
+ hibmc_dp_link_cfg(dp_dev, mode);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
new file mode 100644
index 000000000000..4dc13b3d9875
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_KAPI_H
+#define DP_KAPI_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_print.h>
+
+struct hibmc_dp_dev;
+
+struct hibmc_dp {
+ struct hibmc_dp_dev *dp_dev;
+ struct drm_device *drm_dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ void __iomem *mmio;
+};
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp);
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
new file mode 100644
index 000000000000..f6355c16cc0a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_EQ_MAX_RETRY 5
+
+static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
+{
+ u8 buf[2];
+ int ret;
+
+ /* configure lane enables and PHY lane count (1 or 2 lanes) */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_LANE_DATA_EN,
+ dp->link.cap.lanes == 0x2 ? 0x3 : 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_GCTL0, HIBMC_DP_CFG_PHY_LANE_NUM,
+ dp->link.cap.lanes == 0x2 ? 0x1 : 0);
+
+ /* enhanced frame */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_FRAME_MODE, 0x1);
+
+ /* set rate and lane count */
+ buf[0] = dp->link.cap.link_rate;
+ buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ /* set 8b/10b and downspread */
+ buf[0] = DP_SPREAD_AMP_0_5;
+ buf[1] = DP_SET_ANSI_8B10B;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ return ret;
+}
+
+static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
+{
+ int ret;
+ u8 val;
+ u8 buf;
+
+ buf = (u8)pattern;
+ if (pattern != DP_TRAINING_PATTERN_DISABLE && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0x1);
+ } else {
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0);
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ val = 0;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ val = 1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ val = 2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ val = 3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ val = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
+{
+ u8 *train_set = dp->link.train_set;
+ int ret;
+ u8 i;
+
+ ret = hibmc_dp_link_training_configure(dp);
+ if (ret)
+ return ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dp->link.cap.lanes; i++)
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
+ u8 lane_status[DP_LINK_STATUS_SIZE])
+{
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX] = {0};
+ u8 lane;
+
+ for (lane = 0; lane < dp->link.cap.lanes; lane++)
+ train_set[lane] = drm_dp_get_adjust_request_voltage(lane_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(lane_status, lane);
+
+ if (memcmp(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX)) {
+ memcpy(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX);
+ return true;
+ }
+
+ return false;
+}
+
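+/* Fall back one link-rate step (8.1 -> 5.4 -> 2.7 -> 1.62 Gbit/s); returns
+ * -EINVAL once the minimum rate has been reached.
+ */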
+static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_2_7:
+ dp->link.cap.link_rate = DP_LINK_BW_1_62;
+ return 0;
+ case DP_LINK_BW_5_4:
+ dp->link.cap.link_rate = DP_LINK_BW_2_7;
+ return 0;
+ case DP_LINK_BW_8_1:
+ dp->link.cap.link_rate = DP_LINK_BW_5_4;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.lanes) {
+ case 0x2:
+ dp->link.cap.lanes--;
+ break;
+ case 0x1:
+ drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ bool level_changed;
+ u32 voltage_tries;
+ u32 cr_tries;
+ int ret;
+
+ /*
+ * The DP 1.4 spec defines 10 as the maximum number of tries; for pre-1.4
+ * devices, use a limit of 80 (4 voltage levels x 4 pre-emphasis levels x
+ * 5 identical voltage retries).
+ */
+
+ voltage_tries = 1;
+ for (cr_tries = 0; cr_tries < 80; cr_tries++) {
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "Get lane status failed\n");
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "dp link training cr done\n");
+ dp->link.status.clock_recovered = true;
+ return 0;
+ }
+
+ if (voltage_tries == 5) {
+ drm_dbg_dp(dp->dev, "same voltage tries 5 times\n");
+ dp->link.status.clock_recovered = false;
+ return 0;
+ }
+
+ level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+ dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ voltage_tries = level_changed ? 1 : voltage_tries + 1;
+ }
+
+ drm_err(dp->dev, "dp link training clock recovery 80 times failed\n");
+ dp->link.status.clock_recovered = false;
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ u8 eq_tries;
+ int ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_2);
+ if (ret)
+ return ret;
+
+ for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
+ drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "get lane status failed\n");
+ break;
+ }
+
+ if (!drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "clock recovery check failed\n");
+ drm_dbg_dp(dp->dev, "cannot continue channel equalization\n");
+ dp->link.status.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(lane_status, dp->link.cap.lanes)) {
+ dp->link.status.channel_equalized = true;
+ drm_dbg_dp(dp->dev, "dp link training eq done\n");
+ break;
+ }
+
+ hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->link.train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ ret = (ret >= 0) ? -EIO : ret;
+ break;
+ }
+ }
+
+ if (eq_tries == HIBMC_EQ_MAX_RETRY)
+ drm_err(dp->dev, "channel equalization failed %u times\n", eq_tries);
+
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int hibmc_dp_link_downgrade_training_cr(struct hibmc_dp_dev *dp)
+{
+ if (hibmc_dp_link_reduce_rate(dp))
+ return hibmc_dp_link_reduce_lane(dp);
+
+ return 0;
+}
+
+static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
+{
+ if (dp->link.status.clock_recovered && !dp->link.status.channel_equalized) {
+ if (!hibmc_dp_link_reduce_lane(dp))
+ return 0;
+ }
+
+ return hibmc_dp_link_reduce_rate(dp);
+}
+
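+/*
+ * Overall flow: run clock recovery, then channel equalization; on failure,
+ * downgrade the link rate or lane count and retry until training succeeds
+ * or no further downgrade is possible.
+ */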
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
+{
+ struct hibmc_dp_link *link = &dp->link;
+ int ret;
+
+ while (true) {
+ ret = hibmc_dp_link_training_cr_pre(dp);
+ if (ret)
+ goto err;
+
+ ret = hibmc_dp_link_training_cr(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.clock_recovered) {
+ ret = hibmc_dp_link_downgrade_training_cr(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ ret = hibmc_dp_link_training_channel_eq(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.channel_equalized) {
+ ret = hibmc_dp_link_downgrade_training_eq(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ return 0;
+ }
+
+err:
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
new file mode 100644
index 000000000000..4a515c726d52
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_REG_H
+#define DP_REG_H
+
+#define HIBMC_DP_AUX_CMD_ADDR 0x50
+#define HIBMC_DP_AUX_WR_DATA0 0x54
+#define HIBMC_DP_AUX_WR_DATA1 0x58
+#define HIBMC_DP_AUX_WR_DATA2 0x5c
+#define HIBMC_DP_AUX_WR_DATA3 0x60
+#define HIBMC_DP_AUX_RD_DATA0 0x64
+#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_VIDEO_MSA1 0x11c
+#define HIBMC_DP_VIDEO_MSA2 0x120
+#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0x124
+#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_HDCP_CFG 0x600
+#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_INTR_ENABLE 0x720
+#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
+
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
new file mode 100644
index 000000000000..603d6b198a54
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+#include "dp/dp_hw.h"
+
+static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768); /* temporary implementation */
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
+ .get_modes = hibmc_dp_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ int ret;
+
+ hibmc_dp_display_en(dp, false);
+
+ ret = hibmc_dp_mode_set(dp, mode);
+ if (ret)
+ drm_err(dp->drm_dev, "hibmc dp mode set failed: %d\n", ret);
+
+ return ret;
+}
+
+static void hibmc_dp_encoder_enable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+ struct drm_display_mode *mode = &drm_encoder->crtc->state->mode;
+
+ if (hibmc_dp_prepare(dp, mode))
+ return;
+
+ hibmc_dp_display_en(dp, true);
+}
+
+static void hibmc_dp_encoder_disable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+
+ hibmc_dp_display_en(dp, false);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
+ .atomic_enable = hibmc_dp_encoder_enable,
+ .atomic_disable = hibmc_dp_encoder_disable,
+};
+
+int hibmc_dp_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = &priv->dev;
+ struct drm_crtc *crtc = &priv->crtc;
+ struct hibmc_dp *dp = &priv->dp;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_encoder *encoder = &dp->encoder;
+ int ret;
+
+ dp->mmio = priv->mmio;
+ dp->drm_dev = dev;
+
+ ret = hibmc_dp_hw_init(&priv->dp);
+ if (ret) {
+ drm_err(dev, "hibmc dp hw init failed: %d\n", ret);
+ return ret;
+ }
+
+ hibmc_dp_display_en(&priv->dp, false);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ drm_err(dev, "init dp encoder failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
+
+ ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ drm_err(dev, "init dp connector failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &hibmc_dp_conn_helper_funcs);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 8c488c98ac97..e6de6d5edf6b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -15,8 +15,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -28,6 +28,10 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
+#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+
DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_interrupt(int irq, void *arg)
@@ -57,7 +61,6 @@ static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",
- .date = "20160828",
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
@@ -118,6 +121,14 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+ /* if a DP port is present, initialize it */
+ if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
+ HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ ret = hibmc_dp_init(priv);
+ if (ret)
+ drm_err(dev, "failed to init dp: %d\n", ret);
+ }
+
ret = hibmc_vdac_init(priv);
if (ret) {
drm_err(dev, "failed to init vdac: %d\n", ret);
@@ -328,6 +339,8 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_return;
}
+ pci_set_master(pdev);
+
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 6b566f3aeecb..d982f1e4b958 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -20,9 +20,12 @@
#include <drm/drm_framebuffer.h>
-struct hibmc_connector {
- struct drm_connector base;
+#include "dp/dp_hw.h"
+struct hibmc_vdac {
+ struct drm_device *dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
struct i2c_adapter adapter;
struct i2c_algo_bit_data bit_data;
};
@@ -35,13 +38,13 @@ struct hibmc_drm_private {
struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct hibmc_connector connector;
+ struct hibmc_vdac vdac;
+ struct hibmc_dp dp;
};
-static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
{
- return container_of(connector, struct hibmc_connector, base);
+ return container_of(connector, struct hibmc_vdac, connector);
}
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
@@ -57,6 +60,8 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac);
+
+int hibmc_dp_init(struct hibmc_drm_private *priv);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index e6e48651c15c..99b3b77b5445 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -25,8 +25,8 @@
static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if (value) {
@@ -45,8 +45,8 @@ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
static int hibmc_get_i2c_signal(void *data, u32 mask)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if ((tmp_dir & mask) != mask) {
@@ -77,22 +77,21 @@ static int hibmc_ddc_getscl(void *data)
return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
}
-int hibmc_ddc_create(struct drm_device *drm_dev,
- struct hibmc_connector *connector)
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
{
- connector->adapter.owner = THIS_MODULE;
- snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = drm_dev->dev;
- i2c_set_adapdata(&connector->adapter, connector);
- connector->adapter.algo_data = &connector->bit_data;
-
- connector->bit_data.udelay = 20;
- connector->bit_data.timeout = usecs_to_jiffies(2000);
- connector->bit_data.data = connector;
- connector->bit_data.setsda = hibmc_ddc_setsda;
- connector->bit_data.setscl = hibmc_ddc_setscl;
- connector->bit_data.getsda = hibmc_ddc_getsda;
- connector->bit_data.getscl = hibmc_ddc_getscl;
-
- return i2c_bit_add_bus(&connector->adapter);
+ vdac->adapter.owner = THIS_MODULE;
+ snprintf(vdac->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ vdac->adapter.dev.parent = drm_dev->dev;
+ i2c_set_adapdata(&vdac->adapter, vdac);
+ vdac->adapter.algo_data = &vdac->bit_data;
+
+ vdac->bit_data.udelay = 20;
+ vdac->bit_data.timeout = usecs_to_jiffies(2000);
+ vdac->bit_data.data = vdac;
+ vdac->bit_data.setsda = hibmc_ddc_setsda;
+ vdac->bit_data.setscl = hibmc_ddc_setscl;
+ vdac->bit_data.getsda = hibmc_ddc_getsda;
+ vdac->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&vdac->adapter);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 409c551c92af..05e19ea4c9f9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -24,11 +24,11 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
const struct drm_edid *drm_edid;
int count;
- drm_edid = drm_edid_read_ddc(connector, &hibmc_connector->adapter);
+ drm_edid = drm_edid_read_ddc(connector, &vdac->adapter);
drm_edid_connector_update(connector, drm_edid);
@@ -51,9 +51,9 @@ out:
static void hibmc_connector_destroy(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&hibmc_connector->adapter);
+ i2c_del_adapter(&vdac->adapter);
drm_connector_cleanup(connector);
}
@@ -93,20 +93,20 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
- struct hibmc_connector *hibmc_connector = &priv->connector;
- struct drm_encoder *encoder = &priv->encoder;
+ struct hibmc_vdac *vdac = &priv->vdac;
+ struct drm_encoder *encoder = &vdac->encoder;
struct drm_crtc *crtc = &priv->crtc;
- struct drm_connector *connector = &hibmc_connector->base;
+ struct drm_connector *connector = &vdac->connector;
int ret;
- ret = hibmc_ddc_create(dev, hibmc_connector);
+ ret = hibmc_ddc_create(dev, vdac);
if (ret) {
drm_err(dev, "failed to create ddc: %d\n", ret);
return ret;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
@@ -117,7 +117,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drm_connector_init_with_ddc(dev, connector,
&hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
- &hibmc_connector->adapter);
+ &vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 5616c3917c03..2eb49177ac42 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -929,7 +929,6 @@ static const struct drm_driver ade_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
- .date = "20150718",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index b3ab944652a6..1e1c87be1204 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -17,8 +17,8 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index e0953777a206..0d49f168a919 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -9,8 +9,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -20,7 +20,6 @@
#define DRIVER_NAME "hyperv_drm"
#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
-#define DRIVER_DATE "2020"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -31,7 +30,6 @@ static struct drm_driver hyperv_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
@@ -156,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
return 0;
err_free_mmio:
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
@@ -174,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 131512a5f3bd..fcb0fcd6c897 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -486,7 +486,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id ch7006_ids[] = {
- { "ch7006", 0 },
+ { "ch7006" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index ff23422727fc..c17afa025d9d 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -413,7 +413,7 @@ sil164_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id sil164_ids[] = {
- { "sil164", 0 },
+ { "sil164" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 82d618c40dce..cbff851e0c85 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -486,8 +486,8 @@ static void tda9950_remove(struct i2c_client *client)
}
static struct i2c_device_id tda9950_ids[] = {
- { "tda9950", 0 },
- { },
+ { "tda9950" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 2160f05bbd16..82d4a4e206a5 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1165,7 +1165,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = tda998x_audio_shutdown,
.mute_stream = tda998x_audio_mute_stream,
.get_eld = tda998x_audio_get_eld,
- .no_capture_mute = 1,
};
static int tda998x_audio_codec_init(struct tda998x_priv *priv,
@@ -1176,6 +1175,7 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
.max_i2s_channels = 2,
.no_i2s_capture = 1,
.no_spdif_capture = 1,
+ .no_capture_mute = 1,
};
if (priv->audio_port_enable[AUDIO_ROUTE_I2S])
@@ -2094,7 +2094,7 @@ MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
static const struct i2c_device_id tda998x_ids[] = {
- { "tda998x", 0 },
+ { "tda998x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 31710d98cad5..3dda9f0eda82 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -30,11 +30,11 @@ i915-y += \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
- i915_suspend.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_clock_gating.o \
+ intel_cpu_info.o \
intel_device_info.o \
intel_memory_region.o \
intel_pcode.o \
@@ -43,6 +43,7 @@ i915-y += \
intel_sbi.o \
intel_step.o \
intel_uncore.o \
+ intel_uncore_trace.o \
intel_wakeref.o \
vlv_sideband.o \
vlv_suspend.o
@@ -220,6 +221,7 @@ i915-$(CONFIG_HWMON) += \
i915-y += \
display/hsw_ips.o \
display/i9xx_plane.o \
+ display/i9xx_display_sr.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
@@ -236,6 +238,7 @@ i915-y += \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
+ display/intel_display_conversion.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
display/intel_display_params.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 9d47f8a93e94..686393dfbbf5 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,7 +26,6 @@
*
*/
-#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 4fbec065d53e..56353377466c 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_audio.h"
#include "intel_backlight.h"
@@ -55,8 +56,8 @@ const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
}
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
@@ -1223,6 +1224,25 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(display, DEISR) & bit;
}
+static int g4x_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int ret;
+
+ if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ crtc_state->has_pch_encoder = true;
+
+ ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ g4x_dp_set_clock(encoder, crtc_state);
+
+ return 0;
+}
+
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
{
/*
@@ -1307,7 +1327,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder_link_check_init(intel_encoder, intel_dp_link_check);
intel_encoder->hotplug = intel_dp_hotplug;
- intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->compute_config = g4x_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->sync_state = intel_dp_sync_state;
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
index c75e64ae79b7..839a251dc069 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.h
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -19,8 +19,6 @@ struct intel_encoder;
#ifdef I915
const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
@@ -31,10 +29,6 @@ static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
{
return NULL;
}
-static inline void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
-}
static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, int port,
enum pipe *pipe)
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index d1a7d0d57c6b..98e6a931042f 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -6,6 +6,7 @@
*/
#include "g4x_hdmi.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -682,7 +683,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
"Platform does not support HDMI %c\n", port_name(port));
}
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
struct intel_display *display = &dev_priv->display;
@@ -692,10 +693,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
struct intel_connector *intel_connector;
if (!assert_port_valid(dev_priv, port))
- return;
+ return false;
if (!assert_hdmi_port_valid(dev_priv, port))
- return;
+ return false;
devdata = intel_bios_encoder_data_lookup(display, port);
@@ -706,15 +707,13 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
- return;
+ return false;
dig_port->aux_ch = AUX_CH_NONE;
intel_connector = intel_connector_alloc();
- if (!intel_connector) {
- kfree(dig_port);
- return;
- }
+ if (!intel_connector)
+ goto err_connector_alloc;
intel_encoder = &dig_port->base;
@@ -722,9 +721,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "HDMI %c", port_name(port));
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port)))
+ goto err_encoder_init;
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
@@ -787,5 +787,17 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- intel_hdmi_init_connector(dig_port, intel_connector);
+ if (!intel_hdmi_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(&intel_encoder->base);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+
+ return false;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index 817f55c7a3a1..a52e8986ec7a 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -16,14 +16,15 @@ struct drm_connector;
struct drm_i915_private;
#ifdef I915
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port);
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state);
#else
-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, int port)
{
+ return false;
}
static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 34c5d28fc866..d02c328bf902 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -185,10 +185,12 @@ void hsw_ips_post_update(struct intel_atomic_state *state,
/* IPS only exists on ULT machines and is tied to pipe A. */
bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+ struct intel_display *display = to_intel_display(crtc);
+
+ return HAS_IPS(display) && crtc->pipe == PIPE_A;
}
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -218,6 +220,20 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
return true;
}
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!IS_BROADWELL(i915))
+ return 0;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+}
+
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 35364228e1c1..7af12f88a8ce 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -19,7 +19,7 @@ bool hsw_ips_pre_update(struct intel_atomic_state *state,
void hsw_ips_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
@@ -42,9 +42,9 @@ static inline bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
return false;
}
-static inline bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static inline int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- return false;
+ return 0;
}
static inline int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
new file mode 100644
index 000000000000..32abe9743014
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+
+#include "i915_reg.h"
+#include "i9xx_display_sr.h"
+#include "i9xx_wm_regs.h"
+#include "intel_de.h"
+#include "intel_gmbus.h"
+#include "intel_pci_config.h"
+
+static void i9xx_display_save_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ }
+}
+
+static void i9xx_display_restore_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ }
+}
+
+void i9xx_display_sr_save(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ /* Display arbitration control */
+ if (DISPLAY_VER(display) <= 4)
+ display->restore.saveDSPARB = intel_de_read(display, DSPARB(display));
+
+ if (DISPLAY_VER(display) == 4)
+ pci_read_config_word(pdev, GCDGMBUS, &display->restore.saveGCDGMBUS);
+
+ i9xx_display_save_swf(display);
+}
+
+void i9xx_display_sr_restore(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ i9xx_display_restore_swf(display);
+
+ if (DISPLAY_VER(display) == 4)
+ pci_write_config_word(pdev, GCDGMBUS, display->restore.saveGCDGMBUS);
+
+ /* Display arbitration */
+ if (DISPLAY_VER(display) <= 4)
+ intel_de_write(display, DSPARB(display), display->restore.saveDSPARB);
+}
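The save/restore pair above is meant to bracket a suspend/resume cycle, with the restore side undoing the save steps in reverse order (SWF scratch first, then GCDGMBUS, then DSPARB). A minimal sketch of the expected calling pattern, with hypothetical hook names:

/* Hypothetical call sites; the real hooks live in the display
 * suspend/resume paths elsewhere in the driver. */
static void example_display_suspend(struct intel_display *display)
{
	i9xx_display_sr_save(display);
}

static void example_display_resume(struct intel_display *display)
{
	i9xx_display_sr_restore(display);
}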
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.h b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
new file mode 100644
index 000000000000..39b8c18fe738
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __I9XX_DISPLAY_SR_H__
+#define __I9XX_DISPLAY_SR_H__
+
+struct intel_display;
+
+void i9xx_display_sr_save(struct intel_display *display);
+void i9xx_display_sr_restore(struct intel_display *display);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 17a1e3801a85..48e657a80a16 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index e3b13886177a..db78c1e6b0a3 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
+#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm_regs.h b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
new file mode 100644
index 000000000000..d68d22235cf2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __I9XX_WM_REGS_H__
+#define __I9XX_WM_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
+#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
+#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
+
+/* pnv/gen4/g4x/vlv/chv */
+#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff << 23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f << 16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
+#define DSPFW_PLANEA_SHIFT 0
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
+#define DSPFW_FBC_SR_SHIFT 28
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
+#define DSPFW_FBC_HPLL_SR_SHIFT 24
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
+#define DSPFW_SPRITEB_SHIFT (16)
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_CURSORA_MASK (0x3f << 8)
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
+#define DSPFW_SPRITEA_SHIFT 0
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
+#define DSPFW_HPLL_SR_SHIFT 0
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
+
+/* vlv/chv */
+#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW_SPRITEB_WM1_SHIFT 16
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORA_WM1_SHIFT 8
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
+#define DSPFW_SPRITEA_WM1_SHIFT 0
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
+#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW_PLANEB_WM1_SHIFT 24
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEA_WM1_SHIFT 16
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORB_WM1_SHIFT 8
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSOR_SR_WM1_SHIFT 0
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
+#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW_SR_WM1_SHIFT 0
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
+#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW_SPRITED_WM1_SHIFT 24
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITED_SHIFT 16
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEC_WM1_SHIFT 8
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEC_SHIFT 0
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
+#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW_SPRITEF_WM1_SHIFT 24
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITEF_SHIFT 16
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEE_WM1_SHIFT 8
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEE_SHIFT 0
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW_PLANEC_WM1_SHIFT 24
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEC_SHIFT 16
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
+#define DSPFW_CURSORC_WM1_SHIFT 8
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSORC_SHIFT 0
+#define DSPFW_CURSORC_MASK (0x3f << 0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
+#define DSPFW_SR_HI_SHIFT 24
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_HI_SHIFT 23
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_HI_SHIFT 22
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_HI_SHIFT 21
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_HI_SHIFT 20
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_HI_SHIFT 16
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_HI_SHIFT 12
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_HI_SHIFT 8
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_HI_SHIFT 4
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_HI_SHIFT 0
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
+#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
+#define DSPFW_SR_WM1_HI_SHIFT 24
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_WM1_HI_SHIFT 21
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_WM1_HI_SHIFT 20
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_WM1_HI_SHIFT 12
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_WM1_HI_SHIFT 0
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
+
+/* drain latency register values */
+#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_SHIFT 24
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
+#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
+#define DRAIN_LATENCY_MASK 0x7f
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define VALLEYVIEW_FIFO_SIZE 255
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define VALLEYVIEW_MAX_WM 0xff
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64 byte units */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the watermark registers on Ironlake */
+#define _WM0_PIPEA_ILK 0x45100
+#define _WM0_PIPEB_ILK 0x45104
+#define _WM0_PIPEC_IVB 0x45200
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
+#define WM1_LP_ILK _MMIO(0x45108)
+#define WM2_LP_ILK _MMIO(0x4510c)
+#define WM3_LP_ILK _MMIO(0x45110)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
+#define WM1S_LP_ILK _MMIO(0x45120)
+#define WM2S_LP_IVB _MMIO(0x45124)
+#define WM3S_LP_IVB _MMIO(0x45128)
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
+
+#define WM_MISC _MMIO(0x45260)
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG _MMIO(0x45280)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
+
+#endif /* __I9XX_WM_REGS_H__ */
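A quick illustration of the REG_GENMASK/REG_FIELD_PREP pattern this header uses; the values are arbitrary and REG_FIELD_GET is the driver's standard decode counterpart:

/* Pack per-plane watermarks into one WM0 value, then decode one back. */
u32 val = WM0_PIPE_PRIMARY(0x40) |
	  WM0_PIPE_SPRITE(0x20) |
	  WM0_PIPE_CURSOR(0x10);
u32 primary = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, val); /* == 0x40 */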
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8a49f499e3fb..82bf6c654de2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -31,6 +31,7 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
@@ -808,8 +809,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select data lane width */
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, dsi_trans));
- tmp &= ~DDI_PORT_WIDTH_MASK;
- tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+ tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
+ tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
@@ -1602,7 +1603,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: split only when necessary */
if (crtc_state->dsc.slice_count > 1)
- crtc_state->dsc.dsc_split = true;
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
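The bool-to-count conversion above is more than a rename; a hedged reading of the new field:

/* dsc.num_streams == 2 replaces dsc_split == true: the pixel stream is
 * split across two VDSC engines, while 1 means a single engine. A count
 * rather than a bool leaves room for more engines on newer hardware
 * (inferred from the change, not stated in the patch). */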
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index c3b29a331d72..bbf8c5a8fdbd 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,8 +9,9 @@
#include <linux/acpi.h>
#include <acpi/video.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d89630b2d5c1..612e9b0ec14a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
+#include "i915_drv.h"
#include "i915_config.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
@@ -207,17 +208,6 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
fb->format->cpp[color_plane];
}
-static bool
-use_min_ddb(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
static unsigned int
intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -225,8 +215,8 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width, height;
unsigned int rel_data_rate;
+ int width, height;
if (plane->id == PLANE_CURSOR)
return 0;
@@ -235,14 +225,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
return 0;
/*
- * We calculate extra ddb based on ratio plane rate/total data rate
- * in case, in some cases we should not allocate extra ddb for the plane,
- * so do not count its data rate, if this is the case.
- */
- if (use_min_ddb(crtc_state, plane))
- return 0;
-
- /*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
@@ -256,7 +238,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- rel_data_rate = width * height * fb->format->cpp[color_plane];
+ rel_data_rate =
+ skl_plane_relative_data_rate(crtc_state, plane, width, height,
+ fb->format->cpp[color_plane]);
+ if (!rel_data_rate)
+ return 0;
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 32aa9ec1a204..ce8a4319a63c 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -681,12 +681,11 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder trans = crtc_state->cpu_transcoder;
- if (HAS_DP20(i915))
- intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ if (HAS_DP20(display))
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
}
@@ -699,10 +698,12 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
drm_dbg_kms(&i915->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ mutex_unlock(&connector->eld_mutex);
return false;
}
@@ -710,6 +711,7 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ mutex_unlock(&connector->eld_mutex);
return true;
}
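The rationale for the new lock, inferred from the change rather than spelled out in it:

/* connector->eld is rewritten by the detect/hotplug path; holding
 * eld_mutex across the validity check and the memcpy() prevents copying
 * a half-updated ELD into the crtc state. */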
@@ -978,6 +980,53 @@ retry:
drm_modeset_acquire_fini(&ctx);
}
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int min_cdclk = 0;
+
+ if (!crtc_state->has_audio)
+ return 0;
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4) {
+ if (DISPLAY_VER(display) == 10) {
+ /* Display WA #1145: glk */
+ min_cdclk = max(min_cdclk, 316800);
+ } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(min_cdclk, 432000);
+ }
+ }
+
+ /*
+ * According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ */
+ if (DISPLAY_VER(display) >= 9)
+ min_cdclk = max(min_cdclk, 2 * 96000);
+
+ /*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state))
+ min_cdclk = max(min_cdclk, crtc_state->port_clock);
+
+ return min_cdclk;
+}
+
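A worked pass through the new helper, with illustrative inputs:

/* SKL-class (display ver 9), DP with audio, port_clock = 540000 (HBR2),
 * lane_count = 4:
 *   WA #1144         -> min_cdclk = 432000
 *   Azalia BCLK rule -> max(432000, 2 * 96000) = 432000
 * On GLK (display ver 10) the same link only demands 316800 kHz. */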
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 576c061d72a4..1bafc155434a 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -27,6 +27,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3f81a726cc7d..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,6 +10,7 @@
#include <acpi/video.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
@@ -40,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
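The stricter >= check is not cosmetic; a note on the failure mode it closes, inferred from scale()'s body (not shown in this hunk):

/* scale() goes on to divide by (source_max - source_min), so letting
 * source_min == source_max through would divide by zero; bailing out
 * with target_min returns the lowest value of the target range. */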
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a4cdd82c4a75..e0e4e9b62d8d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1402,12 +1402,21 @@ parse_power_conservation_features(struct intel_display *display,
panel_type);
}
+static void vbt_edp_to_pps_delays(struct intel_pps_delays *pps,
+ const struct edp_power_seq *edp_pps)
+{
+ pps->power_up = edp_pps->t1_t3;
+ pps->backlight_on = edp_pps->t8;
+ pps->backlight_off = edp_pps->t9;
+ pps->power_down = edp_pps->t10;
+ pps->power_cycle = edp_pps->t11_t12;
+}
+
static void
parse_edp(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_edp *edp;
- const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
@@ -1428,10 +1437,10 @@ parse_edp(struct intel_display *display,
}
/* Get the eDP sequencing and link info */
- edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- panel->vbt.edp.pps = *edp_pps;
+ vbt_edp_to_pps_delays(&panel->vbt.edp.pps,
+ &edp->power_seqs[panel_type]);
if (display->vbt.version >= 224) {
panel->vbt.edp.rate =
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 8b703f6cfe17..f9841f0498c6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,14 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-struct edp_power_seq {
- u16 t1_t3;
- u16 t8;
- u16 t9;
- u16 t10;
- u16 t11_t12;
-} __packed;
-
/*
* MIPI Sequence Block definitions
*
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index a52b0ae68b96..23edc81741de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -1256,7 +1256,7 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
for_each_pipe(i915, pipe)
- min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
return min_cdclk;
}
@@ -1447,13 +1447,14 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
int intel_bw_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
+ intel_atomic_global_obj_init(display, &display->bw.obj,
&state->base, &intel_bw_funcs);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 03c4eef3f92a..c7a603589412 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -29,6 +29,7 @@
#include "soc/intel_dram.h"
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -37,7 +38,6 @@
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
@@ -46,6 +46,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_dsi.h"
#include "vlv_sideband.h"
/**
@@ -2761,154 +2762,62 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
"Post changing CDCLK to");
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+/* pixels per CDCLK */
+static int intel_cdclk_ppc(struct intel_display *display, bool double_wide)
+{
+ return (DISPLAY_VER(display) >= 10 || double_wide) ? 2 : 1;
+}
+
+/* max pixel rate as % of CDCLK (not accounting for PPC) */
+static int intel_cdclk_guardband(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(display->drm);
- int pixel_rate = crtc_state->pixel_rate;
- if (DISPLAY_VER(display) >= 10)
- return DIV_ROUND_UP(pixel_rate, 2);
- else if (DISPLAY_VER(display) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return pixel_rate;
+ if (DISPLAY_VER(display) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return 100;
else if (IS_CHERRYVIEW(dev_priv))
- return DIV_ROUND_UP(pixel_rate * 100, 95);
- else if (crtc_state->double_wide)
- return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
+ return 95;
else
- return DIV_ROUND_UP(pixel_rate * 100, 90);
+ return 90;
}
-static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct intel_plane *plane;
- int min_cdclk = 0;
-
- for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int guardband = intel_cdclk_guardband(display);
+ int pixel_rate = crtc_state->pixel_rate;
- return min_cdclk;
+ return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
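The consolidated formula reproduces every branch of the old code; two illustrative evaluations:

/* CHV: guardband = 95, ppc = 1, pixel_rate = 300000 kHz
 *   -> DIV_ROUND_UP(300000 * 100, 95 * 1)  = 315790 kHz
 * Display ver 10+: guardband = 100, ppc = 2, same pixel rate
 *   -> DIV_ROUND_UP(300000 * 100, 100 * 2) = 150000 kHz */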
-static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc);
- int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ struct intel_plane *plane;
int min_cdclk = 0;
- /*
- * When we decide to use only one VDSC engine, since
- * each VDSC operates with 1 ppc throughput, pixel clock
- * cannot be higher than the VDSC clock (cdclk)
- * If there 2 VDSC engines, then pixel clock can't be higher than
- * VDSC clock(cdclk) * 2 and so on.
- */
- min_cdclk = max_t(int, min_cdclk,
- DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances));
-
- if (crtc_state->joiner_pipes) {
- int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
-
- /*
- * According to Bigjoiner bw check:
- * compressed_bpp <= PPC * CDCLK * Big joiner Interface bits / Pixel clock
- *
- * We have already computed compressed_bpp, so now compute the min CDCLK that
- * is required to support this compressed_bpp.
- *
- * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
- *
- * Since PPC = 2 with bigjoiner
- * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
- */
- int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 36 : 24;
- int min_cdclk_bj =
- (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
- pixel_clock) / (2 * bigjoiner_interface_bits);
-
- min_cdclk = max(min_cdclk, min_cdclk_bj);
- }
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
+ min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
return min_cdclk;
}
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int min_cdclk;
if (!crtc_state->hw.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
- min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
-
- /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
- * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
- * there may be audio corruption or screen corruption." This cdclk
- * restriction for GLK is 316.8 MHz.
- */
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->has_audio &&
- crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4) {
- if (DISPLAY_VER(display) == 10) {
- /* Display WA #1145: glk */
- min_cdclk = max(316800, min_cdclk);
- } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
- /* Display WA #1144: skl,bxt */
- min_cdclk = max(432000, min_cdclk);
- }
- }
-
- /*
- * According to BSpec, "The CD clock frequency must be at least twice
- * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- */
- if (crtc_state->has_audio && DISPLAY_VER(display) >= 9)
- min_cdclk = max(2 * 96000, min_cdclk);
-
- /*
- * "For DP audio configuration, cdclk frequency shall be set to
- * meet the following requirements:
- * DP Link Frequency(MHz) | Cdclk frequency(MHz)
- * 270 | 320 or higher
- * 162 | 200 or higher"
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
- min_cdclk = max(crtc_state->port_clock, min_cdclk);
-
- /*
- * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
- * than 320000KHz.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_VALLEYVIEW(dev_priv))
- min_cdclk = max(320000, min_cdclk);
-
- /*
- * On Geminilake once the CDCLK gets as low as 79200
- * picture gets unstable, despite that values are
- * correct for DSI PLL and DE PLL.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_GEMINILAKE(dev_priv))
- min_cdclk = max(158400, min_cdclk);
-
- /* Account for additional needs from the planes */
- min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
-
- if (crtc_state->dsc.compression_enable)
- min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_planes_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
return min_cdclk;
}
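The rewritten function is now a plain max-reduction over per-feature helpers:

/* Each helper returns 0 when its constraint does not apply, so the
 * max() chain composes cleanly: adding a new CDCLK floor means one new
 * helper plus one max() line here. */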
@@ -2960,7 +2869,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
min_cdclk = max(cdclk_state->force_min_cdclk,
cdclk_state->bw_min_cdclk);
for_each_pipe(display, pipe)
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
/*
* Avoid glk_force_audio_cdclk() causing excessive screen
@@ -2972,7 +2881,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
*/
if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
!is_power_of_2(cdclk_state->active_pipes))
- min_cdclk = max(2 * 96000, min_cdclk);
+ min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
drm_dbg_kms(display->drm,
@@ -3028,8 +2937,8 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
min_voltage_level = 0;
for_each_pipe(display, pipe)
- min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
- min_voltage_level);
+ min_voltage_level = max(min_voltage_level,
+ cdclk_state->min_voltage_level[pipe]);
return min_voltage_level;
}
@@ -3308,14 +3217,13 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi
int intel_cdclk_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &display->cdclk.obj,
+ intel_atomic_global_obj_init(display, &display->cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -3452,20 +3360,11 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
static int intel_compute_max_dotclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
+ int guardband = intel_cdclk_guardband(display);
int max_cdclk_freq = display->cdclk.max_cdclk_freq;
- if (DISPLAY_VER(display) >= 10)
- return 2 * max_cdclk_freq;
- else if (DISPLAY_VER(display) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return max_cdclk_freq;
- else if (IS_CHERRYVIEW(dev_priv))
- return max_cdclk_freq*95/100;
- else if (DISPLAY_VER(display) < 4)
- return 2*max_cdclk_freq*90/100;
- else
- return max_cdclk_freq*90/100;
+ return ppc * max_cdclk_freq * guardband / 100;
}
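With the same two helpers, intel_compute_max_dotclk() becomes the algebraic inverse of intel_pixel_rate_to_cdclk(); for example (numbers illustrative):

/* With max_cdclk_freq = 675000 kHz:
 *   display ver 9   (ppc = 1, guardband = 100) -> max dotclock  675000
 *   display ver 10+ (ppc = 2, guardband = 100) -> max dotclock 1350000
 *   CHV             (ppc = 1, guardband = 95)  -> max dotclock  641250 */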
/**
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 7cd902bbd244..2f51eccdb27a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "i915_drv.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 3252dab56430..4fbe2e3542ca 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 74c1983fe07e..4634d3fd9f20 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -55,18 +56,23 @@
#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
-#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_128 | \
ADPA_CRT_HOTPLUG_WARMUP_10MS | \
ADPA_CRT_HOTPLUG_SAMPLE_4S | \
ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
- ADPA_CRT_HOTPLUG_VOLREF_325MV | \
- ADPA_CRT_HOTPLUG_ENABLE)
+ ADPA_CRT_HOTPLUG_VOLREF_325MV)
+#define ADPA_HOTPLUG_MASK (ADPA_CRT_HOTPLUG_MONITOR_MASK | \
+ ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_MASK | \
+ ADPA_CRT_HOTPLUG_WARMUP_MASK | \
+ ADPA_CRT_HOTPLUG_SAMPLE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLREF_MASK | \
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
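The bits/mask pair is consumed as a read-modify-write recipe; intel_crt_reset() later in this patch does exactly this:

	adpa = intel_de_read(display, crt->adpa_reg);
	adpa &= ~ADPA_HOTPLUG_MASK;	/* clear every hotplug control field */
	adpa |= ADPA_HOTPLUG_BITS;	/* program the desired hotplug mode */
	intel_de_write(display, crt->adpa_reg, adpa);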
struct intel_crt {
struct intel_encoder base;
- /* DPMS state is stored in the connector, which we need in the
- * encoder's enable/disable callbacks */
- struct intel_connector *connector;
bool force_hotplug_required;
i915_reg_t adpa_reg;
};
@@ -91,9 +97,9 @@ bool intel_crt_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
return val & ADPA_DAC_ENABLE;
}
@@ -141,27 +147,27 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- lpt_pch_get_config(pipe_config);
+ lpt_pch_get_config(crtc_state);
- hsw_ddi_get_config(encoder, pipe_config);
+ hsw_ddi_get_config(encoder, crtc_state);
- pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NHSYNC |
- DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NVSYNC);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -244,7 +250,7 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
@@ -257,7 +263,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -287,7 +293,7 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
@@ -300,7 +306,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -319,7 +325,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -355,8 +361,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -399,48 +404,48 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static int intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int pch_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int hsw_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -450,30 +455,30 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start > 4096)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
- if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
"LPT only supports 24bpp\n");
return -EINVAL;
}
- pipe_config->pipe_bpp = 24;
+ crtc_state->pipe_bpp = 24;
}
/* FDI must always be 2.7 GHz */
- pipe_config->port_clock = 135000 * 2;
+ crtc_state->port_clock = 135000 * 2;
- pipe_config->enhanced_framing = true;
+ crtc_state->enhanced_framing = true;
- adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+ adjusted_mode->crtc_clock = lpt_iclkip(crtc_state);
return 0;
}
@@ -481,9 +486,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
@@ -532,9 +536,8 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool reenable_hpd;
u32 adpa;
bool ret;
@@ -588,8 +591,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
@@ -856,7 +858,7 @@ intel_crt_detect(struct drm_connector *connector,
struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
intel_wakeref_t wakeref;
int status;
@@ -865,15 +867,14 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (display->params.load_detect_test) {
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
goto load_detect;
}
@@ -881,8 +882,7 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
if (I915_HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
@@ -939,7 +939,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return status;
}
@@ -947,19 +947,17 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *ddc;
int ret;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
if (ret || !IS_G4X(dev_priv))
@@ -970,7 +968,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, ddc);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -984,7 +982,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
u32 adpa;
adpa = intel_de_read(display, crt->adpa_reg);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa &= ~ADPA_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
intel_de_write(display, crt->adpa_reg, adpa);
intel_de_posting_read(display, crt->adpa_reg);
@@ -1022,9 +1020,8 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct drm_connector *connector;
+ struct intel_connector *connector;
struct intel_crt *crt;
- struct intel_connector *intel_connector;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
@@ -1047,7 +1044,9 @@ void intel_crt_init(struct intel_display *display)
* it and see what happens.
*/
intel_de_write(display, adpa_reg,
- adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ adpa | ADPA_DAC_ENABLE |
+ ADPA_HSYNC_CNTL_DISABLE |
+ ADPA_VSYNC_CNTL_DISABLE);
if ((intel_de_read(display, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
intel_de_write(display, adpa_reg, adpa);
@@ -1057,17 +1056,15 @@ void intel_crt_init(struct intel_display *display)
if (!crt)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(crt);
return;
}
ddc_pin = display->vbt.crt_ddc_pin;
- connector = &intel_connector->base;
- crt->connector = intel_connector;
- drm_connector_init_with_ddc(display->drm, connector,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_crt_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
intel_gmbus_get_adapter(display, ddc_pin));
@@ -1075,7 +1072,7 @@ void intel_crt_init(struct intel_display *display)
drm_encoder_init(display->drm, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC, "CRT");
- intel_connector_attach_encoder(intel_connector, &crt->base);
+ intel_connector_attach_encoder(connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
@@ -1085,7 +1082,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.pipe_mask = ~0;
if (DISPLAY_VER(display) != 2)
- connector->interlace_allowed = true;
+ connector->base.interlace_allowed = true;
crt->adpa_reg = adpa_reg;
@@ -1095,11 +1092,11 @@ void intel_crt_init(struct intel_display *display)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
} else {
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
- intel_connector->base.polled = intel_connector->polled;
+ connector->base.polled = connector->polled;
if (HAS_DDI(display)) {
assert_port_valid(dev_priv, PORT_E);
@@ -1132,9 +1129,9 @@ void intel_crt_init(struct intel_display *display)
crt->base.get_hw_state = intel_crt_get_hw_state;
crt->base.enable = intel_enable_crt;
}
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_crt_connector_helper_funcs);
/*
 	 * TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
new file mode 100644
index 000000000000..571a67ae9afa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_REGS_H__
+#define __INTEL_CRT_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define ADPA _MMIO(0x61100)
+#define PCH_ADPA _MMIO(0xe1100)
+#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
+#define ADPA_DAC_ENABLE REG_BIT(31)
+#define ADPA_PIPE_SEL_MASK REG_BIT(30)
+#define ADPA_PIPE_SEL(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK, (pipe))
+#define ADPA_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define ADPA_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK_CPT, (pipe))
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK REG_GENMASK(25, 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 0)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 3)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 2)
+#define ADPA_CRT_HOTPLUG_ENABLE REG_BIT(23)
+#define ADPA_CRT_HOTPLUG_PERIOD_MASK REG_BIT(22)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 0)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 1)
+#define ADPA_CRT_HOTPLUG_WARMUP_MASK REG_BIT(21)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 0)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 1)
+#define ADPA_CRT_HOTPLUG_SAMPLE_MASK REG_BIT(20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_MASK REG_GENMASK(19, 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 2)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 3)
+#define ADPA_CRT_HOTPLUG_VOLREF_MASK REG_BIT(17)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 1)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER REG_BIT(16)
+#define ADPA_USE_VGA_HVPOLARITY REG_BIT(15)
+#define ADPA_HSYNC_CNTL_DISABLE REG_BIT(11)
+#define ADPA_VSYNC_CNTL_DISABLE REG_BIT(10)
+#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _VGA_MSR_WRITE _MMIO(0x3c2)
+
+#endif /* __INTEL_CRT_REGS_H__ */
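A small usage sketch for the pipe-select helpers above; PIPE_B comes from the driver's enum pipe, and the readout side mirrors intel_crt_port_enabled() in this patch:

/* CPT PCH carries the pipe in a two-bit field at 30:29; older parts use
 * the single bit 30. */
u32 cpt = ADPA_PIPE_SEL_CPT(PIPE_B);	/* pipe B into bits 30:29 */
u32 old = ADPA_PIPE_SEL(PIPE_B);	/* pipe B into bit 30 */
enum pipe pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, cpt);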
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index a2c528d707f4..c910168602d2 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -12,6 +12,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
+#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 705ec5ad385c..1faef60be472 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -50,16 +50,6 @@ intel_dump_infoframe(struct drm_i915_private *i915,
hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
}
-static void
-intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
- 16, 0, buf, len, false);
-}
-
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -293,8 +283,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer("ELD: ", pipe_config->eld,
- drm_eld_size(pipe_config->eld));
+ drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 9ba77970dab7..57cf8f46a458 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -619,7 +620,6 @@ static void skl_write_cursor_wm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -627,14 +627,14 @@ static void skl_write_cursor_wm(struct intel_dsb *dsb,
&crtc_state->wm.skl.plane_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
intel_de_write_dsb(display, dsb, CUR_WM(pipe, level),
skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe),
skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 0c7aee13495a..e768dc6a15b3 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
#include <linux/math64.h>
+
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -2983,7 +2985,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
INTEL_CX0_LANE0;
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -3066,7 +3068,10 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ if (DISPLAY_VER(display) >= 30)
+ clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
+ else
+ clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3081,13 +3086,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
return 540000;
case XELPDP_DDI_CLOCK_SELECT_TBT_810:
return 810000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_312_5:
+ return 1000000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_625:
+ return 2000000;
default:
MISSING_CASE(clock);
return 162000;
}
}
-static int intel_mtl_tbt_clock_select(int clock)
+static int intel_mtl_tbt_clock_select(struct intel_display *display,
+ int clock)
{
switch (clock) {
case 162000:
@@ -3098,6 +3108,18 @@ static int intel_mtl_tbt_clock_select(int clock)
return XELPDP_DDI_CLOCK_SELECT_TBT_540;
case 810000:
return XELPDP_DDI_CLOCK_SELECT_TBT_810;
+ case 1000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN(display->drm, 1, "UHBR10 not supported on this platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_312_5;
+ case 2000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN(display->drm, 1, "UHBR20 not supported on this platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_625;
default:
MISSING_CASE(clock);
return XELPDP_DDI_CLOCK_SELECT_TBT_162;
@@ -3110,15 +3132,26 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
+ u32 mask;
/*
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock));
+
+ if (DISPLAY_VER(display) >= 30) {
+ mask = XE3_DDI_CLOCK_SELECT_MASK;
+ val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ } else {
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK;
+ val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ }
+
+ mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
+ mask, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
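For reference, the TBT clock-select encodings used by the two functions above now map to link rates as follows; the two UHBR entries are the Xe3-only additions and round-trip through intel_mtl_tbt_calc_port_clock(). A sketch of the mapping as data (names from this patch):

static const struct {
	int clock_khz;
	u32 sel;
} tbt_clock_select_sketch[] = {
	{  162000, XELPDP_DDI_CLOCK_SELECT_TBT_162 },
	{  270000, XELPDP_DDI_CLOCK_SELECT_TBT_270 },
	{  540000, XELPDP_DDI_CLOCK_SELECT_TBT_540 },
	{  810000, XELPDP_DDI_CLOCK_SELECT_TBT_810 },
	{ 1000000, XELPDP_DDI_CLOCK_SELECT_TBT_312_5 },	/* UHBR10, Xe3+ */
	{ 2000000, XELPDP_DDI_CLOCK_SELECT_TBT_625 },	/* UHBR20, Xe3+ */
};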
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index f0e5c196eae4..da154ff26b96 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -9,6 +9,11 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A 0x64030
+#define _DDI_CLK_VALFREQ_B 0x64130
+#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
+
/*
* Wrapper macro to convert from port number to the index used in some of the
* registers. For Display version 20 and above it converts the port number to a
@@ -187,7 +192,9 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
@@ -195,11 +202,20 @@
#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd
#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe
#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf
+#define XELPDP_DDI_CLOCK_SELECT_TBT_312_5 0x18
+#define XELPDP_DDI_CLOCK_SELECT_TBT_625 0x19
#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10)
#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8)
#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1)
#define XELPDP_SSC_ENABLE_PLLB REG_BIT(0)
+#define TCSS_DISP_MAILBOX_IN_CMD _MMIO(0x161300)
+#define TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY REG_BIT(31)
+#define TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK REG_GENMASK(7, 0)
+#define TCSS_DISP_MAILBOX_IN_CMD_DATA(val) REG_FIELD_PREP(TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK, val)
+
+#define TCSS_DISP_MAILBOX_IN_DATA _MMIO(0x161304)
+
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
#define C10_PLL0_FRACEN REG_BIT8(4)
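The wider Xe3 mask exists because the new UHBR select codes no longer fit the legacy field: XELPDP_DDI_CLOCK_SELECT_MASK spans four bits (15:12), whose largest encoding is 0xf, while 0x18/0x19 need the fifth bit that XE3_DDI_CLOCK_SELECT_MASK (16:12) adds. A compile-time sketch of that constraint:

/* 0x19 overflows the 4-bit legacy field but fits the 5-bit Xe3 one */
static_assert(XELPDP_DDI_CLOCK_SELECT_TBT_625 > 0xf);
static_assert(XELPDP_DDI_CLOCK_SELECT_TBT_625 <= 0x1f);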
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 49b5cc01ce40..ff2cf3daa7a2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/string_helpers.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
@@ -335,10 +336,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
- intel_dp->DP = dig_port->saved_port_bits |
- DDI_PORT_WIDTH(crtc_state->lane_count) |
+ intel_dp->DP = DDI_PORT_WIDTH(crtc_state->lane_count) |
DDI_BUF_TRANS_SELECT(0);
+ if (dig_port->lane_reversal)
+ intel_dp->DP |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ intel_dp->DP |= DDI_A_4_LANES;
+
if (DISPLAY_VER(i915) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
@@ -455,17 +460,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
}
static void
-intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_dp2(const struct intel_crtc_state *crtc_state,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (intel_dp_is_uhbr(crtc_state))
+ if (!HAS_DP20(display))
+ return;
+
+ if (enable && intel_dp_is_uhbr(crtc_state))
val = TRANS_DP2_128B132B_CHANNEL_CODING;
- intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_DP2_CTL(cpu_transcoder), val);
}
/*
@@ -554,7 +562,8 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
if (intel_dp_is_uhbr(crtc_state))
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
else
@@ -617,9 +626,10 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
/*
* Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
- * bit.
+ * bit for the DDI function. It also enables the DP2 configuration. Called
+ * for all transcoder types.
*/
-static void
+void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -628,12 +638,20 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
+ intel_ddi_config_transcoder_dp2(crtc_state, true);
+
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
}
+/*
+ * Disable the DDI function and port syncing.
+ * For SST, pre-TGL MST, TGL+ MST-slave transcoders: deselect the DDI port,
+ * SST/MST mode and disable the DP2 configuration. For TGL+ MST-master
+ * transcoders these are done later in intel_ddi_post_disable_dp().
+ */
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -670,6 +688,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
+ if (intel_dp_mst_is_slave_trans(crtc_state))
+ intel_ddi_config_transcoder_dp2(crtc_state, false);
+
if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
@@ -700,15 +721,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(intel_connector);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
intel_wakeref_t wakeref;
enum pipe pipe = 0;
- u32 tmp;
+ u32 ddi_mode;
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -716,6 +737,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (!wakeref)
return false;
+ /* Note: This returns false for DP MST primary encoders. */
if (!encoder->get_hw_state(encoder, &pipe)) {
ret = false;
goto out;
@@ -726,38 +748,28 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ TRANS_DDI_MODE_SELECT_MASK;
- switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- case TRANS_DDI_MODE_SELECT_DVI:
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
+ ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
ret = type == DRM_MODE_CONNECTOR_HDMIA;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_SST:
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
ret = type == DRM_MODE_CONNECTOR_eDP ||
- type == DRM_MODE_CONNECTOR_DisplayPort;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_MST:
- /* if the transcoder is in MST state then
- * connector isn't connected */
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ /*
+ * encoder->get_hw_state() should have bailed out on MST. This
+ * must be SST and non-eDP.
+ */
+ ret = type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (drm_WARN_ON(display->drm, ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)) {
+ /* encoder->get_hw_state() should have bailed out on MST. */
ret = false;
- break;
-
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (HAS_DP20(dev_priv))
- /* 128b/132b */
- ret = false;
- else
- /* FDI */
- ret = type == DRM_MODE_CONNECTOR_VGA;
- break;
-
- default:
+ } else {
ret = false;
- break;
}
out:
@@ -769,13 +781,13 @@ out:
static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
- u8 mst_pipe_mask;
+ u8 mst_pipe_mask = 0, dp128b132b_pipe_mask = 0;
*pipe_mask = 0;
*is_dp_mst = false;
@@ -812,10 +824,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
- unsigned int port_mask, ddi_select;
+ u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -839,10 +850,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if ((tmp & port_mask) != ddi_select)
continue;
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
- (HAS_DP20(dev_priv) &&
- (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
+ ddi_mode = tmp & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)
mst_pipe_mask |= BIT(p);
+ else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display))
+ dp128b132b_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
}
@@ -852,6 +865,24 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
+ if (!mst_pipe_mask && dp128b132b_pipe_mask) {
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ /*
+ * If we don't have 8b/10b MST, but have more than one
+ * transcoder in 128b/132b mode, we know it must be 128b/132b
+ * MST.
+ *
+ * Otherwise, we fall back to checking the current MST
+ * state. It's not accurate for hardware takeover at probe, but
+ * we don't expect MST to have been enabled at that point, and
+ * can assume it's SST.
+ */
+ if (hweight8(dp128b132b_pipe_mask) > 1 ||
+ intel_dp_mst_encoder_active_links(dig_port))
+ mst_pipe_mask = dp128b132b_pipe_mask;
+ }
+
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
drm_dbg_kms(&dev_priv->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
@@ -862,9 +893,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
drm_dbg_kms(&dev_priv->drm,
- "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ *pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2196,8 +2227,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
return DP_TP_CTL(encoder->port);
}
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2208,6 +2239,25 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_write(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT);
+}
+
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(display->drm, "Timed out waiting for ACT sent\n");
+}
+
static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
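The two new helpers bracket DP payload-table updates: clear the ACT-sent status (write-one-to-clear, by the look of the write above), change the allocation, then wait for the hardware to send the Allocation Change Trigger and for the sink to acknowledge it. A sketch of the sequence the enable/disable paths later in this patch follow:

static void act_handshake_sketch(struct intel_encoder *encoder,
				 struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	intel_ddi_clear_act_sent(encoder, crtc_state);

	/* ... toggle TRANS_DDI_DP_VC_PAYLOAD_ALLOC in TRANS_DDI_FUNC_CTL ... */

	intel_ddi_wait_for_act_sent(encoder, crtc_state);
	drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}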
@@ -2376,12 +2426,10 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
if (intel_encoder_is_combo(encoder)) {
enum phy phy = intel_encoder_to_phy(encoder);
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
intel_combo_phy_power_up_lanes(i915, phy, false,
crtc_state->lane_count,
- lane_reversal);
+ dig_port->lane_reversal);
}
}
@@ -2506,25 +2554,24 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- u32 val;
+ u32 val = 0;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
- val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
- val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK;
if (intel_dp_is_uhbr(crtc_state))
val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
else
val |= XELPDP_PORT_BUF_PORT_DATA_10BIT;
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK,
+ val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@@ -2546,6 +2593,7 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2583,10 +2631,6 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/*
* 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings.
- */
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
- /*
* 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
@@ -2644,6 +2688,14 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ /* 7.a 128b/132b SST. */
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
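drm_dp_dpcd_write_payload() programs the sink's DPCD payload allocation table; for 128b/132b SST the whole link carries a single virtual channel, hence the fixed VCPID 1 at time slot 0 spanning dp_m_n.tu slots. Under that (assumed) semantics, allocation and the teardown seen later in this patch are symmetric:

/* Sketch: 128b/132b SST payload table management (assumed semantics). */
static int sst_payload_alloc_sketch(struct drm_dp_aux *aux, int tu_slots)
{
	return drm_dp_dpcd_write_payload(aux, 1, 0, tu_slots);
}

static int sst_payload_clear_sketch(struct drm_dp_aux *aux)
{
	return drm_dp_dpcd_write_payload(aux, 1, 0, 0);	/* zero slots = free */
}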
@@ -2657,6 +2709,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2721,9 +2774,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
- if (HAS_DP20(dev_priv))
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
@@ -2786,6 +2836,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2862,9 +2919,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(display))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
@@ -2872,9 +2929,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (crtc_state->has_panel_replay)
intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2911,6 +2968,24 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
crtc_state, conn_state);
}
+/*
+ * Note: Also called from the ->pre_enable of the first active MST stream
+ * encoder on its primary encoder.
+ *
+ * When called from DP MST code:
+ *
+ * - conn_state will be NULL
+ *
+ * - encoder will be the primary encoder (i.e. mst->primary)
+ *
+ * - the main connector associated with this port won't be active or linked to a
+ * crtc
+ *
+ * - crtc_state will be the state of the first stream to be activated on this
+ * port, and it may not be the same stream that will be deactivated last, but
+ * each stream should have a state that is identical when it comes to the DP
+ * link parameters
+ */
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2920,19 +2995,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /*
- * When called from DP MST code:
- * - conn_state will be NULL
- * - encoder will be the main encoder (ie. mst->primary)
- * - the main connector associated with this port
- * won't be active or linked to a crtc
- * - crtc_state will be the state of the first stream to
- * be activated on this port, and it may not be the same
- * stream that will be deactivated last, but each stream
- * should have a state that is identical when it comes to
- * the DP link parameteres
- */
-
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -3071,7 +3133,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (is_mst) {
+ if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_de_rmw(dev_priv,
@@ -3088,6 +3150,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+ intel_ddi_config_transcoder_dp2(old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3153,7 +3217,9 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
+ bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
int i;
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3165,6 +3231,20 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(old_crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, clear */
+ drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, 0);
+
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
+
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3180,6 +3260,11 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
}
}
+/*
+ * Note: Also called from the ->post_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3210,6 +3295,11 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
old_conn_state);
}
+/*
+ * Note: Also called from the ->post_pll_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3260,7 +3350,7 @@ static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
crtc_state);
}
-static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3282,18 +3372,8 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
-/* FIXME bad home for this function */
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- return DISPLAY_VER(i915) >= 14 ?
- MTL_CHICKEN_TRANS(cpu_transcoder) :
- CHICKEN_TRANS(cpu_transcoder);
-}
-
static i915_reg_t
-gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
- enum port port)
+gen9_chicken_trans_reg_by_port(struct intel_display *display, enum port port)
{
static const enum transcoder trans[] = {
[PORT_A] = TRANSCODER_EDP,
@@ -3303,19 +3383,20 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9);
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) < 9);
- if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(display->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
- return CHICKEN_TRANS(trans[port]);
+ return CHICKEN_TRANS(display, trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
@@ -3346,7 +3427,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -3386,20 +3467,26 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* is filled with lane count, already set in the crtc_state.
* The same is required to be filled in PORT_BUF_CTL for C10/20 Phy.
*/
- buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ buf_ctl = DDI_BUF_CTL_ENABLE;
+
+ if (dig_port->lane_reversal)
+ buf_ctl |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ buf_ctl |= DDI_A_4_LANES;
+
if (DISPLAY_VER(dev_priv) >= 14) {
u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(lane_count);
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
- buf_ctl |= DDI_PORT_WIDTH(lane_count);
+ buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
@@ -3413,20 +3500,46 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_wait_ddi_buf_active(encoder);
}
-static void intel_enable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *pipe_crtc;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_hdmi = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
int i;
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(display, TRANS_DP2_VFREQLOW(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_ddi_clear_act_sent(encoder, crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), 0,
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+
+ intel_ddi_wait_for_act_sent(encoder, crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_enable_transcoder(crtc_state);
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
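The VFREQHIGH/VFREQLOW pair written above carries the pixel clock in Hz, split at bit 24. A worked example with a (hypothetical) 594 MHz clock:

u64 crtc_clock_hz = 594000000ULL;	/* KHz(594000) */
u32 high = crtc_clock_hz >> 24;		/* 0x23 */
u32 low = crtc_clock_hz & 0xffffff;	/* 0x67b880 */
/* ((u64)0x23 << 24) | 0x67b880 == 594000000 */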
@@ -3438,16 +3551,16 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_crtc_state);
}
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
+ if (is_hdmi)
+ intel_ddi_enable_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
+ intel_ddi_enable_dp(state, encoder, crtc_state, conn_state);
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
-static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3468,7 +3581,7 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3483,7 +3596,7 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3493,10 +3606,10 @@ static void intel_disable_ddi(struct intel_atomic_state *state,
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ intel_ddi_disable_hdmi(state, encoder, old_crtc_state,
old_conn_state);
else
- intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ intel_ddi_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
}
@@ -3556,6 +3669,11 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
intel_update_active_dpll(state, pipe_crtc, encoder);
}
+/*
+ * Note: Also called from the ->pre_pll_enable of the first active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void
intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3599,9 +3717,9 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u32 dp_tp_ctl;
@@ -3609,21 +3727,22 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
* TODO: Disabling and re-enabling the port should not be necessary just to
* train with a different voltage swing entry
*/
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
if (dp_tp_ctl & DP_TP_CTL_ENABLE)
mtl_disable_ddi_buf(encoder, crtc_state);
/* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
/* 6.f Enable D2D Link */
mtl_ddi_enable_d2d(encoder);
@@ -3636,11 +3755,11 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
intel_wait_ddi_buf_active(encoder);
@@ -3675,7 +3794,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
}
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
@@ -3868,29 +3988,141 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
crtc_state->sync_mode_slaves_mask);
}
+static void intel_ddi_read_func_ctl_dvi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_HDMI);
+ if (DISPLAY_VER(display) >= 14)
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ else
+ crtc_state->lane_count = 4;
+}
+
+static void intel_ddi_read_func_ctl_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ crtc_state->has_hdmi_sink = true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+
+ if (crtc_state->infoframes.enable)
+ crtc_state->has_infoframe = true;
+
+ if (ddi_func_ctl & TRANS_DDI_HDMI_SCRAMBLING)
+ crtc_state->hdmi_scrambling = true;
+ if (ddi_func_ctl & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ crtc_state->hdmi_high_tmds_clock_ratio = true;
+
+ intel_ddi_read_func_ctl_dvi(encoder, crtc_state, ddi_func_ctl);
+}
+
+static void intel_ddi_read_func_ctl_fdi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+}
+
+static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12 &&
+ (ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+ intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2);
+
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ crtc_state->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, crtc_state);
+ else
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
+
+static void intel_ddi_read_func_ctl_dp_mst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
+
static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- u32 temp, flags = 0;
+ u32 ddi_func_ctl, ddi_mode, flags = 0;
- temp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
- if (temp & TRANS_DDI_PHSYNC)
+ ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (temp & TRANS_DDI_PVSYNC)
+ if (ddi_func_ctl & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->hw.adjusted_mode.flags |= flags;
- switch (temp & TRANS_DDI_BPC_MASK) {
+ switch (ddi_func_ctl & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
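All of the per-mode readout helpers above decode the lane count the same way: the DDI port width field stores lanes minus one, so a raw 0 means one lane and a raw 3 means four. As a sketch:

static int ddi_lane_count_sketch(u32 ddi_func_ctl)
{
	/* field holds lanes - 1 */
	return ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
}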
@@ -3907,93 +4139,37 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
break;
}
- switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- pipe_config->has_hdmi_sink = true;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
- if (pipe_config->infoframes.enable)
- pipe_config->has_infoframe = true;
-
- if (temp & TRANS_DDI_HDMI_SCRAMBLING)
- pipe_config->hdmi_scrambling = true;
- if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
- pipe_config->hdmi_high_tmds_clock_ratio = true;
- fallthrough;
- case TRANS_DDI_MODE_SELECT_DVI:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- if (DISPLAY_VER(dev_priv) >= 14)
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
- else
- pipe_config->lane_count = 4;
- break;
- case TRANS_DDI_MODE_SELECT_DP_SST:
- if (encoder->type == INTEL_OUTPUT_EDP)
- pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
- else
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
- intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder,
- &pipe_config->dp_m2_n2);
-
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
+ ddi_mode = ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI) {
+ intel_ddi_read_func_ctl_hdmi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
+ intel_ddi_read_func_ctl_dvi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ intel_ddi_read_func_ctl_fdi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
- pipe_config->infoframes.enable |=
- intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ /*
+ * If this is true, we know we're being called from an MST stream
+ * encoder's ->get_config().
+ */
+ if (intel_dp_mst_encoder_active_links(dig_port))
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (!HAS_DP20(dev_priv)) {
- /* FDI */
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
- break;
- }
- fallthrough; /* 128b/132b */
- case TRANS_DDI_MODE_SELECT_DP_MST:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- pipe_config->mst_master_transcoder =
- REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- default:
- break;
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
}
}
+/*
+ * Note: Also called from the ->get_config of the MST stream encoders on their
+ * primary encoder, via the platform specific hooks here. See also the comment
+ * for intel_ddi_pre_enable().
+ */
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -4461,8 +4637,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.late_register = intel_ddi_encoder_late_register,
};
-static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
@@ -4470,7 +4645,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
if (DISPLAY_VER(i915) >= 14)
@@ -4485,7 +4660,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
- return NULL;
+ return -EINVAL;
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -4501,7 +4676,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
}
- return connector;
+ return 0;
}
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
@@ -4667,20 +4842,28 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
-static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(dig_port, connector);
- return connector;
+ if (!intel_hdmi_init_connector(dig_port, connector)) {
+ /*
+ * HDMI connector init failures may just mean conflicting DDC
+ * pins or not having enough lanes. Handle them gracefully, but
+ * don't fail the entire DDI init.
+ */
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ kfree(connector);
+ }
+
+ return 0;
}
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
@@ -4690,7 +4873,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
if (dig_port->base.port != PORT_A)
return false;
- if (dig_port->saved_port_bits & DDI_A_4_LANES)
+ if (dig_port->ddi_a_4_lanes)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4728,7 +4911,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
if (intel_ddi_a_force_4_lanes(dig_port)) {
drm_dbg_kms(&dev_priv->drm,
"Forcing DDI_A_4_LANES for port A\n");
- dig_port->saved_port_bits |= DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
}
@@ -4835,8 +5018,10 @@ static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder)
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
{
- intel_dp_encoder_shutdown(encoder);
- intel_hdmi_encoder_shutdown(encoder);
+ if (intel_encoder_is_dp(encoder))
+ intel_dp_encoder_shutdown(encoder);
+ if (intel_encoder_is_hdmi(encoder))
+ intel_hdmi_encoder_shutdown(encoder);
}
static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
@@ -4907,6 +5092,7 @@ void intel_ddi_init(struct intel_display *display,
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
+ u32 ddi_buf_ctl;
port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
@@ -5030,10 +5216,10 @@ void intel_ddi_init(struct intel_display *display,
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
encoder->compute_config_late = intel_ddi_compute_config_late;
- encoder->enable = intel_enable_ddi;
+ encoder->enable = intel_ddi_enable;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
- encoder->disable = intel_disable_ddi;
+ encoder->disable = intel_ddi_disable;
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
@@ -5156,17 +5342,12 @@ void intel_ddi_init(struct intel_display *display,
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
- if (DISPLAY_VER(dev_priv) >= 11)
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & DDI_BUF_PORT_REVERSAL;
- else
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+
+ dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
+ ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- if (intel_bios_encoder_lane_reversal(devdata))
- dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5229,7 +5410,7 @@ void intel_ddi_init(struct intel_display *display,
intel_infoframe_init(dig_port);
if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
+ if (intel_ddi_init_dp_connector(dig_port))
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
@@ -5243,7 +5424,7 @@ void intel_ddi_init(struct intel_display *display,
* but leave it just in case we have some really bad VBTs...
*/
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
+ if (intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
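The connector-init helpers now return 0 or a negative errno instead of a pointer, and the two callers differ in what they tolerate: a DP failure aborts the DDI, while an HDMI init failure (beyond -ENOMEM) is swallowed inside the helper, which invalidates hdmi_reg and still returns 0. Caller-side sketch:

/* Sketch: the new 0/-errno convention at the call sites above. */
if (init_dp && intel_ddi_init_dp_connector(dig_port))
	goto err;	/* DP connector init failure is fatal */

if (init_hdmi && intel_ddi_init_hdmi_connector(dig_port))
	goto err;	/* only -ENOMEM reaches here */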
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 640851d46b1b..2faadd1441e2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -26,10 +26,12 @@ enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder);
+
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -57,6 +59,8 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 4d21ce734343..9389b295036e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1687,18 +1687,24 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
}
static const struct intel_ddi_buf_trans *
-mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
+mtl_get_c10_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
+ return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+mtl_get_c20_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_encoder_is_c10phy(encoder))
- return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
- return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+ return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1706,7 +1712,10 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (DISPLAY_VER(i915) >= 14) {
- encoder->get_buf_trans = mtl_get_cx0_buf_trans;
+ if (intel_encoder_is_c10phy(encoder))
+ encoder->get_buf_trans = mtl_get_c10_buf_trans;
+ else
+ encoder->get_buf_trans = mtl_get_c20_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index bb51f974e9e2..b7399e9d11cc 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,14 +6,16 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "i915_drv.h"
-#include "i915_trace.h"
+#include "intel_display_conversion.h"
+#include "intel_display_core.h"
+#include "intel_dmc_wl.h"
#include "intel_dsb.h"
#include "intel_uncore.h"
+#include "intel_uncore_trace.h"
static inline struct intel_uncore *__to_uncore(struct intel_display *display)
{
- return &to_i915(display->drm)->uncore;
+ return to_intel_uncore(display->drm);
}
static inline u32
@@ -118,6 +120,16 @@ __intel_de_wait_for_register_nowl(struct intel_display *display,
}
static inline int
+__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us)
+{
+ return __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, fast_timeout_us, 0, NULL);
+}
+
+static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
u32 mask, u32 value, unsigned int timeout)
{
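The _atomic_nowl variant passes a zero slow timeout, which keeps __intel_wait_for_register() on its busy-wait path, so it can be used where sleeping is not allowed; the "nowl" suffix appears to mean no DMC wakelock handling, which is left to the caller. A usage sketch under those assumptions:

static int wait_bit_atomic_sketch(struct intel_display *display,
				  i915_reg_t reg, u32 bit)
{
	/* never sleeps: 10 us busy-wait budget, no slow timeout */
	return __intel_de_wait_for_register_atomic_nowl(display, reg,
							bit, bit, 10);
}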
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 863927f429aa..c9dcf2bbd4c7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -511,6 +511,7 @@ void vlv_wait_port_ready(struct intel_display *display,
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
@@ -554,8 +555,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
if (DISPLAY_VER(dev_priv) == 14)
set |= DP_FEC_BS_JITTER_WA;
- intel_de_rmw(dev_priv,
- hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
clear, set);
}
@@ -591,6 +591,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -628,7 +629,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
if ((val & TRANSCONF_ENABLE) == 0)
@@ -1744,10 +1745,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -2371,7 +2371,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return DISPLAY_VER(dev_priv) < 4 &&
+ return HAS_DOUBLE_WIDE(dev_priv) &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -3137,9 +3137,14 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
- /* We support 4:2:0 in full blend mode only */
- drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
+ /*
+ * We support 4:2:0 in full blend mode only.
+ * For xe3_lpd+ this is implied in YUV420 Enable bit.
+ * Ensure the same for prior platforms in YUV420 Mode bit.
+ */
+ if (DISPLAY_VER(dev_priv) < 30)
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
@@ -3207,7 +3212,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- if (DISPLAY_VER(dev_priv) < 4)
+ if (HAS_DOUBLE_WIDE(dev_priv))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
@@ -3388,8 +3393,8 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPE_MISC_YUV420_ENABLE |
- PIPE_MISC_YUV420_MODE_FULL_BLEND;
+ val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
+ PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
@@ -3746,12 +3751,13 @@ static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
u8 *primary_pipes, u8 *secondary_pipes)
{
+ struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
*primary_pipes = 0;
*secondary_pipes = 0;
- if (!HAS_ULTRAJOINER(i915))
+ if (!HAS_ULTRAJOINER(display))
return;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
@@ -4111,6 +4117,7 @@ static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
@@ -4187,7 +4194,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -4545,6 +4552,7 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -4581,12 +4589,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ ret = intel_atomic_setup_scalers(state, crtc);
if (ret)
return ret;
}
- if (HAS_IPS(dev_priv)) {
+ if (HAS_IPS(display)) {
ret = hsw_ips_compute_config(state, crtc);
if (ret)
return ret;
@@ -5208,7 +5216,7 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
- pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+ pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");
drm_printf(p, "expected:\n");
drm_dp_vsc_sdp_log(p, a);
@@ -5217,27 +5225,18 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
}
static void
-pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_as_sdp *a,
const struct drm_dp_as_sdp *b)
{
- struct drm_printer p;
+ pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");
- if (fastset) {
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
-
- drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
- } else {
- p = drm_err_printer(&i915->drm, NULL);
-
- drm_printf(&p, "mismatch in %s dp sdp\n", name);
- }
-
- drm_printf(&p, "expected:\n");
- drm_dp_as_sdp_log(&p, a);
- drm_printf(&p, "found:\n");
- drm_dp_as_sdp_log(&p, b);
+ drm_printf(p, "expected:\n");
+ drm_dp_as_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_as_sdp_log(p, b);
}
/* Returns the length up to and including the last differing byte */
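The comment above documents memcmp_diff_len(), which the rewritten pipe_config_buffer_mismatch() below uses to truncate its hex dumps at the last differing byte. A plausible standalone implementation of that contract (the helper's body sits outside this hunk, so this is an assumption):

#include <stddef.h>
#include <stdio.h>

/* length up to and including the last byte where a and b differ */
static size_t memcmp_diff_len(const unsigned char *a, const unsigned char *b,
			      size_t len)
{
	while (len > 0 && a[len - 1] == b[len - 1])
		len--;
	return len;
}

int main(void)
{
	const unsigned char a[] = { 1, 2, 3, 4, 5 };
	const unsigned char b[] = { 1, 9, 3, 4, 5 };

	printf("%zu\n", memcmp_diff_len(a, b, sizeof(a))); /* 2 */
	return 0;
}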
@@ -5260,26 +5259,13 @@ pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- const char *loglevel;
-
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- loglevel = KERN_DEBUG;
- } else {
- loglevel = KERN_ERR;
- }
-
pipe_config_mismatch(p, fastset, crtc, name, "buffer");
/* only dump up to the last difference */
len = memcmp_diff_len(a, b, len);
- print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ drm_print_hex_dump(p, "expected: ", a, len);
+ drm_print_hex_dump(p, "found: ", b, len);
}
static void
@@ -5322,6 +5308,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
+ struct intel_display *display = to_intel_display(current_config);
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
@@ -5498,7 +5485,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5562,7 +5549,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
@@ -5743,7 +5730,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
- PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.num_streams);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -6641,12 +6628,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
+ struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
+ /*
+ * Any plane which is in use by the joiner needs its crtc.
+ * Pull those in first as this will not have happened yet
+ * if the plane remains disabled according to uapi.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ crtc = to_intel_crtc(plane_state->hw.crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ /* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->joiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))
@@ -6797,6 +6802,7 @@ static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -6804,7 +6810,7 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return -ENODEV;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -7572,7 +7578,7 @@ static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state)
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
- container_of(work, struct intel_atomic_state, base.commit_work);
+ container_of(work, struct intel_atomic_state, cleanup_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
@@ -7822,8 +7828,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.funcs.display->commit_modeset_enables(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
+ intel_program_dpkgc_latency(state);
intel_wait_for_vblank_workers(state);
@@ -7898,6 +7903,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7927,8 +7934,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->base.commit_work);
+ INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
+ queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -8166,7 +8173,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_lvds_init(dev_priv);
intel_crt_init(display);
- dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
if (ilk_has_edp_a(dev_priv))
g4x_dp_init(dev_priv, DP_A, PORT_A);
@@ -8212,14 +8219,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -8308,11 +8315,12 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->display.cdclk.max_dotclk_freq;
+ struct intel_display *display = &i915->display;
+ int max_dotclock = display->cdclk.max_dotclk_freq;
- if (HAS_ULTRAJOINER(i915))
+ if (HAS_ULTRAJOINER(display))
max_dotclock *= 4;
- else if (HAS_UNCOMPRESSED_JOINER(i915) || HAS_BIGJOINER(i915))
+ else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
max_dotclock *= 2;
return max_dotclock;
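max_dotclock() scales the CDCLK-derived pixel-rate limit by how many pipes a joiner configuration can gang: x4 with ultrajoiner, x2 with big or uncompressed joiner. A trivial sketch of the computation with made-up capability flags and an arbitrary 675 MHz base:

#include <stdbool.h>
#include <stdio.h>

static int max_dotclock(int max_dotclk_khz, bool has_ultrajoiner,
			bool has_bigjoiner)
{
	if (has_ultrajoiner)
		return max_dotclk_khz * 4; /* four joined pipes */
	if (has_bigjoiner)
		return max_dotclk_khz * 2; /* two joined pipes */
	return max_dotclk_khz;
}

int main(void)
{
	printf("%d kHz\n", max_dotclock(675000, true, false)); /* 2700000 */
	return 0;
}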
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index caef04f655c5..49a246feb1ae 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -238,9 +238,6 @@ enum phy_fia {
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
for_each_if((__phys_mask) & BIT(__phy))
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
new file mode 100644
index 000000000000..0578b68404da
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include "i915_drv.h"
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915)
+{
+ return &i915->display;
+}
+
+struct intel_display *__drm_to_display(struct drm_device *drm)
+{
+ return __i915_to_display(to_i915(drm));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index ad8545c8055d..46c7208d42ba 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -8,14 +8,20 @@
#ifndef __INTEL_DISPLAY_CONVERSION__
#define __INTEL_DISPLAY_CONVERSION__
+struct drm_device;
+struct drm_i915_private;
+struct intel_display;
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915);
+struct intel_display *__drm_to_display(struct drm_device *drm);
/*
* Transitional macro to optionally convert struct drm_i915_private * to struct
* intel_display *, also accepting the latter.
*/
#define __to_intel_display(p) \
_Generic(p, \
- const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
- struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
+ struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
const struct intel_display *: (p), \
struct intel_display *: (p))
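__to_intel_display() keeps accepting either a struct drm_i915_private * or a struct intel_display *, but now routes the i915 cases through out-of-line helpers so users of the macro no longer need the full i915_drv.h definition. A compile-and-run sketch of the same C11 _Generic dispatch with stand-in types:

#include <stdio.h>

struct display { int id; };
struct i915 { struct display display; };

static struct display *i915_to_display(struct i915 *i915)
{
	return &i915->display;
}

/* accept either an i915 or a display pointer, yield a display pointer */
#define to_display(p)						\
	_Generic((p),						\
		 struct i915 *: i915_to_display((struct i915 *)(p)), \
		 struct display *: (p))

int main(void)
{
	struct i915 dev = { .display = { .id = 42 } };
	struct display *disp = to_display(&dev);

	printf("%d %d\n", disp->id, to_display(disp)->id); /* 42 42 */
	return 0;
}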
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 45b7c6900adc..554870d2494b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -453,7 +453,14 @@ struct intel_display {
} ips;
struct {
- bool display_irqs_enabled;
+ /*
+ * Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block
+ * registers outside of the power domain. We defer setting up
+ * the display irqs in this case to the runtime pm.
+ */
+ bool vlv_display_irqs_enabled;
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
@@ -505,6 +512,11 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ u32 saveDSPARB;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF3[3];
+ u16 saveGCDGMBUS;
} restore;
struct {
@@ -542,6 +554,9 @@ struct intel_display {
/* unbound hipri wq for page flips/plane updates */
struct workqueue_struct *flip;
+
+ /* hipri wq for commit cleanups */
+ struct workqueue_struct *cleanup;
} wq;
/* Grouping using named structs. Keep sorted. */
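The renamed vlv_display_irqs_enabled flag exists, per the new comment, because vlv/chv can power down the display irq block at runtime: irq setup is deferred to runtime pm, and display irq registers must not be written while the flag is clear. A toy sketch of gating writes behind such a flag (hypothetical helpers, not the i915 code):

#include <stdbool.h>
#include <stdio.h>

struct display {
	bool vlv_display_irqs_enabled;
};

static void write_irq_reg(struct display *d, unsigned int val)
{
	/* never touch the irq block while its power domain may be off */
	if (!d->vlv_display_irqs_enabled) {
		printf("skipped write of %#x\n", val);
		return;
	}
	printf("wrote %#x\n", val);
}

static void runtime_resume(struct display *d)
{
	d->vlv_display_irqs_enabled = true; /* deferred irq setup happens here */
}

int main(void)
{
	struct display d = { .vlv_display_irqs_enabled = false };

	write_irq_reg(&d, 0x1); /* skipped */
	runtime_resume(&d);
	write_irq_reg(&d, 0x1); /* wrote */
	return 0;
}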
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 11aff485d8fa..f1d76484025a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -11,8 +11,10 @@
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
#include "intel_bo.h"
#include "intel_crtc.h"
@@ -730,11 +732,12 @@ static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(i915,
+ is_enabled = intel_display_power_well_is_enabled(display,
power_well_id);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1012,6 +1015,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr444)));
seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
+ seq_printf(m, "DSC_Sink_Max_Slice_Count: %d\n",
+ drm_dp_dsc_sink_max_slice_count((connector->dp.dsc_dpcd), intel_dp_is_edp(intel_dp)));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1331,7 +1336,7 @@ static ssize_t i915_joiner_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int force_joined_pipes = 0;
int ret;
@@ -1349,7 +1354,7 @@ static ssize_t i915_joiner_write(struct file *file,
connector->force_joined_pipes = force_joined_pipes;
break;
case 4:
- if (HAS_ULTRAJOINER(i915)) {
+ if (HAS_ULTRAJOINER(display)) {
connector->force_joined_pipes = force_joined_pipes;
break;
}
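i915_joiner_write() lets debugfs force a joined-pipe count, but only grants 4 pipes when the platform has ultrajoiner. A sketch of that validation, assuming the accepted values are 0, 1, 2 and, with ultrajoiner, 4; only the case 4 branch is visible in this hunk:

#include <stdbool.h>
#include <stdio.h>

/* returns 0 on success, -1 for values the platform cannot honour */
static int set_force_joined_pipes(int *out, int pipes, bool has_ultrajoiner)
{
	switch (pipes) {
	case 0:
	case 1:
	case 2:
		*out = pipes;
		return 0;
	case 4:
		if (has_ultrajoiner) {
			*out = pipes;
			return 0;
		}
		return -1;
	default:
		return -1;
	}
}

int main(void)
{
	int force = 0;

	printf("%d\n", set_force_joined_pipes(&force, 4, false)); /* -1 */
	printf("%d\n", set_force_joined_pipes(&force, 2, true));  /* 0 */
	return 0;
}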
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index ec3ed29a83c9..88914a1f3f62 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,9 +7,10 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
-#include "i915_drv.h"
#include "intel_display_params.h"
/* int param */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 5f98e1b2a401..68cb7f9b9ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -16,6 +16,7 @@
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -252,6 +253,7 @@ static const struct intel_display_device_info no_display = {};
static const struct platform_desc i830_desc = {
PLATFORM(i830),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -270,6 +272,7 @@ static const struct platform_desc i845_desc = {
static const struct platform_desc i85x_desc = {
PLATFORM(i85x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -312,6 +315,7 @@ static const struct platform_desc i915g_desc = {
static const struct platform_desc i915gm_desc = {
PLATFORM(i915gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -336,6 +340,7 @@ static const struct platform_desc i945g_desc = {
static const struct platform_desc i945gm_desc = {
	PLATFORM(i945gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -357,13 +362,21 @@ static const struct platform_desc g33_desc = {
},
};
-static const struct platform_desc pnv_desc = {
+static const struct intel_display_device_info pnv_display = {
+ GEN3_DISPLAY,
+ I9XX_COLORS,
+ .has_hotplug = 1,
+};
+
+static const struct platform_desc pnv_g_desc = {
PLATFORM(pineview),
- .info = &(const struct intel_display_device_info) {
- GEN3_DISPLAY,
- I9XX_COLORS,
- .has_hotplug = 1,
- },
+ .info = &pnv_display,
+};
+
+static const struct platform_desc pnv_m_desc = {
+ PLATFORM(pineview),
+ PLATFORM_GROUP(mobile),
+ .info = &pnv_display,
};
#define GEN4_DISPLAY \
@@ -390,6 +403,7 @@ static const struct platform_desc i965g_desc = {
static const struct platform_desc i965gm_desc = {
PLATFORM(i965gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.has_overlay = 1,
@@ -413,6 +427,7 @@ static const struct platform_desc g45_desc = {
static const struct platform_desc gm45_desc = {
PLATFORM(gm45),
PLATFORM_GROUP(g4x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.supports_tv = 1,
@@ -443,6 +458,7 @@ static const struct platform_desc ilk_d_desc = {
static const struct platform_desc ilk_m_desc = {
PLATFORM(ironlake),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
ILK_DISPLAY,
@@ -450,38 +466,54 @@ static const struct platform_desc ilk_m_desc = {
},
};
-static const struct platform_desc snb_desc = {
+static const struct intel_display_device_info snb_display = {
+ .has_hotplug = 1,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ ILK_COLORS,
+
+ .__runtime_defaults.ip.ver = 6,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+};
+
+static const struct platform_desc snb_d_desc = {
PLATFORM(sandybridge),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- I9XX_PIPE_OFFSETS,
- I9XX_CURSOR_OFFSETS,
- ILK_COLORS,
+ .info = &snb_display,
+};
- .__runtime_defaults.ip.ver = 6,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+static const struct platform_desc snb_m_desc = {
+ PLATFORM(sandybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &snb_display,
};
-static const struct platform_desc ivb_desc = {
+static const struct intel_display_device_info ivb_display = {
+ .has_hotplug = 1,
+ IVB_PIPE_OFFSETS,
+ IVB_CURSOR_OFFSETS,
+ IVB_COLORS,
+
+ .__runtime_defaults.ip.ver = 7,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+};
+
+static const struct platform_desc ivb_d_desc = {
PLATFORM(ivybridge),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- IVB_PIPE_OFFSETS,
- IVB_CURSOR_OFFSETS,
- IVB_COLORS,
+ .info = &ivb_display,
+};
- .__runtime_defaults.ip.ver = 7,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+static const struct platform_desc ivb_m_desc = {
+ PLATFORM(ivybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &ivb_display,
};
static const struct platform_desc vlv_desc = {
@@ -1011,6 +1043,7 @@ static const enum intel_step dg1_steppings[] = {
static const struct platform_desc dg1_desc = {
PLATFORM(dg1),
+ PLATFORM_GROUP(dgfx),
.info = &(const struct intel_display_device_info) {
XE_D_DISPLAY,
@@ -1238,6 +1271,7 @@ static const enum intel_step dg2_g12_steppings[] = {
static const struct platform_desc dg2_desc = {
PLATFORM(dg2),
+ PLATFORM_GROUP(dgfx),
.subplatforms = (const struct subplatform_desc[]) {
{
SUBPLATFORM(dg2, g10),
@@ -1338,6 +1372,7 @@ static const struct platform_desc lnl_desc = {
static const struct platform_desc bmg_desc = {
PLATFORM(battlemage),
+ PLATFORM_GROUP(dgfx),
};
static const struct platform_desc ptl_desc = {
@@ -1381,11 +1416,14 @@ static const struct {
INTEL_I965GM_IDS(INTEL_DISPLAY_DEVICE, &i965gm_desc),
INTEL_GM45_IDS(INTEL_DISPLAY_DEVICE, &gm45_desc),
INTEL_G45_IDS(INTEL_DISPLAY_DEVICE, &g45_desc),
- INTEL_PNV_IDS(INTEL_DISPLAY_DEVICE, &pnv_desc),
+ INTEL_PNV_G_IDS(INTEL_DISPLAY_DEVICE, &pnv_g_desc),
+ INTEL_PNV_M_IDS(INTEL_DISPLAY_DEVICE, &pnv_m_desc),
INTEL_ILK_D_IDS(INTEL_DISPLAY_DEVICE, &ilk_d_desc),
INTEL_ILK_M_IDS(INTEL_DISPLAY_DEVICE, &ilk_m_desc),
- INTEL_SNB_IDS(INTEL_DISPLAY_DEVICE, &snb_desc),
- INTEL_IVB_IDS(INTEL_DISPLAY_DEVICE, &ivb_desc),
+ INTEL_SNB_D_IDS(INTEL_DISPLAY_DEVICE, &snb_d_desc),
+ INTEL_SNB_M_IDS(INTEL_DISPLAY_DEVICE, &snb_m_desc),
+ INTEL_IVB_D_IDS(INTEL_DISPLAY_DEVICE, &ivb_d_desc),
+ INTEL_IVB_M_IDS(INTEL_DISPLAY_DEVICE, &ivb_m_desc),
INTEL_HSW_IDS(INTEL_DISPLAY_DEVICE, &hsw_desc),
INTEL_VLV_IDS(INTEL_DISPLAY_DEVICE, &vlv_desc),
INTEL_BDW_IDS(INTEL_DISPLAY_DEVICE, &bdw_desc),
@@ -1429,9 +1467,9 @@ static const struct {
};
static const struct intel_display_device_info *
-probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *ip_ver)
+probe_gmdid_display(struct intel_display *display, struct intel_display_ip_ver *ip_ver)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
struct intel_display_ip_ver gmd_id;
void __iomem *addr;
u32 val;
@@ -1439,7 +1477,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
if (!addr) {
- drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
+ drm_err(display->drm,
+ "Cannot map MMIO BAR to read display GMD_ID\n");
return NULL;
}
@@ -1447,7 +1486,7 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
pci_iounmap(pdev, addr);
if (val == 0) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
return NULL;
}
@@ -1463,7 +1502,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
}
}
- drm_err(&i915->drm, "Unrecognized display IP version %d.%02d; disabling display.\n",
+ drm_err(display->drm,
+ "Unrecognized display IP version %d.%02d; disabling display.\n",
gmd_id.ver, gmd_id.rel);
return NULL;
}
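probe_gmdid_display() maps BAR 0 just long enough to read a single GMD_ID dword and unpack version, release and stepping fields from it. A sketch of that unpacking with an assumed bit layout (ver[31:16], rel[15:8], step[7:0] here purely for illustration; the real field masks are defined elsewhere):

#include <stdint.h>
#include <stdio.h>

struct ip_ver { unsigned int ver, rel, step; };

static struct ip_ver decode_gmd_id(uint32_t val)
{
	struct ip_ver ip = {
		.ver  = (val >> 16) & 0xffff,
		.rel  = (val >> 8) & 0xff,
		.step = val & 0xff,
	};
	return ip;
}

int main(void)
{
	struct ip_ver ip = decode_gmd_id(0x000e0100);

	/* prints "display version 14.01 step 0" */
	printf("display version %u.%02u step %u\n", ip.ver, ip.rel, ip.step);
	return 0;
}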
@@ -1564,10 +1604,9 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-void intel_display_device_probe(struct drm_i915_private *i915)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
{
- struct intel_display *display = &i915->display;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_display *display = to_intel_display(pdev);
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
@@ -1575,55 +1614,56 @@ void intel_display_device_probe(struct drm_i915_private *i915)
enum intel_step step;
/* Add drm device backpointer as early as possible. */
- i915->display.drm = &i915->drm;
+ display->drm = pci_get_drvdata(pdev);
- intel_display_params_copy(&i915->display.params);
+ intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
goto no_display;
}
desc = find_platform_desc(pdev);
if (!desc) {
- drm_dbg_kms(&i915->drm, "Unknown device ID %04x; disabling display.\n",
+ drm_dbg_kms(display->drm,
+ "Unknown device ID %04x; disabling display.\n",
pdev->device);
goto no_display;
}
info = desc->info;
if (!info)
- info = probe_gmdid_display(i915, &ip_ver);
+ info = probe_gmdid_display(display, &ip_ver);
if (!info)
goto no_display;
- DISPLAY_INFO(i915) = info;
+ DISPLAY_INFO(display) = info;
- memcpy(DISPLAY_RUNTIME_INFO(i915),
- &DISPLAY_INFO(i915)->__runtime_defaults,
- sizeof(*DISPLAY_RUNTIME_INFO(i915)));
+ memcpy(DISPLAY_RUNTIME_INFO(display),
+ &DISPLAY_INFO(display)->__runtime_defaults,
+ sizeof(*DISPLAY_RUNTIME_INFO(display)));
- drm_WARN_ON(&i915->drm, !desc->name ||
+ drm_WARN_ON(display->drm, !desc->name ||
!display_platforms_weight(&desc->platforms));
display->platform = desc->platforms;
subdesc = find_subplatform_desc(pdev, desc);
if (subdesc) {
- drm_WARN_ON(&i915->drm, !subdesc->name ||
+ drm_WARN_ON(display->drm, !subdesc->name ||
!display_platforms_weight(&subdesc->platforms));
display_platforms_or(&display->platform, &subdesc->platforms);
/* Ensure platform and subplatform are distinct */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
display_platforms_weight(&display->platform) !=
display_platforms_weight(&desc->platforms) +
display_platforms_weight(&subdesc->platforms));
}
if (ip_ver.ver || ip_ver.rel || ip_ver.step) {
- DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver;
+ DISPLAY_RUNTIME_INFO(display)->ip = ip_ver;
step = STEP_A0 + ip_ver.step;
if (step > STEP_FUTURE) {
drm_dbg_kms(display->drm, "Using future display stepping\n");
@@ -1634,29 +1674,32 @@ void intel_display_device_probe(struct drm_i915_private *i915)
subdesc ? &subdesc->step_info : NULL);
}
- DISPLAY_RUNTIME_INFO(i915)->step = step;
+ DISPLAY_RUNTIME_INFO(display)->step = step;
- drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n",
+ drm_info(display->drm, "Found %s%s%s (device ID %04x) %s display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
- pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver,
- DISPLAY_RUNTIME_INFO(i915)->ip.rel,
+ pdev->device, display->platform.dgfx ? "discrete" : "integrated",
+ DISPLAY_RUNTIME_INFO(display)->ip.ver,
+ DISPLAY_RUNTIME_INFO(display)->ip.rel,
step != STEP_NONE ? intel_step_name(step) : "N/A");
- return;
+ return display;
no_display:
- DISPLAY_INFO(i915) = &no_display;
+ DISPLAY_INFO(display) = &no_display;
+
+ return display;
}
-void intel_display_device_remove(struct drm_i915_private *i915)
+void intel_display_device_remove(struct intel_display *display)
{
- intel_display_params_free(&i915->display.params);
+ intel_display_params_free(&display->params);
}
-static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES);
@@ -1664,35 +1707,35 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
/* This covers both ULT and ULX */
- if (IS_HASWELL_ULT(i915) || IS_BROADWELL_ULT(i915))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
display_runtime->port_mask &= ~BIT(PORT_D);
- if (IS_ICL_WITH_PORT_F(i915))
+ if (display->platform.icelake_port_f)
display_runtime->port_mask |= BIT(PORT_F);
/* Wa_14011765242: adl-s A0,A1 */
- if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
- for_each_pipe(i915, pipe)
+ if (display->platform.alderlake_s && IS_DISPLAY_STEP(display, STEP_A0, STEP_A2))
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 0;
- else if (DISPLAY_VER(i915) >= 11) {
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11) {
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
display_runtime->num_scalers[PIPE_A] = 2;
display_runtime->num_scalers[PIPE_B] = 2;
display_runtime->num_scalers[PIPE_C] = 1;
}
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
- for_each_pipe(i915, pipe)
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 4;
- else if (DISPLAY_VER(i915) >= 11)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 6;
- else if (DISPLAY_VER(i915) == 10)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) == 10)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 3;
- else if (IS_BROXTON(i915)) {
+ else if (display->platform.broxton) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -1705,23 +1748,23 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->num_sprites[PIPE_A] = 2;
display_runtime->num_sprites[PIPE_B] = 2;
display_runtime->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 1;
}
- if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) &&
- !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) {
- drm_info(&i915->drm, "Display not present, disabling\n");
+ if ((display->platform.dgfx || DISPLAY_VER(display) >= 14) &&
+ !(intel_de_read(display, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(display->drm, "Display not present, disabling\n");
goto display_fused_off;
}
- if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
- u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
- u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
@@ -1736,16 +1779,16 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(i915) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"Display fused off, disabling\n");
goto display_fused_off;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- drm_info(&i915->drm, "PipeC fused off\n");
+ drm_info(display->drm, "PipeC fused off\n");
display_runtime->pipe_mask &= ~BIT(PIPE_C);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (DISPLAY_VER(i915) >= 9) {
- u32 dfsm = intel_de_read(i915, SKL_DFSM);
+ } else if (DISPLAY_VER(display) >= 9) {
+ u32 dfsm = intel_de_read(display, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
display_runtime->pipe_mask &= ~BIT(PIPE_A);
@@ -1763,7 +1806,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->fbc_mask &= ~BIT(INTEL_FBC_C);
}
- if (DISPLAY_VER(i915) >= 12 &&
+ if (DISPLAY_VER(display) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
display_runtime->pipe_mask &= ~BIT(PIPE_D);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -1776,15 +1819,15 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
display_runtime->has_hdcp = 0;
- if (IS_DG2(i915) || DISPLAY_VER(i915) < 13) {
+ if (display->platform.dg2 || DISPLAY_VER(display) < 13) {
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
display_runtime->fbc_mask = 0;
}
- if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ if (DISPLAY_VER(display) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
display_runtime->has_dmc = 0;
- if (IS_DISPLAY_VER(i915, 10, 12) &&
+ if (IS_DISPLAY_VER(display, 10, 12) &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
display_runtime->has_dsc = 0;
@@ -1793,8 +1836,8 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->has_dbuf_overlap_detection = false;
}
- if (DISPLAY_VER(i915) >= 20) {
- u32 cap = intel_de_read(i915, XE2LPD_DE_CAP);
+ if (DISPLAY_VER(display) >= 20) {
+ u32 cap = intel_de_read(display, XE2LPD_DE_CAP);
if (REG_FIELD_GET(XE2LPD_DE_CAP_DSC_MASK, cap) ==
XE2LPD_DE_CAP_DSC_REMOVED)
@@ -1802,18 +1845,19 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (REG_FIELD_GET(XE2LPD_DE_CAP_SCALER_MASK, cap) ==
XE2LPD_DE_CAP_SCALER_SINGLE) {
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
if (display_runtime->num_scalers[pipe])
display_runtime->num_scalers[pipe] = 1;
}
}
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
display_runtime->edp_typec_support =
intel_de_read(display, PICA_PHY_CONFIG_CONTROL) & EDP_ON_TYPEC;
display_runtime->rawclk_freq = intel_read_rawclk(display);
- drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq);
+ drm_dbg_kms(display->drm, "rawclk rate: %d kHz\n",
+ display_runtime->rawclk_freq);
return;
@@ -1821,21 +1865,21 @@ display_fused_off:
memset(display_runtime, 0, sizeof(*display_runtime));
}
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+void intel_display_device_info_runtime_init(struct intel_display *display)
{
- if (HAS_DISPLAY(i915))
- __intel_display_device_info_runtime_init(i915);
+ if (HAS_DISPLAY(display))
+ __intel_display_device_info_runtime_init(display);
/* Display may have been disabled by runtime init */
- if (!HAS_DISPLAY(i915)) {
- i915->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
- i915->display.info.__device_info = &no_display;
+ if (!HAS_DISPLAY(display)) {
+ display->drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ display->info.__device_info = &no_display;
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->display.params.nuclear_pageflip &&
- DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
- i915->drm.driver_features &= ~DRIVER_ATOMIC;
+ if (!display->params.nuclear_pageflip &&
+ DISPLAY_VER(display) < 5 && !display->platform.g4x)
+ display->drm->driver_features &= ~DRIVER_ATOMIC;
}
void intel_display_device_info_print(const struct intel_display_device_info *info,
@@ -1872,10 +1916,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
* Disabling display means taking over the display hardware, putting it to
* sleep, and preventing connectors from being connected via any means.
*/
-bool intel_display_device_enabled(struct drm_i915_private *i915)
+bool intel_display_device_enabled(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(display->drm, !HAS_DISPLAY(display));
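The PLATFORM_GROUP() additions throughout this file OR group aliases (mobile, dgfx, g4x) into the same per-device platform bitmap as the platform bit itself, which is what lets later code test display->platform.dgfx directly. A small sketch of bit-per-platform grouping, using plain masks in place of the kernel's bitmap_or():

#include <stdbool.h>
#include <stdio.h>

enum platform_bit { PLAT_dg1, PLAT_dg2, PLAT_battlemage, GROUP_dgfx, GROUP_mobile };

#define BIT(n) (1u << (n))

struct platform_desc { const char *name; unsigned int platforms; };

static const struct platform_desc dg2_desc = {
	.name = "dg2",
	/* platform bit plus its group alias, like PLATFORM() + PLATFORM_GROUP() */
	.platforms = BIT(PLAT_dg2) | BIT(GROUP_dgfx),
};

static bool is_dgfx(const struct platform_desc *d)
{
	return d->platforms & BIT(GROUP_dgfx);
}

int main(void)
{
	printf("%s dgfx: %d\n", dg2_desc.name, is_dgfx(&dg2_desc)); /* 1 */
	return 0;
}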
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 43144a037f9f..9a333d9e6601 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -12,8 +12,9 @@
#include "intel_display_conversion.h"
#include "intel_display_limits.h"
-struct drm_i915_private;
struct drm_printer;
+struct intel_display;
+struct pci_dev;
/*
* Display platforms and subplatforms. Keep platforms in display version based
@@ -21,6 +22,10 @@ struct drm_printer;
* platform.
*/
#define INTEL_DISPLAY_PLATFORMS(func) \
+ /* Platform group aliases */ \
+ func(g4x) /* g45 and gm45 */ \
+ func(mobile) /* mobile platforms */ \
+ func(dgfx) /* discrete graphics */ \
/* Display ver 2 */ \
func(i830) \
func(i845g) \
@@ -38,7 +43,6 @@ struct drm_printer;
func(i965gm) \
func(g45) \
func(gm45) \
- func(g4x) /* group alias for g45 and gm45 */ \
/* Display ver 5 */ \
func(ironlake) \
/* Display ver 6 */ \
@@ -136,61 +140,64 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
-#define HAS_4TILE(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
-#define HAS_BIGJOINER(i915) (DISPLAY_VER(i915) >= 11 && HAS_DSC(i915))
-#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
-#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
-#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
-#define HAS_DBUF_OVERLAP_DETECTION(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dbuf_overlap_detection)
-#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
-#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
-#define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc)
-#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
-#define HAS_DP_MST(i915) (DISPLAY_INFO(i915)->has_dp_mst)
-#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
-#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
-#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
-#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
-#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
-#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
-#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
-#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915) (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
-#define HAS_LRR(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_OVERLAY(i915) (DISPLAY_INFO(i915)->has_overlay)
-#define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr)
-#define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915) && !IS_GEMINILAKE(i915))
-#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
- BIT(trans)) != 0)
-#define HAS_UNCOMPRESSED_JOINER(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_ULTRAJOINER(i915) ((DISPLAY_VER(i915) >= 20 || \
- (IS_DGFX(i915) && DISPLAY_VER(i915) == 14)) && \
- HAS_DSC(i915))
-#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
-#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_CMRR(i915) (DISPLAY_VER(i915) >= 20)
-#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
-#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
-#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
-#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv)
+#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
+#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
+#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
+#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
+#define HAS_DDI(__display) (DISPLAY_INFO(__display)->has_ddi)
+#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
+#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
+#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
+#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
+#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
+#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
+#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
+#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
+#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
+#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
+#define HAS_LRR(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_LSPCON(__display) (IS_DISPLAY_VER(__display, 9, 10))
+#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
+#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
+#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
+#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_SAGV(__display) (DISPLAY_VER(__display) >= 9 && \
+ !(__display)->platform.broxton && !(__display)->platform.geminilake)
+#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
+ BIT(trans)) != 0)
+#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
+ ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
+ HAS_DSC(__display))
+#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
+#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
+#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
+#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
+#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
/* Check that device has a display IP version within the specific range. */
-#define IS_DISPLAY_VERx100(__i915, from, until) ( \
+#define IS_DISPLAY_VERx100(__display, from, until) ( \
BUILD_BUG_ON_ZERO((from) < 200) + \
- (DISPLAY_VERx100(__i915) >= (from) && \
- DISPLAY_VERx100(__i915) <= (until)))
+ (DISPLAY_VERx100(__display) >= (from) && \
+ DISPLAY_VERx100(__display) <= (until)))
/*
* Check if a device has a specific IP version as well as a stepping within the
@@ -201,30 +208,30 @@ struct intel_display_platforms {
* hardware fix is present and the software workaround is no longer necessary.
* E.g.,
*
- * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_B2)
- * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_C0, STEP_FOREVER)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B2)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_C0, STEP_FOREVER)
*
* "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
* stepping bound for the specified IP version.
*/
-#define IS_DISPLAY_VERx100_STEP(__i915, ipver, from, until) \
- (IS_DISPLAY_VERx100((__i915), (ipver), (ipver)) && \
- IS_DISPLAY_STEP((__i915), (from), (until)))
+#define IS_DISPLAY_VERx100_STEP(__display, ipver, from, until) \
+ (IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
+ IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
+#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
-#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
-#define DISPLAY_VERx100(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver * 100 + \
- DISPLAY_RUNTIME_INFO(i915)->ip.rel)
-#define IS_DISPLAY_VER(i915, from, until) \
- (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
+#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
+ DISPLAY_RUNTIME_INFO(__display)->ip.rel)
+#define IS_DISPLAY_VER(__display, from, until) \
+ (DISPLAY_VER(__display) >= (from) && DISPLAY_VER(__display) <= (until))
-#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step)
+#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
-#define IS_DISPLAY_STEP(__i915, since, until) \
- (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
- INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
+#define IS_DISPLAY_STEP(__display, since, until) \
+ (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
struct intel_display_runtime_info {
struct intel_display_ip_ver {
@@ -283,10 +290,10 @@ struct intel_display_device_info {
} color;
};
-bool intel_display_device_enabled(struct drm_i915_private *i915);
-void intel_display_device_probe(struct drm_i915_private *i915);
-void intel_display_device_remove(struct drm_i915_private *i915);
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
+bool intel_display_device_enabled(struct intel_display *display);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+void intel_display_device_remove(struct intel_display *display);
+void intel_display_device_info_runtime_init(struct intel_display *display);
void intel_display_device_info_print(const struct intel_display_device_info *info,
const struct intel_display_runtime_info *runtime,
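The reworked DISPLAY_VERx100()/IS_DISPLAY_VERx100() macros encode the display IP version as ver * 100 + rel so ranged checks stay simple integer comparisons. A standalone sketch of that encoding:

#include <stdio.h>

struct runtime_info { unsigned int ver, rel; };

#define DISPLAY_VER(rt)     ((rt)->ver)
#define DISPLAY_VERx100(rt) ((rt)->ver * 100 + (rt)->rel)
#define IS_DISPLAY_VERx100(rt, from, until) \
	(DISPLAY_VERx100(rt) >= (from) && DISPLAY_VERx100(rt) <= (until))

int main(void)
{
	struct runtime_info rt = { .ver = 14, .rel = 1 }; /* 14.01 */

	printf("%d\n", IS_DISPLAY_VERx100(&rt, 1400, 1401)); /* 1 */
	printf("%d\n", IS_DISPLAY_VERx100(&rt, 2000, 3000)); /* 0 */
	return 0;
}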
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 56b78cf6b854..50ec0c3c7588 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -80,12 +80,12 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
return false;
}
-void intel_display_driver_init_hw(struct drm_i915_private *i915)
+void intel_display_driver_init_hw(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
@@ -112,12 +112,12 @@ static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
-static void intel_mode_config_init(struct drm_i915_private *i915)
+static void intel_mode_config_init(struct intel_display *display)
{
- struct drm_mode_config *mode_config = &i915->drm.mode_config;
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
- drm_mode_config_init(&i915->drm);
- INIT_LIST_HEAD(&i915->display.global.obj_list);
+ drm_mode_config_init(display->drm);
+ INIT_LIST_HEAD(&display->global.obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -128,19 +128,19 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
mode_config->helper_private = &intel_mode_config_funcs;
- mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
mode_config->max_width = 16384;
mode_config->max_height = 16384;
- } else if (DISPLAY_VER(i915) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
mode_config->max_width = 8192;
mode_config->max_height = 8192;
- } else if (DISPLAY_VER(i915) == 3) {
+ } else if (DISPLAY_VER(display) == 3) {
mode_config->max_width = 4096;
mode_config->max_height = 4096;
} else {
@@ -148,11 +148,11 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->max_height = 2048;
}
- if (IS_I845G(i915) || IS_I865G(i915)) {
- mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ if (display->platform.i845g || display->platform.i865g) {
+ mode_config->cursor_width = display->platform.i845g ? 64 : 512;
mode_config->cursor_height = 1023;
- } else if (IS_I830(i915) || IS_I85X(i915) ||
- IS_I915G(i915) || IS_I915GM(i915)) {
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@@ -161,18 +161,17 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct intel_display *display)
{
- intel_atomic_global_obj_cleanup(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_atomic_global_obj_cleanup(display);
+ drm_mode_config_cleanup(display->drm);
}
-static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
@@ -180,41 +179,43 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-void intel_display_driver_early_probe(struct drm_i915_private *i915)
+void intel_display_driver_early_probe(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_init(&i915->display.fb_tracking.lock);
- mutex_init(&i915->display.backlight.lock);
- mutex_init(&i915->display.audio.mutex);
- mutex_init(&i915->display.wm.wm_mutex);
- mutex_init(&i915->display.pps.mutex);
- mutex_init(&i915->display.hdcp.hdcp_mutex);
+ spin_lock_init(&display->fb_tracking.lock);
+ mutex_init(&display->backlight.lock);
+ mutex_init(&display->audio.mutex);
+ mutex_init(&display->wm.wm_mutex);
+ mutex_init(&display->pps.mutex);
+ mutex_init(&display->hdcp.hdcp_mutex);
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
- intel_color_init_hooks(&i915->display);
- intel_init_cdclk_hooks(&i915->display);
+ intel_color_init_hooks(display);
+ intel_init_cdclk_hooks(display);
intel_audio_hooks_init(i915);
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
- intel_dmc_wl_init(&i915->display);
+ intel_dmc_wl_init(display);
}
/* part #1: call before irq install */
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
+int intel_display_driver_probe_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(i915)) {
- ret = drm_vblank_init(&i915->drm,
- INTEL_NUM_PIPES(i915));
+ if (HAS_DISPLAY(display)) {
+ ret = drm_vblank_init(display->drm,
+ INTEL_NUM_PIPES(display));
if (ret)
return ret;
}
@@ -226,24 +227,25 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
- ret = intel_power_domains_init(i915);
+ ret = intel_power_domains_init(display);
if (ret < 0)
goto cleanup_vga;
- intel_pmdemand_init_early(i915);
+ intel_pmdemand_init_early(display);
- intel_power_domains_init_hw(i915, false);
+ intel_power_domains_init_hw(display, false);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
intel_dmc_init(display);
- i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
- i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
- intel_mode_config_init(i915);
+ intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
@@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_pmdemand_init(i915);
+ ret = intel_pmdemand_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
@@ -273,7 +275,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
cleanup_vga:
intel_vga_unregister(display);
cleanup_bios:
@@ -282,7 +284,7 @@ cleanup_bios:
return ret;
}
-static void set_display_access(struct drm_i915_private *i915,
+static void set_display_access(struct intel_display *display,
bool any_task_allowed,
struct task_struct *allowed_task)
{
@@ -290,20 +292,20 @@ static void set_display_access(struct drm_i915_private *i915,
int err;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
- err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ err = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (err)
continue;
- i915->display.access.any_task_allowed = any_task_allowed;
- i915->display.access.allowed_task = allowed_task;
+ display->access.any_task_allowed = any_task_allowed;
+ display->access.allowed_task = allowed_task;
}
- drm_WARN_ON(&i915->drm, err);
+ drm_WARN_ON(display->drm, err);
}
/**
* intel_display_driver_enable_user_access - Enable display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Enable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing.
@@ -311,16 +313,18 @@ static void set_display_access(struct drm_i915_private *i915,
* This function should be called during driver loading and system resume once
* all the HW initialization steps are done.
*/
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_enable_user_access(struct intel_display *display)
{
- set_display_access(i915, true, NULL);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ set_display_access(display, true, NULL);
intel_hpd_enable_detection_work(i915);
}
/**
* intel_display_driver_disable_user_access - Disable display HW access for user threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for user threads. Examples of such accesses
* are modeset commits and connector probing. For the current thread the
@@ -335,16 +339,18 @@ void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
* This function should be called during driver loading/unloading and system
* suspend/shutdown before starting the HW init/deinit programming.
*/
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_disable_user_access(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
intel_hpd_disable_detection_work(i915);
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
/**
* intel_display_driver_suspend_access - Suspend display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing. This call should be either
@@ -354,14 +360,14 @@ void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
* This function should be called during driver unloading and system
* suspend/shutdown after completing the HW deinit programming.
*/
-void intel_display_driver_suspend_access(struct drm_i915_private *i915)
+void intel_display_driver_suspend_access(struct intel_display *display)
{
- set_display_access(i915, false, NULL);
+ set_display_access(display, false, NULL);
}
/**
* intel_display_driver_resume_access - Resume display HW access for the resume thread
- * @i915: i915 device instance
+ * @display: display device instance
*
* Enable the display HW access for the current resume thread, keeping the
 * access disabled for all other (user) threads. Examples of such accesses
@@ -373,14 +379,14 @@ void intel_display_driver_suspend_access(struct drm_i915_private *i915)
* This function should be called during system resume before starting the HW
* init steps.
*/
-void intel_display_driver_resume_access(struct drm_i915_private *i915)
+void intel_display_driver_resume_access(struct intel_display *display)
{
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
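
A caller-side sketch of how the four access helpers are meant to bracket HW (de)init, per the kernel-doc above; display_hw_init() and display_hw_deinit() are hypothetical placeholders, not real driver functions:

/*
 * Sketch only: how the access helpers bracket HW programming.
 * display_hw_init()/display_hw_deinit() are hypothetical placeholders.
 */
static void example_suspend(struct intel_display *display)
{
	/* stop user threads before HW deinit starts */
	intel_display_driver_disable_user_access(display);
	display_hw_deinit(display);
	/* lock out all threads once deinit has completed */
	intel_display_driver_suspend_access(display);
}

static void example_resume(struct intel_display *display)
{
	/* only the resume thread may touch the HW during init */
	intel_display_driver_resume_access(display);
	display_hw_init(display);
	/* re-open access for everyone after init */
	intel_display_driver_enable_user_access(display);
}
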
/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
- * @i915: i915 device instance
+ * @display: display device instance
*
* Check whether the current thread has display HW access, print a debug
* message if it doesn't. Such accesses are modeset commits and connector
@@ -389,26 +395,24 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915)
* Returns %true if the current thread has display HW access, %false
* otherwise.
*/
-bool intel_display_driver_check_access(struct drm_i915_private *i915)
+bool intel_display_driver_check_access(struct intel_display *display)
{
- char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
- if (i915->display.access.any_task_allowed ||
- i915->display.access.allowed_task == current)
+ if (display->access.any_task_allowed ||
+ display->access.allowed_task == current)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
- get_task_comm(comm, current),
- task_pid_vnr(current));
+ current->comm, task_pid_vnr(current));
- if (i915->display.access.allowed_task)
+ if (display->access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
- get_task_comm(comm, i915->display.access.allowed_task),
- task_pid_vnr(i915->display.access.allowed_task));
+ display->access.allowed_task->comm,
+ task_pid_vnr(display->access.allowed_task));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Reject display access from task %s (allowed to %s)\n",
current_task, allowed_task);
@@ -416,14 +420,13 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
}
/* part #2: call after irq install, but before gem init */
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
+int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_device *dev = display->drm;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
intel_wm_init(i915);
@@ -434,22 +437,22 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_gmbus_setup(display);
- drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(display),
+ INTEL_NUM_PIPES(display) > 1 ? "s" : "");
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret)
goto err_mode_config;
}
- intel_plane_possible_crtcs_init(i915);
+ intel_plane_possible_crtcs_init(display);
intel_shared_dpll_init(i915);
intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(i915);
if (display->cdclk.max_cdclk_freq == 0)
@@ -465,21 +468,21 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
if (ret)
goto err_hdcp;
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
+ drm_modeset_lock_all(display->drm);
+ intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(display->drm);
- intel_initial_plane_config(i915);
+ intel_initial_plane_config(display);
/*
* Make sure hardware watermarks really match the state we read out.
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
ilk_wm_sanitize(i915);
return 0;
@@ -487,18 +490,18 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
err_hdcp:
intel_hdcp_component_fini(display);
err_mode_config:
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
return ret;
}
/* part #3: call after gem init */
-int intel_display_driver_probe(struct drm_i915_private *i915)
+int intel_display_driver_probe(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
/*
@@ -514,11 +517,11 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
 * are already calculated and there are no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(&i915->drm);
+ ret = intel_initial_commit(display->drm);
if (ret)
- drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+ drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
- intel_overlay_setup(i915);
+ intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -528,13 +531,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
return 0;
}
-void intel_display_driver_register(struct drm_i915_private *i915)
+void intel_display_driver_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
/* Must be done after probing outputs */
@@ -543,7 +546,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
- intel_display_driver_enable_user_access(i915);
+ intel_display_driver_enable_user_access(display);
intel_audio_register(i915);
@@ -554,41 +557,42 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev configuration, for which we use the
* fbdev->async_cookie.
*/
- drm_kms_helper_poll_init(&i915->drm);
+ drm_kms_helper_poll_init(display->drm);
intel_hpd_poll_disable(i915);
intel_fbdev_setup(i915);
- intel_display_device_info_print(DISPLAY_INFO(i915),
- DISPLAY_RUNTIME_INFO(i915), &p);
+ intel_display_device_info_print(DISPLAY_INFO(display),
+ DISPLAY_RUNTIME_INFO(display), &p);
}
/* part #1: call before irq uninstall */
-void intel_display_driver_remove(struct drm_i915_private *i915)
+void intel_display_driver_remove(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- flush_workqueue(i915->display.wq.flip);
- flush_workqueue(i915->display.wq.modeset);
+ flush_workqueue(display->wq.flip);
+ flush_workqueue(display->wq.modeset);
+ flush_workqueue(display->wq.cleanup);
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
* drm_mode_config_cleanup()
*/
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
}
/* part #2: call after irq uninstall */
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
+void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -603,55 +607,54 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_hdcp_component_fini(display);
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
intel_dp_tunnel_mgr_cleanup(display);
- intel_overlay_cleanup(i915);
+ intel_overlay_cleanup(display);
intel_gmbus_teardown(display);
- destroy_workqueue(i915->display.wq.flip);
- destroy_workqueue(i915->display.wq.modeset);
+ destroy_workqueue(display->wq.flip);
+ destroy_workqueue(display->wq.modeset);
+ destroy_workqueue(display->wq.cleanup);
- intel_fbc_cleanup(&i915->display);
+ intel_fbc_cleanup(display);
}
/* part #3: call after gem init */
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
+void intel_display_driver_remove_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
intel_dmc_fini(display);
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
intel_vga_unregister(display);
intel_bios_driver_remove(display);
}
-void intel_display_driver_unregister(struct drm_i915_private *i915)
+void intel_display_driver_unregister(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- drm_client_dev_unregister(&i915->drm);
+ drm_client_dev_unregister(display->drm);
/*
* After flushing the fbdev (incl. a late async config which
 * will have delayed queuing of a hotplug event), flush
* the hotplug events.
*/
- drm_kms_helper_poll_fini(&i915->drm);
+ drm_kms_helper_poll_fini(display->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
intel_audio_deinit(i915);
- drm_atomic_helper_shutdown(&i915->drm);
+ drm_atomic_helper_shutdown(display->drm);
acpi_video_unregister();
intel_opregion_unregister(display);
@@ -661,30 +664,36 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
 * turn all CRTCs off, but do not adjust state
* This has to be paired with a call to intel_modeset_setup_hw_state.
*/
-int intel_display_driver_suspend(struct drm_i915_private *i915)
+int intel_display_driver_suspend(struct intel_display *display)
{
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
- state = drm_atomic_helper_suspend(&i915->drm);
+ state = drm_atomic_helper_suspend(display->drm);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- drm_err(&i915->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
else
- i915->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
+
+ /* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
+ flush_workqueue(display->wq.cleanup);
+
+ intel_dp_mst_suspend(display);
+
return ret;
}
int
-__intel_display_driver_resume(struct drm_i915_private *i915,
+__intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
@@ -710,33 +719,37 @@ __intel_display_driver_resume(struct drm_i915_private *i915,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
return ret;
}
-void intel_display_driver_resume(struct drm_i915_private *i915)
+void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_atomic_state *state = i915->display.restore.modeset_state;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- i915->display.restore.modeset_state = NULL;
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(display);
+
+ display->restore.modeset_state = NULL;
if (state)
state->acquire_ctx = &ctx;
drm_modeset_acquire_init(&ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (ret != -EDEADLK)
break;
@@ -744,14 +757,14 @@ void intel_display_driver_resume(struct drm_i915_private *i915)
}
if (!ret)
- ret = __intel_display_driver_resume(i915, state, &ctx);
+ ret = __intel_display_driver_resume(display, state, &ctx);
skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
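
The conversion idiom used throughout this file: functions now take struct intel_display and derive the legacy i915 pointer only where a not-yet-converted callee still needs it. A minimal sketch (example_helper() is a hypothetical name):

static void example_helper(struct intel_display *display)
{
	/* derive the legacy pointer only for unconverted callees */
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	intel_hpd_init(i915);		/* still takes i915 */
	intel_overlay_setup(display);	/* already converted */
}
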
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
index 42cc4af6d3fd..2966ff91b219 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.h
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -9,34 +9,34 @@
#include <linux/types.h>
struct drm_atomic_state;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
struct pci_dev;
bool intel_display_driver_probe_defer(struct pci_dev *pdev);
-void intel_display_driver_init_hw(struct drm_i915_private *i915);
-void intel_display_driver_early_probe(struct drm_i915_private *i915);
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915);
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915);
-int intel_display_driver_probe(struct drm_i915_private *i915);
-void intel_display_driver_register(struct drm_i915_private *i915);
-void intel_display_driver_remove(struct drm_i915_private *i915);
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915);
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915);
-void intel_display_driver_unregister(struct drm_i915_private *i915);
-int intel_display_driver_suspend(struct drm_i915_private *i915);
-void intel_display_driver_resume(struct drm_i915_private *i915);
+void intel_display_driver_init_hw(struct intel_display *display);
+void intel_display_driver_early_probe(struct intel_display *display);
+int intel_display_driver_probe_noirq(struct intel_display *display);
+int intel_display_driver_probe_nogem(struct intel_display *display);
+int intel_display_driver_probe(struct intel_display *display);
+void intel_display_driver_register(struct intel_display *display);
+void intel_display_driver_remove(struct intel_display *display);
+void intel_display_driver_remove_noirq(struct intel_display *display);
+void intel_display_driver_remove_nogem(struct intel_display *display);
+void intel_display_driver_unregister(struct intel_display *display);
+int intel_display_driver_suspend(struct intel_display *display);
+void intel_display_driver_resume(struct intel_display *display);
/* interface for intel_display_reset.c */
-int __intel_display_driver_resume(struct drm_i915_private *i915,
+int __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_suspend_access(struct drm_i915_private *i915);
-void intel_display_driver_resume_access(struct drm_i915_private *i915);
-bool intel_display_driver_check_access(struct drm_i915_private *i915);
+void intel_display_driver_enable_user_access(struct intel_display *display);
+void intel_display_driver_disable_user_access(struct intel_display *display);
+void intel_display_driver_suspend_access(struct intel_display *display);
+void intel_display_driver_resume_access(struct intel_display *display);
+bool intel_display_driver_check_access(struct intel_display *display);
#endif /* __INTEL_DISPLAY_DRIVER_H__ */
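
The prototypes above keep the three-part probe split noted in the .c file ("part #1" before irq install, "part #2" before gem init, "part #3" after gem init); a caller-side sketch of the intended sequence, with the irq/gem steps abbreviated and error unwinding elided:

static int example_display_probe(struct intel_display *display)
{
	int ret;

	intel_display_driver_early_probe(display);

	ret = intel_display_driver_probe_noirq(display);	/* part #1 */
	if (ret)
		return ret;

	/* ... install interrupts ... */

	ret = intel_display_driver_probe_nogem(display);	/* part #2 */
	if (ret)
		return ret;

	/* ... initialize GEM ... */

	return intel_display_driver_probe(display);		/* part #3 */
}
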
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index e1547ebce60e..069043f9d894 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -434,7 +434,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled) {
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ !dev_priv->display.irq.vlv_display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -843,7 +844,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 14)
+ struct intel_display *display = &dev_priv->display;
+
+ if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -853,7 +856,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
@@ -861,7 +864,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 12)
+ else if (DISPLAY_VER(display) == 12)
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
@@ -871,7 +874,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(display) == 11)
return GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
GEN11_PIPE_PLANE6_FAULT |
@@ -880,7 +883,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_CURSOR_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
@@ -1420,7 +1423,6 @@ static void intel_display_vblank_dc_work(struct work_struct *work)
{
struct intel_display *display =
container_of(work, typeof(*display), irq.vblank_dc_work);
- struct drm_i915_private *i915 = to_i915(display->drm);
int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
/*
@@ -1429,7 +1431,7 @@ static void intel_display_vblank_dc_work(struct work_struct *work)
 * PSR code. If DC3CO is taken into use we need to take that into account
* here as well.
*/
- intel_display_power_set_target_dc_state(i915, vblank_wa_num_pipes ? DC_STATE_DISABLE :
+ intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
DC_STATE_EN_UPTO_DC6);
}
@@ -1479,7 +1481,7 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
schedule_work(&display->irq.vblank_dc_work);
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1497,6 +1499,12 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
+void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(dev_priv);
+}
+
void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
if (I915_HAS_HOTPLUG(i915)) {
@@ -1516,6 +1524,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 enable_mask;
enum pipe pipe;
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
+ return;
+
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
@@ -1688,13 +1699,13 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = true;
+ dev_priv->display.irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
vlv_display_irq_postinstall(dev_priv);
}
}
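
The rename makes the split visible at the call sites: generic reset paths go through the checked public helper, while the enable/disable helpers above, which flip the flag under irq_lock, call the underscored variant to force the programming. A short sketch of a caller on the generic path:

static void example_reset_path(struct drm_i915_private *i915)
{
	/*
	 * Safe on vlv/chv even while the display power domain is off:
	 * the wrapper no-ops unless display irqs were enabled.
	 */
	vlv_display_irq_reset(i915);
}
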
@@ -1703,13 +1714,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled)
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = false;
+ dev_priv->display.irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
}
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
@@ -1902,17 +1913,6 @@ void intel_display_irq_init(struct drm_i915_private *i915)
{
i915->drm.vblank_disable_immediate = true;
- /*
- * Most platforms treat the display irq block as an always-on power
- * domain. vlv/chv can disable it at runtime and need special care to
- * avoid writing any of the display block registers outside of the power
- * domain. We defer setting up the display irqs in this case to the
- * runtime pm.
- */
- i915->display.irq.display_irqs_enabled = true;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.irq.display_irqs_enabled = false;
-
intel_hotplug_irq_init(i915);
INIT_WORK(&i915->display.irq.vblank_dc_work,
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 024de8abcb1a..f92e4640a613 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -3,8 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+
+#include <drm/drm_print.h>
+
#include "intel_display_params.h"
-#include "i915_drv.h"
#define intel_display_param_named(name, T, perm, desc) \
module_param_named(name, intel_display_modparams.name, T, perm); \
@@ -123,10 +128,10 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
-intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(0=disabled, 1=enabled) "
- "Default: 0");
+ "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "Default: -1");
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
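
With enable_dmc_wl now a tri-state int, -1 defers to a per-chip default. A minimal sketch of how such a knob is typically resolved; resolve_dmc_wl() and per_chip_dmc_wl_default() are hypothetical names, not the driver's actual resolution code:

/* Hypothetical resolution of a -1/0/1 module parameter. */
static bool resolve_dmc_wl(struct intel_display *display)
{
	int param = display->params.enable_dmc_wl;

	if (param >= 0)
		return param;	/* explicit user override */

	/* hypothetical helper encoding the per-chip default */
	return per_chip_dmc_wl_default(display);
}
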
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index dcb6face936a..5317138e6044 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -47,7 +47,7 @@ struct drm_printer;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_dmc_wl, false, 0400) \
+ param(int, enable_dmc_wl, -1, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2766fd9208b0..d3b8453a1705 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -28,12 +28,12 @@
#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
- for_each_power_well(__dev_priv, __power_well) \
+#define for_each_power_domain_well(__display, __power_well, __domain) \
+ for_each_power_well((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
+ for_each_power_well_reverse((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
static const char *
@@ -198,18 +198,18 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+static bool __intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(dev_priv->drm.dev))
+ if (pm_runtime_suspended(display->drm->dev))
return false;
is_enabled = true;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
+ for_each_power_domain_well_reverse(display, power_well, domain) {
if (intel_power_well_is_always_on(power_well))
continue;
@@ -242,23 +242,22 @@ static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
bool ret;
- power_domains = &dev_priv->display.power.domains;
-
mutex_lock(&power_domains->lock);
- ret = __intel_display_power_is_enabled(dev_priv, domain);
+ ret = __intel_display_power_is_enabled(display, domain);
mutex_unlock(&power_domains->lock);
return ret;
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *i915,
+sanitize_target_dc_state(struct intel_display *display,
u32 target_dc_state)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -282,43 +281,43 @@ sanitize_target_dc_state(struct drm_i915_private *i915,
/**
* intel_display_power_set_target_dc_state - Set target dc state.
- * @dev_priv: i915 device
+ * @display: display device
* @state: state which needs to be set as target_dc_state.
*
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
*/
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state)
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
- if (drm_WARN_ON(&dev_priv->drm, !power_well))
+ if (drm_WARN_ON(display->drm, !power_well))
goto unlock;
- state = sanitize_target_dc_state(dev_priv, state);
+ state = sanitize_target_dc_state(display, state);
if (state == power_domains->target_dc_state)
goto unlock;
- dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
+ dc_off_enabled = intel_power_well_is_enabled(display, power_well);
/*
* If DC off power well is disabled, need to enable and disable the
* DC off power well to effect target DC state.
*/
if (!dc_off_enabled)
- intel_power_well_enable(dev_priv, power_well);
+ intel_power_well_enable(display, power_well);
power_domains->target_dc_state = state;
if (!dc_off_enabled)
- intel_power_well_disable(dev_priv, power_well);
+ intel_power_well_disable(display, power_well);
unlock:
mutex_unlock(&power_domains->lock);
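
A usage sketch mirroring intel_display_vblank_dc_work() from the irq hunks above, which is the style of caller this function expects:

static void example_set_dc_state(struct intel_display *display, bool busy)
{
	intel_display_power_set_target_dc_state(display,
						busy ? DC_STATE_DISABLE :
						       DC_STATE_EN_UPTO_DC6);
}
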
@@ -338,11 +337,11 @@ static void __async_put_domains_mask(struct i915_power_domains *power_domains,
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- return !drm_WARN_ON(&i915->drm,
+ return !drm_WARN_ON(display->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM));
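
The recurring change in these hunks swaps the container_of() target: i915_power_domains is now reached as a member of struct intel_display instead of drm_i915_private. A minimal sketch of the idiom (example_to_display() is a hypothetical wrapper):

/* Recover the wrapping intel_display from an embedded member. */
static struct intel_display *
example_to_display(struct i915_power_domains *power_domains)
{
	return container_of(power_domains, struct intel_display,
			    power.domains);
}
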
@@ -351,21 +350,21 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
__async_put_domains_mask(power_domains, &async_put_mask);
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
!!power_domains->async_put_wakeref !=
!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, &async_put_mask)
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
power_domains->domain_use_count[domain] != 1);
return !err;
@@ -374,27 +373,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
+ drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
- drm_dbg(&i915->drm, "%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
- str_yes_no(power_domains->async_put_wakeref));
+ drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
+ str_yes_no(power_domains->async_put_wakeref));
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
@@ -454,10 +453,11 @@ cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
}
static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -483,17 +483,17 @@ out_verify:
}
static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_get_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ if (intel_display_power_grab_async_put_ref(display, domain))
return;
- for_each_power_domain_well(dev_priv, power_well, domain)
- intel_power_well_get(dev_priv, power_well);
+ for_each_power_domain_well(display, power_well, domain)
+ intel_power_well_get(display, power_well);
power_domains->domain_use_count[domain]++;
}
@@ -513,11 +513,12 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
- __intel_display_power_get_domain(dev_priv, domain);
+ __intel_display_power_get_domain(display, domain);
mutex_unlock(&power_domains->lock);
return wakeref;
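
The get/put pairing itself is unchanged by the conversion; a minimal usage sketch, with POWER_DOMAIN_AUDIO_MMIO as an arbitrary example domain:

static void example_power_domain_user(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* takes a runtime-PM reference and powers the domain's wells */
	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_MMIO);

	/* ... access registers backed by the domain ... */

	intel_display_power_put(i915, POWER_DOMAIN_AUDIO_MMIO, wakeref);
}
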
@@ -539,7 +540,8 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -549,8 +551,8 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
- if (__intel_display_power_is_enabled(dev_priv, domain)) {
- __intel_display_power_get_domain(dev_priv, domain);
+ if (__intel_display_power_is_enabled(display, domain)) {
+ __intel_display_power_get_domain(display, domain);
is_enabled = true;
} else {
is_enabled = false;
@@ -567,38 +569,36 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
}
static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_put_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->display.power.domains;
-
- drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ drm_WARN(display->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
async_put_domains_mask(power_domains, &async_put_mask);
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain)
- intel_power_well_put(dev_priv, power_well);
+ for_each_power_domain_well_reverse(display, power_well, domain)
+ intel_power_well_put(display, power_well);
}
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+static void __intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
mutex_unlock(&power_domains->lock);
}
@@ -607,23 +607,24 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(delay_ms)));
+ drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(delay_ms)));
}
static void
release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *dev_priv =
- container_of(power_domains, struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -633,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
async_put_domains_clear_domain(power_domains, domain);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
}
intel_runtime_pm_put(rpm, wakeref);
@@ -642,10 +643,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
static void
intel_display_power_put_async_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.power.domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = container_of(work, struct intel_display,
+ power.domains.async_put_work.work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = NULL;
@@ -711,7 +712,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -720,12 +722,12 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
mutex_lock(&power_domains->lock);
if (power_domains->domain_use_count[domain] > 1) {
- __intel_display_power_put_domain(i915, domain);
+ __intel_display_power_put_domain(display, domain);
goto out_verify;
}
- drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -764,7 +766,8 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -789,22 +792,23 @@ out_verify:
/**
* intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Like intel_display_power_flush_work(), but also ensures that the work
* handler function is not running any more when this function returns.
*/
static void
-intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+intel_display_power_flush_work_sync(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_display_power_flush_work(i915);
cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -822,7 +826,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
@@ -842,7 +848,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif
@@ -852,9 +860,10 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t __maybe_unused wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -868,9 +877,10 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
@@ -889,9 +899,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = &i915->display;
enum intel_display_power_domain domain;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
@@ -906,8 +917,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
}
static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
- int disable_power_well)
+sanitize_disable_power_well_option(int disable_power_well)
{
if (disable_power_well >= 0)
return !!disable_power_well;
@@ -915,27 +925,26 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
+static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
{
u32 mask;
int requested_dc;
int max_dc;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return 0;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
max_dc = 2;
- else if (IS_DG2(dev_priv))
+ else if (display->platform.dg2)
max_dc = 1;
- else if (IS_DG1(dev_priv))
+ else if (display->platform.dg1)
max_dc = 3;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
max_dc = 4;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
max_dc = 1;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
max_dc = 2;
else
max_dc = 0;
@@ -945,11 +954,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
- mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- DISPLAY_VER(dev_priv) >= 11 ?
- DC_STATE_EN_DC9 : 0;
+ mask = display->platform.geminilake || display->platform.broxton ||
+ DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
- if (!dev_priv->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -957,12 +965,12 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -982,30 +990,29 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
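
The enable_dc clamping above is self-contained enough to state as a worked sketch, assuming max_dc has already been derived from the platform:

/* Sketch of the clamping performed by get_allowed_dc_mask(). */
static int example_clamp_enable_dc(int enable_dc, int max_dc)
{
	if (enable_dc >= 0 && enable_dc <= max_dc)
		return enable_dc;	/* valid explicit request */
	if (enable_dc == -1)
		return max_dc;		/* auto: use platform maximum */
	/* anything else falls back to the platform maximum */
	return max_dc;
}
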
/**
* intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
 * Initializes the power domain structures for @display depending upon the
* supported platform.
*/
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
+int intel_power_domains_init(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
- dev_priv->display.params.disable_power_well =
- sanitize_disable_power_well_option(dev_priv,
- dev_priv->display.params.disable_power_well);
+ display->params.disable_power_well =
+ sanitize_disable_power_well_option(display->params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
+ get_allowed_dc_mask(display, display->params.enable_dc);
power_domains->target_dc_state =
- sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1017,39 +1024,39 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Release any resources acquired by intel_power_domains_init()
*/
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct intel_display *display)
{
- intel_display_power_map_cleanup(&dev_priv->display.power.domains);
+ intel_display_power_map_cleanup(&display->power.domains);
}
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+static void intel_power_domains_sync_hw(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well)
- intel_power_well_sync_hw(dev_priv, power_well);
+ for_each_power_well(display, power_well)
+ intel_power_well_sync_hw(display, power_well);
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+static void gen9_dbuf_slice_set(struct intel_display *display,
enum dbuf_slice slice, bool enable)
{
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
- intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
+ intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
enable ? DBUF_POWER_REQUEST : 0);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(10);
- state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
- drm_WARN(&dev_priv->drm, enable != state,
+ state = intel_de_read(display, reg) & DBUF_POWER_STATE;
+ drm_WARN(display->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
slice, str_enable_disable(enable));
}
@@ -1057,15 +1064,16 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ drm_WARN(display->drm, req_slices & ~slice_mask,
"Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
req_slices, slice_mask);
- drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/*
@@ -1077,25 +1085,25 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for_each_dbuf_slice(dev_priv, slice)
- gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
+ for_each_dbuf_slice(display, slice)
+ gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
- dev_priv->display.dbuf.enabled_slices = req_slices;
+ display->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 slices_mask;
- dev_priv->display.dbuf.enabled_slices =
- intel_enabled_dbuf_slices_mask(dev_priv);
+ display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
+ slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, slices_mask);
/*
* Just power up at least 1 slice, we will
@@ -1104,33 +1112,35 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
gen9_dbuf_slices_update(dev_priv, slices_mask);
}
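A pattern worth noting here, since it repeats through the rest of this patch: converted functions take struct intel_display * as their only context, and anything still tied to drm_i915_private is reached through a local upcast. A hedged sketch of the idiom (example_display_func() is a hypothetical name):

/* Sketch of the transitional pattern: display-scoped state comes from
 * @display, i915-scoped state via to_i915(display->drm).
 */
static void example_display_func(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	/* display-scoped */
	u8 slices = display->dbuf.enabled_slices;

	/* still i915-scoped during the transition */
	if (HAS_PCH_NOP(i915))
		return;

	drm_dbg_kms(display->drm, "dbuf slices 0x%x\n", slices);
}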
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
gen9_dbuf_slices_update(dev_priv, 0);
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, 0);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, 0);
}
-static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
+static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (display->platform.alderlake_p)
return;
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
}
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
+static void icl_mbus_init(struct intel_display *display)
{
- unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1147,16 +1157,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
- intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+static void hsw_assert_cdclk(struct intel_display *display)
{
- u32 val = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 val = intel_de_read(display, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -1165,18 +1175,18 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
+ drm_err(display->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- drm_err(&dev_priv->drm, "LCPLL is disabled\n");
+ drm_err(display->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
+ drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+static void assert_can_disable_lcpll(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
for_each_intel_crtc(display->drm, crtc)
@@ -1201,7 +1211,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
INTEL_DISPLAY_STATE_WARN(display,
intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
INTEL_DISPLAY_STATE_WARN(display,
intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
@@ -1225,23 +1235,24 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
"IRQs enabled\n");
}
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct intel_display *display)
{
- if (IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, D_COMP_HSW);
+ if (display->platform.haswell)
+ return intel_de_read(display, D_COMP_HSW);
else
- return intel_de_read(dev_priv, D_COMP_BDW);
+ return intel_de_read(display, D_COMP_BDW);
}
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- if (IS_HASWELL(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (display->platform.haswell) {
if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
- drm_dbg_kms(&dev_priv->drm,
- "Failed to write to D_COMP\n");
+ drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
- intel_de_write(dev_priv, D_COMP_BDW, val);
- intel_de_posting_read(dev_priv, D_COMP_BDW);
+ intel_de_write(display, D_COMP_BDW, val);
+ intel_de_posting_read(display, D_COMP_BDW);
}
}
@@ -1253,45 +1264,45 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
- assert_can_disable_lcpll(dev_priv);
+ assert_can_disable_lcpll(display);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+ drm_err(display->drm, "Switching to FCLK failed\n");
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "LCPLL still locked\n");
+ if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(display->drm, "LCPLL still locked\n");
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(dev_priv) &
+ if (wait_for((hsw_read_dcomp(display) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
+ drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
+ intel_de_posting_read(display, LCPLL_CTL);
}
}
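For readers following the conversion, the ordering constraints inside hsw_disable_lcpll() are unchanged; only the handles differ. An outline of the sequence as commentary (names as in the hunk above, no new API implied):

/* hsw_disable_lcpll() ordering, as preserved by this patch:
 *
 * 1. assert_can_disable_lcpll()  - every display consumer must be off
 * 2. optionally retarget CDCLK to FCLK, wait for CD_SOURCE_FCLK_DONE
 * 3. set LCPLL_PLL_DISABLE, posting read, wait for PLL_LOCK to clear
 * 4. disable D_COMP via hsw_write_dcomp(), wait for RCOMP to go idle
 * 5. optionally set LCPLL_POWER_DOWN_ALLOW so the PLL can power down
 */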
@@ -1299,12 +1310,12 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -1318,28 +1329,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
}
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
+ if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
+ intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -1372,36 +1383,42 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" in the hardware
* documentation.
*/
-static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
+ hsw_disable_lcpll(display, true, true);
}
-static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Disabling package C8+\n");
- hsw_restore_lcpll(dev_priv);
+ hsw_restore_lcpll(display);
intel_init_pch_refclk(dev_priv);
/* Many display registers don't survive PC8+ */
+#ifdef I915 /* FIXME */
intel_clock_gating_init(dev_priv);
+#endif
}
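The new #ifdef I915 guard marks an i915-only call in code that is also built into the xe driver; as I understand the convention (an assumption, not stated in this patch), I915 is defined only for the i915 build, so intel_clock_gating_init() is compiled out for xe until the FIXME is resolved:

#ifdef I915	/* assumed: defined for the i915 build, absent for xe */
	intel_clock_gating_init(i915);	/* no xe counterpart yet */
#endif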
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+static void intel_pch_reset_handshake(struct intel_display *display,
bool enable)
{
i915_reg_t reg;
u32 reset_bits;
- if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
} else {
@@ -1409,59 +1426,58 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
+ intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
}
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
+static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
intel_dmc_load_program(display);
}
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void skl_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
@@ -1476,17 +1492,16 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
* Note that even though the driver's request is removed, power well 1
* may stay enabled after this due to DMC's own request on it.
*/
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+static void bxt_display_core_init(struct intel_display *display, bool resume)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1498,40 +1513,39 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
* Move the handshake programming to initialization sequence.
* Previously was left up to BIOS.
*/
- intel_pch_reset_handshake(dev_priv, false);
+ intel_pch_reset_handshake(display, false);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* Enable PG1 */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
intel_dmc_load_program(display);
}
-static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
@@ -1544,8 +1558,8 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
@@ -1582,20 +1596,21 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{}
};
-static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+static void tgl_bw_buddy_init(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
- if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+ if (display->platform.dgfx && !display->platform.dg1)
return;
- if (IS_ALDERLAKE_S(dev_priv) ||
- (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
+ if (display->platform.alderlake_s ||
+ (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
@@ -1607,29 +1622,29 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[config].page_mask == 0) {
- drm_dbg(&dev_priv->drm,
- "Unknown memory configuration; disabling address buddy logic.\n");
+ drm_dbg_kms(display->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
- intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ intel_de_write(display, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
- intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
- if (DISPLAY_VER(dev_priv) == 12)
- intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ if (DISPLAY_VER(display) == 12)
+ intel_de_rmw(display, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
}
}
}
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
+static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1638,13 +1653,13 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* 2. Initialize all combo phys */
@@ -1655,67 +1670,67 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(display);
- if (DISPLAY_VER(dev_priv) >= 12)
- gen12_dbuf_slices_config(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
/* 6. Setup MBUS. */
- icl_mbus_init(dev_priv);
+ icl_mbus_init(display);
/* 7. Program arbiter BW_BUDDY registers */
- if (DISPLAY_VER(dev_priv) >= 12)
- tgl_bw_buddy_init(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ tgl_bw_buddy_init(display);
/* 8. Ensure PHYs have completed calibration and adaptation */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
intel_snps_phy_wait_for_calibration(dev_priv);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
- if (DISPLAY_VERx100(dev_priv) == 1401)
- intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
+ if (DISPLAY_VERx100(display) == 1401)
+ intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
if (resume)
intel_dmc_load_program(display);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_VERx100(dev_priv, 1200, 1300))
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ if (IS_DISPLAY_VERx100(display, 1200, 1300))
+ intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) == 13)
- intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+ if (DISPLAY_VER(display) == 13)
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
/* Wa_15013987218 */
- if (DISPLAY_VER(dev_priv) == 20) {
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ if (DISPLAY_VER(display) == 20) {
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
}
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
gen9_disable_dc_states(display);
@@ -1724,13 +1739,13 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(display);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN, 0,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
/*
@@ -1739,20 +1754,20 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
* disabled at this point.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
/* 5. */
intel_combo_phy_uninit(dev_priv);
}
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+static void chv_phy_control_init(struct intel_display *display)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -1761,7 +1776,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->display.power.chv_phy_control =
+ display->power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1775,39 +1790,39 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* override and set the lane powerdown bits according to the
* current lane status.
*/
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
- u32 status = intel_de_read(dev_priv, DPLL(dev_priv, PIPE_A));
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
+ u32 status = intel_de_read(display, DPLL(display, PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
+ display->power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
+ display->power.chv_phy_assert[DPIO_PHY0] = true;
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
- u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
+ if (intel_power_well_is_enabled(display, cmn_d)) {
+ u32 status = intel_de_read(display, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -1815,42 +1830,42 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
+ display->power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
+ display->power.chv_phy_assert[DPIO_PHY1] = true;
}
- drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->display.power.chv_phy_control);
+ drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
+ display->power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
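A worked example of the lane-mask decision above, using hypothetical DPLL status values (DPLL_PORTB_READY_MASK is the low nibble):

/* status = 0x...f: mask = 0xf, all four lanes report ready, mask is
 *                  forced to 0 and no PHY_CH_POWER_DOWN_OVRD_EN is set.
 * status = 0x...3: mask = 0x3, only lanes 0-1 report ready, so the
 *                  override enable is set and the partial mask is
 *                  latched into chv_phy_control via
 *                  PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0).
 */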
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+static void vlv_cmnlane_wa(struct intel_display *display)
{
struct i915_power_well *cmn =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+ lookup_power_well(display, VLV_DISP_PW_DISP2D);
/* If the display might already be active, skip this */
- if (intel_power_well_is_enabled(dev_priv, cmn) &&
- intel_power_well_is_enabled(dev_priv, disp2d) &&
- intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
+ if (intel_power_well_is_enabled(display, cmn) &&
+ intel_power_well_is_enabled(display, disp2d) &&
+ intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
return;
- drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
+ drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
- intel_power_well_enable(dev_priv, disp2d);
+ intel_power_well_enable(display, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -1859,11 +1874,12 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
- intel_power_well_disable(dev_priv, cmn);
+ intel_power_well_disable(display, cmn);
}
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
vlv_punit_get(dev_priv);
@@ -1873,14 +1889,14 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
return ret;
}
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+static void assert_ved_power_gated(struct intel_display *display)
{
- drm_WARN(&dev_priv->drm,
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ drm_WARN(display->drm,
+ !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
"VED not power gated\n");
}
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+static void assert_isp_power_gated(struct intel_display *display)
{
static const struct pci_device_id isp_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
@@ -1888,16 +1904,16 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
"ISP not power gated\n");
}
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+static void intel_power_domains_verify_state(struct intel_display *display);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@@ -1911,34 +1927,35 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_driver_remove().
*/
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_init(i915, resume);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_init(i915, resume);
- } else if (DISPLAY_VER(i915) == 9) {
- skl_display_core_init(i915, resume);
- } else if (IS_CHERRYVIEW(i915)) {
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_init(display, resume);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_init(display, resume);
+ } else if (DISPLAY_VER(display) == 9) {
+ skl_display_core_init(display, resume);
+ } else if (display->platform.cherryview) {
mutex_lock(&power_domains->lock);
- chv_phy_control_init(i915);
+ chv_phy_control_init(display);
mutex_unlock(&power_domains->lock);
- assert_isp_power_gated(i915);
- } else if (IS_VALLEYVIEW(i915)) {
+ assert_isp_power_gated(display);
+ } else if (display->platform.valleyview) {
mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(i915);
+ vlv_cmnlane_wa(display);
mutex_unlock(&power_domains->lock);
- assert_ved_power_gated(i915);
- assert_isp_power_gated(i915);
- } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
- hsw_assert_cdclk(i915);
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- } else if (IS_IVYBRIDGE(i915)) {
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ assert_ved_power_gated(display);
+ assert_isp_power_gated(display);
+ } else if (display->platform.broadwell || display->platform.haswell) {
+ hsw_assert_cdclk(display);
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ } else if (display->platform.ivybridge) {
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
}
/*
@@ -1947,24 +1964,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->display.params.disable_power_well) {
- drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ if (!display->params.disable_power_well) {
+ drm_WARN_ON(display->drm, power_domains->disable_wakeref);
+ display->power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
- intel_power_domains_sync_hw(i915);
+ intel_power_domains_sync_hw(display);
power_domains->initializing = false;
}
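Call sites elsewhere in the driver follow the same conversion. A hedged sketch of what a load path could look like once this lands, assuming the caller still starts from a drm_i915_private (example_driver_load() is illustrative only):

/* Hypothetical caller: the display handle is the embedded member. */
static int example_driver_load(struct drm_i915_private *i915, bool resume)
{
	struct intel_display *display = &i915->display;
	int ret;

	ret = intel_power_domains_init(display);
	if (ret)
		return ret;

	intel_power_domains_init_hw(display, resume);
	intel_power_domains_enable(display);

	return 0;
}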
/**
* intel_power_domains_driver_remove - deinitialize hw power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@@ -1973,19 +1990,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_driver_remove(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
- intel_display_power_flush_work_sync(i915);
+ intel_display_power_flush_work_sync(display);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1993,7 +2011,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
/**
* intel_power_domains_sanitize_state - sanitize power domains state
- * @i915: i915 device instance
+ * @display: display device instance
*
* Sanitize the power domains state during driver loading and system resume.
* The function will disable all display power wells that BIOS has enabled
* without a user for it (any user for a power well was removed by the time
* this function is called, after the state of all the pipe, encoder, etc.
* HW resources have been sanitized).
* pipe, encoder, etc. HW resources have been sanitized).
*/
-void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
+void intel_power_domains_sanitize_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well_reverse(i915, power_well) {
+ for_each_power_well_reverse(display, power_well) {
if (power_well->desc->always_on || power_well->count ||
- !intel_power_well_is_enabled(i915, power_well))
+ !intel_power_well_is_enabled(display, power_well))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS left unused %s power well enabled, disabling it\n",
intel_power_well_name(power_well));
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
mutex_unlock(&power_domains->lock);
@@ -2024,7 +2042,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
/**
* intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Enable the ondemand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -2034,36 +2052,38 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
-void intel_power_domains_enable(struct drm_i915_private *i915)
+void intel_power_domains_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Disable the ondemand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
-void intel_power_domains_disable(struct drm_i915_private *i915)
+void intel_power_domains_disable(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @s2idle: specifies whether we go to idle, or deeper sleep
*
* This function prepares the hardware power domain state before entering
@@ -2072,9 +2092,9 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2091,7 +2111,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
intel_dmc_has_payload(display)) {
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
return;
}
@@ -2099,26 +2119,26 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
- if (DISPLAY_VER(i915) >= 11)
- icl_display_core_uninit(i915);
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- bxt_display_core_uninit(i915);
- else if (DISPLAY_VER(i915) == 9)
- skl_display_core_uninit(i915);
+ if (DISPLAY_VER(display) >= 11)
+ icl_display_core_uninit(display);
+ else if (display->platform.geminilake || display->platform.broxton)
+ bxt_display_core_uninit(display);
+ else if (DISPLAY_VER(display) == 9)
+ skl_display_core_uninit(display);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
* This function resumes the hardware power domain state during system resume.
*
@@ -2126,45 +2146,46 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
-void intel_power_domains_resume(struct drm_i915_private *i915)
+void intel_power_domains_resume(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(i915, true);
+ intel_power_domains_init_hw(display, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+static void intel_power_domains_dump_info(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%-25s %d\n",
- intel_power_well_name(power_well), intel_power_well_refcount(power_well));
+ drm_dbg_kms(display->drm, "%-25s %d\n",
+ intel_power_well_name(power_well), intel_power_well_refcount(power_well));
for_each_power_domain(domain, intel_power_well_domains(power_well))
- drm_dbg(&i915->drm, " %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@@ -2172,9 +2193,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2183,16 +2204,16 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
dump_domain_info = false;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
- enabled = intel_power_well_is_enabled(i915, power_well);
+ enabled = intel_power_well_is_enabled(display, power_well);
if ((intel_power_well_refcount(power_well) ||
intel_power_well_is_always_on(power_well)) !=
enabled)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s state mismatch (refcount %d/enabled %d)",
intel_power_well_name(power_well),
intel_power_well_refcount(power_well), enabled);
@@ -2202,7 +2223,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
domains_count += power_domains->domain_use_count[domain];
if (intel_power_well_refcount(power_well) != domains_count) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
intel_power_well_name(power_well),
@@ -2216,7 +2237,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
static bool dumped;
if (!dumped) {
- intel_power_domains_dump_info(i915);
+ intel_power_domains_dump_info(display);
dumped = true;
}
}
@@ -2226,21 +2247,23 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
#else
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
}
#endif
-void intel_display_power_suspend_late(struct drm_i915_private *i915)
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ intel_power_domains_suspend(display, s2idle);
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
bxt_enable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
@@ -2248,66 +2271,66 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
-void intel_display_power_resume_early(struct drm_i915_private *i915)
+void intel_display_power_resume_early(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
gen9_sanitize_dc_state(display);
bxt_disable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+
+ intel_power_domains_resume(display);
}
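Folding intel_power_domains_suspend()/resume() into these helpers makes the late-suspend and early-resume paths mirror each other. The resulting ordering, as commentary:

/* suspend:  intel_display_power_suspend_late(display, s2idle)
 *             -> intel_power_domains_suspend(display, s2idle)
 *             -> bxt_enable_dc9() or hsw_enable_pc8()
 *
 * resume:   intel_display_power_resume_early(display)
 *             -> gen9_sanitize_dc_state() + bxt_disable_dc9(),
 *                or hsw_disable_pc8()
 *             -> intel_power_domains_resume(display)
 */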
-void intel_display_power_suspend(struct drm_i915_private *i915)
+void intel_display_power_suspend(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_uninit(i915);
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_uninit(display);
bxt_enable_dc9(display);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_uninit(i915);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_uninit(display);
bxt_enable_dc9(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
}
-void intel_display_power_resume(struct drm_i915_private *i915)
+void intel_display_power_resume(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
bxt_disable_dc9(display);
- icl_display_core_init(i915, true);
+ icl_display_core_init(display, true);
if (intel_dmc_has_payload(display)) {
if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(display);
else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(display);
}
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
bxt_disable_dc9(display);
- bxt_display_core_init(i915, true);
+ bxt_display_core_init(display, true);
if (intel_dmc_has_payload(display) &&
(power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(display);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
int i;
mutex_lock(&power_domains->lock);
@@ -2452,17 +2475,17 @@ d13_port_domains[] = {
};
static void
-intel_port_domains_for_platform(struct drm_i915_private *i915,
+intel_port_domains_for_platform(struct intel_display *display,
const struct intel_ddi_port_domains **domains,
int *domains_size)
{
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
*domains = d13_port_domains;
*domains_size = ARRAY_SIZE(d13_port_domains);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
*domains = d12_port_domains;
*domains_size = ARRAY_SIZE(d12_port_domains);
- } else if (DISPLAY_VER(i915) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
*domains = d11_port_domains;
*domains_size = ARRAY_SIZE(d11_port_domains);
} else {
@@ -2472,13 +2495,13 @@ intel_port_domains_for_platform(struct drm_i915_private *i915,
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+intel_port_domains_for_port(struct intel_display *display, enum port port)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (port >= domains[i].port_start && port <= domains[i].port_end)
return &domains[i];
@@ -2489,9 +2512,10 @@ intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start);
@@ -2500,22 +2524,23 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start);
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
return &domains[i];
@@ -2526,9 +2551,10 @@ intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_IO_A;
return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
@@ -2537,9 +2563,10 @@ intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2548,9 +2575,10 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 3f8f84df4733..7b294eec4431 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -15,6 +15,7 @@ enum aux_ch;
enum port;
struct drm_i915_private;
struct i915_power_well;
+struct intel_display;
struct intel_encoder;
struct seq_file;
@@ -166,21 +167,21 @@ struct intel_display_power_domain_set {
for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
for_each_if(test_bit((__domain), (__mask)->bits))
-int intel_power_domains_init(struct drm_i915_private *dev_priv);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv, bool s2idle);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv);
-
-void intel_display_power_suspend_late(struct drm_i915_private *i915);
-void intel_display_power_resume_early(struct drm_i915_private *i915);
-void intel_display_power_suspend(struct drm_i915_private *i915);
-void intel_display_power_resume(struct drm_i915_private *i915);
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+int intel_power_domains_init(struct intel_display *display);
+void intel_power_domains_cleanup(struct intel_display *display);
+void intel_power_domains_init_hw(struct intel_display *display, bool resume);
+void intel_power_domains_driver_remove(struct intel_display *display);
+void intel_power_domains_enable(struct intel_display *display);
+void intel_power_domains_disable(struct intel_display *display);
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle);
+void intel_power_domains_resume(struct intel_display *display);
+void intel_power_domains_sanitize_state(struct intel_display *display);
+
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle);
+void intel_display_power_resume_early(struct intel_display *display);
+void intel_display_power_suspend(struct intel_display *display);
+void intel_display_power_resume(struct intel_display *display);
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
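[Editor's note: while the header prototypes flip from struct drm_i915_private to struct intel_display, call sites that still hold an i915 pointer typically bridge with a one-line local, exactly as the .c hunks above do. A hedged sketch of that bridging idiom, with the structs reduced to stubs:

	struct intel_display { int dummy; };
	struct drm_i915_private { struct intel_display display; };

	/* New-style API takes the display-only struct. */
	static void intel_power_domains_enable(struct intel_display *display) { (void)display; }

	/* Not-yet-converted caller bridges with a local and can be cleaned up later. */
	static void legacy_caller(struct drm_i915_private *i915)
	{
		struct intel_display *display = &i915->display;

		intel_power_domains_enable(display);
	}

	int main(void)
	{
		struct drm_i915_private i915 = { .display = { 0 } };
		legacy_caller(&i915);
		return 0;
	}
]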
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 5575aa0d6689..0c8ac1af6db7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,14 +3,12 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
-
-#include "vlv_sideband_reg.h"
-
+#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
+#include "vlv_sideband_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
@@ -1752,9 +1750,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc_list *power_well_descs,
int power_well_descs_sz)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1778,7 +1776,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
enum i915_power_well_id id = inst->id;
pw->desc = desc;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
overflows_type(inst - desc->instances->list, pw->instance_idx));
pw->instance_idx = inst - desc->instances->list;
@@ -1789,8 +1787,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
- drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
- drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ drm_WARN_ON(display->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(display->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@@ -1811,53 +1809,53 @@ __set_power_wells(struct i915_power_domains *power_domains,
*/
int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (!HAS_DISPLAY(i915)) {
+ if (!HAS_DISPLAY(display)) {
power_domains->power_well_count = 0;
return 0;
}
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
- else if (DISPLAY_VER(i915) >= 20)
+ else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
- else if (DISPLAY_VER(i915) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return set_power_wells(power_domains, xelpdp_power_wells);
- else if (IS_DG2(i915))
+ else if (display->platform.dg2)
return set_power_wells(power_domains, xehpd_power_wells);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
- else if (IS_DG1(i915))
+ else if (display->platform.dg1)
return set_power_wells(power_domains, dg1_power_wells);
- else if (IS_ALDERLAKE_S(i915))
+ else if (display->platform.alderlake_s)
return set_power_wells(power_domains, adls_power_wells);
- else if (IS_ROCKETLAKE(i915))
+ else if (display->platform.rocketlake)
return set_power_wells(power_domains, rkl_power_wells);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return set_power_wells(power_domains, tgl_power_wells);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return set_power_wells(power_domains, icl_power_wells);
- else if (IS_GEMINILAKE(i915))
+ else if (display->platform.geminilake)
return set_power_wells(power_domains, glk_power_wells);
- else if (IS_BROXTON(i915))
+ else if (display->platform.broxton)
return set_power_wells(power_domains, bxt_power_wells);
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
return set_power_wells(power_domains, skl_power_wells);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
return set_power_wells(power_domains, chv_power_wells);
- else if (IS_BROADWELL(i915))
+ else if (display->platform.broadwell)
return set_power_wells(power_domains, bdw_power_wells);
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
return set_power_wells(power_domains, hsw_power_wells);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
return set_power_wells(power_domains, vlv_power_wells);
- else if (IS_I830(i915))
+ else if (display->platform.i830)
return set_power_wells(power_domains, i830_power_wells);
else
return set_power_wells(power_domains, i9xx_power_wells);
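[Editor's note: both functions in this file recover the enclosing object from a pointer to an embedded member; the patch only changes which enclosing type container_of() names. A self-contained sketch of the mechanism using offsetof(), with toy types in place of the i915 structs:

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct power_domains { int well_count; };
	struct display {
		int ver;
		struct power_domains domains; /* embedded by value, not a pointer */
	};

	int main(void)
	{
		struct display disp = { .ver = 14 };
		struct power_domains *pd = &disp.domains;

		/* Walk back from the member to the struct that embeds it. */
		struct display *back = container_of(pd, struct display, domains);
		assert(back == &disp && back->ver == 14);
		return 0;
	}
]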
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index f0131dd853de..f45a4f9ba23c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -46,23 +46,23 @@ struct i915_power_well_ops {
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
- void (*sync_hw)(struct drm_i915_private *i915,
+ void (*sync_hw)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
- void (*enable)(struct drm_i915_private *i915,
+ void (*enable)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
- void (*disable)(struct drm_i915_private *i915,
+ void (*disable)(struct intel_display *display,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *i915,
+ bool (*is_enabled)(struct intel_display *display,
struct i915_power_well *power_well);
};
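[Editor's note: the ops struct above is a classic C vtable — each power well description points at one set of callbacks, and the generic helpers below dispatch through it. A reduced sketch of defining one ops instance and dispatching, using hypothetical toy types rather than the i915 descriptors:

	#include <stdbool.h>
	#include <stdio.h>

	struct display { const char *name; };
	struct well;

	struct well_ops {
		void (*enable)(struct display *display, struct well *w);
		bool (*is_enabled)(struct display *display, struct well *w);
	};

	struct well { const struct well_ops *ops; bool hw_on; };

	static void toy_enable(struct display *d, struct well *w)
	{
		printf("%s: enabling\n", d->name);
		w->hw_on = true;
	}

	static bool toy_is_enabled(struct display *d, struct well *w)
	{
		(void)d;
		return w->hw_on;
	}

	static const struct well_ops toy_ops = { toy_enable, toy_is_enabled };

	int main(void)
	{
		struct display d = { "toy" };
		struct well w = { &toy_ops, false };

		w.ops->enable(&d, &w); /* indirect call through the vtable */
		return w.ops->is_enabled(&d, &w) ? 0 : 1;
	}
]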
@@ -73,12 +73,12 @@ i915_power_well_instance(const struct i915_power_well *power_well)
}
struct i915_power_well *
-lookup_power_well(struct drm_i915_private *i915,
+lookup_power_well(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well)
+ for_each_power_well(display, power_well)
if (i915_power_well_instance(power_well)->id == power_well_id)
return power_well;
@@ -89,58 +89,57 @@ lookup_power_well(struct drm_i915_private *i915,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->display.power.domains.power_wells[0];
+ return &display->power.domains.power_wells[0];
}
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
- power_well->desc->ops->enable(i915, power_well);
+ drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
+ power_well->desc->ops->enable(display, power_well);
power_well->hw_enabled = true;
}
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
+ drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
power_well->hw_enabled = false;
- power_well->desc->ops->disable(i915, power_well);
+ power_well->desc->ops->disable(display, power_well);
}
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- power_well->desc->ops->sync_hw(i915, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(i915, power_well);
+ power_well->desc->ops->sync_hw(display, power_well);
+ power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
}
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well)
{
if (!power_well->count++)
- intel_power_well_enable(i915, power_well);
+ intel_power_well_enable(display, power_well);
}
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_WARN(&i915->drm, !power_well->count,
+ drm_WARN(display->drm, !power_well->count,
"Use count on power well %s is already zero",
i915_power_well_instance(power_well)->name);
if (!--power_well->count)
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return power_well->desc->ops->is_enabled(i915, power_well);
+ return power_well->desc->ops->is_enabled(display, power_well);
}
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
@@ -148,14 +147,14 @@ bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
return power_well->hw_enabled;
}
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, power_well_id);
+ power_well = lookup_power_well(display, power_well_id);
- return intel_power_well_is_enabled(dev_priv, power_well);
+ return intel_power_well_is_enabled(display, power_well);
}
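[Editor's note: intel_power_well_get()/put() above implement a plain use count — hardware is only touched on the 0→1 and 1→0 transitions, so nested users are cheap. A minimal sketch of that discipline with toy types (the real put also WARNs on underflow, as the hunk shows):

	#include <assert.h>

	struct well { int count; int hw_on; };

	static void well_get(struct well *w)
	{
		if (!w->count++)      /* only the first user powers the hardware on */
			w->hw_on = 1;
	}

	static void well_put(struct well *w)
	{
		assert(w->count > 0); /* stand-in for the drm_WARN on underflow */
		if (!--w->count)      /* only the last user powers it back off */
			w->hw_on = 0;
	}

	int main(void)
	{
		struct well w = {0};
		well_get(&w);
		well_get(&w);         /* nested: second get is refcount-only */
		well_put(&w);
		assert(w.hw_on == 1);
		well_put(&w);
		assert(w.hw_on == 0 && w.count == 0);
		return 0;
	}
]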
bool intel_power_well_is_always_on(struct i915_power_well *power_well)
@@ -184,10 +183,10 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (has_vga)
intel_vga_reset_io_mem(display);
@@ -196,9 +195,11 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (irq_pipe_mask)
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
@@ -221,12 +222,12 @@ static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
}
static struct intel_digital_port *
-aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+aux_ch_to_digital_port(struct intel_display *display,
enum aux_ch aux_ch)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
/* We'll check the MST primary port */
@@ -242,11 +243,11 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
return NULL;
}
-static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+static enum phy icl_aux_pw_to_phy(struct intel_display *display,
const struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
/*
* FIXME should we care about the (VBT defined) dig_port->aux_ch
@@ -258,7 +259,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well,
bool timeout_expected)
{
@@ -271,39 +272,39 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
* an ack, but rather just wait a fixed amount of time and then
* proceed. This is only used on DG2.
*/
- if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
usleep_range(600, 1200);
return;
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(dev_priv, regs->driver,
+ if (intel_de_wait_for_set(display, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
- drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
- drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+ drm_WARN_ON(display->drm, !timeout_expected);
}
}
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+static u32 hsw_power_well_requesters(struct intel_display *display,
const struct i915_power_well_regs *regs,
int pw_idx)
{
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
- ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
- ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;
return ret;
}
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -320,28 +321,28 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ wait_for((disabled = !(intel_de_read(display, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
if (disabled)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
!!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+static void gen9_wait_for_power_well_fuses(struct intel_display *display,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ drm_WARN_ON(display->drm,
+ intel_de_wait_for_set(display, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -350,12 +351,12 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/* Wa_16013190616:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+ if (display->platform.alderlake_p && pg == SKL_PG1)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
@@ -365,112 +366,112 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
* after the enabling.
*/
if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ gen9_wait_for_power_well_fuses(display, SKL_PG0);
}
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
- gen9_wait_for_power_well_fuses(dev_priv, pg);
+ gen9_wait_for_power_well_fuses(display, pg);
}
- hsw_power_well_post_enable(dev_priv,
+ hsw_power_well_post_enable(display,
power_well->desc->irq_pipe_mask,
power_well->desc->has_vga);
}
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- hsw_power_well_pre_disable(dev_priv,
+ hsw_power_well_pre_disable(display,
power_well->desc->irq_pipe_mask);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ hsw_wait_for_power_well_disable(display, power_well);
}
-static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
+static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
0, ICL_LANE_ENABLE_AUX);
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
ICL_LANE_ENABLE_AUX, 0);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ hsw_wait_for_power_well_disable(display, power_well);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ if (drm_WARN_ON(display->drm, !dig_port))
return;
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
return;
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}
#else
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
@@ -480,8 +481,9 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
-static void icl_tc_cold_exit(struct drm_i915_private *i915)
+static void icl_tc_cold_exit(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -502,21 +504,22 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
}
static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_tc_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+ icl_tc_port_assert_ref_held(display, power_well, dig_port);
- intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- intel_de_rmw(dev_priv, regs->driver,
+ intel_de_rmw(display, regs->driver,
0,
HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
@@ -526,51 +529,53 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- icl_tc_cold_exit(dev_priv);
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(display);
- hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+ hsw_wait_for_power_well_enable(display, power_well, timeout_expected);
- if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ if (DISPLAY_VER(display) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(&dev_priv->drm,
+ drm_warn(display->drm,
"Timeout waiting TC uC health\n");
}
}
static void
-icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_enable(dev_priv,
+ return icl_tc_phy_aux_power_well_enable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_enable(display,
power_well);
else
- return hsw_power_well_enable(dev_priv, power_well);
+ return hsw_power_well_enable(display, power_well);
}
static void
-icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return hsw_power_well_disable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_disable(dev_priv,
+ return hsw_power_well_disable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_disable(display,
power_well);
else
- return hsw_power_well_disable(dev_priv, power_well);
+ return hsw_power_well_disable(display, power_well);
}
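[Editor's note: icl_aux_power_well_enable()/disable() above are thin dispatchers — they pick the type-C, Icelake combo-PHY, or plain HSW path from the port type, and this patch only changes what gets passed down. A reduced sketch of that dispatch shape, with stub predicates and handlers that are not the driver's:

	#include <stdio.h>

	enum phy_kind { PHY_TC, PHY_COMBO_ICL, PHY_OTHER };

	static void tc_enable(void)    { puts("type-C aux path"); }
	static void combo_enable(void) { puts("icelake combo-phy aux path"); }
	static void hsw_enable(void)   { puts("generic hsw path"); }

	/* One entry point; the real work is choosing the right handler. */
	static void aux_enable(enum phy_kind kind)
	{
		if (kind == PHY_TC)
			tc_enable();
		else if (kind == PHY_COMBO_ICL)
			combo_enable();
		else
			hsw_enable();
	}

	int main(void)
	{
		aux_enable(PHY_TC);
		aux_enable(PHY_OTHER);
		return 0;
	}
]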
/*
@@ -578,7 +583,7 @@ icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool hsw_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -588,7 +593,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = intel_de_read(dev_priv, regs->driver);
+ val = intel_de_read(display, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -596,9 +601,9 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= intel_de_read(dev_priv, regs->bios);
+ val |= intel_de_read(display, regs->bios);
return (val & mask) == mask;
}
@@ -691,7 +696,6 @@ static void gen9_write_dc_state(struct intel_display *display,
static u32 gen9_dc_mask(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
@@ -701,7 +705,7 @@ static u32 gen9_dc_mask(struct intel_display *display)
| DC_STATE_EN_DC9;
else if (DISPLAY_VER(display) == 11)
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
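[Editor's note: gen9_dc_mask() builds its writable-bits mask incrementally — every platform allows DC5, then the version ladder ORs in DC6, DC9, or DC3CO. A standalone sketch of that shape with hypothetical bit values and a simplified version split (the real masks come from the i915 register definitions):

	enum { DC5 = 1 << 0, DC6 = 1 << 1, DC9 = 1 << 3, DC3CO = 1 << 4 };

	static unsigned dc_mask(int display_ver, int is_bxt_glk)
	{
		unsigned mask = DC5;          /* common to everything */

		if (display_ver >= 12)
			mask |= DC6 | DC9 | DC3CO;
		else if (display_ver == 11)
			mask |= DC6 | DC9;
		else if (is_bxt_glk)
			mask |= DC9;          /* BXT/GLK: DC9 but no DC6 */
		else
			mask |= DC6;
		return mask;
	}

	int main(void)
	{
		return dc_mask(11, 0) == (DC5 | DC6 | DC9) ? 0 : 1;
	}
]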
@@ -798,7 +802,7 @@ static void tgl_disable_dc3co(struct intel_display *display)
static void assert_can_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
@@ -808,7 +812,7 @@ static void assert_can_enable_dc5(struct intel_display *display)
high_pg = SKL_DISP_PW_2;
drm_WARN_ONCE(display->drm,
- intel_display_power_well_is_enabled(dev_priv, high_pg),
+ intel_display_power_well_is_enabled(display, high_pg),
"Power wells above platform's DC5 limit still enabled.\n");
drm_WARN_ONCE(display->drm,
@@ -822,18 +826,16 @@ static void assert_can_enable_dc5(struct intel_display *display)
void gen9_enable_dc5(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc5(display);
drm_dbg_kms(display->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);
gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}
@@ -855,26 +857,22 @@ static void assert_can_enable_dc6(struct intel_display *display)
void skl_enable_dc6(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc6(display);
drm_dbg_kms(display->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);
gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}
void bxt_enable_dc9(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
assert_can_enable_dc9(display);
drm_dbg_kms(display->drm, "Enabling DC9\n");
@@ -882,7 +880,7 @@ void bxt_enable_dc9(struct intel_display *display)
* Power sequencer reset is needed on BXT/GLK, because the PPS registers
* aren't always on, unlike with South Display Engine on PCH.
*/
- if (IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (display->platform.broxton || display->platform.geminilake)
bxt_pps_reset_all(display);
gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
@@ -898,63 +896,56 @@ void bxt_disable_dc9(struct intel_display *display)
intel_pps_unlock_regs_wa(display);
}
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void hsw_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = intel_de_read(dev_priv, regs->bios);
+ u32 bios_req = intel_de_read(display, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = intel_de_read(dev_priv, regs->driver);
+ u32 drv_req = intel_de_read(display, regs->driver);
if (!(drv_req & mask))
- intel_de_write(dev_priv, regs->driver, drv_req | mask);
- intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ intel_de_write(display, regs->driver, drv_req | mask);
+ intel_de_write(display, regs->bios, bios_req & ~mask);
}
}
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
+ if (display->platform.geminilake) {
+ power_well = lookup_power_well(display,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
bxt_dpio_phy_verify_state(display,
@@ -962,21 +953,20 @@ static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
}
}
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
(intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
"Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
hw_enabled_dbuf_slices,
@@ -988,18 +978,25 @@ void gen9_disable_dc_states(struct intel_display *display)
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_cdclk_config cdclk_config = {};
+ u32 old_state = power_domains->dc_state;
if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(display);
return;
}
- gen9_set_dc_state(display, DC_STATE_DISABLE);
-
- if (!HAS_DISPLAY(display))
+ if (HAS_DISPLAY(display)) {
+ intel_dmc_wl_get_noreg(display);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
+ intel_dmc_wl_put_noreg(display);
+ } else {
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
return;
+ }
- intel_dmc_wl_disable(display);
+ if (old_state == DC_STATE_EN_UPTO_DC5 ||
+ old_state == DC_STATE_EN_UPTO_DC6)
+ intel_dmc_wl_disable(display);
intel_cdclk_get_cdclk(display, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
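[Editor's note: the reworked gen9_disable_dc_states() above snapshots power_domains->dc_state before clearing it, so the DMC wakelock is only dropped when the hardware was actually in DC5/DC6. A tiny sketch of that snapshot-then-compare pattern, with toy state values:

	enum dc_state { DC_OFF, DC5, DC6, DC9 };

	struct pd { enum dc_state dc_state; int wl_held; };

	static void set_dc_state(struct pd *p, enum dc_state s) { p->dc_state = s; }

	static void disable_dc_states(struct pd *p)
	{
		enum dc_state old_state = p->dc_state; /* snapshot before clearing */

		set_dc_state(p, DC_OFF);

		/* Only DC5/DC6 took the wakelock, so only they release it. */
		if (old_state == DC5 || old_state == DC6)
			p->wl_held = 0;
	}

	int main(void)
	{
		struct pd p = { DC6, 1 };
		disable_dc_states(&p);
		return p.wl_held; /* 0: released */
	}
]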
@@ -1007,10 +1004,10 @@ void gen9_disable_dc_states(struct intel_display *display)
intel_cdclk_clock_changed(&display->cdclk.hw,
&cdclk_config));
- gen9_assert_dbuf_enabled(dev_priv);
+ gen9_assert_dbuf_enabled(display);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_dpio_phy_power_wells(dev_priv);
+ if (display->platform.geminilake || display->platform.broxton)
+ bxt_verify_dpio_phy_power_wells(display);
if (DISPLAY_VER(display) >= 11)
/*
@@ -1021,18 +1018,15 @@ void gen9_disable_dc_states(struct intel_display *display)
intel_combo_phy_init(dev_priv);
}
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
gen9_disable_dc_states(display);
}
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
if (!intel_dmc_has_payload(display))
@@ -1051,63 +1045,58 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
}
}
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+static void i9xx_always_on_power_well_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
+ struct i915_power_well *power_well)
{
return true;
}
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
- if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0)
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(display, PIPE_A);
- if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0)
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(display, PIPE_B);
}
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
i830_disable_pipe(display, PIPE_B);
i830_disable_pipe(display, PIPE_A);
}
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool i830_pipes_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
-
- return intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE &&
- intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
}
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
+ i830_pipes_power_well_enable(display, power_well);
else
- i830_pipes_power_well_disable(dev_priv, power_well);
+ i830_pipes_power_well_disable(display, power_well);
}
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1131,7 +1120,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
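[Editor's note: vlv_set_power_well() follows the usual Punit handshake — write the request, then poll a condition with a bounded wait and log on timeout (the driver's wait_for() macro). A self-contained userspace approximation using a simple retry loop; this sketches the pattern, not the kernel macro:

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Stand-in for reading back the power-gate status register. */
	static unsigned fake_status;
	static unsigned read_status(void) { return fake_status; }

	static bool wait_for_state(unsigned want, int timeout_ms)
	{
		for (int waited = 0; waited <= timeout_ms; waited++) {
			if (read_status() == want)
				return true; /* condition met before the deadline */
			usleep(1000);        /* poll roughly every millisecond */
		}
		return false;
	}

	int main(void)
	{
		fake_status = 0x8;
		if (!wait_for_state(0x8, 100))
			fprintf(stderr, "timeout setting power well state\n");
		return 0;
	}
]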
@@ -1142,21 +1131,22 @@ out:
vlv_punit_put(dev_priv);
}
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
}
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1173,7 +1163,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1183,14 +1173,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- drm_WARN_ON(&dev_priv->drm, ctrl != state);
+ drm_WARN_ON(display->drm, ctrl != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_display_clock_gating(struct intel_display *display)
{
/*
* On driver load, a pipe may be active and driving a DSI display.
@@ -1198,25 +1188,25 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- intel_de_write(dev_priv, MI_ARB_VLV,
+ intel_de_write(display, MI_ARB_VLV,
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- intel_de_write(dev_priv, CBR1_VLV, 0);
+ intel_de_write(display, CBR1_VLV, 0);
- drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
- intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
+ drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
+ intel_de_write(display, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
1000));
}
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1228,17 +1218,17 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(dev_priv, pipe) {
- u32 val = intel_de_read(dev_priv, DPLL(dev_priv, pipe));
+ for_each_pipe(display, pipe) {
+ u32 val = intel_de_read(display, DPLL(display, pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
+ intel_de_write(display, DPLL(display, pipe), val);
}
- vlv_init_display_clock_gating(dev_priv);
+ vlv_init_display_clock_gating(display);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
@@ -1248,14 +1238,14 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->display.power.domains.initializing)
+ if (display->power.domains.initializing)
return;
intel_hpd_init(dev_priv);
intel_hpd_poll_disable(dev_priv);
/* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_ANALOG)
intel_crt_reset(&encoder->base);
}
@@ -1265,9 +1255,9 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(display);
}
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_deinit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
@@ -1279,33 +1269,33 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
vlv_pps_reset_all(display);
/* Prevent us from re-enabling polling by accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
+ if (!display->drm->dev->power.is_suspended)
intel_hpd_poll_enable(dev_priv);
}
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
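[Editor's note: the pair above shows a deliberate ordering symmetry — enable powers the well first and then initializes dependent state, while disable tears state down before cutting power. A compact sketch of that mirror-image structure:

	#include <stdio.h>

	static void set_power(int on) { printf("power %s\n", on ? "on" : "off"); }
	static void init_state(void)   { puts("init display state");   }
	static void deinit_state(void) { puts("deinit display state"); }

	static void well_enable(void)
	{
		set_power(1);    /* hardware must be up before touching its state */
		init_state();
	}

	static void well_disable(void)
	{
		deinit_state();  /* reverse order: quiesce users, then cut power */
		set_power(0);
	}

	int main(void)
	{
		well_enable();
		well_disable();
		return 0;
	}
]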
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
@@ -1318,32 +1308,32 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
+ intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
+ intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
static void assert_chv_phy_status(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1368,7 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display)
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */
@@ -1409,7 +1399,7 @@ static void assert_chv_phy_status(struct intel_display *display)
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ if (intel_power_well_is_enabled(display, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */
@@ -1444,10 +1434,10 @@ static void assert_chv_phy_status(struct intel_display *display)
#undef BITS_SET
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
@@ -1463,7 +1453,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
@@ -1507,10 +1497,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
assert_chv_phy_status(display);
}
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
@@ -1531,7 +1521,7 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
intel_de_write(display, DISPLAY_PHY_CONTROL,
display->power.chv_phy_control);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
drm_dbg_kms(display->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
@@ -1543,9 +1533,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_chv_phy_status(display);
}
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1555,7 +1546,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->display.power.chv_phy_assert[phy])
+ if (!display->power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1598,7 +1589,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
DPIO_ALLDL_POWERDOWN_CH1, val);
- drm_WARN(&dev_priv->drm, actual != expected,
+ drm_WARN(display->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
!!(actual & DPIO_ALLDL_POWERDOWN),
!!(actual & DPIO_ANYDL_POWERDOWN),
@@ -1607,10 +1598,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg, val);
}
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
bool was_override;
@@ -1645,7 +1635,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &display->power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
@@ -1669,14 +1658,15 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
assert_chv_phy_status(display);
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+ assert_chv_phy_powergate(display, phy, ch, override, mask);
mutex_unlock(&power_domains->lock);
}
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
@@ -1688,7 +1678,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
@@ -1697,17 +1687,18 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+ drm_WARN_ON(display->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
@@ -1728,7 +1719,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
@@ -1739,32 +1730,33 @@ out:
vlv_punit_put(dev_priv);
}
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
}
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- chv_set_pipe_power_well(dev_priv, power_well, true);
+ chv_set_pipe_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- chv_set_pipe_power_well(dev_priv, power_well, false);
+ chv_set_pipe_power_well(display, power_well, false);
}
static void
-tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+tgl_tc_cold_request(struct intel_display *display, bool block)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1805,31 +1797,31 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
}
static void
-tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, true);
+ tgl_tc_cold_request(display, true);
}
static void
-tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, false);
+ tgl_tc_cold_request(display, false);
}
static void
-tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- tgl_tc_cold_off_power_well_enable(i915, power_well);
+ tgl_tc_cold_off_power_well_enable(display, power_well);
else
- tgl_tc_cold_off_power_well_disable(i915, power_well);
+ tgl_tc_cold_off_power_well_disable(display, power_well);
}
static bool
-tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
/*
@@ -1839,17 +1831,18 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
-static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- icl_tc_port_assert_ref_held(dev_priv, power_well,
- aux_ch_to_digital_port(dev_priv, aux_ch));
+ icl_tc_port_assert_ref_held(display, power_well,
+ aux_ch_to_digital_port(display, aux_ch));
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
@@ -1862,57 +1855,57 @@ static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
usleep_range(600, 1200);
}
-static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
usleep_range(10, 30);
}
-static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch)) &
+ return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
}
-static void xe2lpd_pica_power_well_enable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL,
+ intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well enable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well enable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when enabled");
+ drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
}
}
-static void xe2lpd_pica_power_well_disable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 0);
+ intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well disable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well disable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when disabled");
+ drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
}
}
-static bool xe2lpd_pica_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, XE2LPD_PICA_PW_CTL) &
+ return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
XE2LPD_PICA_CTL_POWER_STATUS;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 93559f7c6100..338379dae44c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -10,21 +10,20 @@
#include "intel_display_power.h"
#include "intel_dpio_phy.h"
-struct drm_i915_private;
struct i915_power_well_ops;
struct intel_display;
struct intel_encoder;
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
- (__dev_priv)->display.power.domains.power_well_count; \
+#define for_each_power_well(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells; \
+ (__power_well) - (___display)->power.domains.power_wells < \
+ (___display)->power.domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
- (__dev_priv)->display.power.domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
+#define for_each_power_well_reverse(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells + \
+ (___display)->power.domains.power_well_count - 1; \
+ (__power_well) - (___display)->power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -127,23 +126,23 @@ struct i915_power_well {
u8 instance_idx;
};
-struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
+struct i915_power_well *lookup_power_well(struct intel_display *display,
enum i915_power_well_id id);
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well);
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well);
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id);
bool intel_power_well_is_always_on(struct i915_power_well *power_well);
const char *intel_power_well_name(struct i915_power_well *power_well);
@@ -152,7 +151,7 @@ int intel_power_well_refcount(struct i915_power_well *power_well);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override);
void gen9_enable_dc5(struct intel_display *display);
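The reworked for_each_power_well() iterates the power-well array by pointer offset against the stored count, exactly as before but rooted at the display pointer. A self-contained sketch of the same iteration shape, with the surrounding types reduced to toy stand-ins:

#include <stdio.h>

/* Reduced model of the power-well array walked by the macros. */
struct i915_power_well { const char *name; };

struct display_sketch {
        struct {
                struct {
                        struct i915_power_well *power_wells;
                        int power_well_count;
                } domains;
        } power;
};

/* Same shape as the updated for_each_power_well(): walk the array
 * by pointer arithmetic against the stored count. */
#define for_each_power_well_sketch(__display, __pw) \
        for ((__pw) = (__display)->power.domains.power_wells; \
             (__pw) - (__display)->power.domains.power_wells < \
             (__display)->power.domains.power_well_count; \
             (__pw)++)

int main(void)
{
        struct i915_power_well wells[] = { { "PW_1" }, { "AUX_A" } };
        struct display_sketch display = {
                .power.domains.power_wells = wells,
                .power.domains.power_well_count = 2,
        };
        struct i915_power_well *pw;

        for_each_power_well_sketch(&display, pw)
                printf("%s\n", pw->name);
        return 0;
}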
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 49e2e650ebcd..093b386c95e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -114,11 +114,11 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(display);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
- ret = __intel_display_driver_resume(i915, state, ctx);
+ ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 030c4f873da1..25ba043cbb65 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -3,7 +3,9 @@
#include <linux/slab.h>
-#include "i915_drv.h"
+#include <drm/drm_drv.h>
+
+#include "intel_display_core.h"
#include "intel_display_device.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 9bd8f1e505b0..338b9f7b20b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -14,8 +14,8 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ff6eb93337e0..8271e50e3644 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -45,6 +45,7 @@
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
@@ -301,6 +302,15 @@ struct intel_panel_bl_funcs {
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
};
+/* in 100us units */
+struct intel_pps_delays {
+ u16 power_up; /* eDP: T1+T3, LVDS: T1+T2 */
+ u16 backlight_on; /* eDP: T8, LVDS: T5 */
+ u16 backlight_off; /* eDP: T9, LVDS: T6/TX */
+ u16 power_down; /* eDP: T10, LVDS: T3 */
+ u16 power_cycle; /* eDP: T11+T12, LVDS: T7+T4 */
+};
+
enum drrs_type {
DRRS_TYPE_NONE,
DRRS_TYPE_STATIC,
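The new intel_pps_delays fields are all in 100us units, per the comment above the struct. A hedged userspace sketch of converting such a value to whole milliseconds, rounding up so a wait is never shortened (the helper name is illustrative, not from the patch):

#include <stdio.h>

/* Mirror of the new structure: all fields are in 100us units. */
struct intel_pps_delays {
        unsigned short power_up;      /* eDP: T1+T3 */
        unsigned short backlight_on;  /* eDP: T8 */
        unsigned short backlight_off; /* eDP: T9 */
        unsigned short power_down;    /* eDP: T10 */
        unsigned short power_cycle;   /* eDP: T11+T12 */
};

/* Hypothetical helper: 100us units to whole milliseconds,
 * rounding up so we never wait less than requested. */
static unsigned int pps_100us_to_ms(unsigned short val)
{
        return (val + 9) / 10;
}

int main(void)
{
        struct intel_pps_delays pps = { .power_up = 2100 }; /* 210 ms */

        printf("power_up: %u ms\n", pps_100us_to_ms(pps.power_up));
        return 0;
}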
@@ -328,7 +338,7 @@ struct intel_vbt_panel_data {
int preemphasis;
int vswing;
int bpp;
- struct edp_power_seq pps;
+ struct intel_pps_delays pps;
u8 drrs_msa_timing_delay;
bool low_vswing;
bool hobl;
@@ -587,6 +597,8 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
+
+ struct work_struct cleanup_work;
};
struct intel_plane_state {
@@ -697,8 +709,8 @@ struct intel_initial_plane_config {
};
struct intel_scaler {
- int in_use;
u32 mode;
+ bool in_use;
};
struct intel_crtc_scaler_state {
@@ -769,6 +781,7 @@ struct skl_wm_level {
u8 lines;
bool enable;
bool ignore_lines;
+ bool auto_min_alloc_wm_enable;
bool can_sagv;
};
@@ -863,6 +876,13 @@ struct intel_crtc_wm_state {
struct skl_ddb_entry plane_ddb[I915_MAX_PLANES];
/* pre-icl: for planar Y */
struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+
+ /*
+ * xe3: Minimum amount of display blocks and minimum
+ * sagv allocation required for async flip
+ */
+ u16 plane_min_ddb[I915_MAX_PLANES];
+ u16 plane_interim_ddb[I915_MAX_PLANES];
} skl;
struct {
@@ -1140,8 +1160,6 @@ struct intel_crtc_state {
bool double_wide;
- int pbn;
-
struct intel_crtc_scaler_state scaler_state;
/* w/a for waiting 2 vblanks during crtc enable */
@@ -1235,7 +1253,7 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
bool compression_enable;
- bool dsc_split;
+ int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
u16 compressed_bpp_x16;
u8 slice_count;
@@ -1568,8 +1586,8 @@ struct intel_pps {
* requiring a reinitialization. Only relevant on BXT+.
*/
bool bxt_pps_reset;
- struct edp_power_seq pps_delays;
- struct edp_power_seq bios_pps_delays;
+ struct intel_pps_delays pps_delays;
+ struct intel_pps_delays bios_pps_delays;
};
struct intel_psr {
@@ -1803,11 +1821,13 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
- u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+
+ bool lane_reversal;
+ bool ddi_a_4_lanes;
bool release_cl2_override;
u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
@@ -1946,6 +1966,19 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_HDMI:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* See if the HDMI encoder is valid. */
+ return i915_mmio_reg_valid(enc_to_intel_hdmi(encoder)->hdmi_reg);
+ default:
+ return false;
+ }
+}
+
static inline struct intel_lspcon *
enc_to_intel_lspcon(struct intel_encoder *encoder)
{
@@ -2086,7 +2119,7 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- ((p) ? &to_i915(p)->display : NULL)
+ ((p) ? __drm_to_display(p) : NULL)
#define __device_to_intel_display(p) \
__drm_device_to_intel_display(dev_get_drvdata(p))
#define __pci_dev_to_intel_display(p) \
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 87bdacfd9edf..221d3abda791 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -638,8 +638,6 @@ void intel_dmc_disable_program(struct intel_display *display)
pipedmc_clock_gating_wa(display, true);
disable_all_event_handlers(display);
pipedmc_clock_gating_wa(display, false);
-
- intel_dmc_wl_disable(display);
}
void assert_dmc_loaded(struct intel_display *display)
@@ -1146,8 +1144,6 @@ void intel_dmc_suspend(struct intel_display *display)
if (dmc)
flush_work(&dmc->work);
- intel_dmc_wl_disable(display);
-
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(display))
intel_dmc_runtime_pm_put(display);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 5634ff07269d..02de3ae15074 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -5,6 +5,10 @@
#include <linux/kernel.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
@@ -39,7 +43,11 @@
* potential future use.
*/
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
+ * atomic variant of the MMIO wait helpers.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
struct intel_dmc_wl_range {
@@ -47,8 +55,90 @@ struct intel_dmc_wl_range {
u32 end;
};
-static struct intel_dmc_wl_range lnl_wl_range[] = {
+static const struct intel_dmc_wl_range powered_off_ranges[] = {
{ .start = 0x60000, .end = 0x7ffff },
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
+ { .start = 0x45500 }, /* DC_STATE_SEL */
+ { .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
+ { .start = 0x45504 }, /* DC_STATE_EN */
+ { .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
+ { .start = 0x454f0 }, /* RETENTION_CTRL */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+
+ /* TRANS_CMTG_CTL_* */
+ { .start = 0x6fa88 },
+ { .start = 0x6fb88 },
+
+ { .start = 0x46430 }, /* CHICKEN_DCPR_1 */
+ { .start = 0x46434 }, /* CHICKEN_DCPR_2 */
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+ { .start = 0x42084 }, /* CHICKEN_MISC_2 */
+ { .start = 0x42088 }, /* CHICKEN_MISC_3 */
+ { .start = 0x46160 }, /* CMTG_CLK_SEL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+
+ { .start = 0x45504 }, /* DC_STATE_EN */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ /* Scanline registers */
+ { .start = 0x70000 },
+ { .start = 0x70004 },
+ { .start = 0x70014 },
+ { .start = 0x70018 },
+ { .start = 0x71000 },
+ { .start = 0x71004 },
+ { .start = 0x71014 },
+ { .start = 0x71018 },
+ { .start = 0x72000 },
+ { .start = 0x72004 },
+ { .start = 0x72014 },
+ { .start = 0x72018 },
+ { .start = 0x73000 },
+ { .start = 0x73004 },
+ { .start = 0x73014 },
+ { .start = 0x73018 },
+ { .start = 0x7b000 },
+ { .start = 0x7b004 },
+ { .start = 0x7b014 },
+ { .start = 0x7b018 },
+ { .start = 0x7c000 },
+ { .start = 0x7c004 },
+ { .start = 0x7c014 },
+ { .start = 0x7c018 },
+
+ {},
};
static void __intel_dmc_wl_release(struct intel_display *display)
@@ -72,15 +162,18 @@ static void intel_dmc_wl_work(struct work_struct *work)
spin_lock_irqsave(&wl->lock, flags);
- /* Bail out if refcount reached zero while waiting for the spinlock */
- if (!refcount_read(&wl->refcount))
+ /*
+ * Bail out if refcount became non-zero while waiting for the spinlock,
+ * meaning that the lock is now taken again.
+ */
+ if (refcount_read(&wl->refcount))
goto out_unlock;
__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
@@ -91,38 +184,110 @@ out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-static bool intel_dmc_wl_check_range(u32 address)
+static void __intel_dmc_wl_take(struct intel_display *display)
{
- int i;
- bool wl_needed = false;
-
- for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
- if (address >= lnl_wl_range[i].start &&
- address <= lnl_wl_range[i].end) {
- wl_needed = true;
- break;
- }
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may already be taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (wl->taken)
+ return;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ /*
+ * We need to use the atomic variant of the waiting routine
+ * because the DMC wakelock is also taken in atomic context.
+ */
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ return;
}
- return wl_needed;
+ wl->taken = true;
+}
+
+static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
+ const struct intel_dmc_wl_range ranges[])
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ for (int i = 0; ranges[i].start; i++) {
+ u32 end = ranges[i].end ?: ranges[i].start;
+
+ if (ranges[i].start <= offset && offset <= end)
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
+{
+ const struct intel_dmc_wl_range *ranges;
+
+ /*
+ * Check that the offset is in one of the ranges for which
+ * registers are powered off during DC states.
+ */
+ if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
+ return true;
+
+ /*
+ * Check that the offset is for a register that is touched by
+ * the DMC and requires a DC exit for proper access.
+ */
+ switch (dc_state) {
+ case DC_STATE_EN_DC3CO:
+ ranges = xe3lpd_dc3co_dmc_ranges;
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ case DC_STATE_EN_UPTO_DC6:
+ ranges = xe3lpd_dc5_dc6_dmc_ranges;
+ break;
+ default:
+ ranges = NULL;
+ }
+
+ if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
+ return true;
+
+ return false;
}
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
- if (DISPLAY_VER(display) < 20 ||
- !intel_dmc_has_payload(display) ||
- !display->params.enable_dmc_wl)
- return false;
+ return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
+}
- return true;
+static void intel_dmc_wl_sanitize_param(struct intel_display *display)
+{
+ if (!HAS_DMC_WAKELOCK(display))
+ display->params.enable_dmc_wl = 0;
+ else if (display->params.enable_dmc_wl >= 0)
+ display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
+ else
+ display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;
+
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
+ display->params.enable_dmc_wl);
}
void intel_dmc_wl_init(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
- if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ intel_dmc_wl_sanitize_param(display);
+
+ if (!display->params.enable_dmc_wl)
return;
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
@@ -130,7 +295,8 @@ void intel_dmc_wl_init(struct intel_display *display)
refcount_set(&wl->refcount, 0);
}
-void intel_dmc_wl_enable(struct intel_display *display)
+/* Must only be called as part of enabling dynamic DC states. */
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
struct intel_dmc_wl *wl = &display->wl;
unsigned long flags;
@@ -140,7 +306,9 @@ void intel_dmc_wl_enable(struct intel_display *display)
spin_lock_irqsave(&wl->lock, flags);
- if (wl->enabled)
+ wl->dc_state = dc_state;
+
+ if (drm_WARN_ON(display->drm, wl->enabled))
goto out_unlock;
/*
@@ -151,12 +319,29 @@ void intel_dmc_wl_enable(struct intel_display *display)
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
- wl->taken = false;
+
+ /*
+ * This would be racy in the following scenario:
+ *
+ * 1. Function A calls intel_dmc_wl_get();
+ * 2. Some function calls intel_dmc_wl_disable();
+ * 3. Some function calls intel_dmc_wl_enable();
+ * 4. Concurrently with (3), function A performs the MMIO in between
+ * setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
+ * __intel_dmc_wl_take().
+ *
+ * TODO: Check with the hardware team whether it is safe to assert the
+ * hardware lock before enabling to avoid such a scenario. Otherwise, we
+ * would need to deal with it via software synchronization.
+ */
+ if (refcount_read(&wl->refcount))
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
@@ -165,40 +350,63 @@ void intel_dmc_wl_disable(struct intel_display *display)
if (!__intel_dmc_wl_supported(display))
return;
- flush_delayed_work(&wl->work);
+ intel_dmc_wl_flush_release_work(display);
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (drm_WARN_ON(display->drm, !wl->enabled))
goto out_unlock;
/* Disable wakelock in DMC */
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
- refcount_set(&wl->refcount, 0);
wl->enabled = false;
+
+ /*
+ * The spec is not explicit about the expectation of existing
+ * lock users at the moment of disabling, but it does say that we must
+ * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
+ * disable with existing lock users.
+ *
+ * TODO: Get the correct expectation from the hardware team.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
wl->taken = false;
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- unsigned long flags;
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
+ flush_delayed_work(&wl->work);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
return;
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
goto out_unlock;
+ if (!wl->enabled) {
+ if (!refcount_inc_not_zero(&wl->refcount))
+ refcount_set(&wl->refcount, 1);
+ goto out_unlock;
+ }
+
cancel_delayed_work(&wl->work);
if (refcount_inc_not_zero(&wl->refcount))
@@ -206,26 +414,7 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
refcount_set(&wl->refcount, 1);
- /*
- * Only try to take the wakelock if it's not marked as taken
- * yet. It may be already taken at this point if we have
- * already released the last reference, but the work has not
- * run yet.
- */
- if (!wl->taken) {
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
-
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
- WARN_RATELIMIT(1, "DMC wakelock ack timed out");
- goto out_unlock;
- }
-
- wl->taken = true;
- }
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
@@ -239,12 +428,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
- return;
-
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
@@ -252,6 +438,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
goto out_unlock;
if (refcount_dec_and_test(&wl->refcount)) {
+ if (!wl->enabled)
+ goto out_unlock;
+
__intel_dmc_wl_release(display);
goto out_unlock;
@@ -260,3 +449,13 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+
+void intel_dmc_wl_get_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_get(display, INVALID_MMIO_REG);
+}
+
+void intel_dmc_wl_put_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_put(display, INVALID_MMIO_REG);
+}
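The wakelock range tables in this file follow one convention: a zero .start terminates the array, and an omitted .end collapses the entry to a single register. A standalone sketch of that lookup, mirroring the shape of intel_dmc_wl_reg_in_range() (names and offsets here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Same convention as the new wakelock tables: a zero .start
 * terminates the array, and .end == 0 means a single register. */
struct wl_range {
        unsigned int start;
        unsigned int end;
};

static const struct wl_range dc5_ranges[] = {
        { .start = 0x45500 },                 /* single register */
        { .start = 0x45400, .end = 0x4540c }, /* register block */
        {},                                   /* sentinel */
};

static bool reg_in_range(unsigned int offset, const struct wl_range ranges[])
{
        for (int i = 0; ranges[i].start; i++) {
                unsigned int end = ranges[i].end ? ranges[i].end : ranges[i].start;

                if (ranges[i].start <= offset && offset <= end)
                        return true;
        }
        return false;
}

int main(void)
{
        printf("%d %d\n", reg_in_range(0x45404, dc5_ranges),
               reg_in_range(0x46000, dc5_ranges)); /* prints: 1 0 */
        return 0;
}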
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
index adab51208d0a..5488fbdf29b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -15,17 +15,27 @@
struct intel_display;
struct intel_dmc_wl {
- spinlock_t lock; /* protects enabled, taken and refcount */
+ spinlock_t lock; /* protects enabled, taken, dc_state and refcount */
bool enabled;
bool taken;
refcount_t refcount;
+ /*
+ * We are keeping a copy of the enabled DC state because
+ * intel_display.power.domains is protected by a mutex and we do
+ * not want to call mutex_lock() in atomic context, where some of
+ * the tracked MMIO operations happen.
+ */
+ u32 dc_state;
struct delayed_work work;
};
void intel_dmc_wl_init(struct intel_display *display);
-void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state);
void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_flush_release_work(struct intel_display *display);
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_get_noreg(struct intel_display *display);
+void intel_dmc_wl_put_noreg(struct intel_display *display);
#endif /* __INTEL_WAKELOCK_H__ */
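The get/put pairing gains a subtlety in this rework: references may be taken while the wakelock is disabled, and intel_dmc_wl_enable() re-asserts the hardware lock if any are outstanding. A toy model of that state machine, with the MMIO handshake reduced to a comment (field and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the wakelock state in the patch: references may be
 * taken while disabled, and enabling honors outstanding ones. */
static struct {
        bool enabled;
        bool taken;
        int refcount;
} wl;

static void wl_take_hw(void)
{
        if (wl.taken)
                return;
        /* would assert DMC_WAKELOCK_CTL_REQ and poll for the ACK here */
        wl.taken = true;
}

static void wl_get(void)
{
        wl.refcount++;
        if (wl.enabled)
                wl_take_hw();
}

static void wl_enable(void)
{
        wl.enabled = true;
        /* mirrors the patch: honor references taken while disabled */
        if (wl.refcount)
                wl_take_hw();
}

int main(void)
{
        wl_get();    /* taken before enable: only counted */
        wl_enable(); /* hardware lock asserted here */
        printf("taken=%d refcount=%d\n", wl.taken, wl.refcount);
        return 0;
}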
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ff5ba7b3035f..aa77ddcee42c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
+#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
@@ -93,8 +94,6 @@
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -109,10 +108,19 @@
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
-/* With Single pipe configuration, HW is capable of supporting maximum
- * of 4 slices per line.
+/*
+ * With a single pipe configuration, HW is capable of supporting a maximum of:
+ * 2 slices per line on ICL and BMG,
+ * 4 slices per line on other platforms.
+ * For now consider a max of 2 slices per line, which works for all platforms.
+ * With this we can have a max of 4 DSC slices per pipe.
+ *
+ * For higher resolutions, where 12-slice support is required with the
+ * ultrajoiner, each pipe can support 3 slices.
+ *
+ * #TODO Split this better to use 4 slices/dsc engine where supported.
*/
-static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+static const u8 valid_dsc_slicecount[] = {1, 2, 3, 4};
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -257,6 +265,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -266,7 +275,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
if (intel_dp->num_sink_rates)
return;
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -281,6 +290,7 @@ static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -294,7 +304,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
return;
}
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
@@ -327,7 +337,9 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
- if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (drm_WARN_ON(display->drm,
index < 0 || index >= intel_dp->num_common_rates))
return 162000;
@@ -454,16 +466,16 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
bool intel_dp_has_joiner(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* eDP MSO is not compatible with joiner */
if (intel_dp->mso_link_count)
return false;
- return DISPLAY_VER(dev_priv) >= 12 ||
- (DISPLAY_VER(dev_priv) == 11 &&
+ return DISPLAY_VER(display) >= 12 ||
+ (DISPLAY_VER(display) == 11 &&
encoder->port != PORT_A);
}
@@ -492,12 +504,13 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_encoder_is_c10phy(encoder))
return 810000;
- if (DISPLAY_VERx100(to_i915(encoder->base.dev)) == 1401)
+ if (DISPLAY_VERx100(display) == 1401)
return 1350000;
return 2000000;
@@ -551,17 +564,16 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
static const int g4x_rates[] = {
162000, 270000
};
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const int *source_rates;
int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->source_rates || intel_dp->num_source_rates);
- if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_BATTLEMAGE(dev_priv)) {
+ if (DISPLAY_VER(display) >= 14) {
+ if (display->platform.battlemage) {
source_rates = bmg_rates;
size = ARRAY_SIZE(bmg_rates);
} else {
@@ -569,26 +581,26 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(mtl_rates);
}
max_rate = mtl_max_source_rate(intel_dp);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
max_rate = dg2_max_source_rate(intel_dp);
- else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.alderlake_p || display->platform.alderlake_s ||
+ display->platform.dg1 || display->platform.rocketlake)
max_rate = 810000;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
- IS_BROADWELL(dev_priv)) {
+ } else if ((display->platform.haswell && !display->platform.haswell_ulx) ||
+ display->platform.broadwell) {
source_rates = hsw_rates;
size = ARRAY_SIZE(hsw_rates);
} else {
@@ -679,18 +691,18 @@ static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
static void intel_dp_link_config_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_dp_link_config *lc;
int num_common_lane_configs;
int i;
int j;
- if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
+ if (drm_WARN_ON(display->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
return;
num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates * num_common_lane_configs >
ARRAY_SIZE(intel_dp->link.configs)))
return;
@@ -714,10 +726,10 @@ static void intel_dp_link_config_init(struct intel_dp *intel_dp)
void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_dp_link_config *lc;
- if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
+ if (drm_WARN_ON(display->drm, idx < 0 || idx >= intel_dp->link.num_configs))
idx = 0;
lc = &intel_dp->link.configs[idx];
@@ -746,9 +758,9 @@ int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lan
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
@@ -758,7 +770,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia, there should always be something in common. */
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@@ -806,30 +818,30 @@ int intel_dp_bw_fec_overhead(bool fec_enabled)
}
static int
-small_joiner_ram_size_bits(struct drm_i915_private *i915)
+small_joiner_ram_size_bits(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 17280 * 8;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return 7680 * 8;
else
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ drm_dbg_kms(display->drm, "Unsupported BPP %u, min %u\n",
bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
 /* From XE_LPD onwards we support bpp values from bpc up to uncompressed bpp-1 */
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
/*
@@ -841,7 +853,8 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
* DSC enabled.
*/
if (bits_per_pixel < 8) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported BPP %u, min 8\n",
bits_per_pixel);
return 0;
}
@@ -852,7 +865,7 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
if (bits_per_pixel < valid_dsc_bpp[i + 1])
break;
}
- drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
+ drm_dbg_kms(display->drm, "Set dsc bpp from %d to VESA %d\n",
bits_per_pixel, valid_dsc_bpp[i]);
bits_per_pixel = valid_dsc_bpp[i];
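The snapping above walks valid_dsc_bpp[] and clamps down to the largest table entry not exceeding the requested bpp; this is the pre-DISPLAY_VER-13 path, since newer platforms clamp to pipe_bpp - 1 instead. A standalone sketch of that walk:

#include <stdio.h>

/* Reduced model of the VESA bpp snap: clamp down to the largest
 * valid table entry that does not exceed the requested bpp. */
static const unsigned char valid_dsc_bpp[] = { 6, 8, 10, 12, 15 };

static unsigned int nearest_valid_bpp(unsigned int bpp)
{
        unsigned int i;

        if (bpp < valid_dsc_bpp[0])
                return 0; /* below the minimum: unsupported */

        for (i = 0; i < sizeof(valid_dsc_bpp) - 1; i++) {
                if (bpp < valid_dsc_bpp[i + 1])
                        break;
        }
        return valid_dsc_bpp[i];
}

int main(void)
{
        printf("%u %u %u\n", nearest_valid_bpp(11),
               nearest_valid_bpp(15), nearest_valid_bpp(5)); /* 10 15 0 */
        return 0;
}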
@@ -887,11 +900,10 @@ static u32 small_joiner_ram_max_bpp(struct intel_display *display,
u32 mode_hdisplay,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 max_bpp;
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp = small_joiner_ram_size_bits(i915) / mode_hdisplay;
+ max_bpp = small_joiner_ram_size_bits(display) / mode_hdisplay;
max_bpp *= num_joined_pipes;
@@ -909,11 +921,10 @@ static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
}
static
-u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
+u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes)
{
- struct intel_display *display = to_intel_display(&i915->drm);
u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes);
if (num_joined_pipes > 1)
@@ -925,7 +936,7 @@ u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
return max_bpp;
}
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes,
@@ -967,17 +978,17 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
bits_per_pixel = min_t(u32, bits_per_pixel, 31);
- drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
+ drm_dbg_kms(display->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
bits_per_pixel, timeslots,
(link_clock * lane_count * 8),
intel_dp_mode_to_fec_clock(mode_clock));
- joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
+ joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, mode_clock,
mode_hdisplay, num_joined_pipes);
bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);
- bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);
+ bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(display, bits_per_pixel, pipe_bpp);
return bits_per_pixel;
}
@@ -986,7 +997,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
@@ -1001,12 +1012,12 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
* Due to some DSC engine BW limitations, we need to enable second
* slice and VDSC engine, whenever we approach close enough to max CDCLK
*/
- if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
min_slice_count = max_t(u8, min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported slice width %d by DP DSC Sink device\n",
max_slice_width);
return 0;
@@ -1020,6 +1031,13 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
+ /*
+ * 3 DSC Slices per pipe need 3 DSC engines,
+ * which is supported only with Ultrajoiner.
+ */
+ if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+ continue;
+
if (test_slice_count >
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
break;
@@ -1032,11 +1050,14 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
continue;
+ if (mode_hdisplay % test_slice_count)
+ continue;
+
if (min_slice_count <= test_slice_count)
return test_slice_count;
}
- drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm, "Unsupported Slice Count %d\n",
min_slice_count);
return 0;
}
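Putting the new slice-count rules together: 3 slices per pipe are valid only with the 4-pipe ultrajoiner, joined configurations need at least 2 slices per pipe, and the active width must divide evenly into slices. A reduced standalone model of the selection loop (the parameter values in main() are illustrative):

#include <stdio.h>

/* Reduced model of the slice-count walk in the patch. */
static const unsigned char valid_dsc_slicecount[] = { 1, 2, 3, 4 };

static int pick_slice_count(int min_slice_count, int hdisplay,
                            int num_joined_pipes, int sink_max_slices)
{
        for (unsigned int i = 0; i < sizeof(valid_dsc_slicecount); i++) {
                int test = valid_dsc_slicecount[i] * num_joined_pipes;

                /* 3 slices per pipe need 3 engines: ultrajoiner only */
                if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
                        continue;
                if (test > sink_max_slices)
                        break;
                /* joined pipes need at least 2 slices per pipe */
                if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
                        continue;
                /* the active width must split evenly into slices */
                if (hdisplay % test)
                        continue;
                if (min_slice_count <= test)
                        return test;
        }
        return 0; /* unsupported */
}

int main(void)
{
        /* e.g. 2 joined pipes, sink allows 8 slices, width 5120 */
        printf("slices: %d\n", pick_slice_count(3, 5120, 2, 8)); /* 4 */
        return 0;
}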
@@ -1044,7 +1065,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
static bool source_can_output(struct intel_dp *intel_dp,
enum intel_output_format format)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (format) {
case INTEL_OUTPUT_FORMAT_RGB:
@@ -1056,11 +1077,11 @@ static bool source_can_output(struct intel_dp *intel_dp,
* Also, ILK doesn't seem capable of DP YCbCr output.
 * The displayed image is severely corrupted. SNB+ is fine.
*/
- return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);
+ return !HAS_GMCH(display) && !display->platform.ironlake;
case INTEL_OUTPUT_FORMAT_YCBCR420:
/* Platform < Gen 11 cannot output YCbCr420 format */
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
default:
MISSING_CASE(format);
@@ -1120,8 +1141,8 @@ static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
enum intel_output_format sink_format)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
enum intel_output_format force_dsc_output_format =
intel_dp->force_dsc_output_format;
enum intel_output_format output_format;
@@ -1132,7 +1153,7 @@ intel_dp_output_format(struct intel_connector *connector,
dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
return force_dsc_output_format;
- drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
+ drm_dbg_kms(display->drm, "Cannot force DSC output format\n");
}
if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
@@ -1146,7 +1167,7 @@ intel_dp_output_format(struct intel_connector *connector,
else
output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));
+ drm_WARN_ON(display->drm, !source_can_output(intel_dp, output_format));
return output_format;
}
@@ -1197,7 +1218,7 @@ intel_dp_mode_min_output_bpp(struct intel_connector *connector,
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
-static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+static bool intel_dp_hdisplay_bad(struct intel_display *display,
int hdisplay)
{
/*
@@ -1213,7 +1234,7 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
*
* TODO: confirm the behaviour on HSW+
*/
- return hdisplay == 4096 && !HAS_DDI(dev_priv);
+ return hdisplay == 4096 && !HAS_DDI(display);
}
static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
@@ -1314,7 +1335,7 @@ bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
int hdisplay, int clock,
int num_joined_pipes)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int hdisplay_limit;
if (!intel_dp_has_joiner(intel_dp))
@@ -1322,9 +1343,9 @@ bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
num_joined_pipes /= 2;
- hdisplay_limit = DISPLAY_VER(i915) >= 30 ? 6144 : 5120;
+ hdisplay_limit = DISPLAY_VER(display) >= 30 ? 6144 : 5120;
- return clock > num_joined_pipes * i915->display.cdclk.max_dotclk_freq ||
+ return clock > num_joined_pipes * display->cdclk.max_dotclk_freq ||
hdisplay > num_joined_pipes * hdisplay_limit;
}
@@ -1333,16 +1354,15 @@ int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
int hdisplay, int clock)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (connector->force_joined_pipes)
return connector->force_joined_pipes;
- if (HAS_ULTRAJOINER(i915) &&
+ if (HAS_ULTRAJOINER(display) &&
intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4))
return 4;
- if ((HAS_BIGJOINER(i915) || HAS_UNCOMPRESSED_JOINER(i915)) &&
+ if ((HAS_BIGJOINER(display) || HAS_UNCOMPRESSED_JOINER(display)) &&
intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2))
return 2;
@@ -1351,12 +1371,12 @@ int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
bool intel_dp_has_dsc(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return false;
- if (connector->mst_port && !HAS_DSC_MST(i915))
+ if (connector->mst_port && !HAS_DSC_MST(display))
return false;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -1373,13 +1393,14 @@ static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
@@ -1412,7 +1433,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ if (intel_dp_hdisplay_bad(display, mode->hdisplay))
return MODE_H_ILLEGAL;
max_link_clock = intel_dp_max_link_rate(intel_dp);
@@ -1447,7 +1468,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
true);
} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
@@ -1465,7 +1486,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc)
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1478,51 +1499,43 @@ intel_dp_mode_valid(struct drm_connector *_connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
}
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps3(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
+ return DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell;
}
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps4(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 10;
+ return DISPLAY_VER(display) >= 10;
}
-static void snprintf_int_array(char *str, size_t len,
- const int *array, int nelem)
+static void seq_buf_print_array(struct seq_buf *s, const int *array, int nelem)
{
int i;
- str[0] = '\0';
-
- for (i = 0; i < nelem; i++) {
- int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
- if (r >= len)
- return;
- str += r;
- len -= r;
- }
+ for (i = 0; i < nelem; i++)
+ seq_buf_printf(s, "%s%d", i ? ", " : "", array[i]);
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- char str[128]; /* FIXME: too big for stack? */
+ struct intel_display *display = to_intel_display(intel_dp);
+ DECLARE_SEQ_BUF(s, 128); /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- snprintf_int_array(str, sizeof(str),
- intel_dp->source_rates, intel_dp->num_source_rates);
- drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
+ seq_buf_print_array(&s, intel_dp->source_rates, intel_dp->num_source_rates);
+ drm_dbg_kms(display->drm, "source rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->sink_rates, intel_dp->num_sink_rates);
- drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->sink_rates, intel_dp->num_sink_rates);
+ drm_dbg_kms(display->drm, "sink rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->common_rates, intel_dp->num_common_rates);
- drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->common_rates, intel_dp->num_common_rates);
+ drm_dbg_kms(display->drm, "common rates: %s\n", seq_buf_str(&s));
}
static int forced_link_rate(struct intel_dp *intel_dp)
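seq_buf replaces the hand-rolled snprintf loop above because appends are bounded and truncation is handled inside the abstraction rather than at every call site. A userspace stand-in showing the same pattern (this sketches the idea only and is not the kernel's seq_buf API):

#include <stdarg.h>
#include <stdio.h>

/* Minimal userspace stand-in for a bounded append buffer. */
struct seq_buf_sketch {
        char buf[128];
        size_t len;
};

static void sb_printf(struct seq_buf_sketch *s, const char *fmt, ...)
{
        size_t avail = sizeof(s->buf) - s->len;
        va_list ap;
        int r;

        if (s->len >= sizeof(s->buf))
                return; /* already full: silently drop, like truncation */
        va_start(ap, fmt);
        r = vsnprintf(s->buf + s->len, avail, fmt, ap);
        va_end(ap);
        if (r > 0)
                s->len += (size_t)r < avail ? (size_t)r : avail;
}

static void print_array(struct seq_buf_sketch *s, const int *array, int nelem)
{
        for (int i = 0; i < nelem; i++)
                sb_printf(s, "%s%d", i ? ", " : "", array[i]);
}

int main(void)
{
        struct seq_buf_sketch s = { .len = 0 };
        int rates[] = { 162000, 270000, 540000 };

        print_array(&s, rates, 3);
        printf("source rates: %s\n", s.buf);
        return 0;
}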
@@ -1559,11 +1572,11 @@ intel_dp_min_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
- if (drm_WARN_ON(&i915->drm, i < 0))
+ if (drm_WARN_ON(display->drm, i < 0))
i = 0;
return i;
@@ -1593,13 +1606,13 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ if (DISPLAY_VER(display) == 11 && encoder->port != PORT_A &&
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
@@ -1614,13 +1627,15 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(connector->dp.fec_capability);
}
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
if (!intel_dp_has_dsc(connector))
return false;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
return false;
return intel_dsc_source_support(crtc_state);
@@ -1662,8 +1677,8 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
int bpp, bpc;
bpc = crtc_state->pipe_bpp / 3;
@@ -1685,13 +1700,13 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
bpp = bpc * 3;
if (intel_dp_is_edp(intel_dp)) {
 /* Get bpp from VBT only for panels that don't have bpp in EDID */
- if (intel_connector->base.display_info.bpc == 0 &&
- intel_connector->panel.vbt.edp.bpp &&
- intel_connector->panel.vbt.edp.bpp < bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ if (connector->base.display_info.bpc == 0 &&
+ connector->panel.vbt.edp.bpp &&
+ connector->panel.vbt.edp.bpp < bpp) {
+ drm_dbg_kms(display->drm,
"clamping bpp for eDP panel to BIOS-provided %i\n",
- intel_connector->panel.vbt.edp.bpp);
- bpp = intel_connector->panel.vbt.edp.bpp;
+ connector->panel.vbt.edp.bpp);
+ bpp = connector->panel.vbt.edp.bpp;
}
}
@@ -1700,13 +1715,13 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
static bool has_seamless_m_n(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Seamless M/N reprogramming only implemented
* for BDW+ double buffered M/N registers so far.
*/
- return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ return HAS_DOUBLE_BUFFERED_M_N(display) &&
intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}
@@ -1768,32 +1783,31 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static
-u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
{
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return 12;
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 max_req_bpc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
- u8 dsc_max_bpc;
+ int dsc_max_bpc;
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
+ dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
if (!dsc_max_bpc)
return dsc_max_bpc;
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
+ dsc_max_bpc = min(dsc_max_bpc, max_req_bpc);
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
@@ -1805,9 +1819,9 @@ int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
return 0;
}
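
An illustrative sketch of the selection this function performs, with hypothetical
sink capabilities (only drm_dp_dsc_sink_supported_input_bpcs() in the hunk above
is the real API; everything below is invented for the example):

/* Pick the largest sink-supported DSC input bpc that does not exceed
 * the source/user limit, as intel_dp_dsc_compute_max_bpp() does.
 * Standalone sketch, not driver code. */
#include <stdio.h>

int main(void)
{
        int dsc_bpc[3] = { 8, 10, 12 }; /* hypothetical sink bpcs */
        int num_bpc = 3, dsc_max_bpc = 10, best = 0;
        int i;

        for (i = 0; i < num_bpc; i++) {
                if (dsc_bpc[i] <= dsc_max_bpc && dsc_bpc[i] > best)
                        best = dsc_bpc[i];
        }
        printf("max DSC pipe bpp = %d\n", best * 3); /* prints 30 */
        return 0;
}
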
-static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
+static int intel_dp_source_dsc_version_minor(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+ return DISPLAY_VER(display) >= 14 ? 2 : 1;
}
static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -1841,7 +1855,7 @@ static int intel_dp_get_slice_height(int vactive)
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int ret;
@@ -1864,7 +1878,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(intel_dp_source_dsc_version_minor(i915),
+ min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
if (vdsc_cfg->convert_rgb)
vdsc_cfg->convert_rgb =
@@ -1874,7 +1888,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
if (!vdsc_cfg->line_buf_depth) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -1889,7 +1903,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
enum intel_output_format output_format)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 sink_dsc_format;
switch (output_format) {
@@ -1900,7 +1914,7 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
sink_dsc_format = DP_DSC_YCbCr444;
break;
case INTEL_OUTPUT_FORMAT_YCBCR420:
- if (min(intel_dp_source_dsc_version_minor(i915),
+ if (min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
return false;
sink_dsc_format = DP_DSC_YCbCr420_Native;
@@ -1961,7 +1975,7 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
static
u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);
@@ -1986,7 +2000,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -2004,7 +2018,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
}
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
@@ -2019,13 +2033,22 @@ static int dsc_src_min_compressed_bpp(void)
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Forcing DSC and using the platform's max compressed bpp is seen to cause
+ * underruns. Since DSC isn't needed in these cases, limit the
+ * max compressed bpp to 18, which is a safe value across platforms with different
+ * pipe bpps.
+ */
+ if (intel_dp->force_dsc_en)
+ return 18;
/*
* Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platforms it is 23bpp. (Bspec:49259).
*/
- if (DISPLAY_VER(i915) < 13)
+ if (DISPLAY_VER(display) < 13)
return 23;
else
return 27;
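
A minimal standalone sketch of the policy above; the function name is invented
for the example, and the 18/23/27 values come from the comments in this hunk:

#include <stdbool.h>
#include <stdio.h>

static int src_max_compressed_bpp(int display_ver, bool force_dsc)
{
        if (force_dsc)
                return 18; /* safe value when DSC is forced via debugfs */
        return display_ver < 13 ? 23 : 27;
}

int main(void)
{
        printf("%d %d %d\n",
               src_max_compressed_bpp(12, false),  /* 23 */
               src_max_compressed_bpp(14, false),  /* 27 */
               src_max_compressed_bpp(14, true));  /* 18 */
        return 0;
}
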
@@ -2049,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
+ for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
+ if (valid_dsc_bpp[i] < dsc_min_bpp ||
+ valid_dsc_bpp[i] > dsc_max_bpp)
continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -2086,13 +2108,13 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 compressed_bppx16;
u8 bppx16_step;
int ret;
- if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ if (DISPLAY_VER(display) < 14 || bppx16_incr <= 1)
bppx16_step = 16;
else
bppx16_step = 16 / bppx16_incr;
@@ -2116,7 +2138,8 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
if (intel_dp->force_dsc_fractional_bpp_en &&
fxp_q4_to_frac(compressed_bppx16))
- drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+ drm_dbg_kms(display->drm,
+ "Forcing DSC fractional bpp\n");
return 0;
}
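
For reference, this is the fixed-point walk the xelpd path performs, as a
self-contained sketch with hypothetical limits: bpp values are tracked in
1/16 bpp units, so a sink bpp increment of 2 yields a step of 16 / 2 = 8,
i.e. 0.5 bpp granularity.

#include <stdio.h>

int main(void)
{
        int display_ver = 14, bppx16_incr = 2; /* hypothetical sink incr */
        int step, bppx16;

        /* whole-bpp steps when fractional bpp is unsupported */
        if (display_ver < 14 || bppx16_incr <= 1)
                step = 16;
        else
                step = 16 / bppx16_incr; /* 0.5 bpp granularity here */

        for (bppx16 = 27 * 16; bppx16 >= 26 * 16; bppx16 -= step)
                printf("try %d.%04d bpp\n", bppx16 / 16, (bppx16 % 16) * 625);
        return 0;
}
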
@@ -2131,68 +2154,46 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
-
- dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
+ dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
- dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}
-static
-u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_min_src_input_bpc(void)
{
/* Min DSC Input BPC for ICL+ is 8 */
- return HAS_DSC(i915) ? 8 : 0;
+ return 8;
}
static
-bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
+bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
int pipe_bpp)
{
- u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
-
- dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
-
- dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
- dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
-
- return pipe_bpp >= dsc_min_pipe_bpp &&
- pipe_bpp <= dsc_max_pipe_bpp;
+ return pipe_bpp >= limits->pipe.min_bpp &&
+ pipe_bpp <= limits->pipe.max_bpp;
}
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
- struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int forced_bpp;
if (!intel_dp->force_dsc_bpc)
@@ -2200,12 +2201,14 @@ int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
forced_bpp = intel_dp->force_dsc_bpc * 3;
- if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
- drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
+ if (is_dsc_pipe_bpp_sufficient(limits, forced_bpp)) {
+ drm_dbg_kms(display->drm, "Input DSC BPC forced to %d\n",
+ intel_dp->force_dsc_bpc);
return forced_bpp;
}
- drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
+ drm_dbg_kms(display->drm,
+ "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
intel_dp->force_dsc_bpc);
return 0;
@@ -2217,17 +2220,15 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct link_config_limits *limits,
int timeslots)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- u8 max_req_bpc = conn_state->max_requested_bpc;
- u8 dsc_max_bpc, dsc_max_bpp;
- u8 dsc_min_bpc, dsc_min_bpp;
+ int dsc_max_bpp;
+ int dsc_min_bpp;
u8 dsc_bpc[3] = {};
int forced_bpp, pipe_bpp;
int num_bpc, i, ret;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
@@ -2238,15 +2239,8 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
- if (!dsc_max_bpc)
- return -EINVAL;
-
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
- dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
-
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
- dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
+ dsc_max_bpp = limits->pipe.max_bpp;
+ dsc_min_bpp = limits->pipe.min_bpp;
/*
* Get the maximum DSC bpc that will be supported by any valid
@@ -2275,24 +2269,24 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int pipe_bpp, forced_bpp;
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
pipe_bpp = forced_bpp;
} else {
- int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);
+ int max_bpc = limits->pipe.max_bpp / 3;
/* For eDP use max bpp that can be supported with DSC. */
pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
- if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
- drm_dbg_kms(&i915->drm,
+ if (!is_dsc_pipe_bpp_sufficient(limits, pipe_bpp)) {
+ drm_dbg_kms(display->drm,
"Computed BPC is not in DSC BPC limits\n");
return -EINVAL;
}
@@ -2300,17 +2294,9 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->port_clock = limits->max_rate;
pipe_config->lane_count = limits->max_lane_count;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
@@ -2323,6 +2309,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
+static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->fec_enable)
+ return;
+
+ /*
+	 * Though eDP v1.5 supports FEC with DSC, unlike DP it is optional.
+	 * Since FEC is a bandwidth overhead, continue to not enable it for
+	 * eDP until there is a good reason to do so.
+ */
+ if (intel_dp_is_edp(intel_dp))
+ return;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ return;
+
+ crtc_state->fec_enable = true;
+}
+
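
The resulting decision table, as a standalone sketch (the helper name is
invented): FEC stays off for eDP, where it is optional and pure overhead, and
for UHBR links, whose 128b/132b channel coding already carries error
correction, and is otherwise enabled for DSC on DP.

#include <stdbool.h>
#include <stdio.h>

static bool want_fec(bool already_on, bool is_edp, bool is_uhbr)
{
        if (already_on)
                return true;
        if (is_edp || is_uhbr)
                return false;
        return true;
}

int main(void)
{
        printf("DP 8b/10b: %d\n", want_fec(false, false, false)); /* 1 */
        printf("eDP:       %d\n", want_fec(false, true,  false)); /* 0 */
        printf("DP UHBR:   %d\n", want_fec(false, false, true));  /* 0 */
        return 0;
}
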
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2330,8 +2336,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int timeslots,
bool compute_pipe_bpp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -2339,18 +2344,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int ret;
- /*
- * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
- * Since, FEC is a bandwidth overhead, continue to not enable it for
- * eDP. Until, there is a good reason to do so.
- */
- pipe_config->fec_enable = pipe_config->fec_enable ||
- (!intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config) &&
- !intel_dp_is_uhbr(pipe_config));
-
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
+ intel_dp_fec_compute_config(intel_dp, pipe_config);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2369,7 +2363,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
conn_state, limits, timeslots);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No Valid pipe bpp for given mode ret = %d\n", ret);
return ret;
}
@@ -2381,7 +2375,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
true);
if (!pipe_config->dsc.slice_count) {
- drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported Slice Count %d\n",
pipe_config->dsc.slice_count);
return -EINVAL;
}
@@ -2394,7 +2389,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_hdisplay,
num_joined_pipes);
if (!dsc_dp_slice_count) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Compressed Slice Count not supported\n");
return -EINVAL;
}
@@ -2405,13 +2400,20 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
+	 * In the case of the Ultrajoiner with 12 slices we need to use 3
+	 * VDSC instances.
*/
- if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
- pipe_config->dsc.dsc_split = true;
+ if (pipe_config->joiner_pipes && num_joined_pipes == 4 &&
+ pipe_config->dsc.slice_count == 12)
+ pipe_config->dsc.num_streams = 3;
+ else if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
+ pipe_config->dsc.num_streams = 2;
+ else
+ pipe_config->dsc.num_streams = 1;
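
The new num_streams ladder, restated as a self-contained sketch with a few
example inputs (the helper name is invented):

#include <stdbool.h>
#include <stdio.h>

static int dsc_num_streams(bool joined, int num_joined_pipes, int slice_count)
{
        if (joined && num_joined_pipes == 4 && slice_count == 12)
                return 3; /* ultrajoiner with 12 slices */
        if (joined || slice_count > 1)
                return 2;
        return 1;
}

int main(void)
{
        printf("%d\n", dsc_num_streams(true, 4, 12)); /* 3 */
        printf("%d\n", dsc_num_streams(true, 2, 8));  /* 2 */
        printf("%d\n", dsc_num_streams(false, 1, 1)); /* 1 */
        return 0;
}
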
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Cannot compute valid DSC parameters for Input Bpp = %d"
"Compressed BPP = " FXP_Q4_FMT "\n",
pipe_config->pipe_bpp,
@@ -2420,7 +2422,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
pipe_config->dsc.compression_enable = true;
- drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
@@ -2429,25 +2431,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
-/**
- * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
- * @intel_dp: intel DP
- * @crtc_state: crtc state
- * @dsc: DSC compression mode
- * @limits: link configuration limits
- *
- * Calculates the output link min, max bpp values in @limits based on the
- * pipe bpp range, @crtc_state and @dsc mode.
- *
- * Returns %true in case of success.
+/*
+ * Calculate the output link min, max bpp values in limits based on the pipe bpp
+ * range, crtc_state and dsc mode. Return true on success.
*/
-bool
+static bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2465,17 +2460,27 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
- /*
- * TODO: set the DSC link limits already here, atm these are
- * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
- * intel_dp_dsc_compute_pipe_bpp()
- */
- limits->link.min_bpp_x16 = 0;
+ int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
+ int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+
+ dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
+
+ dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ limits->pipe.max_bpp / 3);
+ dsc_max_bpp = dsc_sink_max_bpp ?
+ min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
+
+ max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
}
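
The min/max combination above reduces to the following window computation,
shown as a standalone sketch in integer bpp (the driver keeps these in x16
fixed point; a sink max of 0 means the sink imposes no limit):

#include <stdio.h>

static void dsc_link_bpp_window(int src_min, int sink_min,
                                int src_max, int sink_max,
                                int *min_bpp, int *max_bpp)
{
        *min_bpp = src_min > sink_min ? src_min : sink_min;
        *max_bpp = sink_max ? (sink_max < src_max ? sink_max : src_max)
                            : src_max;
}

int main(void)
{
        int lo, hi;

        dsc_link_bpp_window(8, 6, 27, 24, &lo, &hi);
        printf("link bpp window: [%d, %d]\n", lo, hi); /* [8, 24] */
        return 0;
}
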
limits->link.max_bpp_x16 = max_link_bpp_x16;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
@@ -2489,29 +2494,62 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
return true;
}
-static bool
+static void
+intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
+ struct link_config_limits *limits)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
+ int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
+
+ limits->pipe.max_bpp = clamp(limits->pipe.max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+ limits->pipe.min_bpp = clamp(limits->pipe.min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+}
+
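
A worked example of the new clamp, assuming the 8..12 bpc source range from
the tables earlier in this file; clamp_int() stands in for the kernel's
clamp() macro:

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        int dsc_min_bpc = 8, dsc_max_bpc = 12;    /* TGL+ per the table */
        int pipe_min_bpp = 18, pipe_max_bpp = 42; /* hypothetical limits */

        pipe_min_bpp = clamp_int(pipe_min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
        pipe_max_bpp = clamp_int(pipe_max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
        printf("DSC pipe bpp limits: [%d, %d]\n", pipe_min_bpp, pipe_max_bpp);
        /* prints [24, 36] */
        return 0;
}
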
+bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
- /* FIXME 128b/132b SST support missing */
- limits->max_rate = min(limits->max_rate, 810000);
+ /* FIXME 128b/132b SST+DSC support missing */
+ if (!is_mst && dsc)
+ limits->max_rate = min(limits->max_rate, 810000);
limits->min_rate = min(limits->min_rate, limits->max_rate);
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
- respect_downstream_limits);
+ if (is_mst) {
+ /*
+ * FIXME: If all the streams can't fit into the link with their
+ * current pipe_bpp we should reduce pipe_bpp across the board
+ * until things start to fit. Until then we limit to <= 8bpc
+ * since that's what was hardcoded for all MST streams
+ * previously. This hack should be removed once we have the
+ * proper retry logic in place.
+ */
+ limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
+ } else {
+ limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
+ respect_downstream_limits);
+ }
+
+ if (dsc)
+ intel_dp_dsc_compute_pipe_bpp_limits(intel_dp, limits);
- if (intel_dp->use_max_params) {
+ if (is_mst || intel_dp->use_max_params) {
/*
+ * For MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ *
* Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of in case the initial fast
* optimal params failed us. The panels are generally
@@ -2526,6 +2564,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
+ intel_dp->attached_connector,
crtc_state,
dsc,
limits);
@@ -2542,7 +2581,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
int num_joined_pipes)
{
/*
@@ -2551,7 +2590,7 @@ bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
* compression.
* Ultrajoiner always needs compression.
*/
- return (!HAS_UNCOMPRESSED_JOINER(i915) && num_joined_pipes == 2) ||
+ return (!HAS_UNCOMPRESSED_JOINER(display) && num_joined_pipes == 2) ||
num_joined_pipes == 4;
}
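
The predicate in isolation, as a sketch: the big joiner (2 pipes) needs DSC
only on platforms without uncompressed joiner support, while the ultrajoiner
(4 pipes) always needs it.

#include <stdbool.h>
#include <stdio.h>

static bool joiner_needs_dsc(bool has_uncompressed_joiner, int num_joined_pipes)
{
        return (!has_uncompressed_joiner && num_joined_pipes == 2) ||
               num_joined_pipes == 4;
}

int main(void)
{
        printf("%d\n", joiner_needs_dsc(true, 2));  /* 0 */
        printf("%d\n", joiner_needs_dsc(false, 2)); /* 1 */
        printf("%d\n", joiner_needs_dsc(true, 4));  /* 1 */
        return 0;
}
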
@@ -2561,7 +2600,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -2583,7 +2622,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (num_joined_pipes > 1)
pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, num_joined_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2598,12 +2637,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
conn_state, &limits);
+ if (!ret && intel_dp_is_uhbr(pipe_config))
+ ret = intel_dp_mtp_tu_compute_config(intel_dp,
+ pipe_config,
+ pipe_config->pipe_bpp,
+ pipe_config->pipe_bpp,
+ conn_state,
+ 0, false);
if (ret)
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
if (dsc_needed) {
- drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm,
+ "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
@@ -2619,7 +2671,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
@@ -2665,12 +2717,11 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
-static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
- enum port port)
+static bool intel_dp_port_has_audio(struct intel_display *display, enum port port)
{
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
return false;
- if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return false;
return true;
@@ -2680,8 +2731,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->has_panel_replay) {
/*
@@ -2758,7 +2808,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->bpc = crtc_state->pipe_bpp / 3;
/* only RGB pixelformat supports 6 bpc */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
/* all YCbCr are always limited range */
@@ -2778,7 +2828,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
@@ -2789,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
@@ -2848,8 +2897,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
int ret;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
if (!conn_state->hdr_output_metadata)
@@ -2858,7 +2907,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(display->drm,
+ "couldn't set HDR metadata in infoframe\n");
return;
}
@@ -2900,6 +2950,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
int link_bpp_x16)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
@@ -2918,7 +2969,8 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
return;
}
- if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+ if (display->platform.ironlake || display->platform.sandybridge ||
+ display->platform.ivybridge)
pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
pipe_config->has_drrs = true;
@@ -2940,13 +2992,13 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
static bool intel_dp_has_audio(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- if (!intel_dp_port_has_audio(i915, encoder->port))
+ if (!intel_dp_port_has_audio(display, encoder->port))
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
@@ -2961,7 +3013,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_display_info *info = &connector->base.display_info;
@@ -2972,7 +3024,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
@@ -3056,7 +3108,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3064,9 +3116,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *connector = intel_dp->attached_connector;
int ret = 0, link_bpp_x16;
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
- pipe_config->has_pch_encoder = true;
-
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
ret = intel_panel_compute_config(connector, adjusted_mode);
@@ -3084,7 +3133,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
- if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ if (intel_dp_hdisplay_bad(display, adjusted_mode->crtc_hdisplay))
return -EINVAL;
/*
@@ -3107,8 +3156,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- pipe_config->enhanced_framing =
- drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ if (intel_dp_is_uhbr(pipe_config)) {
+ /* 128b/132b SST also needs this */
+ pipe_config->mst_master_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ pipe_config->enhanced_framing =
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ }
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
@@ -3124,7 +3178,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->splitter.link_count = n;
pipe_config->splitter.pixel_overlap = overlap;
- drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ drm_dbg_kms(display->drm,
+ "MSO link count %d, pixel overlap %d\n",
n, overlap);
adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
@@ -3138,20 +3193,19 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp_x16,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- intel_dp_bw_fec_overhead(pipe_config->fec_enable),
- &pipe_config->dp_m_n);
+ if (!intel_dp_is_uhbr(pipe_config)) {
+ intel_link_compute_m_n(link_bpp_x16,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
+ }
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
- if (!HAS_DDI(dev_priv))
- g4x_dp_set_clock(encoder, pipe_config);
-
intel_vrr_compute_config(pipe_config, conn_state);
intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
@@ -3188,13 +3242,13 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_backlight_enable(crtc_state, conn_state);
intel_pps_backlight_on(intel_dp);
@@ -3204,12 +3258,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_pps_backlight_off(intel_dp);
intel_backlight_disable(old_conn_state);
@@ -3252,11 +3306,11 @@ static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
DP_DECOMPRESSION_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
@@ -3265,7 +3319,7 @@ static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->port ?
connector->port->passthrough_aux : NULL;
@@ -3274,7 +3328,7 @@ intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
if (write_dsc_decompression_flag(aux,
DP_DSC_PASSTHROUGH_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink compression passthrough state\n",
str_enable_disable(enable));
}
@@ -3283,7 +3337,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
const struct intel_connector *connector,
bool for_get_ref)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *_connector_iter;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
@@ -3308,7 +3362,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
if (!connector_iter->dp.dsc_decompression_enabled)
continue;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(for_get_ref && !new_conn_state->crtc) ||
(!for_get_ref && !old_conn_state->crtc));
@@ -3355,12 +3409,12 @@ void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!new_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
connector->dp.dsc_decompression_enabled))
return;
@@ -3386,12 +3440,12 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!old_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
!connector->dp.dsc_decompression_enabled))
return;
@@ -3406,7 +3460,7 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
static void
intel_dp_init_source_oui(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 oui[] = { 0x00, 0xaa, 0x01 };
u8 buf[3] = {};
@@ -3420,7 +3474,7 @@ intel_dp_init_source_oui(struct intel_dp *intel_dp)
* already set to what we want, so as to avoid clearing any state by accident
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
- drm_err(&i915->drm, "Failed to read source OUI\n");
+ drm_dbg_kms(display->drm, "Failed to read source OUI\n");
if (memcmp(oui, buf, sizeof(oui)) == 0) {
/* Assume the OUI was written now. */
@@ -3429,7 +3483,7 @@ intel_dp_init_source_oui(struct intel_dp *intel_dp)
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) {
- drm_info(&i915->drm, "Failed to write source OUI\n");
+ drm_dbg_kms(display->drm, "Failed to write source OUI\n");
WRITE_ONCE(intel_dp->oui_valid, false);
}
@@ -3443,10 +3497,11 @@ void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
connector->base.base.id, connector->base.name,
connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
@@ -3457,8 +3512,8 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3494,7 +3549,8 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
}
if (ret != 1)
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Set power to %s failed\n",
encoder->base.base.id, encoder->base.name,
mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
@@ -3537,7 +3593,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool fastset = true;
@@ -3547,7 +3603,8 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
*/
if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
crtc_state->port_clock) < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.connectors_changed = true;
fastset = false;
@@ -3561,14 +3618,15 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
* Remove once we have readout for DSC.
*/
if (crtc_state->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
}
if (CAN_PANEL_REPLAY(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -3580,7 +3638,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* Clear the cached register set to avoid using stale values */
@@ -3589,10 +3647,10 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
- drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ drm_err(display->drm, "Failed to read DPCD register 0x%x\n",
DP_PCON_DSC_ENCODER);
- drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "PCON ENCODER DSC DPCD: %*ph\n",
(int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
@@ -3630,19 +3688,19 @@ static int intel_dp_pcon_set_frl_mask(int max_frl)
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int max_frl_rate;
int max_lanes, rate_per_lane;
int max_dsc_lanes, dsc_rate_per_lane;
- max_lanes = connector->display_info.hdmi.max_lanes;
- rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_lanes = info->hdmi.max_lanes;
+ rate_per_lane = info->hdmi.max_frl_rate_per_lane;
max_frl_rate = max_lanes * rate_per_lane;
- if (connector->display_info.hdmi.dsc_cap.v_1p2) {
- max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
- dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (info->hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = info->hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = info->hdmi.dsc_cap.max_frl_rate_per_lane;
if (max_dsc_lanes && dsc_rate_per_lane)
max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
}
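
The bandwidth derivation above, as a standalone sketch with hypothetical sink
capabilities (48 Gbps FRL, limited to 40 Gbps when the HDMI DSC 1.2 lane/rate
pair applies):

#include <stdio.h>

int main(void)
{
        int max_lanes = 4, rate_per_lane = 12;     /* 48 Gbps FRL */
        int dsc_lanes = 4, dsc_rate_per_lane = 10; /* 40 Gbps with DSC */
        int max_frl = max_lanes * rate_per_lane;

        if (dsc_lanes && dsc_rate_per_lane) {
                int dsc_frl = dsc_lanes * dsc_rate_per_lane;

                if (dsc_frl < max_frl)
                        max_frl = dsc_frl;
        }
        printf("sink max FRL = %d Gbps\n", max_frl); /* 40 */
        return 0;
}
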
@@ -3664,19 +3722,19 @@ intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
-
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
u8 max_frl_bw_mask = 0, frl_trained_mask;
bool is_active;
max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
- drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+ drm_dbg(display->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
- drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+ drm_dbg(display->drm, "Sink max rate from EDID = %d Gbps\n",
+ max_edid_frl_bw);
max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
@@ -3684,7 +3742,7 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -EINVAL;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
- drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
+ drm_dbg(display->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
goto frl_trained;
@@ -3721,10 +3779,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -ETIMEDOUT;
frl_trained:
- drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
+ drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
intel_dp->frl.is_trained = true;
- drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+ drm_dbg(display->drm, "FRL trained with : %d Gbps\n",
+ intel_dp->frl.trained_rate_gbps);
return 0;
}
@@ -3763,7 +3822,7 @@ int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/*
* Always go for FRL training if:
@@ -3778,14 +3837,16 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
int ret, mode;
- drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
+ drm_dbg(display->drm,
+ "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = intel_dp_pcon_set_tmds_mode(intel_dp);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
- drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ drm_dbg(display->drm,
+ "Issue with PCON, cannot set TMDS mode\n");
} else {
- drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ drm_dbg(display->drm, "FRL training Completed\n");
}
}
@@ -3801,10 +3862,10 @@ static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
- int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
- int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int hdmi_throughput = info->hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = info->hdmi.dsc_cap.max_slices;
int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
@@ -3818,13 +3879,13 @@ intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int num_slices, int slice_width)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int output_format = crtc_state->output_format;
- bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ bool hdmi_all_bpp = info->hdmi.dsc_cap.all_bpp;
int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
int hdmi_max_chunk_bytes =
- connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+ info->hdmi.dsc_cap.total_chunk_kbytes * 1024;
return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
num_slices, output_format, hdmi_all_bpp,
@@ -3835,24 +3896,26 @@ void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info;
u8 pps_param[6];
int slice_height;
int slice_width;
int num_slices;
int bits_per_pixel;
int ret;
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_connector *connector;
bool hdmi_is_dsc_1_2;
if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
return;
- if (!intel_connector)
+ if (!connector)
return;
- connector = &intel_connector->base;
- hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ info = &connector->base.display_info;
+
+ hdmi_is_dsc_1_2 = info->hdmi.dsc_cap.v_1p2;
if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
!hdmi_is_dsc_1_2)
@@ -3883,13 +3946,13 @@ intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+ drm_dbg_kms(display->drm, "Failed to set pcon DSC\n");
}
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool ycbcr444_to_420 = false;
bool rgb_to_ycbcr = false;
u8 tmp;
@@ -3904,7 +3967,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
- drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
+ drm_dbg_kms(display->drm,
+ "Failed to %s protocol converter HDMI mode\n",
str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -3939,14 +4003,14 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter RGB->YCbCr conversion mode\n",
str_enable_disable(tmp));
}
@@ -3979,7 +4043,7 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Clear the cached register set to avoid using stale values
@@ -3998,11 +4062,11 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
&connector->dp.fec_capability) < 0) {
- drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
+ drm_err(display->drm, "Failed to read FEC DPCD register\n");
return;
}
- drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
}
@@ -4017,10 +4081,10 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
static void
intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return;
if (intel_dp_is_edp(intel_dp))
@@ -4034,8 +4098,8 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int n = intel_dp->mso_link_count;
int overlap = intel_dp->mso_pixel_overlap;
@@ -4050,7 +4114,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
drm_mode_set_name(mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
DRM_MODE_ARG(mode));
@@ -4058,7 +4122,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -4076,7 +4140,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
pipe_bpp, connector->panel.vbt.edp.bpp);
connector->panel.vbt.edp.bpp = pipe_bpp;
@@ -4085,7 +4149,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_display_info *info = &connector->base.display_info;
u8 mso;
@@ -4094,23 +4158,25 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
return;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
- drm_err(&i915->drm, "Failed to read MSO cap\n");
+ drm_err(display->drm, "Failed to read MSO cap\n");
return;
}
/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
- drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ drm_err(display->drm, "Invalid MSO link count cap %u\n", mso);
mso = 0;
}
if (mso) {
- drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ drm_dbg_kms(display->drm,
+ "Sink MSO %ux%u configuration, pixel overlap %u\n",
mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
info->mso_pixel_overlap);
- if (!HAS_MSO(i915)) {
- drm_err(&i915->drm, "No source MSO support, disabling\n");
+ if (!HAS_MSO(display)) {
+ drm_err(display->drm,
+ "No source MSO support, disabling\n");
mso = 0;
}
}
@@ -4161,11 +4227,10 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
/* this function is meant to be called only once */
- drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
return false;
@@ -4189,7 +4254,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@@ -4300,9 +4365,9 @@ static enum drm_dp_mst_mode
intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
enum drm_dp_mst_mode sink_mst_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!i915->display.params.enable_dp_mst)
+ if (!display->params.enable_dp_mst)
return DRM_DP_SST;
if (!intel_dp_mst_source_support(intel_dp))
@@ -4318,7 +4383,7 @@ intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
static enum drm_dp_mst_mode
intel_dp_mst_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum drm_dp_mst_mode sink_mst_mode;
enum drm_dp_mst_mode mst_detect;
@@ -4327,12 +4392,12 @@ intel_dp_mst_detect(struct intel_dp *intel_dp)
mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
intel_dp_mst_mode_str(sink_mst_mode),
- str_yes_no(i915->display.params.enable_dp_mst),
+ str_yes_no(display->params.enable_dp_mst),
intel_dp_mst_mode_str(mst_detect));
return mst_detect;
@@ -4358,12 +4423,13 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
static void
intel_dp_mst_disconnect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp->is_mst)
return;
- drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ drm_dbg_kms(display->drm,
+ "MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
@@ -4444,7 +4510,7 @@ static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
}
static ssize_t
-intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
+intel_dp_hdr_metadata_infoframe_sdp_pack(struct intel_display *display,
const struct hdmi_drm_infoframe *drm_infoframe,
struct dp_sdp *sdp,
size_t size)
@@ -4461,12 +4527,13 @@ intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
- drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
+ drm_dbg_kms(display->drm,
+ "buffer size is smaller than hdr metadata infoframe\n");
return -ENOSPC;
}
if (len != infoframe_size) {
- drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
+ drm_dbg_kms(display->drm, "wrong static hdr metadata size\n");
return -ENOSPC;
}
@@ -4524,8 +4591,8 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4538,7 +4605,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(display,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
@@ -4551,7 +4618,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
return;
}
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ if (drm_WARN_ON(display->drm, len < 0))
return;
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
@@ -4562,20 +4629,19 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
- crtc_state->cpu_transcoder);
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, crtc_state->cpu_transcoder);
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
- u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
+ u32 val = intel_de_read(display, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
- if (!enable && HAS_DSC(dev_priv))
+ if (!enable && HAS_DSC(display))
val &= ~VDIP_ENABLE_PPS;
/*
@@ -4585,8 +4651,8 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable || !crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
if (!enable)
return;
@@ -4707,8 +4773,8 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_as_sdp *as_sdp)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_ADAPTIVE_SYNC;
struct dp_sdp sdp = {};
int ret;
@@ -4722,7 +4788,7 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP AS SDP\n");
}
static int
@@ -4775,8 +4841,8 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
struct dp_sdp sdp = {};
int ret;
@@ -4790,15 +4856,15 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP VSC SDP\n");
}
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct hdmi_drm_infoframe *drm_infoframe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
struct dp_sdp sdp = {};
int ret;
@@ -4814,7 +4880,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
@@ -4844,8 +4910,8 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
static bool intel_dp_link_ok(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool uhbr = intel_dp->link_rate >= 1000000;
bool ok;
@@ -4859,7 +4925,7 @@ static bool intel_dp_link_ok(struct intel_dp *intel_dp,
return true;
intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s link not ok, retraining\n",
encoder->base.base.id, encoder->base.name,
uhbr ? "128b/132b" : "8b/10b");
@@ -4882,14 +4948,14 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_status[DP_LINK_STATUS_SIZE] = {};
const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
esi_link_status_size) != esi_link_status_size) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] Failed to read link status\n",
encoder->base.base.id, encoder->base.name);
return false;
@@ -4915,27 +4981,27 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(display->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to get ESI - device may have failed\n");
link_ok = false;
break;
}
- drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
+ drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
if (intel_dp->active_mst_links > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
@@ -4947,7 +5013,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
if (esi[3] & DP_TUNNELING_IRQ) {
- if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ if (drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
ack[3] |= DP_TUNNELING_IRQ;
@@ -4957,7 +5023,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
break;
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
- drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+ drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
@@ -5045,7 +5111,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -5058,7 +5124,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
return true;
/* MST */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
encoder = &intel_dp->mst_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
@@ -5086,14 +5152,14 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u8 *pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
*pipe_mask = 0;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state =
connector->base.state;
@@ -5113,7 +5179,8 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(display->drm,
+ !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
continue;
@@ -5143,6 +5210,7 @@ static bool intel_dp_is_connected(struct intel_dp *intel_dp)
static int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 pipe_mask;
@@ -5151,7 +5219,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_is_connected(intel_dp))
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -5169,7 +5237,8 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] retraining link (forced %s)\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
@@ -5180,7 +5249,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_dp->link.force_retrain = false;
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] link retraining failed: %pe\n",
encoder->base.base.id, encoder->base.name,
ERR_PTR(ret));
@@ -5213,7 +5282,7 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp)
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5232,12 +5301,12 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
+ drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool reprobe_needed = false;
u8 val;
@@ -5249,7 +5318,7 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
return false;
if ((val & DP_TUNNELING_IRQ) &&
- drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
@@ -5318,12 +5387,12 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
- if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
+ if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
lspcon_resume(dig_port);
@@ -5366,7 +5435,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
+ drm_dbg_kms(display->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5461,7 +5530,7 @@ static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
const struct drm_edid *drm_edid)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.max_bpc =
@@ -5485,7 +5554,7 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
intel_dp->downstream_ports);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
@@ -5518,7 +5587,7 @@ intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.ycbcr420_passthrough =
@@ -5536,7 +5605,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
@@ -5547,7 +5616,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_edid *drm_edid;
bool vrr_capable;
@@ -5560,7 +5629,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
@@ -5597,38 +5666,37 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
static void
intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
+ intel_dp->as_sdp_supported = HAS_AS_SDP(display) &&
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
static int
-intel_dp_detect(struct drm_connector *connector,
+intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
int ret;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
- return connector->status;
+ if (!intel_display_driver_check_access(display))
+ return connector->base.status;
- intel_dp_flush_connector_commits(intel_connector);
+ intel_dp_flush_connector_commits(connector);
intel_pps_vdd_on(intel_dp);
@@ -5654,7 +5722,7 @@ intel_dp_detect(struct drm_connector *connector,
if (status == connector_status_disconnected) {
intel_dp_test_reset(intel_dp);
- memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
@@ -5675,12 +5743,12 @@ intel_dp_detect(struct drm_connector *connector,
}
if (ret == 1)
- intel_connector->base.epoch_counter++;
+ connector->base.epoch_counter++;
if (!intel_dp_is_edp(intel_dp))
intel_psr_init_dpcd(intel_dp);
- intel_dp_detect_dsc_caps(intel_dp, intel_connector);
+ intel_dp_detect_dsc_caps(intel_dp, connector);
intel_dp_detect_sdp_caps(intel_dp);
@@ -5723,8 +5791,7 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) ||
- to_intel_connector(connector)->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp_check_device_service_irq(intel_dp);
@@ -5734,7 +5801,7 @@ out_unset_edid:
intel_dp_unset_edid(intel_dp);
if (!intel_dp_is_edp(intel_dp))
- drm_dp_set_subconnector_property(connector,
+ drm_dp_set_subconnector_property(&connector->base,
status,
intel_dp->dpcd,
intel_dp->downstream_ports);
@@ -5747,15 +5814,13 @@ out_vdd_off:
static void
intel_dp_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
@@ -5766,30 +5831,31 @@ intel_dp_force(struct drm_connector *connector)
intel_dp_set_edid(intel_dp);
}
-static int intel_dp_get_modes(struct drm_connector *connector)
+static int intel_dp_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int num_modes;
/* drm_edid_connector_update() done in ->detect() or ->force() */
- num_modes = drm_edid_connector_add_modes(connector);
+ num_modes = drm_edid_connector_add_modes(&connector->base);
/* Also add fixed mode, which may or may not be present in EDID */
- if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
- num_modes += intel_panel_get_modes(intel_connector);
+ if (intel_dp_is_edp(intel_dp))
+ num_modes += intel_panel_get_modes(connector);
if (num_modes)
return num_modes;
- if (!intel_connector->detect_edid) {
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ if (!connector->detect_edid) {
struct drm_display_mode *mode;
- mode = drm_dp_downstream_mode(connector->dev,
+ mode = drm_dp_downstream_mode(display->drm,
intel_dp->dpcd,
intel_dp->downstream_ports);
if (mode) {
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(&connector->base, mode);
num_modes++;
}
}
@@ -5800,7 +5866,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -5810,7 +5876,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ drm_dbg_kms(display->drm, "registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
@@ -5847,10 +5913,11 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_connector_sync_state(struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (crtc_state && crtc_state->dsc.compression_enable) {
- drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ drm_WARN_ON(display->drm,
+ !connector->dp.dsc_decompression_aux);
connector->dp.dsc_decompression_enabled = true;
} else {
connector->dp.dsc_decompression_enabled = false;
@@ -5880,18 +5947,18 @@ void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
intel_dp_aux_fini(intel_dp);
}
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_vdd_off_sync(intel_dp);
intel_dp_tunnel_suspend(intel_dp);
}
-void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_shutdown(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_wait_power_cycle(intel_dp);
}
@@ -5899,12 +5966,12 @@ void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
static int intel_modeset_tile_group(struct intel_atomic_state *state,
int tile_group_id)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int ret = 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -5940,13 +6007,13 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
if (transcoders == 0)
return 0;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5973,7 +6040,7 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
transcoders &= ~BIT(crtc_state->cpu_transcoder);
}
- drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+ drm_WARN_ON(display->drm, transcoders != 0);
return 0;
}
@@ -6007,7 +6074,7 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *_state)
{
- struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_display *display = to_intel_display(conn->dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
struct intel_connector *intel_conn = to_intel_connector(conn);
@@ -6037,7 +6104,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (conn->has_tile) {
@@ -6052,6 +6119,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
enum drm_connector_status hpd_state)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
bool hpd_high = hpd_state == connector_status_connected;
@@ -6059,10 +6127,12 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
bool need_work = false;
spin_lock_irq(&i915->irq_lock);
- if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
- i915->display.hotplug.event_bits |= BIT(hpd_pin);
+ if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
+ display->hotplug.event_bits |= BIT(hpd_pin);
- __assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
+ __assign_bit(hpd_pin,
+ &display->hotplug.oob_hotplug_last_state,
+ hpd_high);
need_work = true;
}
spin_unlock_irq(&i915->irq_lock);
@@ -6094,6 +6164,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -6108,7 +6179,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
long_hpd ? "long" : "short",
dig_port->base.base.base.id,
@@ -6116,7 +6187,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
- drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ drm_dbg_kms(display->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
dig_port->base.base.base.id,
dig_port->base.base.name,
long_hpd ? "long" : "short");
@@ -6149,7 +6220,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+static bool _intel_dp_is_port_edp(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -6157,41 +6228,40 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
* eDP not supported on g4x, so bail out early just
* for a bit of extra safety in case the VBT is bonkers.
*/
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
return false;
- if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
+ if (DISPLAY_VER(display) < 9 && port == PORT_A)
return true;
return devdata && intel_bios_encoder_supports_edp(devdata);
}
-bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_bios_encoder_data *devdata =
intel_bios_encoder_data_lookup(display, port);
- return _intel_dp_is_port_edp(i915, devdata, port);
+ return _intel_dp_is_port_edp(display, devdata, port);
}
bool
intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return true;
if (port == PORT_A)
return false;
- if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- DISPLAY_VER(i915) >= 9)
+ if (display->platform.haswell || display->platform.broadwell ||
+ DISPLAY_VER(display) >= 9)
return true;
return false;
@@ -6200,19 +6270,19 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
drm_connector_attach_dp_subconnector_property(connector);
- if (!IS_G4X(dev_priv) && port != PORT_A)
+ if (!display->platform.g4x && port != PORT_A)
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 6, 10);
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
@@ -6226,22 +6296,22 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
- if (HAS_VRR(dev_priv))
+ if (HAS_VRR(display))
drm_connector_attach_vrr_capable_property(connector);
}
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->display.vbt.orientation,
+ display->vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
@@ -6249,21 +6319,20 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum pipe pipe = INVALID_PIPE;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
pipe = vlv_pps_backlight_initial_pipe(intel_dp);
intel_backlight_setup(connector, pipe);
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct drm_connector *connector = &intel_connector->base;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6279,19 +6348,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
return false;
}
- intel_bios_init_panel_early(display, &intel_connector->panel,
+ intel_bios_init_panel_early(display, &connector->panel,
encoder->devdata);
if (!intel_pps_init(intel_dp)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
/*
@@ -6314,11 +6383,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_alpm_init_dpcd(intel_dp);
/* Cache DPCD and EDID for edp. */
- has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);
+ has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6341,7 +6410,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* DPCD read? Would need sort out the VDD handling...
*/
if (!intel_digital_port_connected(encoder)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6353,30 +6422,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* back to checking for a VGA branch device. Only do this
* on known affected platforms to minimize false positives.
*/
- if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
+ if (DISPLAY_VER(display) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
DP_DWN_STRM_PORT_TYPE_ANALOG) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
}
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_edid = drm_edid_read_ddc(connector, connector->ddc);
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_edid = drm_edid_read_ddc(&connector->base, connector->base.ddc);
if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- drm_edid = intel_opregion_get_edid(intel_connector);
+ drm_edid = intel_opregion_get_edid(connector);
if (drm_edid)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
}
if (drm_edid) {
- if (drm_edid_connector_update(connector, drm_edid) ||
- !drm_edid_connector_add_modes(connector)) {
- drm_edid_connector_update(connector, NULL);
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
@@ -6384,34 +6453,34 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
+ intel_bios_init_panel_late(display, &connector->panel, encoder->devdata,
IS_ERR(drm_edid) ? NULL : drm_edid);
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
- list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
- intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
+ intel_edp_mso_mode_fixup(connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
- drm_info(&dev_priv->drm,
+ if (!intel_panel_preferred_fixed_mode(connector)) {
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
- intel_panel_init(intel_connector, drm_edid);
+ intel_panel_init(connector, drm_edid);
- intel_edp_backlight_setup(intel_dp, intel_connector);
+ intel_edp_backlight_setup(intel_dp, connector);
intel_edp_add_properties(intel_dp);
@@ -6421,34 +6490,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
out_vdd_off:
intel_pps_vdd_off_sync(intel_dp);
- intel_bios_fini_panel(&intel_connector->panel);
+ intel_bios_fini_panel(&connector->panel);
return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_connector *connector = container_of(work, typeof(*connector),
+ modeset_retry_work);
+ struct intel_display *display = to_intel_display(connector);
- intel_connector = container_of(work, typeof(*intel_connector),
- modeset_retry_work);
- connector = &intel_connector->base;
- drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
+ connector->base.name);
/* Grab the locks before changing connector property */
- mutex_lock(&connector->dev->mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_connector_set_link_status_property(connector,
+ drm_connector_set_link_status_property(&connector->base,
DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_connector_hotplug_event(connector);
+ drm_kms_helper_connector_hotplug_event(&connector->base);
- drm_connector_put(connector);
+ drm_connector_put(&connector->base);
}
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
@@ -6459,45 +6526,44 @@ void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &dig_port->dp;
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_encoder->port;
+ enum port port = encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
- intel_dp_init_modeset_retry_work(intel_connector);
+ intel_dp_init_modeset_retry_work(connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ dig_port->max_lanes, encoder->base.base.id,
+ encoder->base.name))
return false;
intel_dp->reset_link_params = true;
/* Preserve the current hw state. */
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
- intel_dp->attached_connector = intel_connector;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
+ intel_dp->attached_connector = connector;
- if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
+ if (_intel_dp_is_port_edp(display, encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30,
* although in theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder) &&
- DISPLAY_VER(dev_priv) < 30);
+ drm_WARN_ON(dev, intel_encoder_is_tc(encoder) &&
+ DISPLAY_VER(display) < 30);
type = DRM_MODE_CONNECTOR_eDP;
- intel_encoder->type = INTEL_OUTPUT_EDP;
+ encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
+ if (drm_WARN_ON(dev, (display->platform.valleyview ||
+ display->platform.cherryview) &&
port != PORT_B && port != PORT_C))
return false;
} else {
@@ -6507,37 +6573,37 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_pipe_init(intel_dp);
intel_dp_aux_init(intel_dp);
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
+ connector->dp.dsc_decompression_aux = &intel_dp->aux;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ encoder->base.base.id, encoder->base.name);
- drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
+ drm_connector_init_with_ddc(dev, &connector->base, &intel_dp_connector_funcs,
type, &intel_dp->aux.ddc);
- drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
- connector->interlace_allowed = true;
+ if (!HAS_GMCH(display) && DISPLAY_VER(display) < 12)
+ connector->base.interlace_allowed = true;
if (type != DRM_MODE_CONNECTOR_eDP)
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_connector->base.polled = intel_connector->polled;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->base.polled = connector->polled;
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
- if (HAS_DDI(dev_priv))
- intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ if (HAS_DDI(display))
+ connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
- intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->sync_state = intel_dp_connector_sync_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
+ connector->sync_state = intel_dp_connector_sync_state;
- if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ if (!intel_edp_init_connector(intel_dp, connector)) {
intel_dp_aux_fini(intel_dp);
goto fail;
}
@@ -6547,15 +6613,14 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_reset_link_params(intel_dp);
/* init MST on ports that can support it */
- intel_dp_mst_encoder_init(dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(dig_port, connector->base.base.id);
- intel_dp_add_properties(intel_dp, connector);
+ intel_dp_add_properties(intel_dp, &connector->base);
if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) {
- int ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ int ret = intel_dp_hdcp_init(dig_port, connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -6568,19 +6633,19 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
fail:
intel_display_power_flush_work(dev_priv);
- drm_connector_cleanup(connector);
+ drm_connector_cleanup(&connector->base);
return false;
}
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
+void intel_dp_mst_suspend(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DDI)
@@ -6596,14 +6661,14 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
}
}
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
+void intel_dp_mst_resume(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
int ret;
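With that, the intel_dp.c side of the drm_i915_private to intel_display conversion is complete: every function now derives its device pointer with to_intel_display(), which accepts encoders, connectors, DP structs and atomic state alike. As a hedged illustration of how one macro can dispatch on all of these argument types, here is a minimal user-space sketch using C11 _Generic; the struct layouts and helper names below are simplified stand-ins, while the kernel's real dispatch lives in intel_display_conversion.h and covers many more types.

/* Sketch only: a simplified model of to_intel_display() type dispatch. */
#include <stdio.h>

struct intel_display { const char *name; };
struct intel_encoder { struct intel_display *display; };
struct intel_dp      { struct intel_encoder encoder; };

static struct intel_display *enc_to_display(struct intel_encoder *encoder)
{
	return encoder->display;
}

static struct intel_display *dp_to_display(struct intel_dp *intel_dp)
{
	return intel_dp->encoder.display;
}

/* One macro, many argument types, as used throughout the diff above. */
#define to_intel_display(p)					\
	_Generic((p),						\
		 struct intel_encoder *: enc_to_display,	\
		 struct intel_dp *: dp_to_display)(p)

int main(void)
{
	struct intel_display display = { .name = "display0" };
	struct intel_encoder encoder = { .display = &display };
	struct intel_dp dp = { .encoder = encoder };

	printf("%s\n", to_intel_display(&encoder)->name);
	printf("%s\n", to_intel_display(&dp)->name);
	return 0;
}

The payoff visible in the hunks above is uniform call sites: drm_dbg_kms(display->drm, ...) and friends look the same regardless of which object a function starts from.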
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 48f10876be65..ca49f0a05da5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,14 +12,14 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_vsc_sdp;
struct drm_encoder;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
-struct drm_dp_vsc_sdp;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -87,15 +87,15 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_has_dsc(const struct intel_connector *connector);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+void intel_dp_mst_suspend(struct intel_display *display);
+void intel_dp_mst_resume(struct intel_display *display);
int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
@@ -112,15 +112,15 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps3(struct intel_display *display);
+bool intel_dp_source_supports_tps4(struct intel_display *display);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915,
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
int num_joined_pipes);
bool intel_dp_has_joiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
@@ -137,16 +137,16 @@ bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
int num_joined_pipes,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config);
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
@@ -170,10 +170,11 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config);
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -193,11 +194,11 @@ void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp);
void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
-bool
-intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits);
+bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits,
+ bool dsc,
+ struct link_config_limits *limits);
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
@@ -206,5 +207,7 @@ bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
u8 lane_count);
bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
+int intel_dp_dsc_min_src_input_bpc(void);
#endif /* __INTEL_DP_H__ */
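Note that the header only gains a forward declaration, struct intel_display;, rather than a new include: prototypes that merely pass pointers never need the full definition, which keeps header dependencies flat. The same hunk also const-qualifies the crtc_state parameters of the DSC helpers, so they can be called from read-only contexts. A tiny standalone sketch of the forward-declaration rule, with hypothetical names, compiles as-is:

/* Sketch: an incomplete type is enough for pointer-passing prototypes. */
struct intel_display;				/* forward declaration only */

static int is_port_a(struct intel_display *display, int port)
{
	(void)display;				/* pointer passed, never dereferenced */
	return port == 0;
}

/* The full definition is only needed where members are accessed. */
struct intel_display { int ver; };

int main(void)
{
	struct intel_display display = { .ver = 14 };
	return !is_port_a(&display, 0);
}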
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 04a7acd7f73c..40c697476b72 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,8 +5,6 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_trace.h"
-#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -15,6 +13,7 @@
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_uncore_trace.h"
#define AUX_CH_NAME_BUFSIZE 6
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 33f72db99b58..c846ef4acf5b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,8 +34,9 @@
* for some reason.
*/
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 397cc4ebae52..6696a32cdd3e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -25,7 +25,8 @@
#include <drm/display/drm_dp_helper.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -221,7 +222,6 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -230,7 +230,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
@@ -262,7 +262,6 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -270,7 +269,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -391,10 +390,9 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || display->platform.broxton;
}
/* 128b/132b */
@@ -898,7 +896,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -959,7 +957,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -972,7 +969,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
* LTTPRs must support TPS4.
*/
- source_tps4 = intel_dp_source_supports_tps4(i915);
+ source_tps4 = intel_dp_source_supports_tps4(display);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
@@ -990,7 +987,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS3 support is mandatory for downstream devices that
* support HBR2. However, not all sinks follow the spec.
*/
- source_tps3 = intel_dp_source_supports_tps3(i915);
+ source_tps3 = intel_dp_source_supports_tps3(display);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
@@ -1040,7 +1037,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
for (tries = 0; tries < 5; tries++) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -1414,16 +1411,10 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
}
/* Time budget for the LANEx_EQ_DONE Sequence */
- deadline = jiffies + msecs_to_jiffies_timeout(400);
+ deadline = jiffies + msecs_to_jiffies_timeout(450);
for (try = 0; try < max_tries; try++) {
- usleep_range(delay_us, 2 * delay_us);
-
- /*
- * The delay may get updated. The transmitter shall read the
- * delay before link status during link training.
- */
- delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
@@ -1451,8 +1442,15 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
if (time_after(jiffies, deadline))
timeout = true; /* try one last time after deadline */
- /* Update signal levels and training set as requested. */
+ /*
+ * During LT, Tx shall read AUX_RD_INTERVAL just before writing the new FFE
+ * presets.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+
+ /* Update signal levels and training set as requested. */
if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
return false;
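With the AUX_RD_INTERVAL read moved after the link status read, one iteration of the LANEx_EQ_DONE sequence now matches the order in the comment: status first, then the (possibly updated) interval, then the new FFE presets. Schematically, with error and timeout handling elided:

	for (try = 0; try < max_tries; try++) {
		fsleep(delay_us);		/* previously read interval */
		drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status);
		if (drm_dp_128b132b_lane_channel_eq_done(link_status, lane_count))
			break;
		/* re-read the interval just before writing new FFE presets */
		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
		intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX);
	}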
@@ -1565,7 +1563,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
- return false;
+ goto out;
}
if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
@@ -1577,6 +1575,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
passed ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
+out:
+ /*
+ * Ensure that the training pattern gets set to TPS2 even in case of a
+ * failure, matching the state at the end of a passing link training,
+ * which is also what the transcoder expects. Leaving TPS1 set (and
+ * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
+ * would result in a stuck transcoder HW state and flip-done timeouts
+ * later in the modeset sequence.
+ */
+ if (!passed)
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+
return passed;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 5bba078c00d8..86d6185fda50 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,14 +53,64 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
+/*
+ * DP MST (DisplayPort Multi-Stream Transport)
+ *
+ * MST support on the source depends on the platform and port. DP initialization
+ * sets up MST for each MST capable encoder. This will become the primary
+ * encoder for the port.
+ *
+ * MST initialization of each primary encoder creates MST stream encoders, one
+ * per pipe, and initializes the MST topology manager. The MST stream encoders
+ * are sometimes called "fake encoders", because they're virtual, not
+ * physical. Thus there are (number of MST capable ports) x (number of pipes)
+ * MST stream encoders in total.
+ *
+ * The decision to use MST for a sink is made at detect on the connector attached
+ * to the primary encoder, and it will not change while the sink is connected. We
+ * always use MST when possible, including for SST sinks with sideband messaging
+ * support.
+ *
+ * The connectors for the MST streams are added and removed dynamically by the
+ * topology manager. Their connection status is also determined by the topology
+ * manager.
+ *
+ * On hardware, each transcoder may be associated with a single DDI
+ * port. Multiple transcoders may be associated with the same DDI port only if
+ * the port is in MST mode.
+ *
+ * On TGL+, all the transcoders streaming on the same DDI port will indicate a
+ * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
+ * relevant only on the primary transcoder. Prior to that, they are port
+ * registers.
+ */
+
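A sketch of the stream encoder fan-out described above (create_stream_encoder() is a stand-in name, not necessarily the driver's helper): each MST capable port creates one virtual encoder per pipe, so e.g. four such ports on a four-pipe platform yield sixteen stream encoders.

	for_each_pipe(display, pipe)
		intel_dp->mst_encoders[pipe] = create_stream_encoder(dig_port, pipe);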
+/* From fake MST stream encoder to primary encoder */
+static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->base;
+}
+
+/* From fake MST stream encoder to primary DP */
+static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->dp;
+}
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
return INT_MAX;
/*
@@ -89,7 +139,6 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
}
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
bool ssc, int dsc_slice_count, int bpp_x16)
{
const struct drm_display_mode *adjusted_mode =
@@ -118,7 +167,6 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
}
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
int overhead,
int bpp_x16,
struct intel_link_m_n *m_n)
@@ -161,35 +209,22 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- int max_bpp,
- int min_bpp,
- struct link_config_limits *limits,
- struct drm_connector_state *conn_state,
- int step,
- bool dsc)
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ fixed20_12 pbn_div;
int bpp, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp;
- int ret = 0;
-
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
- if (IS_ERR(mst_state))
- return PTR_ERR(mst_state);
-
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
@@ -198,24 +233,23 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- crtc_state->port_clock,
- crtc_state->lane_count);
+ pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
if (max_bpp > max_dpt_bpp) {
- drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
max_bpp, max_dpt_bpp);
max_bpp = max_dpt_bpp;
}
- drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
if (!dsc_slice_count) {
- drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
+ drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");
return -ENOSPC;
}
@@ -223,149 +257,173 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
- int remote_bw_overhead;
int link_bpp_x16;
- int remote_tu;
- fixed20_12 pbn;
- drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
+ drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);
link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
- local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- true, dsc_slice_count, link_bpp_x16);
-
- intel_dp_mst_compute_m_n(crtc_state, connector,
+ intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
- /*
- * The TU size programmed to the HW determines which slots in
- * an MTP frame are used for this stream, which needs to match
- * the payload size programmed to the first downstream branch
- * device's payload table.
- *
- * Note that atm the payload's PBN value DRM core sends via
- * the ALLOCATE_PAYLOAD side-band message matches the payload
- * size (which it calculates from the PBN value) it programs
- * to the first branch device's payload table. The allocation
- * in the payload table could be reduced though (to
- * crtc_state->dp_m_n.tu), provided that the driver doesn't
- * enable SSC on the corresponding link.
- */
- pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
- link_bpp_x16,
- remote_bw_overhead));
- remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
-
- /*
- * Aligning the TUs ensures that symbols consisting of multiple
- * (4) symbol cycles don't get split between two consecutive
- * MTPs, as required by Bspec.
- * TODO: remove the alignment restriction for 128b/132b links
- * on some platforms, where Bspec allows this.
- */
- remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
-
- /*
- * Also align PBNs accordingly, since MST core will derive its
- * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
- * The above comment about the difference between the PBN
- * allocated for the whole path and the TUs allocated for the
- * first branch device's link also applies here.
- */
- pbn.full = remote_tu * mst_state->pbn_div.full;
- crtc_state->pbn = dfixed_trunc(pbn);
-
- drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
- crtc_state->dp_m_n.tu = remote_tu;
+ if (intel_dp->is_mst) {
+ int remote_bw_overhead;
+ int remote_tu;
+ fixed20_12 pbn;
+
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
+ true, dsc_slice_count, link_bpp_x16);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that atm the payload's PBN value DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead));
+ remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);
+
+ /*
+ * Aligning the TUs ensures that symbols consisting of multiple
+ * (4) symbol cycles don't get split between two consecutive
+ * MTPs, as required by Bspec.
+ * TODO: remove the alignment restriction for 128b/132b links
+ * on some platforms, where Bspec allows this.
+ */
+ remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
+
+ /*
+ * Also align PBNs accordingly, since MST core will derive its
+ * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
+ * The above comment about the difference between the PBN
+ * allocated for the whole path and the TUs allocated for the
+ * first branch device's link also applies here.
+ */
+ pbn.full = remote_tu * pbn_div.full;
+
+ drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
+
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port,
+ dfixed_trunc(pbn));
+ } else {
+ /* Same as above for remote_tu */
+ crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
+ 4 / crtc_state->lane_count);
+
+ if (crtc_state->dp_m_n.tu <= 64)
+ slots = crtc_state->dp_m_n.tu;
+ else
+ slots = -EINVAL;
+ }
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port,
- crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0) {
- drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+ drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);
break;
}
- }
- /* We failed to find a proper bpp/timeslots, return error */
- if (ret)
- slots = ret;
+ /* Allow using zero step to indicate one try */
+ if (!step)
+ break;
+ }
if (slots < 0) {
- drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
slots);
- } else {
- if (!dsc)
- crtc_state->pipe_bpp = bpp;
- else
- crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
- drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
+ return slots;
}
- return slots;
+ if (!dsc)
+ crtc_state->pipe_bpp = bpp;
+ else
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
+
+ drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
+ slots, bpp, dsc);
+
+ return 0;
}
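The TU/PBN arithmetic above runs in drm_fixed.h 20.12 fixed point, and the 4 / lane_count alignment value is always a power of two (1, 2 or 4), as ALIGN() requires. A worked example with made-up numbers, 371 PBN of payload and a pbn_div of 60 PBN per time slot on a 2-lane link:

	fixed20_12 pbn, pbn_div;
	u32 remote_tu;

	pbn.full = dfixed_const(371);		/* hypothetical payload */
	pbn_div.full = dfixed_const(60);	/* hypothetical PBN per time slot */

	/* ceil(371 / 60) = 7 time slots; the 12 fractional bits cancel */
	remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);

	/* 4 / lane_count == 2 on a 2-lane link: round 7 up to 8 slots */
	remote_tu = ALIGN(remote_tu, 4 / 2);

This is also why the SST branch can reuse dp_m_n.tu directly after the same alignment: an MTP carries 64 time slots, hence the tu <= 64 check.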
-static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
- int slots = -EINVAL;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_dp_mst_topology_state *mst_state;
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
+
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
+ max_bpp, min_bpp,
+ conn_state, step, dsc);
+}
+
+static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- fxp_q4_to_int(limits->link.max_bpp_x16),
- fxp_q4_to_int(limits->link.min_bpp_x16),
- limits,
- conn_state, 2 * 3, false);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
+ fxp_q4_to_int(limits->link.max_bpp_x16),
+ fxp_q4_to_int(limits->link.min_bpp_x16),
+ limits,
+ conn_state, 2 * 3, false);
}
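The limits->link.*_bpp_x16 values above are Q4.4 fixed point (the _x16 suffix: four fractional bits), which is what lets DSC work with fractional link bpps. Assuming the usual shift-by-4 semantics of the fxp_q4 helpers:

	int bpp_x16 = fxp_q4_from_int(18);	/* 18.0 bpp -> 288 */

	bpp_x16 += 8;				/* +0.5 bpp -> 18.5 bpp (296) */
	/* fxp_q4_to_int(296) truncates to 18; fxp_q4_to_int_roundup(296) gives 19 */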
-static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int slots = -EINVAL;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- u8 dsc_max_bpc;
int min_compressed_bpp, max_compressed_bpp;
- /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
- dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
- else
- dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
-
- max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
+ max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
- drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
sink_max_bpp = dsc_bpc[0] * 3;
@@ -378,7 +436,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
sink_max_bpp = dsc_bpc[i] * 3;
}
- drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
if (min_bpp < sink_min_bpp)
@@ -389,41 +447,28 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- crtc_state,
- max_bpp / 3);
- max_compressed_bpp = min(max_compressed_bpp,
- fxp_q4_to_int(limits->link.max_bpp_x16));
+ max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
+ min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
- min_compressed_bpp = max(min_compressed_bpp,
- fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
-
- drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
/* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
crtc_state->pipe_bpp);
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
- min_compressed_bpp, limits,
- conn_state, 1, true);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
}
-static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+
+static int mst_stream_update_slots(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
@@ -431,7 +476,7 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
if (IS_ERR(topology_state)) {
- drm_dbg_kms(&i915->drm, "slot update failed\n");
+ drm_dbg_kms(display->drm, "slot update failed\n");
return PTR_ERR(topology_state);
}
@@ -474,12 +519,13 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
}
static bool
-adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
struct link_config_limits *limits,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
@@ -487,15 +533,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
if (!dsc) {
- if (intel_dp_supports_dsc(connector, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
return false;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
@@ -508,7 +554,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
}
- drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+ drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
min_bpp_x16 = fxp_q4_from_int(13);
@@ -518,7 +564,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
@@ -533,56 +579,31 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
}
static bool
-intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits)
-{
- /*
- * for MST we always configure max link bw - the spec doesn't
- * seem to suggest we should do otherwise.
- */
- limits->min_rate = limits->max_rate =
- intel_dp_max_link_rate(intel_dp);
-
- limits->min_lane_count = limits->max_lane_count =
- intel_dp_max_lane_count(intel_dp);
-
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- /*
- * FIXME: If all the streams can't fit into the link with
- * their current pipe_bpp we should reduce pipe_bpp across
- * the board until things start to fit. Until then we
- * limit to <= 8bpc since that's what was hardcoded for all
- * MST streams previously. This hack should be removed once
- * we have the proper retry logic in place.
- */
- limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
-
- intel_dp_test_compute_config(intel_dp, crtc_state, limits);
-
- if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits))
+mst_stream_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ bool dsc,
+ struct link_config_limits *limits)
+{
+ if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ limits))
return false;
- return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
+ connector,
crtc_state,
limits,
dsc);
}
-static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -609,18 +630,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- false,
- &limits);
+ !mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, false, &limits);
if (!dsc_needed) {
- ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret == -EDEADLK)
return ret;
@@ -629,35 +647,37 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
- if (!intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- true,
- &limits))
+ if (!mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, true,
+ &limits))
return -EINVAL;
/*
* FIXME: As bpc is hardcoded to 8, as mentioned above,
* WARN and ignore the debug flag force_dsc_bpc for now.
*/
- drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
+ drm_WARN(display->drm, intel_dp->force_dsc_bpc,
+ "Cannot Force BPC for MST\n");
/*
* Try to get at least some timeslots and then see, if
* we can fit there with DSC.
*/
- drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
+ drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");
- ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret < 0)
return ret;
@@ -669,14 +689,14 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
if (ret)
return ret;
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -698,13 +718,13 @@ static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
struct intel_dp *mst_port)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 transcoders = 0;
int i;
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
@@ -758,7 +778,7 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
u8 fec_pipe_mask = 0;
@@ -766,12 +786,12 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
/* Atomic connector check should've added all the MST CRTCs. */
- if (drm_WARN_ON(&i915->drm, !crtc_state))
+ if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
if (crtc_state->fec_enable)
@@ -850,13 +870,12 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
return 0;
}
-static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
/* lowest numbered transcoder will be designated master */
crtc_state->mst_master_transcoder =
@@ -879,10 +898,10 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+mst_connector_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
@@ -890,7 +909,7 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
@@ -928,8 +947,8 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
}
static int
-intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *_state)
+mst_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_connector *intel_connector =
@@ -940,7 +959,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
+ ret = mst_connector_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -957,42 +976,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
intel_connector->port);
}
-static void clear_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT);
-}
-
-static void wait_for_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
-
- if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
-
- drm_dp_check_act_status(&intel_dp->mst_mgr);
-}
-
-static void intel_mst_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (intel_dp->active_mst_links == 1)
@@ -1003,15 +998,15 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
-static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
@@ -1022,15 +1017,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
int i;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1044,13 +1037,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
- clear_act_sent(encoder, old_crtc_state);
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, old_crtc_state->cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
- wait_for_act_sent(encoder, old_crtc_state);
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
old_payload, new_payload);
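The open-coded clear_act_sent()/wait_for_act_sent() helpers are replaced by the shared intel_ddi_*() ones, which touch only the source HW status bit; the DRM core side check is now an explicit drm_dp_check_act_status() call. The resulting teardown handshake, schematically:

	intel_ddi_clear_act_sent(encoder, old_crtc_state);	/* 1: clear stale ACT status */
	intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);		/* 2: drop the VC payload */
	intel_ddi_wait_for_act_sent(encoder, old_crtc_state);	/* 3: source sent the ACT */
	drm_dp_check_act_status(&intel_dp->mst_mgr);		/* 4: branch device handled it */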
@@ -1063,7 +1057,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -1080,8 +1074,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -1089,51 +1082,49 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* From older GENs spec: "Configure Transcoder Clock Select to direct
* no clock to the transcoder"
*/
- if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !last_mst_stream)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
if (last_mst_stream)
- dig_port->base.post_disable(state, &dig_port->base,
- old_crtc_state, NULL);
+ primary_encoder->post_disable(state, primary_encoder,
+ old_crtc_state, NULL);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
}
-static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0 &&
- dig_port->base.post_pll_disable)
- dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
+ primary_encoder->post_pll_disable)
+ primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0)
- dig_port->base.pre_pll_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_pll_enable(state, primary_encoder,
+ pipe_config, NULL);
else
/*
* The port PLL state needs to get updated for secondary
* streams as for the primary stream.
*/
- intel_ddi_update_active_dpll(state, &dig_port->base,
+ intel_ddi_update_active_dpll(state, primary_encoder,
to_intel_crtc(pipe_config->uapi.crtc));
}
@@ -1164,15 +1155,15 @@ static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
}
-static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
@@ -1186,11 +1177,10 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (first_mst_stream)
@@ -1201,8 +1191,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_sink_enable_decompression(state, connector, pipe_config);
if (first_mst_stream) {
- dig_port->base.pre_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_enable(state, primary_encoder,
+ pipe_config, NULL);
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
@@ -1212,24 +1202,28 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
/*
* Before Gen 12 this is not done as part of
- * dig_port->base.pre_enable() and should be done here. For
+ * primary_encoder->pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
*/
- if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
- intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
+ if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
+ intel_ddi_config_transcoder_func(encoder, pipe_config);
+
+ intel_dsc_dp_pps_write(primary_encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
@@ -1237,7 +1231,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!IS_ALDERLAKE_P(i915))
return;
- if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
return;
/* Wa_14013163432:adlp */
@@ -1245,7 +1239,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
- if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
@@ -1258,20 +1252,18 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!clear && !set)
return;
- intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+ intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
}
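For reference, intel_de_rmw(display, reg, clear, set) is a plain read-modify-write, roughly the following, which is why the function bails out early above when both masks are empty:

	u32 val = intel_de_read(display, CHICKEN_MISC_3);

	val &= ~clear;
	val |= set;
	intel_de_write(display, CHICKEN_MISC_3, val);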
-static void intel_mst_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
@@ -1279,16 +1271,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_crtc *pipe_crtc;
int ret, i;
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+ drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
- intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
- intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
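The UHBR path programs the full pixel clock in Hz, split across two registers: the bits above bit 23 go to VFREQHIGH and the low 24 bits to VFREQLOW. For a hypothetical 594 MHz mode:

	u64 crtc_clock_hz = KHz(594000);	/* 594,000,000 Hz == 0x2367B880 */

	/* TRANS_DP2_VFREQHIGH field: crtc_clock_hz >> 24      == 0x23     */
	/* TRANS_DP2_VFREQLOW  field: crtc_clock_hz & 0xffffff == 0x67B880 */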
@@ -1296,15 +1288,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
- clear_act_sent(encoder, pipe_config);
+ intel_ddi_clear_act_sent(encoder, pipe_config);
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, trans), 0,
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
- wait_for_act_sent(encoder, pipe_config);
+ intel_ddi_wait_for_act_sent(encoder, pipe_config);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
@@ -1313,10 +1306,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(mst_state,
connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ if (DISPLAY_VER(display) >= 12)
+ intel_de_rmw(display, CHICKEN_TRANS(display, trans),
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
@@ -1334,8 +1327,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
-static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
@@ -1344,28 +1337,26 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void mst_stream_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- dig_port->base.get_config(&dig_port->base, pipe_config);
+ primary_encoder->get_config(primary_encoder, pipe_config);
}
-static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
+ return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}
-static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+static int mst_connector_get_ddc_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
@@ -1373,7 +1364,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -1386,7 +1377,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static int
-intel_dp_mst_connector_late_register(struct drm_connector *connector)
+mst_connector_late_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;
@@ -1405,7 +1396,7 @@ intel_dp_mst_connector_late_register(struct drm_connector *connector)
}
static void
-intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+mst_connector_early_unregister(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1414,35 +1405,36 @@ intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
intel_connector->port);
}
-static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+static const struct drm_connector_funcs mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_dp_mst_connector_late_register,
- .early_unregister = intel_dp_mst_connector_early_unregister,
+ .late_register = mst_connector_late_register,
+ .early_unregister = mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dp_mst_get_modes(struct drm_connector *connector)
+static int mst_connector_get_modes(struct drm_connector *connector)
{
- return intel_dp_mst_get_ddc_modes(connector);
+ return mst_connector_get_ddc_modes(connector);
}
static int
-intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx,
- enum drm_mode_status *status)
+mst_connector_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false;
@@ -1512,7 +1504,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
@@ -1530,7 +1522,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) {
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1544,8 +1536,9 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
-static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_atomic_state *state)
+static struct drm_encoder *
+mst_connector_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
@@ -1557,20 +1550,20 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
}
static int
-intel_dp_mst_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx, bool force)
+mst_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
intel_dp_flush_connector_commits(intel_connector);
@@ -1579,15 +1572,15 @@ intel_dp_mst_detect(struct drm_connector *connector,
intel_connector->port);
}
-static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
- .get_modes = intel_dp_mst_get_modes,
- .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
- .atomic_best_encoder = intel_mst_atomic_best_encoder,
- .atomic_check = intel_dp_mst_atomic_check,
- .detect_ctx = intel_dp_mst_detect,
+static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
+ .get_modes = mst_connector_get_modes,
+ .mode_valid_ctx = mst_connector_mode_valid_ctx,
+ .atomic_best_encoder = mst_connector_atomic_best_encoder,
+ .atomic_check = mst_connector_atomic_check,
+ .detect_ctx = mst_connector_detect_ctx,
};
-static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void mst_stream_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
@@ -1595,31 +1588,32 @@ static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_mst);
}
-static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
- .destroy = intel_dp_mst_encoder_destroy,
+static const struct drm_encoder_funcs mst_stream_encoder_funcs = {
+ .destroy = mst_stream_encoder_destroy,
};
-static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+static bool mst_connector_get_hw_state(struct intel_connector *connector)
{
- if (intel_attached_encoder(connector) && connector->base.state->crtc) {
- enum pipe pipe;
- if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
- return false;
- return true;
- }
- return false;
+ /* This is the MST stream encoder set in ->pre_enable, if any */
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ enum pipe pipe;
+
+ if (!encoder || !connector->base.state->crtc)
+ return false;
+
+ return encoder->get_hw_state(encoder, &pipe);
}
-static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
- struct drm_connector *connector,
- const char *pathprop)
+static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ const char *pathprop)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.path_property, 0);
+ display->drm->mode_config.path_property, 0);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tile_property, 0);
+ display->drm->mode_config.tile_property, 0);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -1653,7 +1647,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1691,21 +1685,21 @@ static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *conn
!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
connector->base.base.id, connector->base.name);
return true;
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- const char *pathprop)
+static struct drm_connector *
+mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
enum pipe pipe;
@@ -1715,7 +1709,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ connector = &intel_connector->base;
+
+ intel_connector->get_hw_state = mst_connector_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
@@ -1723,23 +1719,22 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_init_modeset_retry_work(intel_connector);
- intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
- intel_connector->dp.dsc_hblank_expansion_quirk =
- detect_dsc_hblank_expansion_quirk(intel_connector);
-
- connector = &intel_connector->base;
- ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
- drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
+
+ drm_connector_helper_add(connector, &mst_connector_helper_funcs);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
@@ -1748,13 +1743,13 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
goto err;
}
- ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
+ ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
if (ret)
goto err;
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
return connector;
@@ -1765,24 +1760,26 @@ err:
}
static void
-intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
-static const struct drm_dp_mst_topology_cbs mst_cbs = {
- .add_connector = intel_dp_add_mst_connector,
- .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
+static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
+ .add_connector = mst_topology_add_connector,
+ .poll_hpd_irq = mst_topology_poll_hpd_irq,
};
+/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
+mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_encoder *primary_encoder = &dig_port->base;
struct intel_dp_mst_encoder *intel_mst;
- struct intel_encoder *intel_encoder;
- struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_encoder *encoder;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@@ -1790,16 +1787,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
return NULL;
intel_mst->pipe = pipe;
- intel_encoder = &intel_mst->base;
+ encoder = &intel_mst->base;
intel_mst->primary = dig_port;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
- intel_encoder->type = INTEL_OUTPUT_DP_MST;
- intel_encoder->power_domain = dig_port->base.power_domain;
- intel_encoder->port = dig_port->base.port;
- intel_encoder->cloneable = 0;
+ encoder->type = INTEL_OUTPUT_DP_MST;
+ encoder->power_domain = primary_encoder->power_domain;
+ encoder->port = primary_encoder->port;
+ encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
* of possible_crtcs of all the encoders of a given connector
@@ -1808,36 +1805,37 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
* To keep such userspace functioning we must misconfigure
* this to make sure the intersection is not empty :(
*/
- intel_encoder->pipe_mask = ~0;
-
- intel_encoder->compute_config = intel_dp_mst_compute_config;
- intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
- intel_encoder->disable = intel_mst_disable_dp;
- intel_encoder->post_disable = intel_mst_post_disable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->pre_enable = intel_mst_pre_enable_dp;
- intel_encoder->enable = intel_mst_enable_dp;
- intel_encoder->audio_enable = intel_audio_codec_enable;
- intel_encoder->audio_disable = intel_audio_codec_disable;
- intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
- intel_encoder->get_config = intel_dp_mst_enc_get_config;
- intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
+ encoder->pipe_mask = ~0;
+
+ encoder->compute_config = mst_stream_compute_config;
+ encoder->compute_config_late = mst_stream_compute_config_late;
+ encoder->disable = mst_stream_disable;
+ encoder->post_disable = mst_stream_post_disable;
+ encoder->post_pll_disable = mst_stream_post_pll_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->pre_pll_enable = mst_stream_pre_pll_enable;
+ encoder->pre_enable = mst_stream_pre_enable;
+ encoder->enable = mst_stream_enable;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
+ encoder->get_hw_state = mst_stream_get_hw_state;
+ encoder->get_config = mst_stream_get_config;
+ encoder->initial_fastset_check = mst_stream_initial_fastset_check;
return intel_mst;
}
+/* Create the fake encoders for MST streams */
static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
+mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
- intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
+ for_each_pipe(display, pipe)
+ intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
return true;
}
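A note on the structure this hunk preserves: the "broken userspace" comment a few hunks up is the reason each DP port carries one fake DPMST stream encoder per pipe, each claiming every pipe via pipe_mask = ~0, and every MST connector attaches to all of them. A self-contained sketch of that scheme, with stand-in types that are not the driver's real definitions:

#define MAX_PIPES 4

/* Stand-in types, illustrative only */
struct fake_encoder {
	int pipe;
	unsigned int pipe_mask;	/* ~0: pretend every pipe is reachable */
};

struct mst_port {
	struct fake_encoder encoders[MAX_PIPES];
};

/* One fake stream encoder per pipe, each claiming all pipes, so the
 * userspace-visible intersection of possible_crtcs never goes empty. */
static void create_stream_encoders(struct mst_port *port, int num_pipes)
{
	for (int pipe = 0; pipe < num_pipes; pipe++) {
		port->encoders[pipe].pipe = pipe;
		port->encoders[pipe].pipe_mask = ~0u;
	}
}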
@@ -1850,26 +1848,27 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
- if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
return 0;
- if (DISPLAY_VER(i915) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return 0;
- if (DISPLAY_VER(i915) < 11 && port == PORT_E)
+ if (DISPLAY_VER(display) < 11 && port == PORT_E)
return 0;
- intel_dp->mst_mgr.cbs = &mst_cbs;
+ intel_dp->mst_mgr.cbs = &mst_topology_cbs;
/* create encoders */
- intel_dp_create_fake_mst_encoders(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ mst_stream_encoders_create(dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
+ &intel_dp->aux, 16,
+ INTEL_NUM_PIPES(display), conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
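Most hunks in this file follow the same mechanical conversion: a struct drm_i915_private pointer fetched with to_i915() gives way to a struct intel_display pointer from to_intel_display(), and the display-level macros (DISPLAY_VER(), HAS_DP_MST(), for_each_pipe()) take the new pointer. Note that to_intel_display() above accepts several argument types (a drm_device, an intel_dp, a dig_port, an atomic state), which suggests a C11 _Generic-style dispatch. A minimal stand-alone sketch of that shape, assuming such a dispatch; the types below are stand-ins, not the driver's real ones:

#include <stdio.h>

/* Stand-in types; the real ones live in the i915 display headers. */
struct drm_device { const char *name; };
struct intel_display { struct drm_device *drm; int ver; };
struct drm_i915_private { struct drm_device drm; struct intel_display display; };

static struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
	return &i915->display;
}

static struct intel_display *__display_identity(struct intel_display *display)
{
	return display;
}

/* One macro, many source types: C11 _Generic picks the right converter. */
#define to_display(p)						\
	_Generic((p),						\
		 struct drm_i915_private *: __i915_to_display,	\
		 struct intel_display *: __display_identity)(p)

int main(void)
{
	struct drm_i915_private i915 = { .drm.name = "card0" };

	i915.display.drm = &i915.drm;
	i915.display.ver = 14;

	/* Both spellings resolve to the same intel_display pointer. */
	printf("%s ver %d\n", to_display(&i915)->drm->name,
	       to_display(&i915.display)->ver);
	return 0;
}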
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 8343804ce3f8..c6bdc1d190a4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_connector_state;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -30,4 +31,10 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp);
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc);
+
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index e05819300d77..380b359b0420 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -8,7 +8,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 94198bc04939..589872babdd7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -3,11 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
-
#include <drm/display/drm_dp_tunnel.h>
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 0f12f2c3467c..52a36a2281e6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -22,6 +22,7 @@
*/
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -855,6 +856,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -871,7 +873,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dig_port->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+ !chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -1013,11 +1015,11 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dig_port->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, false);
dig_port->release_cl2_override = false;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 198ceda790d2..3256b1293f7f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e60497bb8a94..d86cc9ffd4ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index ce8c76e44e6a..8b1f0e92a11c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -205,7 +205,7 @@ void intel_dpt_resume(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_resume_vm(fb->dpt_vm);
+ i915_ggtt_resume_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
@@ -233,7 +233,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_suspend_vm(fb->dpt_vm);
+ i915_ggtt_suspend_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index 573f72068899..d2dede0a5229 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index bb39eb96e812..0fec01b79b23 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,7 +68,9 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ struct intel_display *display = &i915->display;
+
+ if (HAS_DOUBLE_BUFFERED_M_N(display))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 4d3785f5cb52..e6f8fc743fb4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -256,15 +256,6 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
-static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
-{
- /* only full byte-enables can be converted to indexed writes */
- return intel_dsb_prev_ins_is_write(dsb,
- DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
- DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
- reg);
-}
-
static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
return intel_dsb_prev_ins_is_write(dsb,
@@ -273,7 +264,7 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
}
/**
- * intel_dsb_reg_write_indexed() - Emit register wriite to the DSB context
+ * intel_dsb_reg_write_indexed() - Emit indexed register write to the DSB context
* @dsb: DSB context
* @reg: register address.
* @val: value.
@@ -304,44 +295,23 @@ void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
* we are writing odd no of dwords, Zeros will be added in the end for
* padding.
*/
- if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
- !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
- intel_dsb_emit(dsb, val,
- (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ if (!intel_dsb_prev_ins_is_indexed_write(dsb, reg))
+ intel_dsb_emit(dsb, 0, /* count */
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
i915_mmio_reg_offset(reg));
- } else {
- if (!assert_dsb_has_room(dsb))
- return;
-
- /* convert to indexed write? */
- if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = dsb->ins[0];
- dsb->ins[0] = 1; /* count */
- dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
-
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
- dsb->ins[1]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2,
- prev_val);
-
- dsb->free_pos++;
- }
+ if (!assert_dsb_has_room(dsb))
+ return;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
- /* Update the count */
- dsb->ins[0]++;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
+ /* Update the count */
+ dsb->ins[0]++;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
+ dsb->ins[0]);
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
- }
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
+ /* if number of data words is odd, then the last dword should be 0.*/
+ if (dsb->free_pos & 0x1)
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
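After this hunk, intel_dsb_reg_write_indexed() no longer converts a preceding plain MMIO write into an indexed one: a fresh indexed-write instruction is simply emitted with a zero count, and each appended value bumps that count and keeps the buffer dword-pair aligned. A hypothetical user-space model of the packing (the buffer size, opcode value and room check are invented; only the count/pad bookkeeping mirrors the hunk):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OP_INDEXED_WRITE 0x09u	/* made-up opcode value */
#define OP_SHIFT 24

static uint32_t buf[32];
static unsigned int free_pos, ins_start;

static void indexed_write(uint32_t reg_offset, uint32_t val)
{
	uint32_t header = (OP_INDEXED_WRITE << OP_SHIFT) | reg_offset;
	int continues = free_pos > ins_start + 1 && buf[ins_start + 1] == header;

	if (!continues) {		/* start a new instruction: count + header */
		ins_start = free_pos;
		buf[free_pos++] = 0;	/* count, patched as values arrive */
		buf[free_pos++] = header;
	}

	assert(free_pos + 1 < sizeof(buf) / sizeof(buf[0]));	/* "has room" */

	buf[ins_start]++;		/* update the count */
	buf[free_pos++] = val;
	if (free_pos & 1)		/* odd dword total: zero-pad the tail */
		buf[free_pos] = 0;
}

int main(void)
{
	indexed_write(0x70180, 0x1);	/* emits a new indexed instruction */
	indexed_write(0x70180, 0x2);	/* appends to the same instruction */
	printf("count=%u used=%u\n", buf[ins_start], free_pos);
	return 0;
}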
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e8129a720210..b2b78f39cfd3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -745,6 +745,23 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
+static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
+{
+ switch (format) {
+ case PIXEL_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case PIXEL_FORMAT_RGB666_LOOSELY_PACKED:
+ return MIPI_DSI_FMT_RGB666;
+ case PIXEL_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case PIXEL_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(format);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -762,8 +779,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
+ vbt_to_dsi_pixel_format(mipi_config->videomode_color_format);
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
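The old path shifted the raw VBT field into a hardware register's bit position (<< 7) just so it could reuse pixel_format_from_register_bits(); the new helper switches on the VBT value directly. A stand-alone sketch of that direct mapping; the enum values here are invented for illustration and do not match the VBT spec:

/* Invented values, for illustration only. */
enum vbt_pixel_format { VBT_RGB565, VBT_RGB666, VBT_RGB666_LOOSE, VBT_RGB888 };
enum dsi_pixel_format { DSI_FMT_RGB888, DSI_FMT_RGB666, DSI_FMT_RGB666_PACKED, DSI_FMT_RGB565 };

static enum dsi_pixel_format vbt_to_dsi(enum vbt_pixel_format fmt)
{
	switch (fmt) {
	case VBT_RGB888:	return DSI_FMT_RGB888;
	case VBT_RGB666_LOOSE:	return DSI_FMT_RGB666;		/* loosely packed */
	case VBT_RGB666:	return DSI_FMT_RGB666_PACKED;
	case VBT_RGB565:	return DSI_FMT_RGB565;
	default:		return DSI_FMT_RGB666;		/* fallback, as in the hunk */
	}
}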
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 2d5ffb37eac9..abf19dfd6d9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -318,6 +318,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *_connector, bool force)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -325,10 +326,10 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->base.status;
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
@@ -336,11 +337,11 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *_connector)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(&connector->base);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6a7060889f40..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 98e1a3606227..37cdfa9c692a 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -7,6 +7,7 @@
#include <drm/drm_fixed.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index cbcd1e91b7be..8a49e2bb37fa 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -75,7 +75,7 @@ intel_atomic_global_state_get(struct intel_global_state *obj_state)
return obj_state;
}
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs)
@@ -88,26 +88,26 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
obj->state = state;
obj->funcs = funcs;
- list_add_tail(&obj->head, &dev_priv->display.global.obj_list);
+ list_add_tail(&obj->head, &display->global.obj_list);
}
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+void intel_atomic_global_obj_cleanup(struct intel_display *display)
{
struct intel_global_obj *obj, *next;
- list_for_each_entry_safe(obj, next, &dev_priv->display.global.obj_list, head) {
+ list_for_each_entry_safe(obj, next, &display->global.obj_list, head) {
list_del(&obj->head);
- drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ drm_WARN_ON(display->drm, kref_read(&obj->state->ref) != 1);
intel_atomic_global_state_put(obj->state);
}
}
-static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+static void assert_global_state_write_locked(struct intel_display *display)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
drm_modeset_lock_assert_held(&crtc->base.mutex);
}
@@ -126,23 +126,23 @@ static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (modeset_lock_is_held(ctx, &crtc->base.mutex))
return;
}
- drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
+ drm_WARN(display->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -184,7 +184,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
state->num_global_objs = num_objs;
- drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ drm_dbg_atomic(display->drm, "Added new global object %p state %p to %p\n",
obj, obj_state, state);
return obj_state;
@@ -218,14 +218,14 @@ intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *old_obj_state, *new_obj_state;
struct intel_global_obj *obj;
int i;
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
- drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
+ drm_WARN_ON(display->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly
@@ -234,7 +234,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
if (!new_obj_state->changed)
continue;
- assert_global_state_write_locked(dev_priv);
+ assert_global_state_write_locked(display);
old_obj_state->state = state;
new_obj_state->state = NULL;
@@ -265,10 +265,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
struct intel_atomic_state *state = obj_state->state;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
int ret;
ret = drm_modeset_lock(&crtc->base.mutex,
@@ -298,10 +298,10 @@ int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
if (!intel_atomic_get_new_crtc_state(state, crtc))
return false;
return true;
@@ -344,7 +344,7 @@ intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_global_state *old_obj_state;
struct intel_global_obj *obj;
int i;
@@ -358,7 +358,7 @@ intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state
ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
if (ret == 0) {
- drm_err(&i915->drm, "global state timed out\n");
+ drm_err(display->drm, "global state timed out\n");
return -ETIMEDOUT;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index 6506a8e32972..d42fb2547ee9 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -9,8 +9,8 @@
#include <linux/kref.h>
#include <linux/list.h>
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_display;
struct intel_global_obj;
struct intel_global_state;
@@ -69,11 +69,11 @@ struct __intel_global_objs_state {
struct intel_global_state *state, *old_state, *new_state;
};
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs);
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+void intel_atomic_global_obj_cleanup(struct intel_display *display);
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index e3d938c7f83e..807cf606e7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -496,14 +496,13 @@ static int
gmbus_xfer_read(struct intel_display *display, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
unsigned int len;
int ret;
do {
- if (HAS_GMBUS_BURST_READ(i915))
+ if (HAS_GMBUS_BURST_READ(display))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
len = min(rx_size, gmbus_max_xfer_size(display));
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index f57e4dba2873..1bab7c34a794 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -31,27 +31,33 @@
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
-/* WA: 16022217614 */
static void
-intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
- struct intel_hdcp *hdcp)
+intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
+ struct intel_hdcp *hdcp,
+ bool enable)
{
struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t rekey_reg;
+ u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
- if (DISPLAY_VER(display) >= 14) {
- if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER))
- intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
- 0, HDCP_LINE_REKEY_DISABLE);
- else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
- IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER))
- intel_de_rmw(display,
- TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder),
- 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
+ if (DISPLAY_VER(display) >= 30) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
+ IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
+ rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
+ rekey_bit = HDCP_LINE_REKEY_DISABLE;
}
+
+ if (rekey_bit)
+ intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
}
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
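A note on the read-modify-write that ends the rekeying helper above: intel_de_rmw(display, reg, clear, set) clears the `clear` mask and then ORs in `set`, so passing (rekey_bit, enable ? 0 : rekey_bit) clears the DISABLE bit when rekeying should be on and sets it when rekeying should be off. A tiny model of that update rule (the function below is a sketch of the value computation, not the driver's helper):

#include <stdint.h>

/* Models the value computation of intel_de_rmw(display, reg, clear, set):
 * masked bits are cleared first, then the set bits are ORed in. */
static uint32_t rmw_value(uint32_t old, uint32_t clear, uint32_t set)
{
	return (old & ~clear) | set;
}

/* With clear == rekey_bit:
 *   enable rekeying  -> set = 0         -> DISABLE bit ends up 0
 *   disable rekeying -> set = rekey_bit -> DISABLE bit ends up 1
 */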
@@ -343,7 +349,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
/* PG1 (power well #1) needs to be enabled */
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- enabled = intel_display_power_well_is_enabled(i915, id);
+ enabled = intel_display_power_well_is_enabled(display, id);
/*
* Another req for hdcp key loadability is enabled state of pll for
@@ -1048,6 +1054,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
return ret;
}
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);
+
/* Incase of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(connector);
@@ -1158,9 +1166,15 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
+ ret = intel_hdcp1_enable(connector);
+ if (ret) {
+ drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
@@ -2069,7 +2083,7 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state,
connector->base.base.id, connector->base.name,
hdcp->content_type);
- intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);
ret = hdcp2_authenticate_and_encrypt(state, connector);
if (ret) {
@@ -2174,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index c6ce6bb88d7c..ed29dd0ccef0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1600,14 +1600,12 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -2556,10 +2554,10 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2586,12 +2584,11 @@ static void
intel_hdmi_force(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return;
intel_hdmi_unset_edid(connector);
@@ -3042,7 +3039,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
}
}
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -3059,17 +3056,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_encoder->base.base.id, intel_encoder->base.name);
if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
- return;
+ return false;
if (drm_WARN(dev, dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
- return;
+ return false;
ddc_pin = intel_hdmi_ddc_pin(intel_encoder);
if (!ddc_pin)
- return;
+ return false;
drm_connector_init_with_ddc(dev, connector,
&intel_hdmi_connector_funcs,
@@ -3114,6 +3111,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
&conn_info);
if (!intel_hdmi->cec_notifier)
drm_dbg_kms(display->drm, "CEC notifier get failed\n");
+
+ return true;
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 466f48df8a74..38deaeb302a2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
struct intel_hdmi;
union hdmi_infoframe;
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index a013b0e0ef54..3adc791d3776 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -813,8 +813,10 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(dev_priv))
+ !intel_display_device_enabled(display))
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index cb64c6f0ad1b..476ac88087e0 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1457,7 +1457,11 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
+ if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ !i915->display.irq.vlv_display_irqs_enabled)
+ return;
+
+ if (i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
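The rewritten guard splits the old single condition in two: on VLV/CHV the setup is skipped while the display interrupts are off (the vlv_display_irqs_enabled flag tracks whether they are currently enabled), and on everything else only the presence of a hotplug vtable matters. A compact sketch of the new flow, with stand-in field and type names:

#include <stdbool.h>

/* Stand-in state; names mirror the hunk but are not the real structs. */
struct hpd_state {
	bool is_vlv_or_chv;
	bool vlv_display_irqs_enabled;
	bool has_hotplug_funcs;
};

/* Mirrors the rewritten intel_hpd_irq_setup() guard. */
static bool should_setup_hpd(const struct hpd_state *s)
{
	if (s->is_vlv_or_chv && !s->vlv_display_irqs_enabled)
		return false;		/* VLV/CHV: wait until display IRQs are on */

	return s->has_hotplug_funcs;	/* everyone else: just need the vtable */
}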
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index 19d1f196d9fb..fb6b84f6a81d 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -3,7 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_device.h>
+
#include "intel_de.h"
#include "intel_display.h"
#include "intel_hti.h"
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index c87cd1d16d0a..29705c159119 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -5,10 +5,9 @@
#include <drm/drm_fixed.h>
-#include "i915_drv.h"
-
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index b457c69dc0be..86cc03a4413c 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -7,9 +7,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_load_detect.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f9db867fae89..d75dd17fad32 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -29,11 +29,12 @@
#include <drm/drm_edid.h>
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_lspcon.h"
#include "intel_hdmi.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 6d7637ad980a..6ffd55c17445 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -57,12 +57,7 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
- /* 100us units */
- int t1_t2;
- int t3;
- int t4;
- int t5;
- int tx;
+ struct intel_pps_delays delays;
int divider;
@@ -168,12 +163,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
- pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
- pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
+ pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
- pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
- pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
+ pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
@@ -186,25 +181,30 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (val)
val--;
/* Convert from 100ms to 100us units */
- pps->t4 = val * 1000;
+ pps->delays.power_cycle = val * 1000;
if (DISPLAY_VER(dev_priv) < 5 &&
- pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+ pps->delays.power_up == 0 &&
+ pps->delays.backlight_on == 0 &&
+ pps->delays.power_down == 0 &&
+ pps->delays.backlight_off == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
- pps->t1_t2 = 40 * 10;
- pps->t5 = 200 * 10;
+ pps->delays.power_up = 40 * 10;
+ pps->delays.backlight_on = 200 * 10;
/* Set T3 to 35ms and Tx to 200ms in 100 usec units */
- pps->t3 = 35 * 10;
- pps->tx = 200 * 10;
+ pps->delays.power_down = 35 * 10;
+ pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ pps->delays.power_up, pps->delays.power_down,
+ pps->delays.power_cycle, pps->delays.backlight_on,
+ pps->delays.backlight_off, pps->divider,
+ pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -221,16 +221,17 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
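This hunk folds the five loose LVDS timing fields into the shared struct intel_pps_delays. The field names are taken verbatim from the hunk, and everything stays in the 100 us units the deleted "/* 100us units */" comment described. A sketch of the mapping (the real struct lives in the display headers; field order here is a guess):

/* Sketch only; comments map the old field names onto the new ones. */
struct intel_pps_delays {
	int power_up;		/* was t1_t2 (T1+T2) */
	int backlight_on;	/* was t5 (T5) */
	int backlight_off;	/* was tx (Tx) */
	int power_down;		/* was t3 (T3) */
	int power_cycle;	/* was t4 (T4); kept in 100 us, programmed in 100 ms */
};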
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 2c8668b1ebae..9a2bea19f17b 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -116,6 +116,7 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
@@ -128,7 +129,7 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
continue;
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
set_encoder_for_connector(connector, NULL);
@@ -152,6 +153,7 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(i915->display.bw.obj.state);
@@ -185,7 +187,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0);
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
/*
@@ -582,6 +584,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -613,7 +616,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.name);
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
/*
@@ -770,11 +773,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
}
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
true);
} else {
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
false);
@@ -899,13 +902,13 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe,
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
intel_bw_crtc_update(bw_state, crtc_state);
}
- intel_pmdemand_init_pmdemand_params(i915, pmdemand_state);
+ intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
}
static void
@@ -1024,5 +1027,5 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_sanitize_state(i915);
+ intel_power_domains_sanitize_state(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2ec14096ba9c..ca30fff61876 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -183,7 +183,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
@@ -205,17 +205,17 @@ struct intel_overlay {
void (*flip_complete)(struct intel_overlay *ovl);
};
-static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+static void i830_overlay_clock_gating(struct intel_display *display,
bool enable)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 val;
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
+ intel_de_write(display, DSPCLK_GATE_D(display), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_write(display, DSPCLK_GATE_D(display),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -253,11 +253,11 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -271,8 +271,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, false);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, false);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
*cs++ = overlay->flip_addr | OFC_UPDATE;
@@ -288,10 +288,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
- drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
+ drm_WARN_ON(display->drm, overlay->old_vma);
if (vma)
frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj));
@@ -303,8 +305,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -318,20 +319,20 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct i915_vma *vma,
bool load_polyphase_filter)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- drm_WARN_ON(&dev_priv->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = intel_de_read(dev_priv, DOVSTA);
+ tmp = intel_de_read(display, DOVSTA);
if (tmp & (1 << 17))
- drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -355,14 +356,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
- if (drm_WARN_ON(&overlay->i915->drm, !vma))
+ if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
@@ -376,7 +378,7 @@ intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
intel_overlay_release_old_vma(overlay);
@@ -384,8 +386,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
overlay->crtc = NULL;
overlay->active = false;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, true);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, true);
}
static void intel_overlay_last_flip_retire(struct i915_active *active)
@@ -400,10 +402,11 @@ static void intel_overlay_last_flip_retire(struct i915_active *active)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
- drm_WARN_ON(&overlay->i915->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -452,7 +455,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
@@ -463,7 +466,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(display, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
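Alongside the i915-to-display conversion, this file also swaps IS_I830()/IS_I845G() macro checks for per-platform bools hanging off the display struct (display->platform.i830 and .platform.i845g above). A stand-in sketch of that lookup style; the real flags are generated from the display device info tables, not hand-written like this:

#include <stdbool.h>

/* Stand-in; the driver derives these bits from its device info tables. */
struct intel_display_platforms {
	bool i830;
	bool i845g;
};

struct intel_display {
	struct intel_display_platforms platform;
};

/* WA_OVERLAY_CLKGATE:alm only applies to i830, per the hunks above. */
static bool needs_overlay_clkgate_wa(const struct intel_display *display)
{
	return display->platform.i830;
}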
@@ -487,9 +490,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return i915_active_wait(&overlay->last_flip);
}
-void intel_overlay_reset(struct drm_i915_private *dev_priv)
+void intel_overlay_reset(struct intel_display *display)
{
- struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay *overlay = display->overlay;
if (!overlay)
return;
@@ -550,11 +553,11 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+static u32 calc_swidthsw(struct intel_display *display, u32 offset, u32 width)
{
u32 sw;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -789,16 +792,17 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct drm_intel_overlay_put_image *params)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct overlay_registers __iomem *regs = overlay->regs;
- struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -824,7 +828,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
- if (DISPLAY_VER(dev_priv) == 4)
+ if (DISPLAY_VER(display) == 4)
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -845,7 +849,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_width;
swidth = params->src_width;
- swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(display, params->offset_Y, tmp_width);
sheight = params->src_height;
iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -858,9 +862,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth |= (params->src_width / uv_hscale) << 16;
sheight |= (params->src_height / uv_vscale) << 16;
- tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+ tmp_U = calc_swidthsw(display, params->offset_U,
params->src_width / uv_hscale);
- tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+ tmp_V = calc_swidthsw(display, params->offset_V,
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
@@ -899,11 +903,11 @@ out_pin_section:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
int ret;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -936,26 +940,24 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ if (DISPLAY_VER(display) >= 4) {
+ u32 tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp);
} else {
u32 tmp;
- if (intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_VERT_AUTO_SCALE)
- tmp = intel_de_read(dev_priv,
- PFIT_AUTO_RATIOS(dev_priv));
+ if (intel_de_read(display, PFIT_CONTROL(display)) & PFIT_VERT_AUTO_SCALE)
+ tmp = intel_de_read(display, PFIT_AUTO_RATIOS(display));
else
- tmp = intel_de_read(dev_priv,
- PFIT_PGM_RATIOS(dev_priv));
+ tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp);
}
@@ -1000,7 +1002,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
return 0;
}
-static int check_overlay_src(struct drm_i915_private *dev_priv,
+static int check_overlay_src(struct intel_display *display,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -1011,7 +1013,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
u32 tmp;
/* check src dimensions */
- if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+ if (display->platform.i845g || display->platform.i830) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1063,14 +1065,14 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ if (display->platform.i830 || display->platform.i845g)
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512)
+ if (DISPLAY_VER(display) == 4 && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1114,17 +1116,17 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_put_image *params = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1148,7 +1150,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
@@ -1197,7 +1199,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev_priv, params, new_bo);
+ ret = check_overlay_src(display, params, new_bo);
if (ret != 0)
goto out_unlock;
@@ -1277,14 +1279,14 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_attrs *attrs = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1297,13 +1299,13 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (DISPLAY_VER(dev_priv) != 2) {
- attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
- attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
- attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
- attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
- attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
- attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
+ if (DISPLAY_VER(display) != 2) {
+ attrs->gamma0 = intel_de_read(display, OGAMC0);
+ attrs->gamma1 = intel_de_read(display, OGAMC1);
+ attrs->gamma2 = intel_de_read(display, OGAMC2);
+ attrs->gamma3 = intel_de_read(display, OGAMC3);
+ attrs->gamma4 = intel_de_read(display, OGAMC4);
+ attrs->gamma5 = intel_de_read(display, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1321,7 +1323,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
goto out_unlock;
if (overlay->active) {
@@ -1333,12 +1335,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
- intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
- intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
- intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
- intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
- intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
+ intel_de_write(display, OGAMC0, attrs->gamma0);
+ intel_de_write(display, OGAMC1, attrs->gamma1);
+ intel_de_write(display, OGAMC2, attrs->gamma2);
+ intel_de_write(display, OGAMC3, attrs->gamma3);
+ intel_de_write(display, OGAMC4, attrs->gamma4);
+ intel_de_write(display, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1352,12 +1354,13 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
- struct drm_i915_private *i915 = overlay->i915;
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
struct i915_vma *vma;
int err;
- if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */
+ if (!display->platform.meteorlake) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -1390,13 +1393,14 @@ err_put_bo:
return err;
}
-void intel_overlay_setup(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_overlay *overlay;
struct intel_engine_cs *engine;
int ret;
- if (!HAS_OVERLAY(dev_priv))
+ if (!HAS_OVERLAY(display))
return;
engine = to_gt(dev_priv)->engine[RCS0];
@@ -1407,7 +1411,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- overlay->i915 = dev_priv;
+ overlay->display = display;
overlay->context = engine->kernel_context;
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1418,7 +1422,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire, 0);
- ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(display));
if (ret)
goto out_free;
@@ -1426,19 +1430,24 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->display.overlay = overlay;
- drm_info(&dev_priv->drm, "Initialized overlay support.\n");
+ display->overlay = overlay;
+ drm_info(display->drm, "Initialized overlay support.\n");
return;
out_free:
kfree(overlay);
}
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+bool intel_overlay_available(struct intel_display *display)
+{
+ return display->overlay;
+}
+
+void intel_overlay_cleanup(struct intel_display *display)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->display.overlay);
+ overlay = fetch_and_zero(&display->overlay);
if (!overlay)
return;
@@ -1447,7 +1456,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1467,8 +1476,7 @@ struct intel_overlay_snapshot {
struct intel_overlay_snapshot *
intel_overlay_snapshot_capture(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay *overlay = display->overlay;
struct intel_overlay_snapshot *error;
if (!overlay || !overlay->active)
@@ -1478,8 +1486,8 @@ intel_overlay_snapshot_capture(struct intel_display *display)
if (error == NULL)
return NULL;
- error->dovsta = intel_de_read(dev_priv, DOVSTA);
- error->isr = intel_de_read(dev_priv, GEN2_ISR);
+ error->dovsta = intel_de_read(display, DOVSTA);
+ error->isr = intel_de_read(display, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
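
The hunks above follow a single conversion idiom: display-only paths now
take struct intel_display, while the remaining GEM/GT touchpoints derive
the i915 pointer back from the shared drm_device. A minimal sketch of the
idiom (function name and register value hypothetical, not from this patch):

static void overlay_conversion_example(struct intel_overlay *overlay)
{
	struct intel_display *display = overlay->display;
	/* Only needed while the function still calls into GEM. */
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct drm_i915_gem_object *obj;

	/* Display register accessors take the display pointer... */
	intel_de_write(display, OGAMC0, 0);

	/* ...while GEM allocation still wants the i915 pointer. */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (!IS_ERR(obj))
		i915_gem_object_put(obj);
}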
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index eafac24d1de8..45a42fce754e 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -17,19 +17,24 @@ struct intel_overlay;
struct intel_overlay_snapshot;
#ifdef I915
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct intel_display *display);
+bool intel_overlay_available(struct intel_display *display);
+void intel_overlay_cleanup(struct intel_display *display);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
+void intel_overlay_reset(struct intel_display *display);
#else
-static inline void intel_overlay_setup(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_setup(struct intel_display *display)
{
}
-static inline void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+static inline bool intel_overlay_available(struct intel_display *display)
+{
+ return false;
+}
+static inline void intel_overlay_cleanup(struct intel_display *display)
{
}
static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
@@ -37,7 +42,7 @@ static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
return 0;
}
static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
return 0;
}
@@ -46,7 +51,7 @@ static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
{
return 0;
}
-static inline void intel_overlay_reset(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_reset(struct intel_display *display)
{
}
#endif
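
The header also gains intel_overlay_available(), letting code outside the
display core test for overlay support instead of dereferencing
display->overlay itself (and returning false in the !I915 build). A hedged
sketch of such a caller, with the function name hypothetical:

static int example_has_overlay(struct drm_device *dev)
{
	struct intel_display *display = to_intel_display(dev);

	/* Ask via the predicate rather than peeking at display->overlay. */
	return intel_overlay_available(display);
}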
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 313bd3f35ace..4e6c5592c7ae 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -33,7 +33,6 @@
#include <drm/drm_edid.h>
-#include "i915_drv.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_core.h"
@@ -383,12 +382,12 @@ void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
return connector_status_connected;
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 4210de87a0a2..8fa5a6334d10 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -4,8 +4,10 @@
*/
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 84c55971e91a..71471c1d7dc9 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -108,13 +109,13 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
struct iclkip_params {
@@ -195,7 +196,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
@@ -218,7 +219,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
/* Wait for initialization time */
udelay(24);
@@ -236,11 +237,11 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
iclkip_params_init(&p);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
return 0;
}
@@ -254,7 +255,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
@@ -279,7 +280,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
@@ -302,7 +303,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
/* Sequence to disable CLKOUT_DP */
@@ -310,7 +311,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
@@ -328,7 +329,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -374,7 +375,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
@@ -387,7 +388,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
tmp |= sscdivintphase[idx];
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#undef BEND_IDX
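
Every mutex_lock(&dev_priv->sb_lock)/mutex_unlock() pair above becomes an
intel_sbi_lock()/intel_sbi_unlock() call. The wrapper bodies are not part
of this diff; a minimal sketch, assuming they do nothing more than wrap
the existing sideband mutex:

void intel_sbi_lock(struct drm_i915_private *i915)
{
	mutex_lock(&i915->sb_lock);
}

void intel_sbi_unlock(struct drm_i915_private *i915)
{
	mutex_unlock(&i915->sb_lock);
}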
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 50861aa78a89..4ee03d9d14ad 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -3,8 +3,8 @@
* Copyright © 2024 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 304da826dee1..90efc6f64e52 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 62401f6a04e4..6789b7f14095 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -20,10 +20,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
- struct drm_i915_private *i915 = to_i915(this->base.dev);
+ struct intel_display *display = to_intel_display(this);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
@@ -48,9 +48,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
}
static bool
-initial_plane_phys_lmem(struct drm_i915_private *i915,
+initial_plane_phys_lmem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
struct intel_memory_region *mem;
dma_addr_t dma_addr;
@@ -63,7 +64,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming missing PTE_LM bit\n");
return false;
}
@@ -75,7 +76,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
else
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -85,13 +86,13 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
* ever be placed in the stolen portion.
*/
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
return false;
}
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Using dma_addr=%pa, based on initial plane programming\n",
&dma_addr);
@@ -102,9 +103,10 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys_smem(struct drm_i915_private *i915,
+initial_plane_phys_smem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
u32 base;
@@ -112,7 +114,7 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -125,19 +127,22 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys(struct drm_i915_private *i915,
+initial_plane_phys(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(i915, plane_config);
+ return initial_plane_phys_lmem(display, plane_config);
else
- return initial_plane_phys_smem(i915, plane_config);
+ return initial_plane_phys_smem(display, plane_config);
}
static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
+initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct drm_mm_node orig_mm = {};
@@ -149,7 +154,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
- if (!initial_plane_phys(i915, plane_config))
+ if (!initial_plane_phys(display, plane_config))
return NULL;
phys_base = plane_config->phys_base;
@@ -168,7 +173,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
size * 2 > i915->dsm.usable_size) {
- drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
+ drm_dbg_kms(display->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
@@ -176,7 +181,7 @@ initial_plane_vma(struct drm_i915_private *i915,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
if (IS_ERR(obj)) {
- drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
+ drm_dbg_kms(display->drm, "Failed to preallocate initial FB in %s\n",
mem->region.name);
return NULL;
}
@@ -254,7 +259,7 @@ retry:
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
i915_ggtt_offset(vma), plane_config->base);
@@ -271,8 +276,7 @@ static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_framebuffer *fb = &plane_config->fb->base;
struct i915_vma *vma;
@@ -284,13 +288,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_4_TILED:
break;
default:
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Unsupported modifier for initial FB: 0x%llx\n",
fb->modifier);
return false;
}
- vma = initial_plane_vma(dev_priv, plane_config);
+ vma = initial_plane_vma(display, plane_config);
if (!vma)
return false;
@@ -303,7 +307,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (intel_framebuffer_init(to_intel_framebuffer(fb),
intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
@@ -410,12 +414,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -429,7 +433,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -437,7 +441,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index 64ab95239cd4..6c6aa717ed21 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
-struct drm_i915_private;
+struct intel_display;
-void intel_initial_plane_config(struct drm_i915_private *i915);
+void intel_initial_plane_config(struct intel_display *display);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index cdd314956a31..975520322136 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,16 +5,50 @@
#include <linux/bitops.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
+#include "intel_step.h"
#include "skl_watermark.h"
+struct pmdemand_params {
+ u16 qclk_gv_bw;
+ u8 voltage_index;
+ u8 qclk_gv_index;
+ u8 active_pipes;
+ u8 active_dbufs; /* pre-Xe3 only */
+ /* Total number of non type C active phys from active_phys_mask */
+ u8 active_phys;
+ u8 plls;
+ u16 cdclk_freq_mhz;
+ /* max from ddi_clocks[] */
+ u16 ddiclk_max;
+ u8 scalers; /* pre-Xe3 only */
+};
+
+struct intel_pmdemand_state {
+ struct intel_global_state base;
+
+ /* Maintain a persistent list of port clocks across all crtcs */
+ int ddi_clocks[I915_MAX_PIPES];
+
+ /* Maintain a persistent list of non type C phys mask */
+ u16 active_combo_phys_mask;
+
+ /* Parameters to be configured in the pmdemand registers */
+ struct pmdemand_params params;
+};
+
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_pmdemand_state, base);
+}
+
static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
@@ -41,10 +75,10 @@ static const struct intel_global_state_funcs intel_pmdemand_funcs = {
static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (IS_ERR(pmdemand_state))
return ERR_CAST(pmdemand_state);
@@ -55,10 +89,10 @@ intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_old_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -69,10 +103,10 @@ intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_new_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -80,7 +114,7 @@ intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
return to_intel_pmdemand_state(pmdemand_state);
}
-int intel_pmdemand_init(struct drm_i915_private *i915)
+int intel_pmdemand_init(struct intel_display *display)
{
struct intel_pmdemand_state *pmdemand_state;
@@ -88,32 +122,32 @@ int intel_pmdemand_init(struct drm_i915_private *i915)
if (!pmdemand_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
+ intel_atomic_global_obj_init(display, &display->pmdemand.obj,
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0))
+ if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
/* Wa_14016740474 */
- intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+ intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
return 0;
}
-void intel_pmdemand_init_early(struct drm_i915_private *i915)
+void intel_pmdemand_init_early(struct intel_display *display)
{
- mutex_init(&i915->display.pmdemand.lock);
- init_waitqueue_head(&i915->display.pmdemand.waitqueue);
+ mutex_init(&display->pmdemand.lock);
+ init_waitqueue_head(&display->pmdemand.waitqueue);
}
void
-intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool set_bit)
{
enum phy phy;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!encoder)
@@ -131,18 +165,18 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
}
void
-intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock)
{
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
pmdemand_state->ddi_clocks[pipe] = port_clock;
}
static void
-intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
+intel_pmdemand_update_max_ddiclk(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -152,7 +186,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_pmdemand_update_port_clock(i915, pmdemand_state,
+ intel_pmdemand_update_port_clock(display, pmdemand_state,
crtc->pipe,
new_crtc_state->port_clock);
@@ -163,7 +197,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
}
static void
-intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_connector_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct drm_connector_state *conn_state,
bool set_bit,
@@ -184,12 +218,12 @@ intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
if (!crtc_state->hw.active)
return;
- intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
+ intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
set_bit);
}
static void
-intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -204,12 +238,12 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
continue;
/* First clear the active phys in the old connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
old_conn_state, false,
pmdemand_state);
/* Then set the active phys in new connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
new_conn_state, true,
pmdemand_state);
}
@@ -220,7 +254,7 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
}
static bool
-intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
+intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
struct intel_encoder *encoder)
{
return encoder && intel_encoder_is_tc(encoder);
@@ -229,7 +263,7 @@ intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
@@ -246,8 +280,8 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
continue;
if (old_encoder == new_encoder ||
- (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
- intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
+ (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
+ intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
continue;
return true;
@@ -304,13 +338,13 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state;
const struct intel_cdclk_state *new_cdclk_state;
const struct intel_dbuf_state *new_dbuf_state;
struct intel_pmdemand_state *new_pmdemand_state;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return 0;
if (!intel_pmdemand_needs_update(state))
@@ -332,14 +366,14 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (IS_ERR(new_dbuf_state))
return PTR_ERR(new_dbuf_state);
- if (DISPLAY_VER(i915) < 30) {
+ if (DISPLAY_VER(display) < 30) {
new_pmdemand_state->params.active_dbufs =
min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
new_pmdemand_state->params.active_pipes =
min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
} else {
new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915));
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
}
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -351,9 +385,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
new_pmdemand_state->params.cdclk_freq_mhz =
DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
- intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
- intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);
/*
* Active_PLLs starts with 1 because of CDCLK PLL.
@@ -374,36 +408,36 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}
-static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
+static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
- return !(intel_de_wait_for_clear(i915,
+ return !(intel_de_wait_for_clear(display,
XELPDP_INITIATE_PMDEMAND_REQUEST(1),
XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
- intel_de_wait_for_clear(i915,
+ intel_de_wait_for_clear(display,
GEN12_DCPR_STATUS_1,
XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
void
-intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state)
{
u32 reg1, reg2;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915))) {
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display))) {
memset(&pmdemand_state->params, 0,
sizeof(pmdemand_state->params));
goto unlock;
}
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
pmdemand_state->params.qclk_gv_bw =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
@@ -419,7 +453,7 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
pmdemand_state->params.ddiclk_max =
REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
- if (DISPLAY_VER(i915) >= 30) {
+ if (DISPLAY_VER(display) >= 30) {
pmdemand_state->params.active_pipes =
REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
} else {
@@ -433,49 +467,49 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
}
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
-static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
+static bool intel_pmdemand_req_complete(struct intel_display *display)
{
- return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
+ return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct drm_i915_private *i915)
+static void intel_pmdemand_wait(struct intel_display *display)
{
- if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
- intel_pmdemand_req_complete(i915),
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
msecs_to_jiffies_timeout(10)))
- drm_err(&i915->drm,
+ drm_err(display->drm,
"timed out waiting for Punit PM Demand Response\n");
}
/* Required to be programmed during Display Init Sequences. */
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices)
{
u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
return;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
XELPDP_PMDEMAND_DBUFS_MASK,
REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
static void
@@ -535,38 +569,37 @@ intel_pmdemand_update_params(struct intel_display *display,
}
static void
-intel_pmdemand_program_params(struct drm_i915_private *i915,
+intel_pmdemand_program_params(struct intel_display *display,
const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
bool serialized)
{
- struct intel_display *display = &i915->display;
bool changed = false;
u32 reg1, mod_reg1;
u32 reg2, mod_reg2;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
mod_reg1 = reg1;
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
mod_reg2 = reg2;
intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
serialized);
if (reg1 != mod_reg1) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
mod_reg1);
changed = true;
}
if (reg2 != mod_reg2) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
mod_reg2);
changed = true;
}
@@ -575,17 +608,17 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
if (!changed)
goto unlock;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"initate pmdemand request values: (0x%x 0x%x)\n",
mod_reg1, mod_reg2);
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
static bool
@@ -597,13 +630,13 @@ intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -613,20 +646,20 @@ void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state,
+ intel_pmdemand_program_params(display, new_pmdemand_state,
old_pmdemand_state,
intel_atomic_global_state_is_serialized(state));
}
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -636,6 +669,6 @@ void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
+ intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
intel_atomic_global_state_is_serialized(state));
}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index a1c49efdc493..821ef2c4134a 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -6,58 +6,31 @@
#ifndef __INTEL_PMDEMAND_H__
#define __INTEL_PMDEMAND_H__
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
+#include <linux/types.h>
-struct drm_i915_private;
+enum pipe;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
+struct intel_global_state;
struct intel_plane_state;
+struct intel_pmdemand_state;
-struct pmdemand_params {
- u16 qclk_gv_bw;
- u8 voltage_index;
- u8 qclk_gv_index;
- u8 active_pipes;
- u8 active_dbufs; /* pre-Xe3 only */
- /* Total number of non type C active phys from active_phys_mask */
- u8 active_phys;
- u8 plls;
- u16 cdclk_freq_mhz;
- /* max from ddi_clocks[] */
- u16 ddiclk_max;
- u8 scalers; /* pre-Xe3 only */
-};
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state);
-struct intel_pmdemand_state {
- struct intel_global_state base;
-
- /* Maintain a persistent list of port clocks across all crtcs */
- int ddi_clocks[I915_MAX_PIPES];
-
- /* Maintain a persistent list of non type C phys mask */
- u16 active_combo_phys_mask;
-
- /* Parameters to be configured in the pmdemand registers */
- struct pmdemand_params params;
-};
-
-#define to_intel_pmdemand_state(global_state) \
- container_of_const((global_state), struct intel_pmdemand_state, base)
-
-void intel_pmdemand_init_early(struct drm_i915_private *i915);
-int intel_pmdemand_init(struct drm_i915_private *i915);
-void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+void intel_pmdemand_init_early(struct intel_display *display);
+int intel_pmdemand_init(struct intel_display *display);
+void intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state);
-void intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+void intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock);
-void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+void intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool clear_bit);
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices);
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state);
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state);
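
With struct intel_pmdemand_state now defined only in intel_pmdemand.c, the
header's container_of_const() macro has to give way to the exported
to_intel_pmdemand_state() shown in the .c hunk: a macro would need the full
struct definition at every use site, whereas the out-of-line helper works
against the bare forward declaration. Sketch of an unaffected caller:

#include "intel_pmdemand.h"

static struct intel_pmdemand_state *
example_get_pmdemand(struct intel_global_state *obj_state)
{
	/* Compiles with only the forward declaration in scope. */
	return to_intel_pmdemand_state(obj_state);
}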
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 093fe37a3983..eb35f0249f2b 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -134,7 +134,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
*/
if (!pll_enabled) {
release_cl_override = display->platform.cherryview &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+ !chv_phy_powergate_ch(display, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
drm_err(display->drm,
@@ -163,7 +163,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ chv_phy_powergate_ch(display, phy, ch, false);
}
}
@@ -668,23 +668,24 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] %s wait for panel power cycle\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(intel_dp));
+ s64 panel_power_off_duration, remaining;
/* take the difference of current time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
+ * and then make panel wait for power_cycle if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+ remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(intel_dp), remaining);
+
/* When we disable the VDD override bit last we have to do the manual
* wait. */
- if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+ if (remaining)
+ wait_remaining_ms_from_jiffies(jiffies, remaining);
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
@@ -1387,10 +1388,10 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
+ u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
@@ -1405,59 +1406,77 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+ seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
pp_div = intel_de_read(display, regs.pp_div);
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
} else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
}
+
+ /* hardware wants <delay>+1 in 100ms units */
+ seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
}
static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
- const struct edp_power_seq *seq)
+ const struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
drm_dbg_kms(display->drm,
- "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+ "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
+ state_name, seq->power_up, seq->backlight_on,
+ seq->backlight_off, seq->power_down, seq->power_cycle);
}
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+ struct intel_pps_delays hw;
+ struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;
intel_pps_readout_hw_state(intel_dp, &hw);
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ if (hw.power_up != sw->power_up ||
+ hw.backlight_on != sw->backlight_on ||
+ hw.backlight_off != sw->backlight_off ||
+ hw.power_down != sw->power_down ||
+ hw.power_cycle != sw->power_cycle) {
drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
}
-static bool pps_delays_valid(struct edp_power_seq *delays)
+static bool pps_delays_valid(struct intel_pps_delays *delays)
+{
+ return delays->power_up || delays->backlight_on || delays->backlight_off ||
+ delays->power_down || delays->power_cycle;
+}
+
+static int msecs_to_pps_units(int msecs)
{
- return delays->t1_t3 || delays->t8 || delays->t9 ||
- delays->t10 || delays->t11_t12;
+ /* PPS uses 100us units */
+ return msecs * 10;
+}
+
+static int pps_units_to_msecs(int val)
+{
+ /* PPS uses 100us units */
+ return DIV_ROUND_UP(val, 10);
}
static void pps_init_delays_bios(struct intel_dp *intel_dp,
- struct edp_power_seq *bios)
+ struct intel_pps_delays *bios)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1472,7 +1491,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
}
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
- struct edp_power_seq *vbt)
+ struct intel_pps_delays *vbt)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -1488,39 +1507,28 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* seems sufficient to avoid this problem.
*/
if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
- vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
+ vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
- vbt->t11_t12);
+ vbt->power_cycle);
}
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt->t11_t12 += 100 * 10;
-
intel_pps_dump_state(intel_dp, "vbt", vbt);
}
static void pps_init_delays_spec(struct intel_dp *intel_dp,
- struct edp_power_seq *spec)
+ struct intel_pps_delays *spec)
{
struct intel_display *display = to_intel_display(intel_dp);
lockdep_assert_held(&display->pps.mutex);
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec->t1_t3 = 210 * 10;
- spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec->t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec->t11_t12 = (510 + 100) * 10;
+ /* Upper limits from eDP 1.3 spec */
+ spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
+ spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
+ spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
+ spec->power_down = msecs_to_pps_units(500); /* T10 */
+ spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */
intel_pps_dump_state(intel_dp, "spec", spec);
}
@@ -1528,7 +1536,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq cur, vbt, spec,
+ struct intel_pps_delays cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1546,20 +1554,18 @@ static void pps_init_delays(struct intel_dp *intel_dp)
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
+ assign_final(power_up);
+ assign_final(backlight_on);
+ assign_final(backlight_off);
+ assign_final(power_down);
+ assign_final(power_cycle);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
- intel_dp->pps.backlight_on_delay = get_delay(t8);
- intel_dp->pps.backlight_off_delay = get_delay(t9);
- intel_dp->pps.panel_power_down_delay = get_delay(t10);
- intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
+ intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
+ intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
+ intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
+ intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
+ intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);
drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
@@ -1573,19 +1579,20 @@ static void pps_init_delays(struct intel_dp *intel_dp)
/*
* We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
+ * on them. For backlight_on, even BSpec recommends doing it. For
+ * backlight_off, if we don't do this, we'll end up waiting for the
+ * backlight off delay twice: once when we do the manual sleep, and
+ * once when we disable the panel and wait for the PP_STATUS bit to
+ * become zero.
*/
- final->t8 = 1;
- final->t9 = 1;
+ final->backlight_on = 1;
+ final->backlight_off = 1;
/*
- * HW has only a 100msec granularity for t11_t12 so round it up
+ * HW has only a 100msec granularity for power_cycle so round it up
* accordingly.
*/
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+ final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
}
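
In words, the assign_final() macro above prefers the larger of the current-hardware and VBT values for each delay, falling back to the eDP spec maximum only when both are zero. A hypothetical helper restating the rule for a single field:

static u16 pick_final_delay(u16 cur, u16 vbt, u16 spec)
{
	u16 val = max(cur, vbt);

	return val ? val : spec;	/* spec holds the eDP 1.3 upper limit */
}
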
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
@@ -1596,7 +1603,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+ const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1629,10 +1636,10 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
intel_de_write(display, regs.pp_ctrl, pp);
}
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -1665,11 +1672,14 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
*/
if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(display, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
+ (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
else
intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(seq->t11_t12, 1000)));
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
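
The new `+ 1` accounts for the hardware's zero-based 100 ms power-cycle field, which the old code handled by padding t11_t12 with a flat 100 ms up front. A worked example with an assumed 600 ms delay:

/* power_cycle  = msecs_to_pps_units(600) = 6000 (100 us units)
 * field value  = DIV_ROUND_UP(6000, 1000) + 1 = 7 (100 ms steps)
 * old encoding = DIV_ROUND_UP(6000 + 1000, 1000) = 7 -- identical
 */
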
drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
@@ -1810,6 +1820,8 @@ static int intel_pps_show(struct seq_file *m, void *data)
intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Panel power cycle delay: %d\n",
+ intel_dp->pps.panel_power_cycle_delay);
seq_printf(m, "Backlight on delay: %d\n",
intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a784c0b81556..0b021acb330f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -871,7 +871,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_100us;
check_tp3_sel:
- if (intel_dp_source_supports_tps3(dev_priv) &&
+ if (intel_dp_source_supports_tps3(display) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP_TP1_TP3;
else
@@ -1130,18 +1130,16 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
@@ -1564,13 +1562,6 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- /*
- * Current PSR panels don't work reliably with VRR enabled
- * So if VRR is enabled, do not enable PSR.
- */
- if (crtc_state->vrr.enable)
- return false;
-
if (!CAN_PSR(intel_dp))
return false;
@@ -1644,6 +1635,15 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return true;
}
+static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
+ !crtc_state->has_sel_update);
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1679,6 +1679,12 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * Currently PSR/PR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1690,6 +1696,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1773,23 +1786,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
-{
- switch (intel_dp->psr.pipe) {
- case PIPE_A:
- return LATENCY_REPORTING_REMOVED_PIPE_A;
- case PIPE_B:
- return LATENCY_REPORTING_REMOVED_PIPE_B;
- case PIPE_C:
- return LATENCY_REPORTING_REMOVED_PIPE_C;
- case PIPE_D:
- return LATENCY_REPORTING_REMOVED_PIPE_D;
- default:
- MISSING_CASE(intel_dp->psr.pipe);
- return 0;
- }
-}
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1798,23 +1794,25 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- bool set_wa_bit = false;
+ enum pipe pipe = intel_dp->psr.pipe;
+ bool activate = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(display, 11, 14))
- set_wa_bit |= crtc_state->wm_level_disabled;
+ if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
+ activate = true;
/* Wa_16013835468 */
- if (DISPLAY_VER(display) == 12)
- set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ if (DISPLAY_VER(display) == 12 &&
+ crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
+ activate = true;
- if (set_wa_bit)
+ if (activate)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- 0, wa_16013835468_bit_get(intel_dp));
+ 0, LATENCY_REPORTING_REMOVED(pipe));
else
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(pipe), 0);
}
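
The rewrite drops the per-pipe switch (removed above) in favor of the parametrized LATENCY_REPORTING_REMOVED(pipe) macro; the decision logic is unchanged and can be summarized as:

/* activate when (display ver 11-14 && wm level disabled)      Wa_14015648006
 *            or (display ver 12 && vblank_start != vdisplay)  Wa_16013835468
 * then set, otherwise clear, LATENCY_REPORTING_REMOVED(pipe)
 * in GEN8_CHICKEN_DCPR_1.
 */
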
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1908,7 +1906,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (intel_dp->psr.sel_update_enabled) {
if (DISPLAY_VER(display) == 9)
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1920,7 +1918,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
@@ -2114,7 +2112,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
*/
if (DISPLAY_VER(display) >= 11)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
@@ -3335,11 +3333,10 @@ unlock:
void intel_psr_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(display)))
return;
/*
@@ -3357,7 +3354,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
+ if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
@@ -3974,7 +3971,6 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
@@ -3984,7 +3980,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(display) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(display))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 28f497ae785b..8b30e9fd936e 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 7a28104f68ad..498b35ec4e0f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2136,6 +2136,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
@@ -2145,10 +2146,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (!intel_sdvo_set_target_output(intel_sdvo,
@@ -2196,14 +2197,14 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
int num_modes = 0;
const struct drm_edid *drm_edid;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
/* set the bus switch and get the modes */
@@ -2297,6 +2298,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
@@ -2310,7 +2312,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 4b3a32736fd6..41fe26dc200b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,7 @@
#include <linux/math.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b16c4d2d4077..13811244c82b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -390,7 +390,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u32 val;
if (DISPLAY_VER(i915) >= 14)
@@ -1013,21 +1013,52 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
return true;
}
+/*
+ * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
+ * handshake violation when the TCSS power request goes 0->1 during TC7/10 entry
+ */
+static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
+{
+ /* bail out if the mailbox is still busy with a previous command */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
+ return;
+ }
+
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
+ TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
+
+ /* wait for the mailbox run/busy bit to clear before continuing */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
+ return;
+ }
+}
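
The helper follows a classic doorbell handshake: drain, post data, ring, drain again. A sketch of the same sequence that propagates the timeout to the caller instead of only logging it (mailbox_send() is hypothetical; the registers and intel_de_* accessors are those used above):

static int mailbox_send(struct intel_display *display, u32 data)
{
	/* wait for any previous command to drain */
	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10))
		return -ETIMEDOUT;

	/* post the payload, then ring the doorbell via RUN_BUSY */
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, data);
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));

	/* firmware clears RUN_BUSY once it has consumed the command */
	return intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10);
}
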
+
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ if (DISPLAY_VER(display) == 30)
+ xelpdp_tc_power_request_wa(display, enable);
+
+ val = intel_de_read(display, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 27c530218ee6..6e311dcc1a61 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1714,7 +1714,6 @@ intel_tv_detect(struct drm_connector *connector,
bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
@@ -1722,10 +1721,10 @@ intel_tv_detect(struct drm_connector *connector,
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (force) {
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 42022756bbd5..e9b809568cd4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1014,6 +1014,14 @@ struct bdb_tv_options {
* Block 27 - eDP VBT Block
*/
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __packed;
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 40525f5c4c42..b355c479eda3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
@@ -379,7 +380,7 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsc.dsc_split ? 2 : 1;
+ return crtc_state->dsc.num_streams;
}
int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
@@ -402,8 +403,10 @@ static void intel_dsc_get_pps_reg(const struct intel_crtc_state *crtc_state, int
pipe_dsc = is_pipe_dsc(crtc, cpu_transcoder);
- if (dsc_reg_num >= 3)
+ if (dsc_reg_num >= 4)
MISSING_CASE(dsc_reg_num);
+ if (dsc_reg_num >= 3)
+ dsc_reg[2] = BMG_DSC2_PPS(pipe, pps);
if (dsc_reg_num >= 2)
dsc_reg[1] = pipe_dsc ? ICL_DSC1_PPS(pipe, pps) : DSCC_PPS(pps);
if (dsc_reg_num >= 1)
@@ -415,7 +418,7 @@ static void intel_dsc_pps_write(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
@@ -770,11 +773,17 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
intel_dsc_pps_configure(crtc_state);
- dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC0_ENABLE;
if (vdsc_instances_per_pipe > 1) {
- dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC1_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
+
+ if (vdsc_instances_per_pipe > 2) {
+ dss_ctl2_val |= VDSC2_ENABLE;
+ dss_ctl2_val |= SMALL_JOINER_CONFIG_3_ENGINES;
+ }
+
if (crtc_state->joiner_pipes) {
if (intel_crtc_ultrajoiner_enable_needed(crtc_state))
dss_ctl1_val |= ULTRA_JOINER_ENABLE;
@@ -809,7 +818,7 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
u32 val;
@@ -972,12 +981,16 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
- crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ crtc_state->dsc.compression_enable = dss_ctl2 & VDSC0_ENABLE;
if (!crtc_state->dsc.compression_enable)
goto out;
- crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
- (dss_ctl1 & JOINER_ENABLE);
+ if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & (VDSC2_ENABLE | SMALL_JOINER_CONFIG_3_ENGINES))
+ crtc_state->dsc.num_streams = 3;
+ else if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & VDSC1_ENABLE)
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
intel_dsc_get_pps_config(crtc_state);
out:
@@ -988,10 +1001,10 @@ static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state)
{
drm_printf_indent(p, indent,
- "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n",
+ "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, num_streams: %d\n",
FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
crtc_state->dsc.slice_count,
- str_yes_no(crtc_state->dsc.dsc_split));
+ crtc_state->dsc.num_streams);
}
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
@@ -1003,3 +1016,48 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
intel_vdsc_dump_state(p, indent, crtc_state);
drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
}
+
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ int min_cdclk;
+
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ /*
+ * With a single VDSC engine, since each VDSC operates at
+ * 1 pixel-per-clock throughput, the pixel clock cannot be
+ * higher than the VDSC clock (cdclk). If there are 2 VDSC
+ * engines, the pixel clock can't be higher than
+ * VDSC clock (cdclk) * 2, and so on.
+ */
+ min_cdclk = DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances);
+
+ if (crtc_state->joiner_pipes) {
+ int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
+
+ /*
+ * According to Bigjoiner bw check:
+ * compressed_bpp <= PPC * CDCLK * Big joiner Interface bits / Pixel clock
+ *
+ * We have already computed compressed_bpp, so now compute the min CDCLK that
+ * is required to support this compressed_bpp.
+ *
+ * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
+ *
+ * Since PPC = 2 with bigjoiner
+ * => CDCLK >= compressed_bpp * Pixel clock / (2 * Bigjoiner Interface bits)
+ */
+ int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 36 : 24;
+ int min_cdclk_bj =
+ (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
+
+ min_cdclk = max(min_cdclk, min_cdclk_bj);
+ }
+
+ return min_cdclk;
+}
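
Plugging illustrative numbers into the bigjoiner bound (all values assumed): compressed_bpp = 12, FEC-adjusted pixel clock = 900000 kHz, display version >= 14 so bigjoiner_interface_bits = 36:

/* min_cdclk_bj = 12 * 900000 / (2 * 36) = 150000 kHz
 *
 * The throughput bound DIV_ROUND_UP(pixel_rate, num_vdsc_instances)
 * is computed alongside; the larger of the two is returned.
 */
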
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 290b2e9b3482..9e2812f99dd7 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -31,5 +31,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index bf32a3b46fb1..2d478a84b07c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -21,8 +21,10 @@
#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define VDSC0_ENABLE REG_BIT(31)
+#define VDSC2_ENABLE REG_BIT(30)
+#define SMALL_JOINER_CONFIG_3_ENGINES REG_BIT(23)
+#define VDSC1_ENABLE REG_BIT(15)
#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
@@ -57,8 +59,10 @@
#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB 0x78970
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC 0x78A70
#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
@@ -71,8 +75,12 @@
#define _ICL_DSC1_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define _BMG_DSC2_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC)
#define ICL_DSC0_PPS(pipe, pps) _MMIO(_ICL_DSC0_PPS_0(pipe) + ((pps) * 4))
#define ICL_DSC1_PPS(pipe, pps) _MMIO(_ICL_DSC1_PPS_0(pipe) + ((pps) * 4))
+#define BMG_DSC2_PPS(pipe, pps) _MMIO(_BMG_DSC2_PPS_0(pipe) + ((pps) * 4))
/* PPS 0 */
#define DSC_PPS0_NATIVE_422_ENABLE REG_BIT(23)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 19a5d0076bb8..70088e355055 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,7 +4,6 @@
*
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -288,7 +287,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
if (IS_DISPLAY_VER(display, 12, 13))
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
if (!intel_vrr_possible(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 7dbc99b02eaa..ae21fce534dc 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -105,10 +106,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
const struct drm_format_info *format,
u64 modifier, bool need_scaler)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
@@ -130,9 +131,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
+ if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -150,9 +151,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
crtc->pipe, scaler_user, *scaler_id,
@@ -164,7 +165,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -174,17 +175,17 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
min_dst_w = SKL_MIN_DST_W;
min_dst_h = SKL_MIN_DST_H;
- if (DISPLAY_VER(dev_priv) < 11) {
+ if (DISPLAY_VER(display) < 11) {
max_src_w = SKL_MAX_SRC_W;
max_src_h = SKL_MAX_SRC_H;
max_dst_w = SKL_MAX_DST_W;
max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 12) {
+ } else if (DISPLAY_VER(display) < 12) {
max_src_w = ICL_MAX_SRC_W;
max_src_h = ICL_MAX_SRC_H;
max_dst_w = ICL_MAX_DST_W;
max_dst_h = ICL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 14) {
+ } else if (DISPLAY_VER(display) < 14) {
max_src_w = TGL_MAX_SRC_W;
max_src_h = TGL_MAX_SRC_H;
max_dst_w = TGL_MAX_DST_W;
@@ -201,7 +202,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
dst_w < min_dst_w || dst_h < min_dst_h ||
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
crtc->pipe, scaler_user, src_w, src_h,
@@ -218,7 +219,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* now.
*/
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
@@ -227,7 +228,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
@@ -268,20 +269,19 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *intel_plane =
- to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ if (!icl_is_hdr_plane(dev_priv, plane->id) &&
fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
return skl_update_scaler(crtc_state, force_detach,
- drm_plane_index(&intel_plane->base),
+ drm_plane_index(&plane->base),
&plane_state->scaler_id,
drm_rect_width(&plane_state->uapi.src) >> 16,
drm_rect_height(&plane_state->uapi.src) >> 16,
@@ -292,29 +292,37 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
need_scaler);
}
+static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
+ struct intel_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (scaler_state->scalers[i].in_use)
+ continue;
+
+ scaler_state->scalers[i].in_use = true;
+
+ return i;
+ }
+
+ return -1;
+}
+
static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
+ int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int j;
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 mode;
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (scaler_state->scalers[j].in_use)
- continue;
-
- *scaler_id = j;
- scaler_state->scalers[*scaler_id].in_use = 1;
- break;
- }
- }
+ if (*scaler_id < 0)
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc);
- if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
return -EINVAL;
@@ -324,7 +332,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
plane_state->hw.fb->format->num_planes > 1) {
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (DISPLAY_VER(dev_priv) == 9) {
+ if (DISPLAY_VER(display) == 9) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
@@ -342,17 +350,17 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (linked)
mode |= PS_BINDING_Y_PLANE(linked->id);
}
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
mode = PS_SCALER_MODE_NORMAL;
- } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ } else if (num_scalers_need == 1 && crtc->num_scalers > 1) {
/*
* when only 1 scaler is in use on a pipe with 2 scalers
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
*scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
+ scaler_state->scalers[0].in_use = true;
mode = SKL_PS_SCALER_MODE_HQ;
} else {
mode = SKL_PS_SCALER_MODE_DYN;
@@ -376,7 +384,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* unnecessarily.
*/
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/*
* On versions 14 and up, only the first
* scaler supports a vertical scaling factor
@@ -389,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
else
max_vscale = 0x10000;
- } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ } else if (DISPLAY_VER(display) >= 10 ||
!intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
max_hscale = 0x30000 - 1;
max_vscale = 0x30000 - 1;
@@ -408,7 +416,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Scaler %d doesn't support required plane scaling\n",
*scaler_id);
drm_rect_debug_print("src: ", src, true);
@@ -418,18 +426,66 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
}
}
- drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
+ crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
return 0;
}
+static int setup_crtc_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "CRTC", crtc->base.base.id,
+ NULL, &scaler_state->scaler_id);
+}
+
+static int setup_plane_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_plane_state *plane_state;
+
+ /* a plane on a different crtc cannot be a scaler user of this crtc */
+ if (drm_WARN_ON(display->drm, plane->pipe != crtc->pipe))
+ return 0;
+
+ plane_state = intel_atomic_get_new_plane_state(state, plane);
+
+ /*
+ * GLK+ scalers don't have an HQ mode, so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (!plane_state && DISPLAY_VER(display) >= 10)
+ return 0;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "PLANE", plane->base.base.id,
+ plane_state, &plane_state->scaler_id);
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
- * @intel_crtc: intel crtc
- * @crtc_state: incoming crtc_state to validate and setup scalers
+ * @state: atomic state
+ * @crtc: crtc
*
* This function sets up scalers based on staged scaling requests for
* a @crtc and its planes. It is called from crtc level check path. If request
@@ -442,16 +498,14 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* 0 - scalers were setup successfully
* error code - otherwise
*/
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_plane *plane = NULL;
- struct intel_plane *intel_plane;
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
@@ -470,80 +524,33 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
/* fail if required scalers > available scalers */
- if (num_scalers_need > intel_crtc->num_scalers) {
- drm_dbg_kms(&dev_priv->drm,
+ if (num_scalers_need > crtc->num_scalers) {
+ drm_dbg_kms(display->drm,
"Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ num_scalers_need, crtc->num_scalers);
return -EINVAL;
}
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
- struct intel_plane_state *plane_state = NULL;
- int *scaler_id;
- const char *name;
- int idx, ret;
+ int ret;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
if (i == SKL_CRTC_INDEX) {
- name = "CRTC";
- idx = intel_crtc->base.base.id;
-
- /* panel fitter case: assign as a crtc scaler */
- scaler_id = &scaler_state->scaler_id;
+ ret = setup_crtc_scaler(state, crtc);
+ if (ret)
+ return ret;
} else {
- name = "PLANE";
-
- /* plane scaler case: assign as a plane scaler */
- /* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i].ptr;
+ struct intel_plane *plane =
+ to_intel_plane(drm_plane_from_index(display->drm, i));
- /*
- * to enable/disable hq mode, add planes that are using scaler
- * into this transaction
- */
- if (!plane) {
- struct drm_plane_state *state;
-
- /*
- * GLK+ scalers don't have a HQ mode so it
- * isn't necessary to change between HQ and dyn mode
- * on those platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 10)
- continue;
-
- plane = drm_plane_from_index(&dev_priv->drm, i);
- state = drm_atomic_get_plane_state(drm_state, plane);
- if (IS_ERR(state)) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
- return PTR_ERR(state);
- }
- }
-
- intel_plane = to_intel_plane(plane);
- idx = plane->base.id;
-
- /* plane on different crtc cannot be a scaler user of this crtc */
- if (drm_WARN_ON(&dev_priv->drm,
- intel_plane->pipe != intel_crtc->pipe))
- continue;
-
- plane_state = intel_atomic_get_new_plane_state(intel_state,
- intel_plane);
- scaler_id = &plane_state->scaler_id;
+ ret = setup_plane_scaler(state, crtc, plane);
+ if (ret)
+ return ret;
}
-
- ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
- if (ret < 0)
- return ret;
}
return 0;
@@ -596,12 +603,12 @@ static u16 glk_nearest_filter_coef(int t)
*
*/
-static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+static void glk_program_nearest_filter_coefs(struct intel_display *display,
enum pipe pipe, int id, int set)
{
int i;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
@@ -614,11 +621,11 @@ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
t = glk_coef_tap(i + 1);
tmp |= glk_nearest_filter_coef(t) << 16;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
tmp);
}
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -634,14 +641,14 @@ static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
return PS_FILTER_MEDIUM;
}
-static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter)
{
switch (filter) {
case DRM_SCALING_FILTER_DEFAULT:
break;
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ glk_program_nearest_filter_coefs(display, pipe, id, set);
break;
default:
MISSING_CASE(filter);
@@ -650,8 +657,8 @@ static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
@@ -669,7 +676,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->pch_pfit.enabled)
return;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
crtc_state->scaler_state.scaler_id < 0))
return;
@@ -688,18 +695,18 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ skl_scaler_setup_filter(display, pipe, id, 0,
crtc_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}
@@ -708,6 +715,7 @@ skl_program_plane_scaler(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
@@ -751,28 +759,27 @@ skl_program_plane_scaler(struct intel_plane *plane,
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ skl_scaler_setup_filter(display, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}
/*
@@ -803,8 +810,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
@@ -813,15 +820,15 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
for (i = 0; i < crtc->num_scalers; i++) {
u32 ctl, pos, size;
- ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ ctl = intel_de_read(display, SKL_PS_CTRL(crtc->pipe, i));
if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
continue;
id = i;
crtc_state->pch_pfit.enabled = true;
- pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
- size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+ pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
drm_rect_init(&crtc_state->pch_pfit.dst,
REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 63f93ca03c89..4d2e2dbb1666 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,11 +5,7 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
-#include <linux/types.h>
-
-enum drm_scaling_filter;
-enum pipe;
-struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -20,9 +16,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 038ca2ec5d7a..80e558042d97 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
@@ -239,7 +235,9 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return BIT(PLANE_4) | BIT(PLANE_5);
else
return BIT(PLANE_6) | BIT(PLANE_7);
@@ -715,6 +713,22 @@ static u32 skl_plane_ddb_reg_val(const struct skl_ddb_entry *entry)
PLANE_BUF_START(entry->start);
}
+static u32 xe3_plane_min_ddb_reg_val(const u16 *min_ddb,
+ const u16 *interim_ddb)
+{
+ u32 val = 0;
+
+ if (*min_ddb)
+ val |= PLANE_MIN_DBUF_BLOCKS(*min_ddb);
+
+ if (*interim_ddb)
+ val |= PLANE_INTERIM_DBUF_BLOCKS(*interim_ddb);
+
+ val |= val ? PLANE_AUTO_MIN_DBUF_EN : 0;
+
+ return val;
+}
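
For example (values assumed), min_ddb = 32 blocks and interim_ddb = 8 blocks pack as:

/* PLANE_MIN_DBUF_BLOCKS(32) | PLANE_INTERIM_DBUF_BLOCKS(8) |
 * PLANE_AUTO_MIN_DBUF_EN
 *
 * When both inputs are zero the register stays 0, leaving automatic
 * minimum-dbuf allocation disabled for the plane.
 */
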
+
static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
{
u32 val = 0;
@@ -723,6 +737,9 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
+ if (level->auto_min_alloc_wm_enable)
+ val |= PLANE_WM_AUTO_MIN_ALLOC_EN;
+
val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
@@ -734,7 +751,6 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -742,16 +758,19 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ const u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id),
skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id),
@@ -763,9 +782,13 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id),
skl_plane_ddb_reg_val(ddb));
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id),
skl_plane_ddb_reg_val(ddb_y));
+
+ if (DISPLAY_VER(display) >= 30)
+ intel_de_write_dsb(display, dsb, PLANE_MIN_BUF_CFG(pipe, plane_id),
+ xe3_plane_min_ddb_reg_val(min_ddb, interim_ddb));
}
static void
@@ -2548,13 +2571,14 @@ static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915,
static u8 skl_get_plane_caps(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
+ struct intel_display *display = &i915->display;
u8 caps = INTEL_PLANE_CAP_TILING_X;
- if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(display) < 13 || display->platform.alderlake_p)
caps |= INTEL_PLANE_CAP_TILING_Y;
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
caps |= INTEL_PLANE_CAP_TILING_Yf;
- if (HAS_4TILE(i915))
+ if (HAS_4TILE(display))
caps |= INTEL_PLANE_CAP_TILING_4;
if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
@@ -2562,14 +2586,14 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
caps |= INTEL_PLANE_CAP_CCS_RC_CC;
}
if (tgl_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
- if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915))
+ if (DISPLAY_VER(display) >= 14 && display->platform.dgfx)
caps |= INTEL_PLANE_CAP_NEED64K_PHYS;
return caps;
@@ -2743,6 +2767,7 @@ void
skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2824,7 +2849,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */
- if (HAS_4TILE(dev_priv)) {
+ if (HAS_4TILE(display)) {
u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
PLANE_CTL_CLEAR_COLOR_DISABLE;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index ff31a00d511e..ca9fdfbbe57c 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -322,6 +322,7 @@
_PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
#define PLANE_WM_EN REG_BIT(31)
#define PLANE_WM_IGNORE_LINES REG_BIT(30)
+#define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
@@ -373,12 +374,26 @@
#define PLANE_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
_PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \
_PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
#define PLANE_BUF_END_MASK REG_GENMASK(27, 16)
#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end))
#define PLANE_BUF_START_MASK REG_GENMASK(11, 0)
#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start))
+#define _PLANE_MIN_BUF_CFG_1_A 0x70274
+#define _PLANE_MIN_BUF_CFG_2_A 0x70374
+#define _PLANE_MIN_BUF_CFG_1_B 0x71274
+#define _PLANE_MIN_BUF_CFG_2_B 0x71374
+#define PLANE_MIN_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \
+ _PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B)
+#define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31)
+#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16)
+#define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val))
+#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val))
+
/* tgl+ */
#define _SEL_FETCH_PLANE_CTL_1_A 0x70890
#define _SEL_FETCH_PLANE_CTL_2_A 0x708b0
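The new PLANE_MIN_BUF_CFG fields above are built with REG_GENMASK()/REG_FIELD_PREP() and decoded later with REG_FIELD_GET(). A self-contained sketch of that pack/unpack arithmetic, using local equivalents of the kernel helpers and made-up block counts:

	#include <stdint.h>
	#include <stdio.h>

	/* Local equivalents of REG_GENMASK/REG_FIELD_PREP/REG_FIELD_GET. */
	#define GENMASK_U32(h, l)   (((~0u) >> (31 - (h))) & ((~0u) << (l)))
	#define FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
	#define FIELD_GET(mask, r)  (((r) & (mask)) >> __builtin_ctz(mask))

	#define PLANE_AUTO_MIN_DBUF_EN         (1u << 31)
	#define PLANE_MIN_DBUF_BLOCKS_MASK     GENMASK_U32(27, 16)
	#define PLANE_INTERIM_DBUF_BLOCKS_MASK GENMASK_U32(11, 0)

	int main(void)
	{
		/* Pack hypothetical minimum/interim DDB block counts. */
		uint32_t val = PLANE_AUTO_MIN_DBUF_EN |
			       FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, 32) |
			       FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, 18);

		printf("min=%u interim=%u\n",
		       FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val),
		       FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val));
		return 0;
	}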
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 3b0e87edbacf..f4458d1185b3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -77,20 +77,23 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return HAS_SAGV(i915) &&
- i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 14) {
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 14) {
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_SAGV);
+ val = intel_de_read(display, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
u32 val = 0;
int ret;
@@ -98,14 +101,14 @@ intel_sagv_block_time(struct drm_i915_private *i915)
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
- drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
}
return val;
- } else if (DISPLAY_VER(i915) == 11) {
+ } else if (DISPLAY_VER(display) == 11) {
return 10;
- } else if (HAS_SAGV(i915)) {
+ } else if (HAS_SAGV(display)) {
return 30;
} else {
return 0;
@@ -114,31 +117,33 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!HAS_SAGV(i915))
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ if (!HAS_SAGV(display))
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
* Probe to see if we have working SAGV control.
* For icl+ this was already determined by intel_bw_init_hw().
*/
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
skl_sagv_disable(i915);
- drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+ drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(i915);
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+ drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
"Excessive SAGV block time %u, ignoring\n",
- i915->display.sagv.block_time_us))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us))
+ display->sagv.block_time_us = 0;
if (!intel_has_sagv(i915))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us = 0;
}
/*
@@ -444,6 +449,7 @@ bool intel_can_enable_sagv(struct drm_i915_private *i915,
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
int ret;
struct intel_crtc *crtc;
@@ -479,7 +485,7 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* other crtcs can't be allowed to use the more optimal
* normal (ie. non-SAGV) watermarks.
*/
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
DISPLAY_VER(i915) >= 12 &&
intel_crtc_can_enable_sagv(new_crtc_state);
@@ -795,30 +801,40 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
+ struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_de_read(i915, CUR_BUF_CFG(pipe));
+ val = intel_de_read(display, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
+
+ *min_ddb = REG_FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val);
+ *interim_ddb = REG_FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val);
+ }
+
+ if (DISPLAY_VER(display) >= 11)
return;
- val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -835,7 +851,9 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
skl_ddb_get_hw_plane_state(i915, pipe,
plane_id,
&ddb[plane_id],
- &ddb_y[plane_id]);
+ &ddb_y[plane_id],
+ &min_ddb[plane_id],
+ &interim_ddb[plane_id]);
intel_display_power_put(i915, power_domain, wakeref);
}
@@ -1370,13 +1388,30 @@ static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- return DISPLAY_VER(i915) >= 13 &&
+	/* Xe3+ is auto minimum DDB capable, so don't force minimal wm0 */
+ return IS_DISPLAY_VER(display, 13, 20) &&
crtc_state->uapi.async_flip &&
plane->async_flip;
}
+unsigned int
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width, int height,
+ int cpp)
+{
+	/*
+	 * Extra ddb is distributed in the ratio of plane data rate to total
+	 * data rate. In some cases a plane must not be given any extra ddb,
+	 * so do not count its data rate when that is the case.
+	 */
+ if (use_minimal_wm0_only(crtc_state, plane))
+ return 0;
+
+ return width * height * cpp;
+}
+
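skl_plane_relative_data_rate() above feeds the proportional DDB split: each plane's share of the leftover blocks follows its share of the total data rate, and a plane forced to minimal wm0 contributes zero so it receives no extra allocation. A toy model of that split; all numbers are made up:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t rate[3] = {
			1920ull * 1080 * 4, /* 32bpp primary plane */
			1920ull * 1080 * 2, /* 16bpp overlay plane */
			0,                  /* rate forced to 0: wm0-only plane */
		};
		uint64_t total = rate[0] + rate[1] + rate[2];
		unsigned int extra_blocks = 600; /* hypothetical leftover DDB */

		for (int i = 0; i < 3; i++)
			printf("plane %d extra ddb: %llu blocks\n", i,
			       total ? (unsigned long long)(extra_blocks * rate[i] / total)
				     : 0ull);
		return 0;
	}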
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
@@ -1513,6 +1548,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ struct intel_display *display = to_intel_display(state);
int num_active = hweight8(dbuf_state->active_pipes);
struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
@@ -1523,6 +1559,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_min_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_min_ddb));
+ memset(crtc_state->wm.skl.plane_interim_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_interim_ddb));
if (!crtc_state->hw.active)
return 0;
@@ -1595,6 +1635,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1611,6 +1654,11 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
crtc_state->rel_data_rate[plane_id]);
}
+
+ if (DISPLAY_VER(display) >= 30) {
+ *min_ddb = wm->wm[0].min_ddb_alloc;
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+ }
}
drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
@@ -1654,6 +1702,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1667,6 +1717,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
skl_check_wm_level(&wm->sagv.wm0, ddb);
+ if (DISPLAY_VER(display) >= 30)
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+
skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
@@ -1745,6 +1798,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int color_plane, unsigned int pan_x)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
@@ -1803,7 +1857,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (DISPLAY_VER(i915) >= 30)
+ if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
else if (DISPLAY_VER(i915) >= 10)
interm_pbpl++;
@@ -1868,6 +1922,13 @@ static int skl_wm_max_lines(struct drm_i915_private *i915)
return 31;
}
+static bool xe3_auto_min_alloc_capable(struct intel_plane *plane, int level)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ return DISPLAY_VER(display) >= 30 && level == 0 && plane->id != PLANE_CURSOR;
+}
+
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
int level,
@@ -2000,6 +2061,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
result->enable = true;
+ result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
result->can_sagv = latency >= i915->display.sagv.block_time_us;
@@ -2379,16 +2441,18 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
+ l1->blocks == l2->blocks &&
+ l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
+ struct intel_display *display = &i915->display;
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2496,6 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
@@ -2524,7 +2589,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915)) {
+ if (HAS_MBUS_JOINING(display)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
@@ -2742,10 +2807,10 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *old_pipe_wm,
const struct skl_pipe_wm *new_pipe_wm)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2756,7 +2821,7 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
@@ -2847,32 +2912,58 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
-static void
-skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
+void
+intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
- u32 max_latency = 0;
- u32 clear = 0, val = 0;
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ u32 latency = LNL_PKG_C_LATENCY_MASK;
u32 added_wake_time = 0;
+ u32 max_linetime = 0;
+ u32 clear, val;
+ bool fixed_refresh_rate = false;
+ int i;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return;
- if (enable_dpkgc) {
- max_latency = skl_watermark_max_latency(i915, 1);
- if (max_latency == 0)
- max_latency = LNL_PKG_C_LATENCY_MASK;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->vrr.enable ||
+ (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
+ fixed_refresh_rate = true;
+
+ max_linetime = max(new_crtc_state->linetime, max_linetime);
+ }
+
+ if (fixed_refresh_rate) {
added_wake_time = DSB_EXE_TIME +
- i915->display.sagv.block_time_us;
- } else {
- max_latency = LNL_PKG_C_LATENCY_MASK;
- added_wake_time = 0;
+ display->sagv.block_time_us;
+
+ latency = skl_watermark_max_latency(i915, 1);
+
+ /* Wa_22020432604 */
+ if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
+
+ /* Wa_22020299601 */
+ if ((latency && max_linetime) &&
+ (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
+ latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
+ } else if (!latency) {
+ latency = LNL_PKG_C_LATENCY_MASK;
+ }
}
- clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
- val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+ val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
- intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+ intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
}
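Two things happen in the fixed-refresh branch above: Wa_22020432604 folds the wake time into a zero latency value, and Wa_22020299601 rounds a non-zero latency up to a whole number of line times via DIV_ROUND_UP(). The rounding arithmetic in isolation, with hypothetical microsecond values:

	#include <stdio.h>

	/* Same arithmetic as the kernel's DIV_ROUND_UP(). */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int latency = 70;      /* hypothetical max wm latency, us */
		unsigned int max_linetime = 16; /* hypothetical longest line time, us */

		/* Round latency up to the next multiple of the line time. */
		latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);

		printf("%u\n", latency); /* prints 80 */
		return 0;
	}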
static int
@@ -2881,7 +2972,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
- bool enable_dpkgc = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2906,32 +2996,28 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
-
- if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
- !new_crtc_state->vrr.enable)
- enable_dpkgc = true;
}
- skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
-
skl_print_wm_changes(state);
return 0;
}
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(struct intel_display *display,
+ u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+ level->auto_min_alloc_wm_enable = DISPLAY_VER(display) >= 30 ?
+ val & PLANE_WM_AUTO_MIN_ALLOC_EN : 0;
}
static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
int level;
@@ -2940,37 +3026,37 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(display, PLANE_WM(pipe, plane_id, level));
else
- val = intel_de_read(i915, CUR_WM(pipe, level));
+ val = intel_de_read(display, CUR_WM(pipe, level));
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ skl_wm_level_from_reg_val(display, val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
+ skl_wm_level_from_reg_val(display, val, &wm->trans_wm);
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(i915) >= 12) {
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(display) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -2984,12 +3070,12 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc *crtc;
- if (HAS_MBUS_JOINING(i915))
- dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ if (HAS_MBUS_JOINING(display))
+ dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -3010,12 +3096,17 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb =
+ &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
if (!crtc_state->hw.active)
continue;
skl_ddb_get_hw_plane_state(i915, crtc->pipe,
- plane_id, ddb, ddb_y);
+ plane_id, ddb, ddb_y,
+ min_ddb, interim_ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
@@ -3037,7 +3128,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->slices[pipe] =
skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
@@ -3045,203 +3136,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
str_yes_no(dbuf_state->joined_mbus));
}
- dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-static void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
-}
-
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
-}
-
-void intel_wm_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- struct intel_plane *plane;
- u8 hw_enabled_slices;
- int level;
-
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
-
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
+ dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
@@ -3386,31 +3281,19 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
- if (HAS_HW_SAGV_WM(i915))
- i915->display.wm.num_levels = 6;
- else
- i915->display.wm.num_levels = 8;
+ struct intel_display *display = &i915->display;
- if (DISPLAY_VER(i915) >= 14)
- mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ if (HAS_HW_SAGV_WM(display))
+ display->wm.num_levels = 6;
else
- skl_read_wm_latency(i915, i915->display.wm.skl_latency);
-
- intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
-}
-
-static const struct intel_wm_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
-};
-
-void skl_wm_init(struct drm_i915_private *i915)
-{
- intel_sagv_init(i915);
+ display->wm.num_levels = 8;
- skl_setup_wm_latency(i915);
+ if (DISPLAY_VER(display) >= 14)
+ mtl_read_wm_latency(i915, display->wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, display->wm.skl_latency);
- i915->display.funcs.wm = &skl_wm_funcs;
+ intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3450,13 +3333,14 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
int intel_dbuf_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
if (!dbuf_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ intel_atomic_global_obj_init(display, &display->dbuf.obj,
&dbuf_state->base, &intel_dbuf_funcs);
return 0;
@@ -3466,38 +3350,27 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
case PIPE_A:
- return !(active_pipes & BIT(PIPE_D));
case PIPE_D:
- return !(active_pipes & BIT(PIPE_A));
+ active_pipes &= BIT(PIPE_A) | BIT(PIPE_D);
+ break;
case PIPE_B:
- return !(active_pipes & BIT(PIPE_C));
case PIPE_C:
- return !(active_pipes & BIT(PIPE_B));
+ active_pipes &= BIT(PIPE_B) | BIT(PIPE_C);
+ break;
default: /* to suppress compiler warning */
MISSING_CASE(pipe);
- break;
+ return false;
}
- return false;
+ return is_power_of_2(active_pipes);
}
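The rewrite above collapses the per-pipe special cases: after masking active_pipes down to the bank's pipe pair, "this pipe is alone in its bank" is equivalent to the masked value being a power of two (exactly one bit set). A standalone check of that equivalence:

	#include <stdbool.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))
	enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

	static bool is_power_of_2(unsigned int n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	static bool only_pipe_in_bank(unsigned int active_pipes, unsigned int bank)
	{
		return is_power_of_2(active_pipes & bank);
	}

	int main(void)
	{
		unsigned int bank_ad = BIT(PIPE_A) | BIT(PIPE_D);

		/* A active, D idle: alone in the A/D bank. */
		printf("%d\n", only_pipe_in_bank(BIT(PIPE_A) | BIT(PIPE_B), bank_ad)); /* 1 */
		/* Both A and D active: not alone. */
		printf("%d\n", only_pipe_in_bank(BIT(PIPE_A) | BIT(PIPE_D), bank_ad)); /* 0 */
		return 0;
	}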
-static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
+ const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc *crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val = 0;
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
if (DISPLAY_VER(i915) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
@@ -3508,12 +3381,12 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
if (DISPLAY_VER(i915) >= 14)
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
- MBUS_DBOX_A_CREDIT(8);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
else if (IS_ALDERLAKE_P(i915))
/* Wa_22010947358:adl-p */
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
@@ -3530,19 +3403,42 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
- u32 pipe_val = val;
+ if (DISPLAY_VERx100(i915) == 1400) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
+ val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
- if (DISPLAY_VERx100(i915) == 1400) {
- if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
- new_dbuf_state->active_pipes))
- pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
- else
- pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
- }
+ return val;
+}
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
- }
+static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ pipe_mbus_dbox_ctl(crtc, dbuf_state));
+}
+
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3562,23 +3458,24 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus)
{
+ struct intel_display *display = &i915->display;
enum dbuf_slice slice;
- if (!HAS_MBUS_JOINING(i915))
+ if (!HAS_MBUS_JOINING(display))
return;
- if (DISPLAY_VER(i915) >= 20)
- intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
if (joined_mbus)
ratio *= 2;
- drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ drm_dbg_kms(display->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
ratio, str_yes_no(joined_mbus));
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
}
@@ -3625,22 +3522,13 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
- enum pipe pipe)
+static void mbus_ctl_join_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state,
+ enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
u32 mbus_ctl;
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus),
- pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
-
- if (new_dbuf_state->joined_mbus)
+ if (dbuf_state->joined_mbus)
mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
else
mbus_ctl = MBUS_HASHING_MODE_2x2;
@@ -3655,6 +3543,23 @@ static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+}
+
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
{
const struct intel_dbuf_state *new_dbuf_state =
@@ -3757,6 +3662,245 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(i915, new_slices);
}
+static void skl_mbus_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_display *display = &i915->display;
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+
+ if (!HAS_MBUS_JOINING(display))
+ return;
+
+ if (!dbuf_state->joined_mbus ||
+ adlp_check_mbus_joined(dbuf_state->active_pipes))
+ return;
+
+ drm_dbg_kms(display->drm, "Disabling redundant MBUS joining (active pipes 0x%x)\n",
+ dbuf_state->active_pipes);
+
+ dbuf_state->joined_mbus = false;
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ dbuf_state->mdclk_cdclk_ratio,
+ dbuf_state->joined_mbus);
+ pipe_mbus_dbox_ctl_update(i915, dbuf_state);
+ mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition between the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
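skl_dbuf_is_misconfigured() above flags BIOS layouts where per-pipe DDB ranges collide. The heart of such a check is an interval-overlap test on half-open [start, end) ranges; a sketch of that test (not the driver's exact helper):

	#include <stdbool.h>
	#include <stdio.h>

	struct ddb_entry { unsigned int start, end; }; /* [start, end) */

	/* Two half-open ranges overlap iff each one starts before the
	 * other one ends. */
	static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
	{
		return a->start < b->end && b->start < a->end;
	}

	int main(void)
	{
		struct ddb_entry pipe_a = { 0, 512 };
		struct ddb_entry pipe_b = { 480, 1024 }; /* bogus: starts inside pipe A */

		printf("%s\n", ddb_overlaps(&pipe_a, &pipe_b) ? "overlap" : "ok");
		return 0;
	}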
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+
+ skl_mbus_sanitize(i915);
+ skl_dbuf_sanitize(i915);
+}
+
+void intel_wm_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ u16 min_ddb[I915_MAX_PLANES];
+ u16 interim_ddb[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+ int level;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
@@ -3830,13 +3974,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct intel_display *display = &i915->display;
+ struct drm_minor *minor = display->drm->primary;
- if (HAS_IPC(i915))
+ if (HAS_IPC(display))
debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
&skl_watermark_ipc_status_fops);
- if (HAS_SAGV(i915))
+ if (HAS_SAGV(display))
debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
&intel_sagv_status_fops);
}
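intel_wm_state_verify(), reinstated above after the relocated sanitize helpers, follows a snapshot-and-compare shape: read the hardware watermark state into a scratch struct, then walk it level by level against the software state and log each mismatch. A minimal model of that loop with stand-in types and values:

	#include <stdio.h>
	#include <string.h>

	struct wm_level { int enable; unsigned int blocks, lines; };

	static int wm_level_equals(const struct wm_level *a, const struct wm_level *b)
	{
		return a->enable == b->enable &&
		       a->blocks == b->blocks &&
		       a->lines == b->lines;
	}

	int main(void)
	{
		struct wm_level sw[8] = { { 1, 128, 4 }, { 1, 160, 5 } };
		struct wm_level hw[8];
		int num_levels = 2;

		memcpy(hw, sw, sizeof(hw));
		hw[1].blocks = 144; /* simulate a stale value read back from hw */

		for (int level = 0; level < num_levels; level++) {
			if (wm_level_equals(&hw[level], &sw[level]))
				continue;
			printf("mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
			       level,
			       sw[level].enable, sw[level].blocks, sw[level].lines,
			       hw[level].enable, hw[level].blocks, hw[level].lines);
		}
		return 0;
	}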
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index e73baec94873..8659f89427f2 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -18,6 +18,7 @@ struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
+struct intel_plane_state;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -53,6 +54,9 @@ const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
int level);
const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id);
+unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width,
+ int height, int cpp);
struct intel_dbuf_state {
struct intel_global_state base;
@@ -87,6 +91,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
+void intel_program_dpkgc_latency(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 9383eedee2d4..d49e9b3c7627 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -67,9 +67,8 @@ static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
(bpp * burst_mode_ratio));
}
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
- /* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
@@ -1760,6 +1759,31 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000KHz.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ return 320000;
+
+ /*
+	 * On Geminilake, once the CDCLK gets as low as 79200 the
+	 * picture becomes unstable, even though the DSI PLL and
+	 * DE PLL values are correct.
+ */
+ if (IS_GEMINILAKE(dev_priv))
+ return 158400;
+
+ return 0;
+}
+
typedef void (*vlv_dsi_dmi_quirk_func)(struct intel_dsi *intel_dsi);
/*
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index cf9d7b82f288..277bacfbc551 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -6,21 +6,20 @@
#ifndef __VLV_DSI_H__
#define __VLV_DSI_H__
-#include <linux/types.h>
-
enum port;
struct drm_i915_private;
+struct intel_crtc_state;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
void vlv_dsi_init(struct drm_i915_private *dev_priv);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
}
-static inline enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..388f90784d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -53,29 +53,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
}
/**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
- struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
- i915_gem_object_evictable(obj));
-#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
* __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
* minimum page size for the backing pages.
* @i915: The i915 instance.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
const void *data, size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 21274aa9bddd..c3dabb857960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
+ * 5 - Support multiple partial mmaps (mmap part of a BO + unmap at an offset,
+ *     multiple times with different sizes and offsets).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 4;
+ return 5;
}
static inline struct i915_gtt_view
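Version 5 above advertises support for partial GTT mmaps: userspace may map sub-ranges of a BO at its fault-handler offset, unmap holes, and repeat with different sizes and offsets. A hedged userspace sketch; 'fd' and 'fake_offset' stand in for real setup (an opened DRM device and the offset returned for the BO, e.g. via the MMAP_OFFSET uAPI):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int partial_map_demo(int fd, uint64_t fake_offset)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* Map two different slices of the same BO... */
		void *a = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, fake_offset);
		void *b = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, fake_offset + 2 * page);
		if (a == MAP_FAILED || b == MAP_FAILED)
			return -1;

		/* ...punch a page-sized hole in the first mapping... */
		munmap((char *)a + page, page);

		/* ...and tear the rest down piecewise. */
		munmap(b, 2 * page);
		munmap(a, page);
		munmap((char *)a + 2 * page, 2 * page);
		return 0;
	}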
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3dc61cbd2e11..bb713e096db2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -283,9 +283,7 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- /* TODO: make DPT shrinkable when it has no bound vmas */
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
- !obj->is_dpt;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 89d4dc8b60c6..eb0158e43417 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 40269e4c1e31..325da0414d94 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -126,9 +126,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 72090f52fb85..4a80ffa1b962 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -405,15 +405,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
static void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index d60a6ca0cae5..f6c59f20832f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -107,11 +107,12 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
+ * @evict_all: Evict all VMAs
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
-void i915_ggtt_suspend_vm(struct i915_address_space *vm)
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
{
struct i915_vma *vma, *vn;
int save_skip_rewrite;
@@ -157,7 +158,7 @@ retry:
goto retry;
}
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
i915_vma_wait_for_bind(vma);
__i915_vma_evict(vma, false);
@@ -172,13 +173,15 @@ retry:
vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
+
+ drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
- i915_ggtt_suspend_vm(&ggtt->vm);
+ i915_ggtt_suspend_vm(&ggtt->vm, false);
ggtt->invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
@@ -1545,6 +1548,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
+ * @all_evicted: Were all VMAs expected to be evicted on suspend?
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
@@ -1552,13 +1556,18 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
* Returns %true if restoring the mapping for any object that was in a write
* domain before suspend.
*/
-bool i915_ggtt_resume_vm(struct i915_address_space *vm)
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
{
struct i915_vma *vma;
bool write_domain_objs = false;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+ if (all_evicted) {
+ drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
+ return false;
+ }
+
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
@@ -1598,7 +1607,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
- flush = i915_ggtt_resume_vm(&ggtt->vm);
+ flush = i915_ggtt_resume_vm(&ggtt->vm, false);
if (drm_mm_node_allocated(&ggtt->error_capture))
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 6b85222ee3ea..0a36ea751b63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -608,8 +608,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
-void i915_ggtt_suspend_vm(struct i915_address_space *vm);
-bool i915_ggtt_resume_vm(struct i915_address_space *vm);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index f42f21632306..aae5a081cb53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1113,6 +1113,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
+ gt_err(gt, "Unrecoverable wedged condition\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1198,6 +1199,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
+ struct intel_display *display = &gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1243,7 +1245,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
intel_irq_resume(gt->i915);
- intel_overlay_reset(gt->i915);
+ intel_overlay_reset(display);
/* sanitize uC after engine reset */
if (!intel_uc_uses_guc_submission(&gt->uc))
@@ -1263,8 +1265,10 @@ void intel_gt_reset(struct intel_gt *gt,
}
ret = resume(gt);
- if (ret)
+ if (ret) {
+ gt_err(gt, "Failed to resume (%d)\n", ret);
goto taint;
+ }
finish:
reset_finish(gt, awake);
@@ -1607,6 +1611,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
+ gt_err(gt, "Non-recoverable wedged on init\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 59da4b7bd262..b74d9205c0f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -308,30 +308,6 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
return cs;
}
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs + num_dwords);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1b32dadfb8c3..64b322e25f36 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -16,7 +16,6 @@ struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
-int intel_ring_cacheline_align(struct i915_request *rq);
unsigned int intel_ring_update_space(struct intel_ring *ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 32f3b52a183a..458e29d89978 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -26,6 +26,7 @@
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -230,8 +231,13 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
- /* First wake the ring up to an empty/idle ring */
- for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC);
+ /*
+ * First wake the ring up to an empty/idle ring.
+ * Allow up to 50ms for the engine writes to land on all platforms;
+ * this value was determined empirically, as shorter delays proved
+ * unreliable on some platforms.
+ */
+ for ((kt) = ktime_get() + (50 * NSEC_PER_MSEC);
ktime_before(ktime_get(), (kt)); cpu_relax()) {
/*
* In case a reset fails because the engine resumes from
@@ -282,16 +288,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
return 0;
err:
- drm_err(&engine->i915->drm,
- "%s initialization failed; "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
+ gt_err(engine->gt, "%s initialization failed\n", engine->name);
+ ENGINE_TRACE(engine,
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ GEM_TRACE_DUMP();
return -EIO;
}
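The xcs_resume() hunk above polls ring registers under a ktime deadline instead of sleeping. A self-contained sketch of that bounded busy-wait idiom (poll_until(), cond and data are illustrative names, not i915 helpers):

#include <linux/ktime.h>
#include <linux/processor.h>

static bool poll_until(bool (*cond)(void *data), void *data, u64 timeout_ns)
{
	ktime_t deadline = ktime_add_ns(ktime_get(), timeout_ns);

	/* Sample the condition until the deadline, relaxing the CPU */
	while (ktime_before(ktime_get(), deadline)) {
		if (cond(data))
			return true;
		cpu_relax();
	}

	/* One final sample at/after the deadline */
	return cond(data);
}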
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 222ca7c44951..81c31396eceb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
arg[id].batch = NULL;
arg[id].count = 0;
- worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+ worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
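The selftest hunks above and below track a kthread API rename: a worker that is created and immediately started is now obtained via kthread_run_worker(). A minimal usage sketch under that assumption (demo_* names are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static void demo_work_fn(struct kthread_work *work)
{
	pr_info("demo work executed\n");
}

static int demo_worker_roundtrip(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	/* Created and woken in one call; the first argument is the flags */
	worker = kthread_run_worker(0, "demo/worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, demo_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);

	kthread_destroy_worker(worker);
	return 0;
}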
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9ce8ff1c04fe..9d3aeb237295 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].engine = other;
threads[tmp].flags = flags;
- worker = kthread_create_worker(0, "igt/%s",
+ worker = kthread_run_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index ca460cee4f8b..1bf7b88d9a9d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -262,7 +262,7 @@ static int clear(struct intel_migrate *migrate,
{
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *obj;
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
struct i915_gem_ww_ctx ww;
u32 *vaddr, val = 0;
bool ccs_cap = false;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 1aa1446c8fb0..27b6d51ef145 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -8,6 +8,7 @@
#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
+#include "intel_rps.h"
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
@@ -38,6 +39,9 @@ int live_rc6_manual(void *arg)
ktime_t dt;
u64 res[2];
int err = 0;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ struct intel_rps *rps = &gt->rps;
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -66,6 +70,7 @@ int live_rc6_manual(void *arg)
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
+ rc0_freq = intel_rps_read_actual_frequency_fw(rps);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
@@ -77,7 +82,11 @@ int live_rc6_manual(void *arg)
rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
ktime_to_ns(dt));
if (!rc0_power) {
- pr_err("No power measured while in RC0\n");
+ if (rc0_freq)
+ pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ rc0_freq);
+ else
+ pr_err("No power and freq measured while in RC0\n");
err = -EINVAL;
goto out_unlock;
}
@@ -90,7 +99,8 @@ int live_rc6_manual(void *arg)
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
rc6_power = librapl_energy_uJ();
- msleep(100);
+ msleep(1000);
+ rc6_freq = intel_rps_read_actual_frequency_fw(rps);
rc6_power = librapl_energy_uJ() - rc6_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
@@ -108,7 +118,8 @@ int live_rc6_manual(void *arg)
pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
rc0_power, rc6_power);
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6!\n");
+ pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
+ rc6_freq, rc0_freq);
err = -EINVAL;
goto out_unlock;
}
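The power checks in the hunks above average an energy delta over a sleep window: librapl_energy_uJ() is sampled before and after, and microjoules over nanoseconds scale to microwatts. A sketch of that arithmetic (demo_avg_power_uW is an illustrative helper):

#include <linux/ktime.h>
#include <linux/math64.h>

/*
 * uJ * (ns per second) / elapsed ns == uJ/s == uW. Assumes the
 * energy delta is small enough that the multiply fits in a u64.
 */
static u64 demo_avg_power_uW(u64 d_energy_uJ, ktime_t t0, ktime_t t1)
{
	u64 d_ns = ktime_to_ns(ktime_sub(t1, t0));

	return div64_u64(NSEC_PER_SEC * d_energy_uJ, d_ns);
}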
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index dcef8d498919..c207a4fb03bf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -1125,6 +1125,7 @@ static u64 measure_power(struct intel_rps *rps, int *freq)
static u64 measure_power_at(struct intel_rps *rps, int *freq)
{
*freq = rps_set_check(rps, *freq);
+ msleep(100);
return measure_power(rps, freq);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 4ecc4ae74a54..e218b229681f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
return -ENOMEM;
for_each_gt(gt, i915, i) {
- threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+ threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
if (IS_ERR(threads[i].worker)) {
ret = PTR_ERR(threads[i].worker);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 222c95f62156..e8a04e476c57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -18,7 +18,7 @@
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#elif IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
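The change above swaps defined() for IS_ENABLED(), the kernel's preferred Kconfig test: for a bool symbol such as CONFIG_DRM_I915_DEBUG_GEM the two behave identically, but IS_ENABLED() is also 1 for tristate =m (where only CONFIG_FOO_MODULE is defined), so it is the safer habit. A sketch:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)	/* matches =y, and =m for tristates */
#define DEMO_DEBUG_GEM 1
#else
#define DEMO_DEBUG_GEM 0
#endif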
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c0bd730383f2..3fce5c000144 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
+ /* The GT is parking: assume no engines are running and clear each running state */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -1725,6 +1741,10 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_lock_irq(guc_to_gt(guc)->irq_lock);
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
+ /* Flush tasklet */
+ tasklet_disable(&guc->ct.receive_tasklet);
+ tasklet_enable(&guc->ct.receive_tasklet);
+
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
flush_work(&guc->ct.requests.worker);
@@ -2042,6 +2062,8 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
+ int outstanding;
+
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2055,8 +2077,10 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
* see in CI if this happens frequently / a precursor to taking down the
* machine.
*/
- if (atomic_read(&guc->outstanding_submission_g2h))
- guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n");
+ outstanding = atomic_read(&guc->outstanding_submission_g2h);
+ if (outstanding)
+ guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
+ outstanding);
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
@@ -3425,10 +3449,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
- spin_lock(&ce->guc_state.lock);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
clr_context_destroyed(ce);
- spin_unlock(&ce->guc_state.lock);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely
* decrements the wakeref immediately; per the function's usage spec,
* it must be called after dropping the lock.
@@ -5511,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d7ac31c3254c..00d00c480cc5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -231,8 +231,8 @@ static void delayed_huc_load_init(struct intel_huc *huc)
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
- hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+ hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static void delayed_huc_load_fini(struct intel_huc *huc)
@@ -427,19 +427,6 @@ void intel_huc_fini(struct intel_huc *huc)
intel_uc_fw_fini(&huc->fw);
}
-void intel_huc_suspend(struct intel_huc *huc)
-{
- if (!intel_uc_fw_is_loadable(&huc->fw))
- return;
-
- /*
- * in the unlikely case that we're suspending before the GSC has
- * completed its loading sequence, just stop waiting. We'll restart
- * on resume.
- */
- delayed_huc_load_complete(huc);
-}
-
static const char *auth_mode_string(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index ba5cb08e9e7b..d5e441b9e08d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,7 +57,6 @@ int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
-void intel_huc_suspend(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type);
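Several hunks in this series (the HuC delayed-load timer above, plus the GVT vblank and scheduler timers below) apply the same conversion: hrtimer_init() followed by a manual .function assignment collapses into a single hrtimer_setup() call. A minimal sketch with illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_timer_start(void)
{
	/* Old style: hrtimer_init(...); demo_timer.function = demo_timer_fn; */
	hrtimer_setup(&demo_timer, demo_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&demo_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}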
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 81d67a46cd9e..6439c8e91a8d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1286,6 +1286,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1314,9 +1315,9 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
if (info->plane == PLANE_A) {
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
} else if (info->plane == PLANE_B) {
info->ctrl_reg = SPRCTL(info->pipe);
info->stride_reg = SPRSTRIDE(info->pipe);
@@ -1332,6 +1333,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1380,9 +1382,9 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
return 0;
}
@@ -1419,6 +1421,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1436,7 +1439,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
}
if (info->plane == PLANE_PRIMARY)
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++;
if (info->async_flip)
intel_vgpu_trigger_virtual_event(vgpu, info->event);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 17f74cb244bb..f668cd9487f1 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -40,6 +40,7 @@
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
@@ -68,8 +69,9 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- if (!(vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -80,12 +82,13 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) & TRANSCONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -180,6 +183,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -192,21 +196,21 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) &=
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) &=
~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, trans)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, trans)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
}
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
@@ -254,8 +258,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -263,11 +267,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
/* Enable per-DDI/PORT vreg */
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
@@ -290,7 +294,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
@@ -320,7 +324,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -351,7 +355,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -400,11 +404,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
@@ -415,10 +419,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -441,10 +445,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -467,10 +471,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -508,14 +512,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
/* Disable Primary/Sprite/Cursor plane */
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -577,8 +581,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
vgpu->display.port_num = port_num;
/* Init hrtimer based on default refresh rate */
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
+ hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->vrefresh_k = port->vrefresh_k;
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
@@ -631,6 +634,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -652,17 +656,19 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(display, pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct intel_display *display = &i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->gt->i915, pipe)
+ for_each_pipe(display, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index c454e25b2b0f..15cce973e1ae 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -154,8 +154,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(dev_priv, pipe)) & stride_mask;
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
if (GRAPHICS_VER(dev_priv) >= 9) {
@@ -210,6 +211,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, fmt;
int pipe;
@@ -217,7 +219,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPCNTR(display, pipe));
plane->enabled = !!(val & DISP_ENABLE);
if (!plane->enabled)
return -ENODEV;
@@ -251,7 +253,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
- plane->base = vgpu_vreg_t(vgpu, DSPSURF(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -267,14 +269,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
- plane->width = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
plane->width += 1;
- plane->height = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) &
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) &
_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
plane->height += 1; /* raw height is one minus the real value */
- val = vgpu_vreg_t(vgpu, DSPTILEOFF(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(display, pipe));
plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
_PRI_PLANE_X_OFF_SHIFT;
plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
@@ -340,6 +342,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
@@ -348,7 +351,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURCNTR(display, pipe));
mode = val & MCURSOR_MODE_MASK;
plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
@@ -374,7 +377,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
alpha_plane, alpha_force);
- plane->base = vgpu_vreg_t(vgpu, CURBASE(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -385,7 +388,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -EINVAL;
}
- val = vgpu_vreg_t(vgpu, CURPOS(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURPOS(display, pipe));
plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9494d812c00a..241cff0fc683 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -45,6 +45,7 @@
#include "intel_mchbar_regs.h"
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
@@ -655,11 +656,12 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
- port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &
+ port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &
TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (port != PORT_B && port != PORT_D) {
gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
@@ -675,12 +677,12 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
/* Get DP link symbol clock M/N */
- link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A));
+ link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A));
+ link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -1011,22 +1013,23 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
-#define DSPSURF_TO_PIPE(dev_priv, offset) \
- calc_index(offset, DSPSURF(dev_priv, PIPE_A), DSPSURF(dev_priv, PIPE_B), DSPSURF(dev_priv, PIPE_C))
+#define DSPSURF_TO_PIPE(display, offset) \
+ calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C))
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- u32 pipe = DSPSURF_TO_PIPE(dev_priv, offset);
+ struct intel_display *display = &dev_priv->display;
+ u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
- if (vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) & PLANE_CTL_ASYNC_FLIP)
+ if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
@@ -1059,14 +1062,15 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
if (plane == PLANE_PRIMARY) {
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
} else {
vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
}
@@ -2192,6 +2196,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
@@ -2280,21 +2285,21 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_A), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_B), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_C), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_EDP), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
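The GVT hunks above are one mechanical conversion: display register macros such as DSPSURF(), TRANSCONF() and CURCNTR() now take a struct intel_display pointer instead of drm_i915_private, so each touched function derives the pointer once up front. A sketch of the pattern (demo_read_surf is illustrative; the includes mirror what the gvt sources already use):

#include "i915_drv.h"
#include "gvt.h"

static u32 demo_read_surf(struct intel_vgpu *vgpu, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = &dev_priv->display;

	/* Previously: vgpu_vreg_t(vgpu, DSPSURF(dev_priv, pipe)) */
	return vgpu_vreg_t(vgpu, DSPSURF(display, pipe));
}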
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c077fb4674f0..9f97f743aa71 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -286,8 +286,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
return -ENOMEM;
INIT_LIST_HEAD(&data->lru_runq_head);
- hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- data->timer.function = tbs_timer_fn;
+ hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 35319228bc51..0dbc4e289300 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -527,24 +527,6 @@ int i915_active_acquire(struct i915_active *ref)
return err;
}
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
-{
- struct i915_active_fence *active;
- int err;
-
- err = i915_active_acquire(ref);
- if (err)
- return err;
-
- active = active_instance(ref, idx);
- if (!active) {
- i915_active_release(ref);
- return -ENOMEM;
- }
-
- return 0; /* return with active ref */
-}
-
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 77c676ecc263..821f7c21ea9b 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -186,7 +186,6 @@ int i915_request_await_active(struct i915_request *rq,
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 365329ff8a07..c2ae37d6b94d 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -45,6 +45,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include "display/i9xx_display_sr.h"
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
@@ -60,6 +61,7 @@
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
#include "display/intel_sprite_uapi.h"
+#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -93,17 +95,20 @@
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
-#include "i915_suspend.h"
+#include "i915_reg.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
+#include "intel_cpu_info.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
+#include "intel_sbi.h"
+#include "vlv_sideband.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -217,6 +222,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -231,8 +237,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
+ intel_sbi_init(dev_priv);
+ vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
- cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -259,7 +266,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_irq_init(dev_priv);
- intel_display_driver_early_probe(dev_priv);
+ intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -282,16 +289,19 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_irq_fini(dev_priv);
- intel_power_domains_cleanup(dev_priv);
+ intel_power_domains_cleanup(display);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
- cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
+ vlv_iosf_sb_fini(dev_priv);
+ intel_sbi_fini(dev_priv);
i915_params_free(&dev_priv->params);
}
@@ -307,6 +317,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -332,7 +343,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
- intel_display_device_info_runtime_init(dev_priv);
+ intel_display_device_info_runtime_init(display);
for_each_gt(gt, dev_priv, i) {
ret = intel_gt_init_mmio(gt);
@@ -415,6 +426,18 @@ mask_err:
return ret;
}
+/* Wa_14022698537:dg2 */
+static void i915_enable_g8(struct drm_i915_private *i915)
+{
+ if (IS_DG2(i915)) {
+ if (IS_DG2_D(i915) && !intel_match_g8_cpu())
+ return;
+
+ snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
+ POWER_SETUP_SUBCOMMAND_G8_ENABLE, 0, 0);
+ }
+}
+
static int i915_pcode_init(struct drm_i915_private *i915)
{
struct intel_gt *gt;
@@ -428,6 +451,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
}
}
+ i915_enable_g8(i915);
return 0;
}
@@ -599,6 +623,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -627,9 +652,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_hwmon_register(dev_priv);
- intel_display_driver_register(dev_priv);
+ intel_display_driver_register(display);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
intel_register_dsm_handler();
@@ -644,6 +669,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -652,9 +678,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_unregister_dsm_handler();
intel_runtime_pm_disable(&dev_priv->runtime_pm);
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
- intel_display_driver_unregister(dev_priv);
+ intel_display_driver_unregister(display);
intel_pxp_fini(dev_priv);
@@ -731,7 +757,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
return i915;
}
@@ -750,6 +776,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_i915_private *i915;
+ struct intel_display *display;
int ret;
ret = pci_enable_device(pdev);
@@ -764,6 +791,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
+ display = &i915->display;
+
ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
@@ -784,7 +813,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = intel_display_driver_probe_noirq(i915);
+ ret = intel_display_driver_probe_noirq(display);
if (ret < 0)
goto out_cleanup_hw;
@@ -792,7 +821,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset;
- ret = intel_display_driver_probe_nogem(i915);
+ ret = intel_display_driver_probe_nogem(display);
if (ret)
goto out_cleanup_irq;
@@ -804,7 +833,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
- ret = intel_display_driver_probe(i915);
+ ret = intel_display_driver_probe(display);
if (ret)
goto out_cleanup_gem;
@@ -824,14 +853,14 @@ out_cleanup_gem:
i915_gem_driver_release(i915);
out_cleanup_modeset2:
/* FIXME clean up the error path */
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
goto out_cleanup_modeset;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -851,6 +880,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -864,16 +894,16 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
i915_driver_hw_remove(i915);
@@ -883,6 +913,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -906,7 +937,7 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_late_release(dev_priv);
- intel_display_device_remove(dev_priv);
+ intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -936,25 +967,27 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
- intel_power_domains_disable(i915);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
drm_atomic_helper_shutdown(&i915->drm);
}
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
intel_hpd_cancel_work(i915);
if (HAS_DISPLAY(i915))
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&i915->display);
intel_encoder_shutdown_all(&i915->display);
@@ -974,7 +1007,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
* - unify the driver remove and system/runtime suspend sequences with
* the above unified shutdown/poweroff sequence.
*/
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_driver_last_release(&i915->runtime_pm);
@@ -1022,24 +1055,22 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
- intel_display_driver_disable_user_access(dev_priv);
+ intel_display_driver_disable_user_access(display);
}
pci_save_state(pdev);
- intel_display_driver_suspend(dev_priv);
-
- intel_dp_mst_suspend(dev_priv);
+ intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_suspend_access(dev_priv);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&dev_priv->display);
@@ -1047,7 +1078,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
- i915_save_display(dev_priv);
+ i9xx_display_sr_save(display);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(display, opregion_target_state);
@@ -1066,6 +1097,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1081,14 +1113,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_power_domains_suspend(dev_priv, s2idle);
-
- intel_display_power_suspend_late(dev_priv);
+ intel_display_power_suspend_late(display, s2idle);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
goto out;
}
@@ -1166,7 +1196,12 @@ static int i915_drm_resume(struct drm_device *dev)
intel_dmc_resume(display);
- i915_restore_display(dev_priv);
+ i9xx_display_sr_restore(display);
+
+ intel_vga_redisable(display);
+
+ intel_gmbus_reset(display);
+
intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
@@ -1188,21 +1223,19 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_display_driver_init_hw(dev_priv);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_resume_access(dev_priv);
+ intel_display_driver_resume_access(display);
intel_hpd_init(dev_priv);
- /* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(dev_priv);
- intel_display_driver_resume(dev_priv);
+ intel_display_driver_resume(display);
if (HAS_DISPLAY(dev_priv)) {
- intel_display_driver_enable_user_access(dev_priv);
+ intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
intel_hpd_poll_disable(dev_priv);
@@ -1211,7 +1244,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_gvt_resume(dev_priv);
@@ -1223,6 +1256,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1282,9 +1316,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
for_each_gt(gt, dev_priv, i)
intel_gt_resume_early(gt);
- intel_display_power_resume_early(dev_priv);
-
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1486,7 +1518,7 @@ static int intel_runtime_suspend(struct device *kdev)
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_display_power_suspend(dev_priv);
+ intel_display_power_suspend(display);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -1580,7 +1612,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
- intel_display_power_resume(dev_priv);
+ intel_display_power_resume(display);
ret = vlv_resume_prepare(dev_priv, true);
@@ -1785,7 +1817,6 @@ static const struct drm_driver i915_drm_driver = {
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 94a70d8ec5d5..4b67ad9a61cd 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20230929"
#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index f58682505491..168d7375304b 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -102,6 +102,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
for_each_memory_region(mr, i915, id)
drm_print_memory_stats(p,
&stats[id],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE,
mr->uabi_name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7b1a061d92fb..b96b8de12756 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -101,14 +101,6 @@ struct i915_dsm {
resource_size_t usable_size;
};
-struct i915_suspend_saved_registers {
- u32 saveDSPARB;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
- u32 saveSWF3[3];
- u16 saveGCDGMBUS;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -236,9 +228,17 @@ struct drm_i915_private {
spinlock_t irq_lock;
bool irqs_enabled;
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
+ /* VLV/CHV IOSF sideband */
+ struct {
+ struct mutex lock; /* protect sideband access */
+ struct pm_qos_request qos;
+ } vlv_iosf_sb;
+
/* Sideband mailbox protection */
struct mutex sb_lock;
- struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
@@ -291,7 +291,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
u32 suspend_count;
- struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
struct dram_info {
@@ -550,6 +549,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
+#define IS_DG2_D(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_D)
#define IS_RAPTORLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ALDERLAKE_P_N(i915) \
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a9662cc6ed1e..25295eb626dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -71,7 +71,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
* @ww: An optional struct i915_gem_ww_ctx.
- * @node: the &struct drm_mm_node (typically i915_vma.mode)
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
* @offset: where to insert inside the GTT,
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index a62405787e77..be8149e46281 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,9 +2,9 @@
* SPDX-License-Identifier: MIT
*/
+#include "display/intel_overlay.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
-
#include "pxp/intel_pxp.h"
#include "i915_cmd_parser.h"
@@ -16,6 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = &i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
@@ -38,7 +39,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->display.overlay;
+ value = intel_overlay_available(display);
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 71c0daef1996..819ab933bb10 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -841,7 +841,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
- err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f75cbf5b8a1c..7920ad9585ae 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -658,8 +658,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -723,8 +722,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -740,8 +738,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
@@ -794,8 +791,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index f5c97a620962..76e2801619f0 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -143,8 +143,8 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
- offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ while (offset >= r.sgt.max >> PAGE_SHIFT) {
+ offset -= r.sgt.max >> PAGE_SHIFT;
r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
if (!r.sgt.sgp)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2406cda75b7b..279e266b4b06 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3359,9 +3359,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
"opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- hrtimer_init(&stream->poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
mutex_init(&stream->lock);
@@ -4802,7 +4801,7 @@ err_unlock:
return ret;
}
-static struct ctl_table oa_table[] = {
+static const struct ctl_table oa_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &i915_perf_stream_paranoid,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 93fbf53578da..0ce87f188d11 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -302,7 +302,7 @@ void i915_pmu_gt_parked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -324,7 +324,7 @@ void i915_pmu_gt_unparked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -626,7 +626,7 @@ static int i915_pmu_event_init(struct perf_event *event)
struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (event->attr.type != event->pmu->type)
@@ -724,7 +724,7 @@ static void i915_pmu_event_read(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 prev, new;
- if (pmu->closed) {
+ if (!pmu->registered) {
event->hw.state = PERF_HES_STOPPED;
return;
}
@@ -850,7 +850,7 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return;
i915_pmu_enable(event);
@@ -861,7 +861,7 @@ static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
goto out;
if (flags & PERF_EF_UPDATE)
@@ -877,7 +877,7 @@ static int i915_pmu_event_add(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (flags & PERF_EF_START)
@@ -1177,8 +1177,6 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- GEM_BUG_ON(!pmu->base.event_init);
-
/* Select the first online CPU as a designated reader. */
if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
@@ -1191,13 +1189,11 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target = i915_pmu_target_cpu;
- GEM_BUG_ON(!pmu->base.event_init);
-
/*
* Unregistering an instance generates a CPU offline event which we must
* ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
*/
- if (pmu->closed)
+ if (!pmu->registered)
return 0;
if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
@@ -1218,7 +1214,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
int i915_pmu_init(void)
{
@@ -1232,28 +1228,28 @@ int i915_pmu_init(void)
pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
ret);
else
- cpuhp_slot = ret;
+ cpuhp_state = ret;
return 0;
}
void i915_pmu_exit(void)
{
- if (cpuhp_slot != CPUHP_INVALID)
- cpuhp_remove_multi_state(cpuhp_slot);
+ if (cpuhp_state != CPUHP_INVALID)
+ cpuhp_remove_multi_state(cpuhp_state);
}
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
- if (cpuhp_slot == CPUHP_INVALID)
+ if (cpuhp_state == CPUHP_INVALID)
return -EINVAL;
- return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
+ return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
+ cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
}
void i915_pmu_register(struct drm_i915_private *i915)
@@ -1265,12 +1261,10 @@ void i915_pmu_register(struct drm_i915_private *i915)
&i915_pmu_cpumask_attr_group,
NULL
};
-
int ret = -ENOMEM;
spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
+ hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->cpuhp.cpu = -1;
init_rc6(pmu);
@@ -1316,6 +1310,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (ret)
goto err_unreg;
+ pmu->registered = true;
+
return;
err_unreg:
@@ -1323,7 +1319,6 @@ err_unreg:
err_groups:
kfree(pmu->base.attr_groups);
err_attr:
- pmu->base.event_init = NULL;
free_event_attributes(pmu);
err_name:
if (IS_DGFX(i915))
@@ -1336,23 +1331,17 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
- /*
- * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
- * ensures all currently executing ones will have exited before we
- * proceed with unregistration.
- */
- pmu->closed = true;
- synchronize_rcu();
+ /* Disconnect the PMU callbacks */
+ pmu->registered = false;
hrtimer_cancel(&pmu->timer);
i915_pmu_unregister_cpuhp_state(pmu);
perf_pmu_unregister(&pmu->base);
- pmu->base.event_init = NULL;
kfree(pmu->base.attr_groups);
if (IS_DGFX(i915))
kfree(pmu->name);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 41af038c3738..8e66d63d0c9f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -68,9 +68,9 @@ struct i915_pmu {
*/
struct pmu base;
/**
- * @closed: i915 is unregistering.
+ * @registered: PMU is registered and not in the unregistering process.
*/
- bool closed;
+ bool registered;
/**
* @name: Name as registered with perf core.
*/
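
The closed -> registered flip above inverts the lifetime guard used by every PMU callback: rather than setting a flag on teardown, the flag is set only once registration fully succeeds and is cleared first thing on unregister, so a plain boolean replaces the old flag-plus-synchronize_rcu() dance. A minimal sketch of the guard pattern, with illustrative names rather than the driver's actual structures:

    struct demo_pmu {
            struct pmu base;
            bool registered;        /* set after perf_pmu_register() succeeds */
    };

    static int demo_event_init(struct perf_event *event)
    {
            struct demo_pmu *pmu = container_of(event->pmu, struct demo_pmu, base);

            if (!pmu->registered)   /* never registered, or tearing down */
                    return -ENODEV;

            return 0;
    }
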
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 22be4a731d27..786c727aea45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,8 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _VGA_MSR_WRITE _MMIO(0x3c2)
-
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
@@ -1069,11 +1067,6 @@
#define CLKGATE_DIS_PSL_EXT(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-/* DDI Buffer Control */
-#define _DDI_CLK_VALFREQ_A 0x64030
-#define _DDI_CLK_VALFREQ_B 0x64130
-#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
-
/*
* Display engine regs
*/
@@ -1147,53 +1140,6 @@
#define _TRANS_MULT_B 0x6102c
#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-/* VGA port control */
-#define ADPA _MMIO(0x61100)
-#define PCH_ADPA _MMIO(0xe1100)
-#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1 << 31)
-#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SEL_SHIFT 30
-#define ADPA_PIPE_SEL_MASK (1 << 30)
-#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
-#define ADPA_PIPE_SEL_SHIFT_CPT 29
-#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
-#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
-#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
-#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
-#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
-#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
-#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
-#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3 << 10))
-#define ADPA_DPMS_ON (0 << 10)
-#define ADPA_DPMS_SUSPEND (1 << 10)
-#define ADPA_DPMS_STANDBY (2 << 10)
-#define ADPA_DPMS_OFF (3 << 10)
-
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
@@ -1786,180 +1732,6 @@
#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
-#define DSPARB_CSTART_MASK (0x7f << 7)
-#define DSPARB_CSTART_SHIFT 7
-#define DSPARB_BSTART_MASK (0x7f)
-#define DSPARB_BSTART_SHIFT 0
-#define DSPARB_BEND_SHIFT 9 /* on 855 */
-#define DSPARB_AEND_SHIFT 0
-#define DSPARB_SPRITEA_SHIFT_VLV 0
-#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEB_SHIFT_VLV 8
-#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
-#define DSPARB_SPRITEC_SHIFT_VLV 16
-#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
-#define DSPARB_SPRITED_SHIFT_VLV 24
-#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
-#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
-#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
-#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
-#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
-#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
-#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
-#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
-#define DSPARB_SPRITED_HI_SHIFT_VLV 12
-#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
-#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
-#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
-#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
-#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
-#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
-#define DSPARB_SPRITEE_SHIFT_VLV 0
-#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEF_SHIFT_VLV 8
-#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
-
-/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
-#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff << 23)
-#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f << 16)
-#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f << 8)
-#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
-#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f << 0)
-#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
-#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
-#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
-#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
-#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
-#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f << 8)
-#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
-#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1 << 31)
-#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
-#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
-#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
-#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
-
-/* vlv/chv */
-#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
-#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
-#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
-#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
-#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
-#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
-#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
-#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff << 0)
-#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
-#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
-#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
-#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
-#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
-#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
-#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
-#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f << 0)
-
-/* vlv/chv high order bits */
-#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
-#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1 << 0)
-#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
-#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
-
-/* drain latency register values*/
-#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
-#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1 << 7)
-#define DDL_PRECISION_LOW (0 << 7)
-#define DRAIN_LATENCY_MASK 0x7f
-
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1 << 31)
#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
@@ -1967,72 +1739,6 @@
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
-/* FIFO watermark sizes etc */
-#define G4X_FIFO_LINE_SIZE 64
-#define I915_FIFO_LINE_SIZE 64
-#define I830_FIFO_LINE_SIZE 32
-
-#define VALLEYVIEW_FIFO_SIZE 255
-#define G4X_FIFO_SIZE 127
-#define I965_FIFO_SIZE 512
-#define I945_FIFO_SIZE 127
-#define I915_FIFO_SIZE 95
-#define I855GM_FIFO_SIZE 127 /* In cachelines */
-#define I830_FIFO_SIZE 95
-
-#define VALLEYVIEW_MAX_WM 0xff
-#define G4X_MAX_WM 0x3f
-#define I915_MAX_WM 0x3f
-
-#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
-#define PINEVIEW_FIFO_LINE_SIZE 64
-#define PINEVIEW_MAX_WM 0x1ff
-#define PINEVIEW_DFT_WM 0x3f
-#define PINEVIEW_DFT_HPLLOFF_WM 0
-#define PINEVIEW_GUARD_WM 10
-#define PINEVIEW_CURSOR_FIFO 64
-#define PINEVIEW_CURSOR_MAX_WM 0x3f
-#define PINEVIEW_CURSOR_DFT_WM 0
-#define PINEVIEW_CURSOR_GUARD_WM 5
-
-#define VALLEYVIEW_CURSOR_MAX_WM 64
-#define I965_CURSOR_FIFO 64
-#define I965_CURSOR_MAX_WM 32
-#define I965_CURSOR_DFT_WM 8
-
-/* define the Watermark register on Ironlake */
-#define _WM0_PIPEA_ILK 0x45100
-#define _WM0_PIPEB_ILK 0x45104
-#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
-#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
-#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
-#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
-#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
-#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
-#define WM1_LP_ILK _MMIO(0x45108)
-#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM3_LP_ILK _MMIO(0x45110)
-#define WM_LP_ENABLE REG_BIT(31)
-#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
-#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
-#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
-#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
-#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
-#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
-#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
-#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
-#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
-#define WM1S_LP_ILK _MMIO(0x45120)
-#define WM2S_LP_IVB _MMIO(0x45124)
-#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
-#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
-#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
-
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2802,7 +2508,7 @@
#define _CHICKEN_TRANS_C 0x420c8
#define _CHICKEN_TRANS_EDP 0x420cc
#define _CHICKEN_TRANS_D 0x420d8
-#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
[TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
[TRANSCODER_A] = _CHICKEN_TRANS_A, \
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
@@ -2810,9 +2516,10 @@
[TRANSCODER_D] = _CHICKEN_TRANS_D))
#define _MTL_CHICKEN_TRANS_A 0x604e0
#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
@@ -2863,11 +2570,16 @@
#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
+#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
-#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
-#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
+ _LATENCY_REPORTING_REMOVED_PIPE_A, \
+ _LATENCY_REPORTING_REMOVED_PIPE_B, \
+ _LATENCY_REPORTING_REMOVED_PIPE_C, \
+ _LATENCY_REPORTING_REMOVED_PIPE_D)
#define ICL_DELAY_PMRSP REG_BIT(22)
#define DISABLE_FLR_SRC REG_BIT(15)
#define MASK_WAKEMEM REG_BIT(13)
@@ -3619,6 +3331,7 @@
#define POWER_SETUP_I1_WATTS REG_BIT(31)
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
@@ -3819,6 +3532,7 @@ enum skl_power_gate {
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -3863,25 +3577,26 @@ enum skl_power_gate {
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE (1 << 31)
-#define DP_TP_CTL_FEC_ENABLE (1 << 30)
-#define DP_TP_CTL_MODE_SST (0 << 27)
-#define DP_TP_CTL_MODE_MST (1 << 27)
-#define DP_TP_CTL_FORCE_ACT (1 << 25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
@@ -3889,14 +3604,15 @@ enum skl_power_gate {
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
-#define DP_TP_STATUS_IDLE_DONE (1 << 25)
-#define DP_TP_STATUS_ACT_SENT (1 << 24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
/* DDI Buffer Control */
#define _DDI_BUF_CTL_A 0x64000
@@ -3917,7 +3633,7 @@ enum skl_power_gate {
#define DDI_BUF_IS_IDLE (1 << 7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
-#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
@@ -4444,14 +4160,6 @@ enum skl_power_gate {
#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-#define WM_MISC _MMIO(0x45260)
-#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
-
-#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
-#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
-#define WM_DBG_DISALLOW_SPRITE (1 << 2)
-
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
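
The DP_TP_CTL/DP_TP_STATUS rewrites above are mechanical conversions from open-coded (x << n) bit definitions to the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers, which keep a field's mask and shift in one definition. A minimal sketch of how such definitions compose, using a hypothetical register:

    #define DEMO_CTL_ENABLE         REG_BIT(31)
    #define DEMO_CTL_MODE_MASK      REG_GENMASK(10, 8)
    #define DEMO_CTL_MODE_NORMAL    REG_FIELD_PREP(DEMO_CTL_MODE_MASK, 3)

    /* REG_FIELD_PREP() shifts the value into bits 10:8 of the mask, so a
     * stale or mismatched shift can no longer disagree with the mask. */
    u32 val = DEMO_CTL_ENABLE | DEMO_CTL_MODE_NORMAL;
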
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8f62cfa23fb7..ea0b8e7e4828 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -293,8 +293,7 @@ static void __rq_init_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
- hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_setup(&wdg->timer, __rq_watchdog_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void __rq_arm_watchdog(struct i915_request *rq)
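
The watchdog hunk above is one of several conversions in this patch to hrtimer_setup(), which folds the old two-step hrtimer_init() plus manual .function assignment into a single call that takes the callback directly. A minimal sketch of the pattern:

    static enum hrtimer_restart demo_expired(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;
    }

    /* before:
     *      hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
     *      timer.function = demo_expired;
     * after:
     */
    hrtimer_setup(&timer, demo_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
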
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
deleted file mode 100644
index f18f1acf2158..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "display/intel_de.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_vga.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_suspend.h"
-#include "intel_pci_config.h"
-
-static void intel_save_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- }
-}
-
-static void intel_restore_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- }
-}
-
-void i915_save_display(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- /* Display arbitration control */
- if (GRAPHICS_VER(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv,
- DSPARB(dev_priv));
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_read_config_word(pdev, GCDGMBUS,
- &dev_priv->regfile.saveGCDGMBUS);
-
- intel_save_swf(dev_priv);
-}
-
-void i915_restore_display(struct drm_i915_private *dev_priv)
-{
- struct intel_display *display = &dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_restore_swf(dev_priv);
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_write_config_word(pdev, GCDGMBUS,
- dev_priv->regfile.saveGCDGMBUS);
-
- /* Display arbitration */
- if (GRAPHICS_VER(dev_priv) <= 4)
- intel_de_write(dev_priv, DSPARB(dev_priv),
- dev_priv->regfile.saveDSPARB);
-
- intel_vga_redisable(display);
-
- intel_gmbus_reset(display);
-}
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
deleted file mode 100644
index e5a611ee3d15..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_SUSPEND_H__
-#define __I915_SUSPEND_H__
-
-struct drm_i915_private;
-
-void i915_save_display(struct drm_i915_private *i915);
-void i915_restore_display(struct drm_i915_private *i915);
-
-#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 09d89bdf82f4..7ed41ce9b708 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -642,34 +642,6 @@ DEFINE_EVENT(i915_request, i915_request_wait_end,
TP_ARGS(rq)
);
-TRACE_EVENT_CONDITION(i915_reg_rw,
- TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
-
- TP_ARGS(write, reg, val, len, trace),
-
- TP_CONDITION(trace),
-
- TP_STRUCT__entry(
- __field(u64, val)
- __field(u32, reg)
- __field(u16, write)
- __field(u16, len)
- ),
-
- TP_fast_assign(
- __entry->val = (u64)val;
- __entry->reg = i915_mmio_reg_offset(reg);
- __entry->write = write;
- __entry->len = len;
- ),
-
- TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
- __entry->write ? "write" : "read",
- __entry->reg, __entry->len,
- (u32)(__entry->val & 0xffffffff),
- (u32)(__entry->val >> 32))
-);
-
/**
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
*
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.c b/drivers/gpu/drm/i915/intel_cpu_info.c
new file mode 100644
index 000000000000..e52d0ac713a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Avoid INTEL_<PLATFORM> name collisions between asm/intel-family.h and
+ * intel_device_info.h by having a separate file.
+ */
+
+#include "intel_cpu_info.h"
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+static const struct x86_cpu_id g8_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ {}
+};
+
+/**
+ * intel_match_g8_cpu - match current CPU against g8_cpu_ids
+ *
+ * This matches the current CPU against g8_cpu_ids, which are applicable
+ * to the G8 workaround.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+bool intel_match_g8_cpu(void)
+{
+ return x86_match_cpu(g8_cpu_ids);
+}
+#else /* CONFIG_X86 */
+
+bool intel_match_g8_cpu(void) { return false; }
+
+#endif /* CONFIG_X86 */
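
A caller would gate the G8 workaround on the host CPU via the new helper; a hypothetical sketch (the function invoked inside the branch is illustrative, not a real API):

    #include "intel_cpu_info.h"

    if (intel_match_g8_cpu())
            apply_g8_workaround(i915);      /* illustrative helper */
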
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.h b/drivers/gpu/drm/i915/intel_cpu_info.h
new file mode 100644
index 000000000000..d898fb463d31
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _INTEL_CPU_INFO_H_
+#define _INTEL_CPU_INFO_H_
+
+#include <linux/types.h>
+
+bool intel_match_g8_cpu(void);
+
+#endif /* _INTEL_CPU_INFO_H_ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 856b30fa37dc..bbe3a24fe3d9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -200,6 +200,10 @@ static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(ID),
};
+static const u16 subplatform_dg2_d_ids[] = {
+ INTEL_DG2_D_IDS(ID),
+};
+
static const u16 subplatform_arl_h_ids[] = {
INTEL_ARL_H_IDS(ID),
};
@@ -280,6 +284,11 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
+ /* DG2_D ids span across multiple DG2 subplatforms */
+ if (find_devid(devid, subplatform_dg2_d_ids,
+ ARRAY_SIZE(subplatform_dg2_d_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_D);
+
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index ef84eea9ba0b..9387385cb418 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -95,9 +95,11 @@ enum intel_platform {
/*
* Subplatform bits share the same namespace per parent platform. In other words
* it is fine for the same bit to be used on multiple parent platforms.
+ * Devices can belong to multiple subplatforms if needed, so it's possible to set
+ * multiple bits for the same device.
*/
-#define INTEL_SUBPLATFORM_BITS (3)
+#define INTEL_SUBPLATFORM_BITS (4)
#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
@@ -114,6 +116,7 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
#define INTEL_SUBPLATFORM_G12 2
+#define INTEL_SUBPLATFORM_D 3
/* ADL */
#define INTEL_SUBPLATFORM_RPL 0
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index aa51f366626c..ee1cd2126f97 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -5,9 +5,11 @@
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/i9xx_wm_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
index 5c6e517c73f4..41e85ac773dc 100644
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ b/drivers/gpu/drm/i915/intel_sbi.c
@@ -17,7 +17,7 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
struct intel_uncore *uncore = &i915->uncore;
u32 cmd;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->sbi_lock);
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
@@ -57,6 +57,16 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
return 0;
}
+void intel_sbi_lock(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->sbi_lock);
+}
+
+void intel_sbi_unlock(struct drm_i915_private *i915)
+{
+ mutex_unlock(&i915->sbi_lock);
+}
+
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination)
{
@@ -72,3 +82,13 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
{
intel_sbi_rw(i915, reg, destination, &value, false);
}
+
+void intel_sbi_init(struct drm_i915_private *i915)
+{
+ mutex_init(&i915->sbi_lock);
+}
+
+void intel_sbi_fini(struct drm_i915_private *i915)
+{
+ mutex_destroy(&i915->sbi_lock);
+}
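
With the mutex private to the new intel_sbi_lock()/intel_sbi_unlock() wrappers, callers bracket multi-register sequences explicitly instead of taking sb_lock themselves. A sketch of a read-modify-write under the new interface, borrowing the LPT iCLK register names as an illustrative example:

    u32 tmp;

    intel_sbi_lock(i915);
    tmp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
    tmp |= SBI_SSCCTL_DISABLE;
    intel_sbi_write(i915, SBI_SSCCTL6, tmp, SBI_ICLK);
    intel_sbi_unlock(i915);
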
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
index f5a862210454..85161a4f13b8 100644
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ b/drivers/gpu/drm/i915/intel_sbi.h
@@ -15,6 +15,10 @@ enum intel_sbi_destination {
SBI_MPHY,
};
+void intel_sbi_init(struct drm_i915_private *i915);
+void intel_sbi_fini(struct drm_i915_private *i915);
+void intel_sbi_lock(struct drm_i915_private *i915);
+void intel_sbi_unlock(struct drm_i915_private *i915);
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6aa179a3e92a..bdcfcae83b52 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -31,12 +31,17 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
+struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_i915(drm)->uncore;
+}
+
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static void
@@ -2098,8 +2103,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->mask = BIT(domain_id);
- hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- d->timer.function = intel_uncore_fw_release_timer;
+ hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
uncore->fw_domains |= BIT(domain_id);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index f419c311a0de..e39582950627 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -501,6 +501,8 @@ static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
return uncore->regs;
}
+struct intel_uncore *to_intel_uncore(struct drm_device *drm);
+
/*
* The raw_reg_{read,write} macros are intended as a micro-optimization for
* interrupt handlers so that the pointer indirection on uncore->regs can
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.c b/drivers/gpu/drm/i915/intel_uncore_trace.c
new file mode 100644
index 000000000000..86f0c3942b1d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "intel_uncore_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.h b/drivers/gpu/drm/i915/intel_uncore_trace.h
new file mode 100644
index 000000000000..f13ff71edf2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
+#if !defined(__INTEL_UNCORE_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __INTEL_UNCORE_TRACE_H__
+
+#include "i915_reg_defs.h"
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
+
+ TP_CONDITION(trace),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = i915_mmio_reg_offset(reg);
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
+);
+#endif /* __INTEL_UNCORE_TRACE_H__ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_FILE intel_uncore_trace
+#include <trace/define_trace.h>
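
The relocated event follows the usual define_trace.h split: exactly one .c file (intel_uncore_trace.c above) defines CREATE_TRACE_POINTS before including the header, which instantiates the tracepoint; every other user just includes the header and calls the generated helper. Roughly, with illustrative local names:

    #include "intel_uncore_trace.h"

    /* emitted on each MMIO access; the final argument gates tracing */
    trace_i915_reg_rw(true, reg, (u64)val, sizeof(val), trace_mmio);
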
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5c397a2df70e..5d27e1c733c5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
return PTR_ERR(ppgtt);
if (!ppgtt->vm.allocate_va_range)
- goto err_ppgtt_cleanup;
+ goto ppgtt_vm_put;
/*
* While we only allocate the page tables here and so we could
@@ -236,7 +236,7 @@ err_ppgtt_cleanup:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
-
+ppgtt_vm_put:
i915_vm_put(&ppgtt->vm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index acae30a04a94..88870844b5bd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
for (n = 0; n < ncpus; n++) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d", n);
+ worker = kthread_run_worker(0, "igt/%d", n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
ncpus = n;
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
unsigned int i = idx * ncpus + n;
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+ worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
goto out_flush;
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
- worker = kthread_create_worker(0, "igt:%s",
+ worker = kthread_run_worker(0, "igt:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 29110abb4fe0..c383d31d46b0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -19,12 +19,22 @@ int igt_flush_test(struct drm_i915_private *i915)
int ret = 0;
for_each_gt(gt, i915, i) {
+ struct intel_engine_cs *engine;
+ unsigned long timeout_ms = 0;
+ unsigned int id;
+
if (intel_gt_is_wedged(gt))
ret = -EIO;
+ for_each_engine(engine, gt, id) {
+ if (engine->props.preempt_timeout_ms > timeout_ms)
+ timeout_ms = engine->props.preempt_timeout_ms;
+ }
+
cond_resched();
- if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
+ /* 2x longest preempt timeout, experimentally determined */
+ if (intel_gt_wait_for_idle(gt, HZ * timeout_ms / 500) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
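
The idle wait is now bounded by twice the slowest engine's preemption timeout instead of a fixed 3 s. Converting 2 * timeout_ms milliseconds into jiffies yields the HZ * timeout_ms / 500 expression; a worked example:

    /* jiffies = 2 * (timeout_ms / 1000) * HZ = HZ * timeout_ms / 500
     * e.g. timeout_ms = 7500, HZ = 250: 250 * 7500 / 500 = 3750 jiffies = 15 s
     */
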
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index ae57eb03dfca..a77e5b26542c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -180,7 +180,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 68291412f4cb..114ae8eb9cd5 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -43,7 +43,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- cpu_latency_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -51,7 +51,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- cpu_latency_qos_update_request(&i915->sb_qos,
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
@@ -62,12 +62,12 @@ void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&i915->vlv_iosf_sb.lock);
}
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&i915->vlv_iosf_sb.lock);
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
@@ -81,7 +81,7 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
int err;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->vlv_iosf_sb.lock);
if (port == IOSF_PORT_PUNIT)
iosf_mbi_assert_punit_acquired();
@@ -249,3 +249,21 @@ void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
reg, &val);
}
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_init(&i915->vlv_iosf_sb.lock);
+
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
+}
+
+void vlv_iosf_sb_fini(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_destroy(&i915->vlv_iosf_sb.lock);
+}
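
VLV/CHV sideband users keep the same get/put bracket, now backed by the dedicated vlv_iosf_sb mutex and PM QoS request instead of the shared sb_lock/sb_qos. A sketch of a P-unit read under the bracket (the register is an illustrative example):

    u32 val;

    vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
    val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
    vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
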
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index c20cf41b2d39..31813e07c56f 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -25,6 +25,9 @@ enum {
VLV_IOSF_SB_PUNIT,
};
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index 94595dde2b96..fc9f311ea1db 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -13,6 +13,7 @@
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_clock_gating.h"
+#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 9bc6a3884c22..3d9d4d40fb80 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(src)
-
powervr-y := \
pvr_ccb.o \
pvr_cccb.o \
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 85ee9abd1811..0639502137b4 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1387,7 +1387,6 @@ static struct drm_driver pvr_drm_driver = {
.name = PVR_DRIVER_NAME,
.desc = PVR_DRIVER_DESC,
- .date = PVR_DRIVER_DATE,
.major = PVR_DRIVER_MAJOR,
.minor = PVR_DRIVER_MINOR,
.patchlevel = PVR_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
index 378fe477b759..7fa147312dd1 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.h
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -9,7 +9,6 @@
#define PVR_DRIVER_NAME "powervr"
#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
-#define PVR_DRIVER_DATE "20230904"
/*
* Driver interface version:
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index c39beb70c317..6d13864851fc 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
- fw_obj->fw_mm_node.size);
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
}
static bool
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 73707daa4e52..5dbb636d7d4f 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
if (sf_id == ROGUE_FW_SF_LAST)
return -EINVAL;
- timestamp = read_fw_trace(trace_seq_data, 1) |
- ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+ read_fw_trace(trace_seq_data, 2);
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
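The fix makes trace word 1 the high half of the 64-bit timestamp and word 2 the low half, the reverse of the old code. A tiny userspace check of the two orderings (the word values are illustrative):

```c
/* Userspace sketch showing why the word order matters; the two 32-bit
 * trace words are illustrative only. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t word1 = 0x00000012;   /* high half of the timestamp */
	uint32_t word2 = 0x34567890;   /* low half of the timestamp */

	uint64_t old_order = (uint64_t)word2 << 32 | word1;  /* pre-fix */
	uint64_t new_order = (uint64_t)word1 << 32 | word2;  /* fixed */

	assert(new_order == 0x0000001234567890ull);
	assert(old_order != new_order);
	return 0;
}
```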
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index c4f08432882b..43411be930a2 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
return PVR_DRIVER_NAME;
}
+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(&fence->base);
+}
+
static void pvr_queue_fence_release(struct dma_fence *f)
{
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
- pvr_context_put(fence->queue->ctx);
- dma_fence_free(f);
+ queue_work(pvr_dev->sched_wq, &fence->release_work);
}
static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
pvr_context_get(queue->ctx);
fence->queue = queue;
+ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
dma_fence_init(&fence->base, fence_ops,
&fence_ctx->lock, fence_ctx->id,
atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
- &queue->job_fence_ctx);
+ if (!fence->ops)
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
}
/**
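The final dma_fence reference may be dropped from atomic context, so teardown work that may need to sleep cannot run directly in the release callback; instead the release is bounced to a workqueue. A minimal sketch of the deferred-release pattern, with hypothetical type names:

```c
/* Sketch of deferring fence teardown to process context; "my_fence" is a
 * hypothetical type, not the driver's actual one. release_work is assumed
 * to have been INIT_WORK()ed when the fence was initialized. */
#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct my_fence {
	struct dma_fence base;
	struct work_struct release_work;
};

static void my_fence_release_work(struct work_struct *w)
{
	struct my_fence *f = container_of(w, struct my_fence, release_work);

	/* Safe to sleep here: drop any references, then free the fence */
	dma_fence_free(&f->base);
}

static void my_fence_release(struct dma_fence *fence)
{
	struct my_fence *f = container_of(fence, struct my_fence, base);

	/* May be called in atomic context; defer the real work */
	schedule_work(&f->release_work);
}
```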
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index e06ced69302f..93fe9ac9f58c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -5,6 +5,7 @@
#define PVR_QUEUE_H
#include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>
#include "pvr_cccb.h"
#include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {
/** @queue: Queue that created this fence. */
struct pvr_queue *queue;
+
+ /** @release_work: Fence release work structure. */
+ struct work_struct release_work;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 363f885a7098..2896fa7501b1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -293,8 +293,9 @@ err_bind_op_fini:
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
- struct pvr_vm_context *vm_ctx, u64 device_addr,
- u64 size)
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
int err;
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
goto err_bind_op_fini;
}
+ bind_op->pvr_obj = pvr_obj;
bind_op->vm_ctx = vm_ctx;
bind_op->device_addr = device_addr;
bind_op->size = size;
@@ -598,20 +600,6 @@ err_free:
}
/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
-}
-
-/**
* pvr_vm_context_release() - Teardown a VM context.
* @ref_count: Pointer to reference counter of the VM context.
*
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- return 0;
-
- /* Acquire lock on the GEM being mapped. */
+ /* Acquire lock on the GEM object being mapped/unmapped. */
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}
@@ -772,8 +756,10 @@ err_cleanup:
}
/**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
* @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
* @device_addr: Virtual device address at the start of the target mapping.
* @size: Size of the target mapping.
*
@@ -784,9 +770,13 @@ err_cleanup:
* * Any error encountered while performing internal operations required to
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
* pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
*/
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
},
};
- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
- size);
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+ device_addr, size);
if (err)
return err;
+ pvr_gem_object_get(pvr_obj);
+
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
@@ -818,6 +810,96 @@ err_cleanup:
return err;
}
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
+{
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by drm_gpuva_find,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+ if (va) {
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range);
+ } else {
+ err = -ENOENT;
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ mutex_lock(&vm_ctx->lock);
+
+ for (;;) {
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+ vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range);
+ if (!va)
+ break;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range));
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+}
+
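With the object now threaded through the unmap path there are two entry points: callers that already hold the backing GEM object use pvr_vm_unmap_obj(), while pvr_vm_unmap() resolves the object from the VA range under vm_ctx->lock and fails with -ENOENT if nothing is mapped there. A hedged usage sketch; the vm_ctx/obj/addr/size values are placeholders from the caller's own setup, and each call assumes an independent live mapping:

```c
/* Usage sketch for the reworked unmap API. */
#include "pvr_vm.h"

static void example_unmap(struct pvr_vm_context *vm_ctx,
			  struct pvr_gem_object *obj, u64 addr, u64 size)
{
	/* When the backing object is already known, pass it directly */
	WARN_ON(pvr_vm_unmap_obj(vm_ctx, obj, addr, size));

	/*
	 * When only the device-virtual range is known, the object is looked
	 * up internally; returns -ENOENT if no mapping exists at that range.
	 */
	WARN_ON(pvr_vm_unmap(vm_ctx, addr, size));

	/* At context teardown, drop every mapping that is still live */
	pvr_vm_unmap_all(vm_ctx);
}
```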
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
{
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index 79406243617c..b0528dffa7f1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 63a335c62296..3633e8f3aff6 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -3,11 +3,11 @@
* Copyright 2019 NXP.
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -34,7 +34,6 @@ static const struct drm_driver dcss_kms_driver = {
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
- .date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index 5f2c93c3c288..ec5fd9a01f1e 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -13,9 +13,9 @@
#include <video/imx-ipu-v3.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -163,7 +163,6 @@ static const struct drm_driver imx_drm_driver = {
.fops = &imx_drm_driver_fops,
.name = "imx-drm",
.desc = "i.MX DRM graphics",
- .date = "20120507",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index fa7d44623c52..8d6a0bb31c48 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -352,7 +352,6 @@ static struct drm_driver imx_lcdc_drm_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
- .date = "20200716",
};
static const struct of_device_id imx_lcdc_of_dev_id[] = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 8469e1e5e582..c23ee2d214de 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -20,11 +20,11 @@
#include <linux/pm.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
@@ -953,7 +953,6 @@ static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
- .date = "20200716",
.major = 1,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a3d31de761cb..32cda134ae3e 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -13,8 +13,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -445,7 +445,6 @@ static const struct drm_driver kmb_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index bf085e95b28f..1f0c10d317fe 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -16,7 +16,6 @@
#define KMB_MIN_WIDTH 1920 /* Max width in pixels */
#define KMB_MIN_HEIGHT 1080 /* Max height in pixels */
-#define DRIVER_DATE "20210223"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index fb3062c872b3..2067c5b65c57 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -271,7 +271,6 @@ static const struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20191231",
.major = 1,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index fb9de5e0bc0e..204b0fee55d0 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -15,8 +15,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -52,7 +52,6 @@ static struct drm_driver logicvc_drm_driver = {
.fops = &logicvc_drm_fops,
.name = "logicvc-drm",
.desc = "Xylon LogiCVC DRM driver",
- .date = "20200403",
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index b350bdcf1645..12193d2a301a 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -7,9 +7,9 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -26,7 +26,6 @@
#define DRIVER_AUTHOR "Sui Jingfeng <suijingfeng@loongson.cn>"
#define DRIVER_NAME "loongson"
#define DRIVER_DESC "drm driver for loongson graphics"
-#define DRIVER_DATE "20220701"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -39,7 +38,6 @@ static const struct drm_driver lsdc_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -232,9 +230,9 @@ lsdc_create_device(struct pci_dev *pdev,
lsdc_gem_init(ddev);
/* Bar 0 of the DC device contains the MMIO register's base address */
- ldev->reg_base = pcim_iomap(pdev, 0, 0);
- if (!ldev->reg_base)
- return ERR_PTR(-ENODEV);
+ ldev->reg_base = pcim_iomap_region(pdev, 0, "lsdc");
+ if (IS_ERR(ldev->reg_base))
+ return ldev->reg_base;
spin_lock_init(&ldev->reglock);
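pcim_iomap_region() reports failure as an ERR_PTR-encoded errno rather than NULL, so the check flips from !ptr to IS_ERR() and the real error is propagated instead of a blanket -ENODEV. A minimal sketch of the new idiom (my_probe and the "mydrv" region name are placeholders):

```c
/* Sketch of mapping a BAR with pcim_iomap_region(); the API calls are
 * real, the surrounding function is hypothetical. */
#include <linux/err.h>
#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev)
{
	void __iomem *base;

	base = pcim_iomap_region(pdev, 0, "mydrv");
	if (IS_ERR(base))
		return PTR_ERR(base);   /* propagate the real errno */

	/* ... use base ... */
	return 0;
}
```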
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index c4d51f5f038d..5f2c462bad7e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -65,9 +65,9 @@
#include <linux/slab.h>
#include <linux/delay.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_dma.h>
@@ -208,7 +208,6 @@ static const struct drm_driver mcde_drm_driver = {
.fops = &drm_fops,
.name = "mcde",
.desc = DRIVER_DESC,
- .date = "20180529",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index f496e6cfdfe0..e47debd60619 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -14,9 +14,6 @@ config DRM_MEDIATEK
select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
- select MEMORY
- select MTK_SMI
- select PHY_MTK_MIPI_DSI
select VIDEOMODE_HELPERS
help
Choose this option if you have a MediaTek SoC.
@@ -27,7 +24,6 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
depends on DRM_MEDIATEK
- select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DP_AUX_BUS
@@ -38,6 +34,5 @@ config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
select SND_SOC_HDMI_CODEC if SND_SOC
- select PHY_MTK_HDMI
help
DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index eb0e1233ad04..5674f5707cca 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
drm_crtc_handle_vblank(&mtk_crtc->base);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client.chan)
+ return;
+#endif
+
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
@@ -284,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
state = to_mtk_crtc_state(mtk_crtc->base.state);
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
- if (mtk_crtc->config_updating) {
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating)
goto ddp_cmdq_cb_out;
- }
state->pending_config = false;
@@ -315,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-
ddp_cmdq_cb_out:
+ if (mtk_crtc->pending_needs_vblank) {
+ mtk_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
+
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -606,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
*/
mtk_crtc->cmdq_vblank_cnt = 3;
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
-#endif
+#else
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
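Two ordering rules fall out of this hunk: config_updating is cleared (under config_lock) before the CMDQ packet is sent, so the completion callback never observes a stale flag, and the page-flip completion moves inside the lock so it cannot race a concurrent update. A minimal sketch of that handshake, with hypothetical names:

```c
/* Sketch of the flag handshake between commit and completion paths;
 * "my_crtc" and its fields are illustrative. */
#include <linux/spinlock.h>

struct my_crtc {
	spinlock_t lock;
	bool updating;
	bool pending_vblank;
};

static void producer_commit(struct my_crtc *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->updating = false;            /* publish before kicking hardware */
	spin_unlock_irqrestore(&c->lock, flags);
	/* ... send the command packet to hardware here ... */
}

static void consumer_completion(struct my_crtc *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (!c->updating && c->pending_vblank) {
		/* complete the flip while still holding the lock */
		c->pending_vblank = false;
	}
	spin_unlock_irqrestore(&c->lock, flags);
}
```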
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index f731d4fbe8b6..df82cea4bb79 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -460,6 +460,29 @@ static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
}
}
+static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
+ unsigned int idx,
+ struct mtk_plane_pending_state *pending,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ unsigned int pitch_msb = pending->pitch >> 16;
+ unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int hdr_addr = pending->hdr_addr;
+
+ if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_ADDR(ovl, idx));
+ mtk_ddp_write_relaxed(cmdq_pkt,
+ OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_PITCH(ovl, idx));
+ } else {
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ }
+}
+
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
@@ -467,25 +490,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
- unsigned int hdr_addr = pending->hdr_addr;
- unsigned int pitch = pending->pitch;
- unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
unsigned int fmt = pending->format;
+ unsigned int rotation = pending->rotation;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
- bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
- union overlay_pitch {
- struct split_pitch {
- u16 lsb;
- u16 msb;
- } split_pitch;
- u32 pitch;
- } overlay_pitch;
-
- overlay_pitch.pitch = pitch;
if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);
@@ -513,22 +525,30 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
ignore_pixel_alpha = OVL_CONST_BLEND;
}
- if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ /*
+ * Treat a 180-degree rotation as flip x + flip y, and XOR the original
+ * rotation value with flip x + flip y to support both at the same time.
+ */
+ if (rotation & DRM_MODE_ROTATE_180)
+ rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+
+ if (rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}
- if (pending->rotation & DRM_MODE_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}
if (ovl->data->supports_afbc)
- mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
+ mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
+ pending->modifier != DRM_FORMAT_MOD_LINEAR);
mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
@@ -537,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- if (is_afbc) {
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_ADDR(ovl, idx));
- mtk_ddp_write_relaxed(cmdq_pkt,
- OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_PITCH(ovl, idx));
- } else {
- mtk_ddp_write_relaxed(cmdq_pkt,
- overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- }
+ if (ovl->data->supports_afbc)
+ mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);
mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
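A quick userspace check of the rotate-180 decomposition: XORing the reflect bits turns a bare ROTATE_180 into flip x + flip y, and cancels an existing reflect so ROTATE_180 | REFLECT_X collapses to an effective REFLECT_Y (the flag values mirror <drm/drm_blend.h>):

```c
#include <assert.h>

#define DRM_MODE_ROTATE_180 (1 << 2)
#define DRM_MODE_REFLECT_X  (1 << 4)
#define DRM_MODE_REFLECT_Y  (1 << 5)

int main(void)
{
	/* ROTATE_180 alone becomes flip x + flip y */
	unsigned int r = DRM_MODE_ROTATE_180;
	if (r & DRM_MODE_ROTATE_180)
		r ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	assert(r & DRM_MODE_REFLECT_X);
	assert(r & DRM_MODE_REFLECT_Y);

	/* ROTATE_180 + REFLECT_X cancels down to just REFLECT_Y */
	r = DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X;
	if (r & DRM_MODE_ROTATE_180)
		r ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	assert(!(r & DRM_MODE_REFLECT_X));
	assert(r & DRM_MODE_REFLECT_Y);
	return 0;
}
```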
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 36713c176cfc..cd385ba4c66a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -543,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;
-
- /* update MISC0 */
- mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
- color_format << DP_TEST_COLOR_FORMAT_SHIFT,
- DP_TEST_COLOR_FORMAT_MASK);
+ u32 misc0_color;
switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ misc0_color = DP_COLOR_FORMAT_YCbCr422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ misc0_color = DP_COLOR_FORMAT_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
@@ -562,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
return -EINVAL;
}
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ misc0_color,
+ DP_TEST_COLOR_FORMAT_MASK);
+
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;
@@ -1135,6 +1138,18 @@ static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
}
+static void mtk_dp_sdp_path_reset(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ SDP_RESET_SW_DP_ENC0_P0,
+ SDP_RESET_SW_DP_ENC0_P0);
+
+ /* Wait for the SDP path reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, SDP_RESET_SW_DP_ENC0_P0);
+}
+
static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
@@ -1165,17 +1180,25 @@ static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
- if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
+ if (IS_ERR(buf)) {
dev_warn(dev, "Failed to read nvmem_cell_read\n");
-
- if (!IS_ERR(buf))
- kfree(buf);
-
goto use_default_val;
}
+ /* The cell length is in bytes; convert it to a count of u32 entries. */
+ len /= sizeof(u32);
+
for (i = 0; i < MTK_DP_CAL_MAX; i++) {
fmt = &mtk_dp->data->efuse_fmt[i];
+
+ if (fmt->idx >= len) {
+ dev_warn(mtk_dp->dev,
+ "Out-of-bound efuse data access, fmt idx = %d, buf len = %zu\n",
+ fmt->idx, len);
+ kfree(buf);
+ goto use_default_val;
+ }
+
cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
@@ -2100,7 +2123,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
- u8 sink_count = 0;
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
@@ -2115,8 +2137,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
* function, we just need to check the HPD connection to check
* whether we connect to a sink device.
*/
- drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
- if (DP_GET_SINK_COUNT(sink_count))
+
+ if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
ret = connector_status_connected;
if (!enabled)
@@ -2397,6 +2419,9 @@ static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
+ /* Software-reset the SDP path */
+ mtk_dp_sdp_path_reset(mtk_dp);
+
/* Ensure the sink is muted */
msleep(20);
}
@@ -2408,12 +2433,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
- if (rate < mode->clock * bpp / 8)
+ /*
+ * FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2.
+ * The down-spread amplitude shall either be disabled (0.0%) or up
+ * to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead.
+ *
+ * Because rate is already divided by 10,
+ * mode->clock does not need to be multiplied by 10.
+ */
+ if ((rate * 97 / 100) < (mode->clock * bpp / 8))
return MODE_CLOCK_HIGH;
return MODE_OK;
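As the comment notes, the per-lane value from drm_dp_bw_code_to_link_rate() is already in units that compare directly against mode->clock * bpp / 8, so only the ~3% FEC/SSC overhead needs to be folded in. A back-of-the-envelope userspace check with HBR2 x4 lanes against a 594 MHz 4K60 mode at 24 bpp (all figures illustrative):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t rate = 540000 * 4;   /* HBR2 per-lane value x 4 lanes */
	uint32_t clock = 594000;      /* pixel clock in kHz */
	uint32_t bpp = 24;

	uint64_t avail = (uint64_t)rate * 97 / 100;  /* minus ~3% overhead */
	uint64_t need = (uint64_t)clock * bpp / 8;   /* required payload */

	assert(avail == 2095200);
	assert(need == 1782000);
	assert(need <= avail);        /* the mode fits: MODE_OK */
	return 0;
}
```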
@@ -2454,10 +2486,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
*num_input_fmts = 0;
@@ -2466,8 +2497,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
* datarate of YUV422 and sink device supports YUV422, we output YUV422
* format. Use this condition, we can support more resolution.
*/
- if ((rate < (mode->clock * 24 / 8)) &&
- (rate > (mode->clock * 16 / 8)) &&
+ if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
+ ((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
@@ -2615,7 +2646,6 @@ static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
.audio_shutdown = mtk_dp_audio_shutdown,
.get_eld = mtk_dp_audio_get_eld,
.hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_dp_register_audio_driver(struct device *dev)
@@ -2626,6 +2656,7 @@ static int mtk_dp_register_audio_driver(struct device *dev)
.max_i2s_channels = 8,
.i2s = 1,
.data = mtk_dp,
+ .no_capture_mute = 1,
};
mtk_dp->audio_pdev = platform_device_register_data(dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
index 709b79480693..8ad7a9cc259e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp_reg.h
+++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
@@ -86,6 +86,7 @@
#define MTK_DP_ENC0_P0_3004 0x3004
#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8)
#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9)
+#define SDP_RESET_SW_DP_ENC0_P0 BIT(13)
#define MTK_DP_ENC0_P0_3010 0x3010
#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
#define MTK_DP_ENC0_P0_3014 0x3014
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0829ceb9967c..f22ad2882697 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -12,9 +12,9 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -33,7 +33,6 @@
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
-#define DRIVER_DATE "20150513"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -359,7 +358,7 @@ static const struct of_device_id mtk_drm_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
-static int mtk_drm_match(struct device *dev, void *data)
+static int mtk_drm_match(struct device *dev, const void *data)
{
if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1))
return true;
@@ -373,11 +372,12 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
struct mtk_drm_private *temp_drm_priv;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
+ struct device_node *node;
struct device *drm_dev;
unsigned int cnt = 0;
int i, j;
- for_each_child_of_node_scoped(phandle->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
struct platform_device *pdev;
of_id = of_match_node(mtk_drm_of_ids, node);
@@ -406,8 +406,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC)
+ if (cnt == MAX_CRTC) {
+ of_node_put(node);
break;
+ }
}
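Unlike the _scoped variant, plain for_each_child_of_node() only drops the child reference when the loop advances, so an early break must release it by hand, which is what the added of_node_put() does. A minimal sketch of the idiom (the limit logic is a placeholder):

```c
/* Sketch of an early loop exit with for_each_child_of_node(). */
#include <linux/of.h>

static int count_up_to(struct device_node *parent, int limit)
{
	struct device_node *child;
	int count = 0;

	for_each_child_of_node(parent, child) {
		if (++count == limit) {
			/* Early exit skips the iterator's auto-put */
			of_node_put(child);
			break;
		}
	}
	return count;
}
```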
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -615,7 +617,6 @@ static const struct drm_driver mtk_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -673,6 +674,8 @@ err_deinit:
err_free:
private->drm = NULL;
drm_dev_put(drm);
+ for (i = 0; i < private->data->mmsys_dev_num; i++)
+ private->all_drm_private[i]->drm = NULL;
return ret;
}
@@ -900,7 +903,7 @@ static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path
const unsigned int **out_path,
unsigned int *out_path_len)
{
- struct device_node *next, *prev, *vdo = dev->parent->of_node;
+ struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 };
unsigned int *final_ddp_path;
unsigned short int idx = 0;
@@ -1089,7 +1092,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
/* No devicetree graphs support: go with hardcoded paths if present */
dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id);
private->data = mtk_drm_data;
- };
+ }
private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index e61b9bc68e9a..40752f232054 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -139,11 +139,11 @@
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)
-#define DSI_VM_CMD_CON 0x130
+/* DSI_VM_CMD_CON */
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_SHADOW_DEBUG 0x190U
+/* DSI_SHADOW_DEBUG */
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
@@ -187,6 +187,8 @@ struct phy;
struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
+ const u32 reg_vm_cmd_off;
+ const u32 reg_shadow_dbg_off;
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
- timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
- timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
- timing->clk_hs_prepare;
- timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
- dsi->regs + DSI_SHADOW_DEBUG);
+ dsi->regs + dsi->driver_data->reg_shadow_dbg_off);
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
@@ -1263,26 +1264,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 70dc1d4460ad..ca82bc829cb9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1660,7 +1660,6 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.mute_stream = mtk_hdmi_audio_mute,
.get_eld = mtk_hdmi_audio_get_eld,
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_hdmi_register_audio_driver(struct device *dev)
@@ -1671,6 +1670,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.max_i2s_channels = 2,
.i2s = 1,
.data = hdmi,
+ .no_capture_mute = 1,
};
struct platform_device *pdev;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 0f5a1a54544e..81d2ee37e773 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -105,7 +105,6 @@ static const struct drm_driver meson_driver = {
.fops = &fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = "20161109",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 97fd7eb765b4..069fdd2dc8f6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -97,7 +97,6 @@ static const struct drm_driver mgag200_driver = {
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 988967eafbf2..0608fc63e588 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "mgag200"
#define DRIVER_DESC "MGA G200 SE"
-#define DRIVER_DATE "20110418"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index f274d9430cc3..5df20cbeafb8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -78,6 +78,7 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_cwb.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
disp/dpu1/dpu_hw_interrupts.o \
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 50c490b492f0..f1b18a6663f7 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@@ -693,6 +693,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
if (ret)
goto fail;
+ adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
+
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ee89db72e36e..71dca78cd7a5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -750,10 +750,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
/* Disable L2 bypass in the UCHE */
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
/* Set the GMEM VA range (0 to gpu->gmem) */
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@@ -1760,11 +1760,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
unsigned int nr_rings;
int ret;
- if (!pdev) {
- DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
- return ERR_PTR(-ENXIO);
- }
-
a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
if (!a5xx_gpu)
return ERR_PTR(-ENOMEM);
@@ -1805,5 +1800,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu->ubwc_config.macrotile_mode = 0;
adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
+
return gpu;
}
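The magic LO/HI constants are replaced by one 64-bit uche_trap_base split with lower_32_bits()/upper_32_bits(). A quick userspace check that the per-generation bases reproduce the old register values:

```c
#include <assert.h>
#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t a5xx = 0x0001ffffffff0000ull;  /* a5xx uche_trap_base */
	uint64_t a4xx = 0xffff0000ffff0000ull;  /* a4xx uche_trap_base */

	assert(lower_32_bits(a5xx) == 0xFFFF0000);  /* old TRAP_BASE_LO */
	assert(upper_32_bits(a5xx) == 0x0001FFFF);  /* old TRAP_BASE_HI */
	assert(lower_32_bits(a4xx) == 0xffff0000);
	assert(upper_32_bits(a4xx) == 0xffff0000);
	return 0;
}
```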
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 0c560e84ad5a..edffb7737a97 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1388,6 +1388,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
@@ -1432,6 +1443,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 10687500,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 14db7376c712..699b0dd34b18 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -9,6 +9,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
@@ -109,9 +110,11 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
+ u32 bw_index = 0;
unsigned long gpu_freq;
int ret = 0;
@@ -124,6 +127,37 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
+ /* If enabled, find the corresponding DDR bandwidth index */
+ if (info->bcms && gmu->nr_gpu_bws > 1) {
+ unsigned int bw = dev_pm_opp_get_bw(opp, true, 0);
+
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
+ if (bw == gmu->gpu_bw_table[bw_index])
+ break;
+ }
+
+ /* Vote AB as a fraction of the max bandwidth, starting from A750 */
+ if (bw && adreno_is_a750_family(adreno_gpu)) {
+ u64 tmp;
+
+ /* For now, vote for 25% of the bandwidth */
+ tmp = bw * 25;
+ do_div(tmp, 100);
+
+ /*
+ * The AB vote consists of a 16 bit wide quantized level
+ * against the maximum supported bandwidth.
+ * Quantization can be calculated as below:
+ * vote = (bandwidth * 2^16) / max bandwidth
+ */
+ tmp *= MAX_AB_VOTE;
+ do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
+
+ bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE));
+ bw_index |= AB_VOTE_ENABLE;
+ }
+ }
+
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
@@ -139,8 +173,10 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
return;
if (!gmu->legacy) {
- a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ a6xx_hfi_set_freq(gmu, perf_index, bw_index);
+ /* With bandwidth voting we now vote for all resources, so skip the OPP set */
+ if (!bw_index)
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
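The AB vote math quantizes 25% of the requested bandwidth against the top gpu_bw_table entry on a 16-bit scale, then clamps into [1, MAX_AB_VOTE]. A userspace rendition of the arithmetic, assuming a 65535 full scale for MAX_AB_VOTE and illustrative table values:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_bw = 12000000;   /* last gpu_bw_table entry, kB/s */
	uint64_t bw = 8000000;        /* bandwidth of the selected OPP, kB/s */
	uint64_t max_ab_vote = 65535; /* assumed 16-bit full scale */

	uint64_t tmp = bw * 25 / 100;        /* vote 25% of the requested bw */
	tmp = tmp * max_ab_vote / max_bw;    /* quantize against the maximum */

	if (tmp < 1)                         /* clamp(tmp, 1, MAX_AB_VOTE) */
		tmp = 1;
	if (tmp > max_ab_vote)
		tmp = max_ab_vote;

	assert(tmp == 10922);                /* ~17% of full scale */
	return 0;
}
```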
@@ -729,6 +765,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
const struct block_header *blk;
u32 reg_offset;
+ u32 ver;
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
@@ -775,6 +812,12 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
}
+ ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
+ DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n",
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+
return 0;
}
@@ -1265,7 +1308,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
- msm_gem_object_set_name(bo->obj, name);
+ msm_gem_object_set_name(bo->obj, "%s", name);
return 0;
}
@@ -1287,6 +1330,104 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return 0;
}
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
+ const struct a6xx_info *info,
+ struct a6xx_gmu *gmu)
+{
+ const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 };
+ unsigned int bcm_index, bw_index, bcm_count = 0;
+
+ /* Retrieve BCM data from cmd-db */
+ for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ size_t count;
+
+ /* Stop at the NULL-terminated BCM entry */
+ if (!bcm->name)
+ break;
+
+ bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count);
+ if (IS_ERR(bcm_data[bcm_index]))
+ return PTR_ERR(bcm_data[bcm_index]);
+
+ if (!count) {
+ dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm_count++;
+ }
+
+ /* Generate BCM votes values for each bandwidth & BCM */
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
+ u32 *data = gmu->gpu_ib_votes[bw_index];
+ u32 bw = gmu->gpu_bw_table[bw_index];
+
+ /* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */
+ for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ bool commit = false;
+ u64 peak;
+ u32 vote;
+
+ if (bcm_index == bcm_count - 1 ||
+ (bcm_data[bcm_index + 1] &&
+ bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd))
+ commit = true;
+
+ if (!bw) {
+ data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0);
+ continue;
+ }
+
+ if (bcm->fixed) {
+ u32 perfmode = 0;
+
+ /* The GMU on A6xx votes perfmode for any valid bandwidth */
+ if (!adreno_is_a7xx(adreno_gpu) ||
+ (bcm->perfmode_bw && bw >= bcm->perfmode_bw))
+ perfmode = bcm->perfmode;
+
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode);
+ continue;
+ }
+
+ /* Multiply the bandwidth by the width of the connection */
+ peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width);
+ do_div(peak, bcm->buswidth);
+
+ /* Input bandwidth value is in KBps, scale the value to BCM unit */
+ peak *= 1000;
+ do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit));
+
+ vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
+
+ /* GMUs on A7xx vote on both x & y */
+ if (adreno_is_a7xx(adreno_gpu))
+ data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
+ else
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
+ }
+ }
+
+ return 0;
+}
+
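Each non-fixed BCM vote scales the requested bandwidth by the cmd-db width, divides by the catalog buswidth, converts KBps into BCM units, and clamps to the 14-bit vote field. A worked userspace example with illustrative width/unit values (the mask value mirrors the soc/qcom/tcs.h definition):

```c
#include <assert.h>
#include <stdint.h>

#define BCM_TCS_CMD_VOTE_MASK 0x3fff   /* 14-bit vote field */

int main(void)
{
	uint64_t bw = 2000000;    /* requested bandwidth, kB/s (from the OPP) */
	uint32_t width = 8;       /* channel width multiplier from cmd-db */
	uint32_t buswidth = 4;    /* bus width divisor from the GPU catalog */
	uint32_t unit = 800000;   /* BCM scaling unit from cmd-db */

	uint64_t peak = bw * width / buswidth;   /* scale by connection width */
	peak = peak * 1000 / unit;               /* KBps -> BCM units */

	uint64_t vote = peak;
	if (vote > BCM_TCS_CMD_VOTE_MASK)        /* clamp to the field width */
		vote = BCM_TCS_CMD_VOTE_MASK;
	if (vote < 1)
		vote = 1;

	assert(vote == 5000);
	return 0;
}
```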
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
unsigned long freq)
@@ -1390,12 +1531,15 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
* voltage levels and build the votes
+ * The GMU can also vote for DDR interconnects; use the OPP bandwidth entries
+ * and BCM parameters to build those votes.
*/
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
@@ -1407,6 +1551,10 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+ /* Build the interconnect votes */
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
+
return ret;
}
@@ -1442,10 +1590,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
return index;
}
+static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned int bandwidth = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" bandwidth level, so we need to
+ * add 1 to the table size to account for it.
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU bandwidth table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" bandwidth */
+ bandwidths[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ bandwidths[index++] = bandwidth++;
+ }
+
+ return index;
+}
+
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret = 0;
@@ -1472,6 +1653,14 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+ /*
+ * The GMU also handles GPU interconnect votes, so build a list
+ * of DDR bandwidths from the GPU OPP table.
+ */
+ if (info->bcms)
+ gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
+ gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
@@ -1603,7 +1792,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
pm_runtime_enable(gmu->dev);
@@ -1668,7 +1859,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index b4a79f88ccf4..0c888b326cfb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -19,6 +19,18 @@ struct a6xx_gmu_bo {
u64 iova;
};
+#define GMU_MAX_GX_FREQS 16
+#define GMU_MAX_CX_FREQS 4
+#define GMU_MAX_BCMS 3
+
+struct a6xx_bcm {
+ char *name;
+ unsigned int buswidth;
+ bool fixed;
+ unsigned int perfmode;
+ unsigned int perfmode_bw;
+};
+
/*
* These define the different GMU wake up options - these define how both the
* CPU and the GMU bring up the hardware
@@ -79,12 +91,16 @@ struct a6xx_gmu {
int current_perf_index;
int nr_gpu_freqs;
- unsigned long gpu_freqs[16];
- u32 gx_arc_votes[16];
+ unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
+ u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+
+ int nr_gpu_bws;
+ unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
+ u32 gpu_ib_votes[GMU_MAX_GX_FREQS][GMU_MAX_BCMS];
int nr_gmu_freqs;
- unsigned long gmu_freqs[4];
- u32 cx_arc_votes[4];
+ unsigned long gmu_freqs[GMU_MAX_CX_FREQS];
+ u32 cx_arc_votes[GMU_MAX_CX_FREQS];
unsigned long freq;
@@ -193,7 +209,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 019610341df1..0ae29a7c8a4d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1123,12 +1123,12 @@ static int hw_init(struct msm_gpu *gpu)
/* Disable L2 bypass in the UCHE */
if (adreno_is_a7xx(adreno_gpu)) {
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
} else {
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
}
if (!(adreno_is_a650_family(adreno_gpu) ||
@@ -2533,6 +2533,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
}
+ adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4aceffb6aae8..9201a53dd341 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,6 +44,7 @@ struct a6xx_info {
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
+ const struct a6xx_bcm *bcms;
};
struct a6xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cb8844ed46b2..0989aee3dd2c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
@@ -259,6 +260,48 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
+static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_bw_table *msg)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < GMU_MAX_BCMS; i++) {
+ if (!info->bcms[i].name)
+ break;
+ msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
+ }
+ msg->ddr_cmds_num = i;
+
+ for (i = 0; i < gmu->nr_gpu_bws; ++i)
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
+ msg->bw_level_num = gmu->nr_gpu_bws;
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->ddr_wait_bitmask = 0;
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->ddr_wait_bitmask |= BIT(j);
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU.
+ * The 'CN0' BCM is used on all targets, and the votes are
+ * basically 'off' and 'on' states, with the first bit enabling
+ * the path.
+ */
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_num = 1;
+
+ msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
+ msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->cnoc_wait_bitmask = 0;
+ for (j = 0; j < msg->cnoc_cmds_num; j++)
+ if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->cnoc_wait_bitmask |= BIT(j);
+}
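As a worked example of the wait-bitmask loops above: only command words with the commit bit set contribute a bit, so a row of { commit, no-commit, commit } yields 0x5. A standalone sketch in plain C; the BCM_TCS_CMD_COMMIT_MASK value (0x40000000) matches soc/qcom/tcs.h, while the command words themselves are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define BCM_TCS_CMD_COMMIT_MASK 0x40000000U

int main(void)
{
	/* One row of DDR command words; commit bit set on entries 0 and 2 */
	uint32_t cmds[3] = { 0x40000001U, 0x00000002U, 0x40000003U };
	uint32_t wait = 0;
	unsigned int j;

	for (j = 0; j < 3; j++)
		if (cmds[j] & BCM_TCS_CMD_COMMIT_MASK)
			wait |= 1U << j;

	printf("wait bitmask = 0x%x\n", wait);	/* prints 0x5 */
	return 0;
}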
+
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
@@ -664,6 +707,7 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
if (gmu->bw_table)
goto send;
@@ -672,7 +716,9 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (!msg)
return -ENOMEM;
- if (adreno_is_a618(adreno_gpu))
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ a6xx_generate_bw_table(info, gmu, msg);
+ else if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(msg);
else if (adreno_is_a619(adreno_gpu))
a619_build_bw_table(msg);
@@ -726,13 +772,13 @@ static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
sizeof(msg), NULL, 0);
}
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
- msg.freq = index;
- msg.bw = 0; /* TODO: bus scaling */
+ msg.freq = freq_index;
+ msg.bw = bw_index;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 528110169398..52ba4a07d7b9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -173,6 +173,11 @@ struct a6xx_hfi_gx_bw_perf_vote_cmd {
u32 bw;
};
+#define AB_VOTE_MASK GENMASK(31, 16)
+#define MAX_AB_VOTE (FIELD_MAX(AB_VOTE_MASK) - 1)
+#define AB_VOTE(vote) FIELD_PREP(AB_VOTE_MASK, (vote))
+#define AB_VOTE_ENABLE BIT(8)
+
#define HFI_H2F_MSG_PREPARE_SLUMBER 33
struct a6xx_hfi_prep_slumber_cmd {
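The new AB_VOTE bits layer an absolute-bandwidth vote on top of the bw index that a6xx_hfi_set_freq() now passes in msg.bw. A hedged sketch of how a vote word could be assembled; placing the table index in the low bits is an assumption here, not something this hunk shows:

#include <stdint.h>

#define AB_VOTE_MASK	0xffff0000u		/* GENMASK(31, 16) */
#define AB_VOTE(v)	(((uint32_t)(v) << 16) & AB_VOTE_MASK)
#define AB_VOTE_ENABLE	(1u << 8)

/* Hypothetical helper: combine a bw table index with an AB vote */
static uint32_t pack_bw_vote(uint32_t bw_index, uint16_t ab_vote)
{
	return bw_index | AB_VOTE(ab_vote) | AB_VOTE_ENABLE;
}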
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 75f5367e73ca..1238f3265978 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -310,10 +310,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *drm = gpu->dev;
/* No pointer params yet */
if (*len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
switch (param) {
case MSM_PARAM_GPU_ID:
@@ -365,12 +366,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_size;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
@@ -385,15 +386,19 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_MACROTILE_MODE:
*value = adreno_gpu->ubwc_config.macrotile_mode;
return 0;
+ case MSM_PARAM_UCHE_TRAP_BASE:
+ *value = adreno_gpu->uche_trap_base;
+ return 0;
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
+ struct drm_device *drm = gpu->dev;
+
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
@@ -401,11 +406,11 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
break;
default:
if (len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
}
switch (param) {
@@ -434,11 +439,10 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return UERR(EPERM, drm, "invalid permissions");
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
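The UERR() conversions above replace silent -EINVAL/-EPERM returns with messages tied to the DRM device. The macro itself is defined elsewhere in the MSM driver; a functionally similar helper could look like the following sketch (MY_UERR is a made-up name and the real definition may differ):

#include <drm/drm_print.h>

/* Hedged sketch: log a per-device debug message and yield -err */
#define MY_UERR(err, drm, fmt, ...) \
	({ drm_dbg_driver(drm, fmt, ##__VA_ARGS__); -(err); })

/* usage: return MY_UERR(EINVAL, drm, "invalid len"); */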
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e71f420f8b3a..dcf454629ce0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -253,6 +253,8 @@ struct adreno_gpu {
bool gmu_is_wrapper;
bool has_ray_tracing;
+
+ u64 uche_trap_base;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -559,6 +561,11 @@ static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
gpu->info->family == ADRENO_7XX_GEN3;
}
+static inline int adreno_is_a750_family(struct adreno_gpu *gpu)
+{
+ return gpu->info->family == ADRENO_7XX_GEN3;
+}
+
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index eb5dfff2ec4f..bcb39807fe61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
@@ -252,25 +254,25 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_8", .id = PINGPONG_8,
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
- .name = "pingpong_9", .id = PINGPONG_9,
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -350,6 +352,25 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
},
};
+static const struct dpu_cwb_cfg sm8650_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x8,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x8,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7E200, .len = 0x8,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7E600, .len = 0x8,
+ },
+};
+
static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -447,6 +468,8 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.merge_3d = sm8650_merge_3d,
.wb_count = ARRAY_SIZE(sm8650_wb),
.wb = sm8650_wb,
+ .cwb_count = ARRAY_SIZE(sm8650_cwb),
+ .cwb = sm8650_cwb,
.intf_count = ARRAY_SIZE(sm8650_intf),
.intf = sm8650_intf,
.vbif_count = ARRAY_SIZE(sm8650_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index cbbdaebe357e..daef07924886 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
},
};
+static const struct dpu_lm_cfg sdm670_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm670_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
static const struct dpu_dsc_cfg sdm670_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
@@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
.sspp = sdm670_sspp,
- .mixer_count = ARRAY_SIZE(sdm845_lm),
- .mixer = sdm845_lm,
+ .mixer_count = ARRAY_SIZE(sdm670_lm),
+ .mixer = sdm670_lm,
+ .dspp_count = ARRAY_SIZE(sdm670_dspp),
+ .dspp = sdm670_dspp,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.dsc_count = ARRAY_SIZE(sdm670_dsc),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 6ccfde82fecd..36cc9dbc00b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -295,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index bab19ddd1d4f..e8eacdb47967 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -302,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
new file mode 100644
index 000000000000..d761ed705bac
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_5_3_SM6150_H
+#define _DPU_5_3_SM6150_H
+
+static const struct dpu_caps sm6150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm6150_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6150_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm6150_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_2,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6150_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6150_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ },
+};
+
+static const struct dpu_intf_cfg sm6150_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm6150_perf_data = {
+ .max_bw_low = 4800000,
+ .max_bw_high = 4800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6150_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sm6150_cfg = {
+ .mdss_ver = &sm6150_mdss_ver,
+ .caps = &sm6150_dpu_caps,
+ .mdp = &sm6150_mdp,
+ .ctl_count = ARRAY_SIZE(sm6150_ctl),
+ .ctl = sm6150_ctl,
+ .sspp_count = ARRAY_SIZE(sm6150_sspp),
+ .sspp = sm6150_sspp,
+ .mixer_count = ARRAY_SIZE(sm6150_lm),
+ .mixer = sm6150_lm,
+ .dspp_count = ARRAY_SIZE(sm6150_dspp),
+ .dspp = sm6150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6150_pp),
+ .pingpong = sm6150_pp,
+ .intf_count = ARRAY_SIZE(sm6150_intf),
+ .intf = sm6150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6150_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d039b96beb97..76f60a2df7a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index a57d50b1f028..e8916ae826a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index aced16e350da..f7c08e89c882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index a1779c5597ae..08742472f9cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -257,13 +257,13 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 907b4d7ceb47..76ec72a32378 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -256,13 +256,13 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index ad48defa154f..4d3787fceb72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index a3e60ac70689..6b112e3d17da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -389,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
- .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
- .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 9f6ffd344693..e5dcd41a361f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -732,6 +732,13 @@ static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
int i;
+ /*
+ * If we cannot merge 2 LMs (no 3D mux), it is better to fail
+ * early, before even checking the width after the split.
+ */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return -E2BIG;
+
for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
@@ -1182,6 +1189,47 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
return false;
}
+static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ int total_planes = crtc->dev->mode_config.num_total_plane;
+ struct drm_atomic_state *state = crtc_state->state;
+ struct dpu_global_state *global_state;
+ struct drm_plane_state **states;
+ struct drm_plane *plane;
+ int ret;
+
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release_all_sspp(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+
+ states[plane_state->normalized_zpos] = plane_state;
+ }
+
+ ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);
+
+done:
+ kfree(states);
+ return ret;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1197,6 +1245,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ if (dpu_use_virtual_planes &&
+ (crtc_state->planes_changed || crtc_state->zpos_changed)) {
+ rc = dpu_crtc_reassign_planes(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
@@ -1251,6 +1306,12 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ /*
+ * If there is no 3D mux block we cannot merge LMs, so a large
+ * layer cannot be split across 2 LMs; filter out such modes.
+ */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return MODE_BAD_HVALUE;
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/
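Both dpu_crtc checks above encode the same rule: without a 3D merge block, a mode wider than one mixer cannot be split across two LMs and must be rejected. A hedged illustration with hypothetical numbers (a max_mixer_width of 2560 rejecting a 3840-wide mode):

#include <stdbool.h>

/* Sketch of the rule both hunks enforce; names are illustrative */
static bool mode_width_ok(int hdisplay, int max_mixer_width, bool has_3d_merge)
{
	/* A wide mode needs two LMs, which requires the 3D merge block */
	return hdisplay <= max_mixer_width || has_3d_merge;
}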
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 83de7564e2c1..48e6e8d74c85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -800,7 +800,7 @@ static int dpu_encoder_virt_atomic_check(
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, topology);
+ drm_enc, crtc_state, &topology);
if (!ret)
dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
global_state, crtc_state);
@@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
}
+ if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
+ phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 2cbf41f33cc0..0b342c043875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -765,6 +765,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_2_sm7150.h"
+#include "catalog/dpu_5_3_sm6150.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index c701d18c3522..4cea19e1a203 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -613,6 +613,16 @@ struct dpu_wb_cfg {
enum dpu_clk_ctrl_type clk_ctrl;
};
+/**
+ * struct dpu_cwb_cfg : MDP CWB mux instance info
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ */
+struct dpu_cwb_cfg {
+ DPU_HW_BLK_INFO;
+};
+
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps: pixels per second
@@ -815,6 +825,9 @@ struct dpu_mdss_cfg {
u32 dspp_count;
const struct dpu_dspp_cfg *dspp;
+ u32 cwb_count;
+ const struct dpu_cwb_cfg *cwb;
+
/* Add additional block data structures here */
const struct dpu_perf_cfg *perf;
@@ -839,6 +852,7 @@ extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6150_cfg;
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
new file mode 100644
index 000000000000..ae785f4ff0d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include <drm/drm_managed.h>
+#include "dpu_hw_cwb.h"
+
+#include <linux/bitfield.h>
+
+#define CWB_MUX 0x000
+#define CWB_MODE 0x004
+
+/* CWB mux block bit definitions */
+#define CWB_MUX_MASK GENMASK(3, 0)
+#define CWB_MODE_MASK GENMASK(2, 0)
+
+static void dpu_hw_cwb_config(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int cwb_mux_cfg = 0xF;
+ enum dpu_pingpong pp;
+ enum cwb_mode_input input;
+
+ if (!cwb_cfg)
+ return;
+
+ input = cwb_cfg->input;
+ pp = cwb_cfg->pp_idx;
+
+ if (input >= INPUT_MODE_MAX)
+ return;
+
+ /*
+ * The CWB_MUX register takes the pingpong index for the real-time
+ * display
+ */
+ if ((pp != PINGPONG_NONE) && (pp < PINGPONG_MAX))
+ cwb_mux_cfg = FIELD_PREP(CWB_MUX_MASK, pp - PINGPONG_0);
+
+ input = FIELD_PREP(CWB_MODE_MASK, input);
+
+ DPU_REG_WRITE(c, CWB_MUX, cwb_mux_cfg);
+ DPU_REG_WRITE(c, CWB_MODE, input);
+}
+
+/**
+ * dpu_hw_cwb_init() - Initializes the CWB mux hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: CWB catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_cwb context
+ */
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_cwb *c;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CWB;
+
+ c->idx = cfg->id;
+ c->ops.config_cwb = dpu_hw_cwb_config;
+
+ return c;
+}
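A hedged usage sketch for the new block driver: the caller obtains a context from dpu_hw_cwb_init() and then points the mux at the real-time pingpong via the config_cwb op. The dev/catalog/mmio plumbing is assumed here, not part of this file:

/* Hedged sketch; 'dev', 'catalog' and 'mmio' come from the caller */
static int example_setup_cwb(struct drm_device *dev,
			     const struct dpu_mdss_cfg *catalog,
			     void __iomem *mmio)
{
	struct dpu_hw_cwb_setup_cfg cwb_cfg = {
		.pp_idx = PINGPONG_0,		/* real-time pingpong to tap */
		.input = INPUT_MODE_DSPP_OUT,	/* capture post-DSPP pixels */
	};
	struct dpu_hw_cwb *cwb;

	cwb = dpu_hw_cwb_init(dev, &catalog->cwb[0], mmio);
	if (IS_ERR(cwb))
		return PTR_ERR(cwb);

	cwb->ops.config_cwb(cwb, &cwb_cfg);
	return 0;
}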
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
new file mode 100644
index 000000000000..96b6edf6b2bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_CWB_H
+#define _DPU_HW_CWB_H
+
+#include "dpu_hw_util.h"
+
+struct dpu_hw_cwb;
+
+enum cwb_mode_input {
+ INPUT_MODE_LM_OUT,
+ INPUT_MODE_DSPP_OUT,
+ INPUT_MODE_MAX
+};
+
+/**
+ * struct dpu_hw_cwb_setup_cfg : Describes configuration for CWB mux
+ * @pp_idx: Index of the real-time pingpong that feeds the CWB mux
+ * @input: Input tap point
+ */
+struct dpu_hw_cwb_setup_cfg {
+ enum dpu_pingpong pp_idx;
+ enum cwb_mode_input input;
+};
+
+/**
+ * struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
+ * @config_cwb: configure CWB mux
+ */
+struct dpu_hw_cwb_ops {
+ void (*config_cwb)(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg);
+};
+
+/**
+ * struct dpu_hw_cwb : CWB mux driver object
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: CWB index
+ * @ops: handle to operations possible for this CWB
+ */
+struct dpu_hw_cwb {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ enum dpu_cwb idx;
+
+ struct dpu_hw_cwb_ops ops;
+};
+
+/**
+ * to_dpu_hw_cwb - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * Return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cwb *to_dpu_hw_cwb(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cwb, base);
+}
+
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr);
+
+#endif /* _DPU_HW_CWB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 657200401f57..cec6d4e8baec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+ bool input_10_bits = dsc->bits_per_component == 10;
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
- data |= dsc->bits_per_component;
+ data |= input_10_bits;
DPU_REG_WRITE(c, DSC_ENC, data);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index f8806a4d317b..ba7bb05efe9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_MDSS_H
@@ -181,10 +183,10 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
- PINGPONG_6,
- PINGPONG_7,
- PINGPONG_8,
- PINGPONG_9,
+ PINGPONG_CWB_0,
+ PINGPONG_CWB_1,
+ PINGPONG_CWB_2,
+ PINGPONG_CWB_3,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -350,6 +352,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
#define DPU_DBG_MASK_CDM (1 << 12)
+#define DPU_DBG_MASK_CWB (1 << 13)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index ad19330de61a..562a3f4c5238 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else
+ else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index fb9f90957762..4853e516c487 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -173,7 +173,9 @@ static void dpu_hw_wb_bind_pingpong_blk(
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
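For reference, the WB_MUX encoding this hunk produces, collected into one hypothetical helper; the 0xd/0xb/0xf selector values are taken verbatim from the diff:

/* Hedged sketch of the selector dpu_hw_wb_bind_pingpong_blk() writes */
static unsigned int wb_mux_value(enum dpu_pingpong pp)
{
	if (pp >= PINGPONG_CWB_0)		/* CWB pingpong pair */
		return (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
	if (pp)					/* real-time pingpong */
		return (pp - PINGPONG_0) & 0x7;
	return 0xf;				/* disconnected */
}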
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 8b251f87a052..97e9cb8c2b09 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -51,6 +51,9 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+bool dpu_use_virtual_planes;
+module_param(dpu_use_virtual_planes, bool, 0);
+
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -829,8 +832,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
- plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1);
+ if (dpu_use_virtual_planes)
+ plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
+ else
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -932,12 +938,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
- dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
+ dpu_kms->mmio + cat->ctl[i].base, "%s",
+ cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
for (i = 0; i < cat->dspp_count; i++) {
base = dpu_kms->mmio + cat->dspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
+ "%s", cat->dspp[i].name);
if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
@@ -949,13 +957,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
- dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
+ dpu_kms->mmio + cat->intf[i].base, "%s",
+ cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
for (i = 0; i < cat->pingpong_count; i++) {
base = dpu_kms->mmio + cat->pingpong[i].base;
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
- cat->pingpong[i].name);
+ "%s", cat->pingpong[i].name);
/* TE2 sub-block has length of 0, so will not print it */
@@ -969,7 +978,8 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump SSPP sub-blocks HW regs info */
for (i = 0; i < cat->sspp_count; i++) {
base = dpu_kms->mmio + cat->sspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
+ "%s", cat->sspp[i].name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
@@ -987,12 +997,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
- dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
+ dpu_kms->mmio + cat->mixer[i].base,
+ "%s", cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
- dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
+ dpu_kms->mmio + cat->wb[i].base, "%s",
+ cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
@@ -1004,10 +1016,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->mdp[0].base, "top");
}
+ /* dump CWB sub-blocks HW regs info */
+ for (i = 0; i < cat->cwb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
+ dpu_kms->mmio + cat->cwb[i].base, "%s", cat->cwb[i].name);
+
/* dump DSC sub-blocks HW regs info */
for (i = 0; i < cat->dsc_count; i++) {
base = dpu_kms->mmio + cat->dsc[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
+ "%s", cat->dsc[i].name);
if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
@@ -1022,7 +1040,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
if (cat->cdm)
msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
- dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
+ dpu_kms->mmio + cat->cdm->base,
+ "%s", cat->cdm->name);
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ msm_disp_snapshot_add_block(disp_state, vbif->len,
+ dpu_kms->vbif[vbif->id] + vbif->base,
+ "%s", vbif->name);
+ }
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
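The "%s" conversions throughout this function pass block names as arguments to a literal format string instead of using them as the format itself, which would misparse any '%' in a name. A small runnable illustration of the same hardening in plain C:

#include <stdio.h>

int main(void)
{
	const char *name = "ctl_0 100% duty";	/* hostile name with a '%' */

	/* printf(name) would misparse the '%'; "%s" is safe */
	printf("%s\n", name);
	return 0;
}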
@@ -1478,6 +1505,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
+ { .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 88d64d43ea1a..547cdb2c0c78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -54,6 +54,8 @@
#define ktime_compare_safe(A, B) \
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+extern bool dpu_use_virtual_planes;
+
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -128,6 +130,8 @@ struct dpu_global_state {
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
uint32_t cdm_to_enc_id;
+
+ uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 3ffac24333a2..af3e541f60c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -20,7 +20,6 @@
#include "msm_drv.h"
#include "msm_mdss.h"
#include "dpu_kms.h"
-#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
#include "dpu_trace.h"
@@ -878,7 +877,7 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
drm_rect_rotate_inv(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if (r_pipe_cfg->src_rect.x1 != 0)
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
@@ -888,6 +887,32 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
+static bool dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
+ drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
+ return false;
+
+ if (pipe_cfg->rotation & DRM_MODE_ROTATE_90)
+ return false;
+
+ if (MSM_FORMAT_IS_YUV(fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
+ !test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
+ return false;
+
+ return true;
+}
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -901,7 +926,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- uint32_t max_linewidth;
uint32_t supported_rotations;
const struct dpu_sspp_cfg *pipe_hw_caps;
const struct dpu_sspp_sub_blks *sblk;
@@ -923,8 +947,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
fmt = msm_framebuffer_format(new_plane_state->fb);
- max_linewidth = pdpu->catalog->caps->max_linewidth;
-
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
@@ -940,48 +962,43 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- /*
- * In parallel multirect case only the half of the usual width
- * is supported for tiled formats. If we are here, we know that
- * full width is more than max_linewidth, thus each rect is
- * wider than allowed.
- */
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
+ }
- if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
- drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
- (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
- !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
- pipe_cfg->rotation & DRM_MODE_ROTATE_90 ||
- MSM_FORMAT_IS_YUV(fmt)) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
+ return 0;
+}
+
+static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg,
+ struct dpu_sw_pipe *r_pipe, struct dpu_sw_pipe_cfg *r_pipe_cfg,
+ struct dpu_hw_sspp *sspp, const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
+ if (!dpu_plane_is_multirect_parallel_capable(pipe->sspp, pipe_cfg, fmt, max_linewidth) ||
+ !dpu_plane_is_multirect_parallel_capable(pipe->sspp, r_pipe_cfg, fmt, max_linewidth))
+ return false;
+
+ r_pipe->sspp = pipe->sspp;
- /*
- * Use multirect for wide plane. We do not support dynamic
- * assignment of SSPPs, so we know the configuration.
- */
pipe->multirect_index = DPU_SSPP_RECT_0;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
- r_pipe->sspp = pipe->sspp;
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
- if (ret)
- return ret;
}
- return 0;
+ return true;
}
static int dpu_plane_atomic_check(struct drm_plane *plane,
@@ -995,14 +1012,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
const struct drm_crtc_state *crtc_state = NULL;
+ uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
- r_pipe->sspp = NULL;
+
+ if (!pipe->sspp)
+ return -EINVAL;
ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state);
if (ret)
@@ -1011,14 +1033,154 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->visible)
return 0;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(new_plane_state->fb),
+ max_linewidth)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT
+ " max_line:%u, can't use split source\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect),
+ DRM_RECT_ARG(&r_pipe_cfg->src_rect),
+ max_linewidth);
+ return -E2BIG;
+ }
+
+ return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
+}
+
+static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ ret = dpu_plane_atomic_check_nosspp(plane, plane_state, crtc_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible) {
+ /*
+ * Resources are freed by dpu_crtc_assign_plane_resources(),
+ * but clear the stale pointers here as well.
+ */
+ pstate->pipe.sspp = NULL;
+ pstate->r_pipe.sspp = NULL;
+
+ return 0;
+ }
+
+ /*
+ * Force resource reallocation if the FB format or the src/dst
+ * dimensions have changed: this plane might need a different
+ * SSPP (or pair of SSPPs) than the one used previously.
+ */
+ if (!old_plane_state || !old_plane_state->fb ||
+ old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->src_h != plane_state->src_h ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
+ old_plane_state->crtc_h != plane_state->crtc_h ||
+ msm_framebuffer_format(old_plane_state->fb) !=
+ msm_framebuffer_format(plane_state->fb))
+ crtc_state->planes_changed = true;
+
+ return 0;
+}
+
+static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
+ struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ const struct drm_crtc_state *crtc_state = NULL;
+ struct drm_plane *plane = plane_state->plane;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_rm_sspp_requirements reqs;
+ struct dpu_plane_state *pstate;
+ struct dpu_sw_pipe *pipe;
+ struct dpu_sw_pipe *r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg;
+ const struct msm_format *fmt;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ pstate = to_dpu_plane_state(plane_state);
+ pipe = &pstate->pipe;
+ r_pipe = &pstate->r_pipe;
+ pipe_cfg = &pstate->pipe_cfg;
+ r_pipe_cfg = &pstate->r_pipe_cfg;
+
+ pipe->sspp = NULL;
+ r_pipe->sspp = NULL;
+
+ if (!plane_state->fb)
+ return -EINVAL;
+
+ fmt = msm_framebuffer_format(plane_state->fb);
+ reqs.yuv = MSM_FORMAT_IS_YUV(fmt);
+ reqs.scale = (plane_state->src_w >> 16 != plane_state->crtc_w) ||
+ (plane_state->src_h >> 16 != plane_state->crtc_h);
+
+ reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
+
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
+
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
}
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_planes; i++) {
+ struct drm_plane_state *plane_state = states[i];
+
+ if (!plane_state ||
+ !plane_state->visible)
+ continue;
+
+ int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
+ state, plane_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
const struct msm_format *format =
@@ -1335,12 +1497,15 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tstage=%d\n", pstate->stage);
- drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
- drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
- drm_printf(p, "\tmultirect_index[0]=%s\n",
- dpu_get_multirect_index(pipe->multirect_index));
- drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
- drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ if (pipe->sspp) {
+ drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[0]=%s\n",
+ dpu_get_multirect_mode(pipe->multirect_mode));
+ drm_printf(p, "\tmultirect_index[0]=%s\n",
+ dpu_get_multirect_index(pipe->multirect_index));
+ drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ }
if (r_pipe->sspp) {
drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
@@ -1433,39 +1598,29 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.atomic_update = dpu_plane_atomic_update,
};
-/**
- * dpu_plane_init - create new dpu plane for the given pipe
- * @dev: Pointer to DRM device
- * @pipe: dpu hardware pipe identifier
- * @type: Plane type - PRIMARY/OVERLAY/CURSOR
- * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- *
- * Initialize the plane.
- */
-struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs)
+static const struct drm_plane_helper_funcs dpu_plane_virtual_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_virtual_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+/* common plane initialization, shared by SSPP-backed and virtual planes */
+static struct drm_plane *dpu_plane_init_common(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs,
+ bool inline_rotation,
+ const uint32_t *format_list,
+ uint32_t num_formats,
+ enum dpu_sspp pipe)
{
struct drm_plane *plane = NULL;
- const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- struct dpu_hw_sspp *pipe_hw;
- uint32_t num_formats;
uint32_t supported_rotations;
int ret;
- /* initialize underlying h/w driver */
- pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
- if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
- DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- return ERR_PTR(-EINVAL);
- }
-
- format_list = pipe_hw->cap->sblk->format_list;
- num_formats = pipe_hw->cap->sblk->num_formats;
-
pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1491,7 +1646,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ if (inline_rotation)
supported_rotations |= DRM_MODE_ROTATE_MASK;
drm_plane_create_rotation_property(plane,
@@ -1499,10 +1654,98 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
- /* success! finalize initialization */
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
+ return plane;
+}
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the plane.
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ struct dpu_hw_sspp *pipe_hw;
+
+ /* initialize underlying h/w driver */
+ pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
+ if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP is invalid\n", pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION),
+ pipe_hw->cap->sblk->format_list,
+ pipe_hw->cap->sblk->num_formats,
+ pipe);
+ if (IS_ERR(plane))
+ return plane;
+
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
+
+ return plane;
+}
+
+/**
+ * dpu_plane_init_virtual - create new virtualized DPU plane
+ * @dev: Pointer to DRM device
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the plane
+ *
+ * Initialize the virtual plane with no backing SSPP / pipe.
+ */
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ bool has_inline_rotation = false;
+ const u32 *format_list = NULL;
+ u32 num_formats = 0;
+ int i;
+
+ /* Determine the largest configuration that we can implement */
+ for (i = 0; i < kms->catalog->sspp_count; i++) {
+ const struct dpu_sspp_cfg *cfg = &kms->catalog->sspp[i];
+
+ if (test_bit(DPU_SSPP_INLINE_ROTATION, &cfg->features))
+ has_inline_rotation = true;
+
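+ /*
+ * Pipes with a CSC block (VIG) expose the widest format list,
+ * including YUV formats, so prefer their list.
+ */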
+ if (!format_list ||
+ cfg->sblk->csc_blk.len) {
+ format_list = cfg->sblk->format_list;
+ num_formats = cfg->sblk->num_formats;
+ }
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ has_inline_rotation,
+ format_list,
+ num_formats,
+ SSPP_NONE);
+ if (IS_ERR(plane))
+ return plane;
+
+ drm_plane_helper_add(plane, &dpu_plane_virtual_helper_funcs);
+
+ DPU_DEBUG("%s created virtual id:%u\n", plane->name, plane->base.id);
+
return plane;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 97090ca7842b..acd5725175cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -62,10 +62,23 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs);
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs);
+
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
#ifdef CONFIG_DEBUG_FS
void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
#else
static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
#endif
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes);
+
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index c247af03dc8e..5baf9df702b8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
@@ -9,6 +9,7 @@
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@@ -27,14 +28,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
}
/**
- * struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @topology: selected topology for the display
- */
-struct dpu_rm_requirements {
- struct msm_display_topology topology;
-};
-
-/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
* @dev: Corresponding device for devres management
@@ -130,6 +123,19 @@ int dpu_rm_init(struct drm_device *dev,
rm->hw_wb[wb->id - WB_0] = hw;
}
+ for (i = 0; i < cat->cwb_count; i++) {
+ struct dpu_hw_cwb *hw;
+ const struct dpu_cwb_cfg *cwb = &cat->cwb[i];
+
+ hw = dpu_hw_cwb_init(dev, cwb, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed cwb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
+ }
+
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@@ -241,14 +247,13 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
* mixer in rm->pingpong_blks[].
* @dspp_idx: output parameter, index of dspp block attached to the layer
* mixer in rm->dspp_blks[].
- * @reqs: input parameter, rm requirements for HW blocks needed in the
- * datapath.
+ * @topology: selected topology for the display
* Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -273,7 +278,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
}
*pp_idx = idx;
- if (!reqs->topology.num_dspp)
+ if (!topology->num_dspp)
return true;
idx = lm_cfg->dspp - DSPP_0;
@@ -295,7 +300,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int lm_idx[MAX_BLOCKS];
@@ -303,14 +308,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
- if (!reqs->topology.num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
+ if (!topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
return -EINVAL;
}
/* Find a primary mixer */
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
- lm_count < reqs->topology.num_lm; i++) {
+ lm_count < topology->num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
@@ -319,14 +324,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
enc_id, i, &pp_idx[lm_count],
- &dspp_idx[lm_count], reqs)) {
+ &dspp_idx[lm_count], topology)) {
continue;
}
++lm_count;
/* Valid primary mixer found, find matching peers */
- if (lm_count < reqs->topology.num_lm) {
+ if (lm_count < topology->num_lm) {
int j = _dpu_rm_get_lm_peer(rm, i);
/* ignore the peer if there is an error or if the peer was already processed */
@@ -339,7 +344,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
- reqs)) {
+ topology)) {
continue;
}
@@ -348,7 +353,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
}
- if (lm_count != reqs->topology.num_lm) {
+ if (lm_count != topology->num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -357,7 +362,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
global_state->dspp_to_enc_id[dspp_idx[i]] =
- reqs->topology.num_dspp ? enc_id : 0;
+ topology->num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -594,28 +599,28 @@ static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
- &reqs->topology);
+ topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
if (ret)
return ret;
- if (reqs->topology.needs_cdm) {
+ if (topology->needs_cdm) {
ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
@@ -626,20 +631,6 @@ static int _dpu_rm_make_reservation(
return ret;
}
-static int _dpu_rm_populate_requirements(
- struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs,
- struct msm_display_topology req_topology)
-{
- reqs->topology = req_topology;
-
- DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
- reqs->topology.num_lm, reqs->topology.num_dsc,
- reqs->topology.num_intf, reqs->topology.needs_cdm);
-
- return 0;
-}
-
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
uint32_t enc_id)
{
@@ -693,9 +684,8 @@ int dpu_rm_reserve(
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology)
+ struct msm_display_topology *topology)
{
- struct dpu_rm_requirements reqs;
int ret;
/* Check if this is just a page-flip */
@@ -710,13 +700,11 @@ int dpu_rm_reserve(
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
- if (ret) {
- DPU_ERROR("failed to populate hw requirements\n");
- return ret;
- }
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ topology->num_lm, topology->num_dsc,
+ topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
@@ -725,6 +713,88 @@ int dpu_rm_reserve(
return ret;
}
+static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs,
+ unsigned int type)
+{
+ uint32_t crtc_id = crtc->base.id;
+ struct dpu_hw_sspp *hw_sspp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
+ if (!rm->hw_sspp[i])
+ continue;
+
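+ /* skip pipes already reserved for another CRTC */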
+ if (global_state->sspp_to_crtc_id[i])
+ continue;
+
+ hw_sspp = rm->hw_sspp[i];
+
+ if (hw_sspp->cap->type != type)
+ continue;
+
+ if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
+ continue;
+
+ /* TODO: QSEED2 and RGB scalers are not yet supported */
+ if (reqs->scale && !hw_sspp->ops.setup_scaler)
+ continue;
+
+ if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
+ continue;
+
+ if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
+ continue;
+
+ global_state->sspp_to_crtc_id[i] = crtc_id;
+
+ return rm->hw_sspp[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
+ * @rm: DPU Resource Manager handle
+ * @global_state: private global state
+ * @crtc: DRM CRTC handle
+ * @reqs: SSPP required features
+ *
+ * Return: the reserved SSPP on success, NULL if none was available
+ */
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs)
+{
+ struct dpu_hw_sspp *hw_sspp = NULL;
+
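+ /*
+ * Keep the more capable pipes free: try a DMA pipe when neither
+ * scaling nor CSC is needed, an RGB pipe when only scaling is
+ * needed, and fall back to a full VIG pipe as the last resort.
+ */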
+ if (!reqs->scale && !reqs->yuv)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
+ if (!hw_sspp && reqs->scale)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
+ if (!hw_sspp)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
+
+ return hw_sspp;
+}
+
+/**
+ * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
+ * blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @crtc: DRM CRTC handle
+ */
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc)
+{
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
+ ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
+}
+
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@@ -859,4 +929,11 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->cdm_blk,
global_state->cdm_to_enc_id);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tsspp=");
+ /* skip SSPP_NONE and start from the next index */
+ for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
+ global_state->sspp_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index ea0e49cb7b0d..99bd594ee0d1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -20,6 +20,7 @@ struct dpu_global_state;
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
* @hw_wb: array of wb hardware resources
+ * @hw_cwb: array of cwb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
@@ -30,6 +31,7 @@ struct dpu_rm {
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+ struct dpu_hw_blk *cwb_blks[CWB_MAX - CWB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
@@ -37,6 +39,12 @@ struct dpu_rm {
struct dpu_hw_blk *cdm_blk;
};
+struct dpu_rm_sspp_requirements {
+ bool yuv;
+ bool scale;
+ bool rot90;
+};
+
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
@@ -63,11 +71,19 @@ int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology);
+ struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs);
+
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc);
+
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 16f144cbc0c9..8ff496082902 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
- } else if (conn_state->connector->status != connector_status_connected) {
- DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
- return -EINVAL;
}
crtc = conn_state->crtc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 576995ddce37..8bbc7fb881d5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: different regulators in other cases? */
mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
- mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
ret = devm_regulator_bulk_get(dev->dev,
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index e75b97127c0d..2be00b11e557 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
mutex_init(&kms->dump_mutex);
- kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
if (IS_ERR(kms->dump_worker))
DRM_ERROR("failed to create disp state task\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 74e01a5dd419..70fdc9fe228a 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -14,6 +14,7 @@
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
@@ -28,251 +29,64 @@ struct msm_dp_audio_private {
struct msm_dp_audio msm_dp_audio;
};
-static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
-{
- return msm_dp_catalog_audio_get_header(catalog, sdp, header);
-}
-
-static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog,
- u32 data,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
-{
- msm_dp_catalog_audio_set_header(catalog, sdp, header, data);
-}
-
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x02;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
- new_value = value;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
-
- new_value = audio->channels - 1;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
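+ /* HB1 = 0x02 selects the Audio_Stream SDP type */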
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x02,
+ .HB2 = 0x00,
+ .HB3 = audio->channels - 1,
+ };
+
+ msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x1;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x17;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x01,
+ .HB2 = 0x17,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x84;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x1b;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- new_value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
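+ /* HB1 = 0x84 carries a CTA-861 Audio InfoFrame over the SDP */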
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x84,
+ .HB2 = 0x1b,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x05;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-
- new_value = 0x0;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x05,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
{
- struct msm_dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x06;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = msm_dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = msm_dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- msm_dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x06,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
@@ -329,10 +143,10 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
safe_to_exit_level = 5;
break;
default:
+ safe_to_exit_level = 14;
drm_dbg_dp(audio->drm_dev,
"setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
- safe_to_exit_level = 14;
break;
}
@@ -414,8 +228,10 @@ static int msm_dp_audio_get_eld(struct device *dev,
return -ENODEV;
}
+ mutex_lock(&msm_dp_display->connector->eld_mutex);
memcpy(buf, msm_dp_display->connector->eld,
min(sizeof(msm_dp_display->connector->eld), len));
+ mutex_unlock(&msm_dp_display->connector->eld_mutex);
return 0;
}
@@ -537,14 +353,13 @@ int msm_dp_register_audio_driver(struct device *dev,
}
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !panel || !catalog) {
+ if (!pdev || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -561,8 +376,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
msm_dp_audio = &audio->msm_dp_audio;
- msm_dp_catalog_audio_init(catalog);
-
return msm_dp_audio;
error:
return ERR_PTR(rc);
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 1c9efaaa40e5..beea34cbab77 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
-#include "dp_panel.h"
#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
@@ -28,14 +27,12 @@ struct msm_dp_audio {
* Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @panel: an instance of msm_dp_panel module.
* @catalog: an instance of msm_dp_catalog module.
*
* Returns the error code in case of failure, otherwise
* an instance of the newly created msm_dp_audio module.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
- struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog);
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b4c8856fb25d..7b7eadb2f83b 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -79,7 +79,6 @@ struct msm_dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dss_io_data io;
- u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct msm_dp_catalog msm_dp_catalog;
};
@@ -276,43 +275,6 @@ int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_
min(wait_us, 2000), wait_us);
}
-static void dump_regs(void __iomem *base, int len)
-{
- int i;
- u32 x0, x4, x8, xc;
- u32 addr_off = 0;
-
- len = DIV_ROUND_UP(len, 16);
- for (i = 0; i < len; i++) {
- x0 = readl_relaxed(base + addr_off);
- x4 = readl_relaxed(base + addr_off + 0x04);
- x8 = readl_relaxed(base + addr_off + 0x08);
- xc = readl_relaxed(base + addr_off + 0x0c);
-
- pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
- addr_off += 16;
- }
-}
-
-void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
- struct dss_io_data *io = &catalog->io;
-
- pr_info("AHB regs\n");
- dump_regs(io->ahb.base, io->ahb.len);
-
- pr_info("AUXCLK regs\n");
- dump_regs(io->aux.base, io->aux.len);
-
- pr_info("LCLK regs\n");
- dump_regs(io->link.base, io->link.len);
-
- pr_info("P0CLK regs\n");
- dump_regs(io->p0.base, io->p0.len);
-}
-
u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
@@ -1036,7 +998,6 @@ void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
@@ -1160,38 +1121,75 @@ struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
return &catalog->msm_dp_catalog;
}
-u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header)
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct msm_dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- sdp_map = catalog->audio_map;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- return msm_dp_read_link(catalog, sdp_map[sdp][header]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
-void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header,
- u32 data)
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct msm_dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- if (!msm_dp_catalog)
- return;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- sdp_map = catalog->audio_map;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- msm_dp_write_link(catalog, sdp_map[sdp][header], data);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ struct dp_sdp_header tmp = *sdp_hdr;
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
+ tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&tmp, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
}
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
@@ -1277,47 +1275,6 @@ void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
-void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog)
-{
- struct msm_dp_catalog_private *catalog;
-
- static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
- {
- MMSS_DP_AUDIO_STREAM_0,
- MMSS_DP_AUDIO_STREAM_1,
- MMSS_DP_AUDIO_STREAM_1,
- },
- {
- MMSS_DP_AUDIO_TIMESTAMP_0,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- },
- {
- MMSS_DP_AUDIO_INFOFRAME_0,
- MMSS_DP_AUDIO_INFOFRAME_1,
- MMSS_DP_AUDIO_INFOFRAME_1,
- },
- {
- MMSS_DP_AUDIO_COPYMANAGEMENT_0,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- },
- {
- MMSS_DP_AUDIO_ISRC_0,
- MMSS_DP_AUDIO_ISRC_1,
- MMSS_DP_AUDIO_ISRC_1,
- },
- };
-
- if (!msm_dp_catalog)
- return;
-
- catalog = container_of(msm_dp_catalog,
- struct msm_dp_catalog_private, msm_dp_catalog);
-
- catalog->audio_map = sdp_map;
-}
-
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
{
struct msm_dp_catalog_private *catalog;
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index e932b17eecbf..6678b0ac9a67 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -31,22 +31,6 @@
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
-enum msm_dp_catalog_audio_sdp_type {
- DP_AUDIO_SDP_STREAM,
- DP_AUDIO_SDP_TIMESTAMP,
- DP_AUDIO_SDP_INFOFRAME,
- DP_AUDIO_SDP_COPYMANAGEMENT,
- DP_AUDIO_SDP_ISRC,
- DP_AUDIO_SDP_MAX,
-};
-
-enum msm_dp_catalog_audio_header_type {
- DP_AUDIO_SDP_HEADER_1,
- DP_AUDIO_SDP_HEADER_2,
- DP_AUDIO_SDP_HEADER_3,
- DP_AUDIO_SDP_HEADER_MAX,
-};
-
struct msm_dp_catalog {
bool wide_bus_en;
};
@@ -104,7 +88,6 @@ int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 t
u32 sync_start, u32 width_blanking, u32 msm_dp_active);
void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
-void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog);
void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode);
void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
@@ -112,17 +95,19 @@ void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
/* DP Audio APIs */
-u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header);
-void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
- enum msm_dp_catalog_audio_sdp_type sdp,
- enum msm_dp_catalog_audio_header_type header,
- u32 data);
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
-void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog);
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index bc2ca8133b79..9c463ae2f8fa 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -178,7 +178,6 @@ static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl
u32 cc, tb;
msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
msm_dp_ctrl_config_ctrl(ctrl);
@@ -2071,6 +2070,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index aff51bb973eb..3898850739ab 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -722,9 +722,6 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
{
int rc = 0;
struct device *dev = &dp->msm_dp_display.pdev->dev;
- struct msm_dp_panel_in panel_in = {
- .dev = dev,
- };
struct phy *phy;
phy = devm_phy_get(dev, "dp");
@@ -765,11 +762,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
- panel_in.aux = dp->aux;
- panel_in.catalog = dp->catalog;
- panel_in.link = dp->link;
-
- dp->panel = msm_dp_panel_get(&panel_in);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -787,7 +780,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -937,16 +930,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
- return MODE_CLOCK_HIGH;
-
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
link_info = &msm_dp_display->panel->link_info;
- if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
- msm_dp_display->panel->vsc_sdp_supported)
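+ /*
+ * In wide bus mode (and for YUV420 output) the controller runs at
+ * half the mode's pixel rate, so compare against the halved clock.
+ */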
+ if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+ msm_dp_display->panel->vsc_sdp_supported) ||
+ msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
+ return MODE_CLOCK_HIGH;
+
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index d3e241ea6941..16b7913d1eef 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -257,7 +257,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+ if (msm_dp_wide_bus_available(dp))
+ mode_pclk_khz /= 2;
+
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 5d7eaa31bf31..92415bf8aa16 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -317,17 +317,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return 0;
}
-void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel)
-{
- struct msm_dp_catalog *catalog;
- struct msm_dp_panel_private *panel;
-
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- catalog = panel->catalog;
-
- msm_dp_catalog_dump_regs(catalog);
-}
-
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
{
u32 data, total_ver, total_hor;
@@ -486,25 +475,26 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
return 0;
}
-struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in)
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!in->dev || !in->catalog || !in->aux || !in->link) {
+ if (!dev || !catalog || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
- panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
- panel->dev = in->dev;
- panel->aux = in->aux;
- panel->catalog = in->catalog;
- panel->link = in->link;
+ panel->dev = dev;
+ panel->aux = aux;
+ panel->catalog = catalog;
+ panel->link = link;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 0e944db3adf2..4906f4f09f24 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -21,13 +21,6 @@ struct msm_dp_display_mode {
bool out_fmt_is_yuv_420;
};
-struct msm_dp_panel_in {
- struct device *dev;
- struct drm_dp_aux *aux;
- struct msm_dp_link *link;
- struct msm_dp_catalog *catalog;
-};
-
struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
@@ -55,7 +48,6 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
-void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@@ -92,6 +84,7 @@ static inline bool is_lane_count_valid(u32 lane_count)
lane_count == 4);
}
-struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in);
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
index 2a40f07fe2d5..4a5ebb0c33b8 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.c
+++ b/drivers/gpu/drm/msm/dp/dp_utils.c
@@ -74,14 +74,8 @@ u8 msm_dp_utils_calculate_parity(u32 data)
return parity_byte;
}
-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
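+/* pack HB0..HB3 plus their parity bytes into two 32-bit register words */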
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2])
{
- size_t length;
-
- length = sizeof(header_buff);
- if (length < DP_SDP_HEADER_SIZE)
- return -ENOSPC;
-
header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
@@ -91,6 +85,4 @@ ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *head
FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
-
- return length;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
index 88d53157f5b5..2e4f98a863c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.h
+++ b/drivers/gpu/drm/msm/dp/dp_utils.h
@@ -31,6 +31,6 @@
u8 msm_dp_utils_get_g0_value(u8 data);
u8 msm_dp_utils_get_g1_value(u8 data);
u8 msm_dp_utils_calculate_parity(u32 data);
-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2]);
#endif /* _DP_UTILS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 10ba7d153d1c..7754dcec33d0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -286,6 +286,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4c9b4b37681b..120cb65164c1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -23,6 +23,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a98d24b7cb00..007311c21fda 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1831,7 +1831,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
ret = 0;
- if (of_property_read_bool(np, "syscon-sfpb")) {
+ if (of_property_present(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index dd58bc0a49eb..c0bcc6828963 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -567,6 +567,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_8953_cfgs },
{ .compatible = "qcom,sm6125-dsi-phy-14nm",
.data = &dsi_phy_14nm_2290_cfgs },
+ { .compatible = "qcom,sm6150-dsi-phy-14nm",
+ .data = &dsi_phy_14nm_6150_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 4953459edd63..8985818bb2e0 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -46,6 +46,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 1723f0e4faa4..2c3cbe0f2870 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1032,6 +1032,10 @@ static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
+static const struct regulator_bulk_data dsi_phy_14nm_36mA_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
@@ -1097,3 +1101,20 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 031446c87dae..798168180c1a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
+ spinlock_t pclk_mux_lock;
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
ndelay(250);
}
-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->postdiv_lock, flags);
+ writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ spin_unlock_irqrestore(&pll->postdiv_lock, flags);
+}
+
+static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
+ u32 val)
+{
+ unsigned long flags;
u32 data;
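+
+ /*
+ * Read-modify-write CLK_CFG1 under the lock so concurrent updates
+ * of different bit-fields don't clobber each other.
+ */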
+ spin_lock_irqsave(&pll->pclk_mux_lock, flags);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ data &= ~mask;
+ data |= val & mask;
+
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
- u32 data;
+ u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
-
- data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
- void __iomem *phy_base = pll_7nm->phy->base;
u32 val;
int ret;
@@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
val |= cached->pll_out_div;
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
- writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
- phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
-
- val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- val &= ~0x3;
- val |= cached->pll_mux;
- writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg0_write(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
- void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
- writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
return 0;
}
@@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, &pll_7nm->pclk_mux_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
pll_7nm_list[phy->id] = pll_7nm;
spin_lock_init(&pll_7nm->postdiv_lock);
+ spin_lock_init(&pll_7nm->pclk_mux_lock);
pll_7nm->phy = phy;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index a719fd33d9d8..33bb48ae58a2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
base <<= (digclk_divsel == 2 ? 1 : 0);
- return (base <= 2046 ? base : 2046);
+ return base;
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9c45d641b521..a7a2384044ff 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+ timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ffbcc97b5018..ff7a7a9f7b0d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,7 +11,7 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -538,7 +538,7 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
/* Only supported if per-process address space is supported: */
if (priv->gpu->aspace == ctx->aspace)
- return -EOPNOTSUPP;
+ return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -910,7 +910,6 @@ static const struct drm_driver msm_driver = {
.fops = &fops,
.name = "msm",
.desc = "MSM Snapdragon DRM",
- .date = "20130625",
.major = MSM_VERSION_MAJOR,
.minor = MSM_VERSION_MINOR,
.patchlevel = MSM_VERSION_PATCHLEVEL,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d8c9a1b19263..a65077855201 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -28,6 +28,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
@@ -506,6 +507,12 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode);
+/* Helper for returning a UABI error with optional logging which can make
+ * it easier for userspace to understand what it is doing wrong.
+ */
+#define UERR(err, drm, fmt, ...) \
+ ({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
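
A hedged sketch of how UERR() is intended to be used in an ioctl path: the message is logged under DRM_UT_DRIVER (visible only when drm.debug enables driver messages) and the negative errno is produced in the same expression. The ioctl, args struct, and flag mask below are illustrative:

static int example_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_example_args *args = data;	/* illustrative uapi struct */

	if (args->flags & ~EXAMPLE_VALID_FLAGS)
		return UERR(EINVAL, dev, "invalid flags: %x", args->flags);

	return 0;
}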
@@ -530,15 +537,12 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
- s64 remaining_jiffies;
- if (ktime_compare(*timeout, now) < 0) {
- remaining_jiffies = 0;
- } else {
- ktime_t rem = ktime_sub(*timeout, now);
- remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
- }
+ if (ktime_compare(*timeout, now) <= 0)
+ return 0;
+ ktime_t rem = ktime_sub(*timeout, now);
+ s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
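
Note the comparison also changes from < to <=, so a deadline equal to 'now' is reported as already expired (0 jiffies) instead of being clamped up to 1. A hedged usage sketch, with an illustrative wait queue and condition:

/* Sketch: convert an absolute deadline into a bounded jiffies wait. */
static int wait_until(wait_queue_head_t *wq, bool *cond, ktime_t deadline)
{
	unsigned long j = timeout_to_jiffies(&deadline);

	if (j == 0)
		return -ETIMEDOUT;	/* deadline is now or in the past */

	return wait_event_timeout(*wq, *cond, j) ? 0 : -ETIMEDOUT;
}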
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 1a5d4f1c8b42..d41e5a6bbee0 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
- hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fctx->deadline_timer.function = deadline_timer;
+ hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
kthread_init_work(&fctx->deadline_work, deadline_work);
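
This is the tree-wide migration from two-step hrtimer initialization to hrtimer_setup(), which takes the callback up front so the core can validate it at setup time. The shape of the conversion, with an illustrative callback name:

/* Before: init first, callback assigned afterwards. */
hrtimer_init(&t->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
t->timer.function = my_timer_fn;

/* After: one call, callback handed to the core directly. */
hrtimer_setup(&t->timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);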
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fba78193127d..dee470403036 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,8 +20,8 @@
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
*/
-#define SUBMIT_ERROR(submit, fmt, ...) \
- DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+#define SUBMIT_ERROR(err, submit, fmt, ...) \
+ UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
/*
* Cmdstream submission:
@@ -142,8 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
i = 0;
goto out;
}
@@ -162,8 +161,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
goto out_unlock;
}
@@ -206,14 +204,12 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
}
if (submit_cmd.size % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
goto out;
}
@@ -371,9 +367,8 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
}
if (obj)
@@ -392,10 +387,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
- if (offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
- return -EINVAL;
- }
+ if (offset % 4)
+ return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);
 /* For now, just map the entire thing. Eventually we probably
 * want to do it page-by-page, w/ kmap() if not vmap()d..
@@ -414,9 +407,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint64_t iova;
if (submit_reloc.submit_offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
goto out;
}
@@ -425,8 +417,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
goto out;
}
@@ -513,12 +504,12 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (syncobj_desc.point &&
!drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
break;
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
@@ -531,7 +522,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
syncobjs[i] =
drm_syncobj_find(file, syncobj_desc.handle);
if (!syncobjs[i]) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
break;
}
}
@@ -588,14 +579,14 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].point = syncobj_desc.point;
if (syncobj_desc.flags) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
break;
}
if (syncobj_desc.point) {
if (!drm_core_check_feature(dev,
DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
break;
}
@@ -609,7 +600,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].syncobj =
drm_syncobj_find(file, syncobj_desc.handle);
if (!post_deps[i].syncobj) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
break;
}
}
@@ -677,10 +668,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid pipe");
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid flags");
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
@@ -724,7 +715,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence");
goto out_unlock;
}
@@ -787,10 +778,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (!submit->cmd[i].size ||
- ((submit->cmd[i].size + submit->cmd[i].offset) >
- obj->size / 4)) {
- SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
- ret = -EINVAL;
+ (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
+ submit->cmd[i].size * 4);
goto out;
}
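
size_add() from <linux/overflow.h> saturates at SIZE_MAX instead of wrapping, which closes the integer-overflow hole in the old open-coded addition of two userspace-controlled values. A minimal sketch of the pattern, with illustrative names:

#include <linux/overflow.h>

/* With a plain add, size = 0xffffffff and offset = 1 wrap to 0 and
 * pass the bound; size_add() saturates to SIZE_MAX and fails it.
 */
static bool range_ok(u32 size, u32 offset, size_t limit)
{
	return size_add(size, offset) <= limit;
}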
@@ -800,8 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
continue;
if (!gpu->allow_relocs) {
- SUBMIT_ERROR(submit, "relocs not allowed\n");
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "relocs not allowed\n");
goto out;
}
@@ -827,7 +816,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence-sn");
goto out;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0d4a3744cfcb..8557998e0c92 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "gpu-worker");
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index afedd61c3e28..a6efe1eac271 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode)
{
- hrtimer_init(&work->timer, clock_id, mode);
- work->timer.function = msm_hrtimer_worktimer;
+ hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
work->worker = worker;
kthread_init_work(&work->work, fn);
}
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index f3326d09bdbc..38965e12a6bf 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -244,7 +244,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- priv->kms = NULL;
return ret;
}
@@ -269,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
- ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 76b6ae35a3cb..dcb49fd30402 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -166,22 +166,32 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
+
+ if (data->ubwc_enc_version == UBWC_1_0)
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
+
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x1) |
- (data->highest_bank_bit & 0x3) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
if (data->ubwc_enc_version == UBWC_3_0)
- value |= BIT(10);
+ value |= MDSS_UBWC_STATIC_UBWC_AMSBC;
if (data->ubwc_enc_version == UBWC_1_0)
- value |= BIT(8);
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
@@ -189,10 +199,14 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x7) |
- (data->ubwc_static & 0x1) << 3 |
- (data->highest_bank_bit & 0x7) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
@@ -572,16 +586,17 @@ static const struct msm_mdss_data sa8775p_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 4,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
- .ubwc_static = 0x1e,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -590,9 +605,9 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -600,7 +615,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -608,9 +623,9 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -632,7 +647,7 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
- .ubwc_static = 0x1e,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
@@ -655,7 +670,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
- .ubwc_static = 0x11f,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -667,14 +682,21 @@ static const struct msm_mdss_data sm6125_data = {
.highest_bank_bit = 1,
};
+static const struct msm_mdss_data sm6150_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
+};
+
static const struct msm_mdss_data sm8250_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -682,10 +704,10 @@ static const struct msm_mdss_data sm8350_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -693,10 +715,10 @@ static const struct msm_mdss_data sm8550_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 57000,
};
@@ -704,10 +726,10 @@ static const struct msm_mdss_data x1e80100_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
/* TODO: Add reg_bus_bw with real value */
};
@@ -724,6 +746,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
+ { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
index 3afef4b1786d..14dc53704314 100644
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -11,9 +11,9 @@ struct msm_mdss_data {
/* can be read from register 0x58 */
u32 ubwc_dec_version;
u32 ubwc_swizzle;
- u32 ubwc_static;
u32 highest_bank_bit;
- u32 macrotile_mode;
+ bool ubwc_bank_spread;
+ bool macrotile_mode;
u32 reg_bus_bw;
};
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 2fc3eaf81f44..7fed1de63b5d 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -18,7 +18,7 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
switch (sysprof) {
default:
- return -EINVAL;
+ return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
case 2:
pm_runtime_get_sync(&gpu->pdev->dev);
fallthrough;
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 6531749d30f4..3d2cc339b8f1 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -52,6 +52,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
<reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
<reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
+ <reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION">
+ <bitfield name="MAJOR" low="28" high="31"/>
+ <bitfield name="MINOR" low="16" high="27"/>
+ <bitfield name="STEP" low="0" high="15"/>
+ </reg32>
<reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
<reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
<reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d54b72f92449..35f7f40e405b 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00004" name="REVISION_ID1"/>
<reg32 offset="0x00008" name="REVISION_ID2"/>
<reg32 offset="0x0000c" name="REVISION_ID3"/>
- <reg32 offset="0x00010" name="CLK_CFG0"/>
- <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00010" name="CLK_CFG0">
+ <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
+ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00014" name="CLK_CFG1">
+ <bitfield name="CLK_EN" pos="5" type="boolean"/>
+ <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
+ <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ </reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml
index ac85caf1575c..6e9f81cd4690 100644
--- a/drivers/gpu/drm/msm/registers/display/mdss.xml
+++ b/drivers/gpu/drm/msm/registers/display/mdss.xml
@@ -21,7 +21,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/>
- <reg32 offset="0x00144" name="UBWC_STATIC"/>
+ <reg32 offset="0x00144" name="UBWC_STATIC">
+ <bitfield name="UBWC_SWIZZLE" low="0" high="2"/>
+ <bitfield name="UBWC_BANK_SPREAD" pos="3"/>
+ <!-- high=5 for UBWC < 4.0 -->
+ <bitfield name="HIGHEST_BANK_BIT" low="4" high="6"/>
+ <bitfield name="UBWC_MIN_ACC_LEN" low="8" high="9"/>
+ <bitfield name="UBWC_AMSBC" pos="10"/>
+ <bitfield name="MACROTILE_MODE" pos="12"/>
+ </reg32>
+
<reg32 offset="0x00150" name="UBWC_CTRL_2"/>
<reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/>
</domain>
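
These XML bitfields are consumed by the driver's header generator, which emits the MDSS_UBWC_STATIC_* field macros now used in msm_mdss.c in place of the old open-coded shifts. Conceptually the generated helpers reduce to something like the following (an illustrative expansion, not the actual generated header):

#define MDSS_UBWC_STATIC_UBWC_SWIZZLE(x)     (((x) & 0x7) << 0)
#define MDSS_UBWC_STATIC_UBWC_BANK_SPREAD    (1 << 3)
#define MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(x) (((x) & 0x7) << 4)
#define MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(x) (((x) & 0x3) << 8)
#define MDSS_UBWC_STATIC_UBWC_AMSBC          (1 << 10)
#define MDSS_UBWC_STATIC_MACROTILE_MODE      (1 << 12)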
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 51ae0b51b1e8..8ee00f59ca82 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -14,9 +14,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
@@ -248,7 +248,6 @@ static const struct drm_driver lcdif_driver = {
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
- .date = "20220417",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6b95e4eb3e4e..59020862cf65 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,9 +17,9 @@
#include <linux/property.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -336,7 +336,6 @@ static const struct drm_driver mxsfb_driver = {
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
- .date = "20160824",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ce840300578d..1050a4617fc1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index eed579a6c858..62d72b7a8d04 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -992,8 +992,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!mst_state->pbn_div.full) {
struct nouveau_encoder *outp = mstc->mstm->outp;
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
- outp->dp.link_bw, outp->dp.link_nr);
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(outp->dp.link_bw, outp->dp.link_nr);
}
slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
@@ -1265,8 +1264,8 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->mstm = mstm;
mstc->port = port;
- ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/log.h b/drivers/gpu/drm/nouveau/include/nvif/log.h
new file mode 100644
index 000000000000..64f6f8fc6141
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/log.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __NVIF_LOG_H__
+#define __NVIF_LOG_H__
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * nvif_log - structure for tracking logging buffers
+ * @entry: an entry in a list of struct nvif_logs
+ * @shutdown: pointer to function to call to clean up
+ *
+ * Structure used to track logging buffers so that they can be cleaned up
+ * when the module exits.
+ *
+ * The @shutdown function is called when the module exits. It should free all
+ * backing resources, such as logging buffers.
+ */
+struct nvif_log {
+ struct list_head entry;
+ void (*shutdown)(struct nvif_log *log);
+};
+
+/**
+ * nvif_logs - linked list of nvif_log objects
+ */
+struct nvif_logs {
+ struct list_head head;
+};
+
+#define NVIF_LOGS_DECLARE(logs) \
+ struct nvif_logs logs = { LIST_HEAD_INIT(logs.head) }
+
+static inline void nvif_log_shutdown(struct nvif_logs *logs)
+{
+ if (!list_empty(&logs->head)) {
+ struct nvif_log *log, *n;
+
+ list_for_each_entry_safe(log, n, &logs->head, entry) {
+ /* shutdown() should also delete the log entry */
+ log->shutdown(log);
+ }
+ }
+}
+
+extern struct nvif_logs gsp_logs;
+
+#endif
+
+#endif
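
A hedged sketch of the intended lifecycle: a wrapper object embeds struct nvif_log, registers itself on gsp_logs when its buffers must outlive the device, and module exit calls nvif_log_shutdown() to invoke every shutdown() callback. Everything except the nvif_log API below is illustrative:

struct my_log {
	struct nvif_log log;
	void *buf;			/* retained data, illustrative */
};

static void my_log_shutdown(struct nvif_log *entry)
{
	struct my_log *ml = container_of(entry, struct my_log, log);

	kfree(ml->buf);
	kfree(ml);			/* shutdown() frees the entry too */
}

static int my_log_register(void *buf)
{
	struct my_log *ml = kzalloc(sizeof(*ml), GFP_KERNEL);

	if (!ml)
		return -ENOMEM;

	ml->buf = buf;
	ml->log.shutdown = my_log_shutdown;
	list_add(&ml->log.entry, &gsp_logs.head);
	return 0;
}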
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index a2055f2a014a..5c5f4607fcc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -5,10 +5,13 @@
#include <core/falcon.h>
#include <core/firmware.h>
+#include <linux/debugfs.h>
+
#define GSP_PAGE_SHIFT 12
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
+ struct device *dev;
size_t size;
void *data;
dma_addr_t addr;
@@ -219,6 +222,24 @@ struct nvkm_gsp {
/* The size of the registry RPC */
size_t registry_rpc_size;
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct {
+ struct dentry *parent;
+ struct dentry *init;
+ struct dentry *rm;
+ struct dentry *intr;
+ struct dentry *pmu;
+ } debugfs;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+#endif
};
static inline bool
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 2cb2e5675807..cd659b9fd1d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
const u64 plength = 0x10000;
const u64 ioffset = plength;
const u64 ilength = 0x02000;
- char name[TASK_COMM_LEN];
int cid, ret;
u64 size;
@@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
chan->userd = &chan->user;
}
- get_task_comm(name, current);
- snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));
+ snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
&args, sizeof(args), &chan->user);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8d5c9c74cbb9..eac0d1d2dbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
- connector->status = connector_status_disconnected;
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index e83db051e851..200e65a7cefc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -313,3 +313,19 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
kfree(drm->debugfs);
drm->debugfs = NULL;
}
+
+int
+nouveau_module_debugfs_init(void)
+{
+ nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);
+ if (IS_ERR(nouveau_debugfs_root))
+ return PTR_ERR(nouveau_debugfs_root);
+
+ return 0;
+}
+
+void
+nouveau_module_debugfs_fini(void)
+{
+ debugfs_remove(nouveau_debugfs_root);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 77f0323b38ba..b7617b344ee2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -21,6 +21,11 @@ nouveau_debugfs(struct drm_device *dev)
extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
+
+extern struct dentry *nouveau_debugfs_root;
+
+int nouveau_module_debugfs_init(void);
+void nouveau_module_debugfs_fini(void);
#else
static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
@@ -37,6 +42,17 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
{
}
+static inline int
+nouveau_module_debugfs_init(void)
+{
+ return 0;
+}
+
+static inline void
+nouveau_module_debugfs_fini(void)
+{
+}
+
#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 107f63f08bd9..5664c4c71faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -30,8 +30,9 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
#include <linux/dynamic_debug.h>
+#include <linux/debugfs.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -47,6 +48,7 @@
#include <nvif/fifo.h>
#include <nvif/push006c.h>
#include <nvif/user.h>
+#include <nvif/log.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -113,6 +115,20 @@ static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
+#ifdef CONFIG_DEBUG_FS
+struct dentry *nouveau_debugfs_root;
+
+/**
+ * gsp_logs - list of nvif_log GSP-RM logging buffers
+ *
+ * Head pointer to a list of nvif_log buffers, with an entry added for each
+ * GPU upon GSP shutdown if the "keep_gsp_logging" command-line parameter is
+ * specified. This is used to track the alternative debugfs entries for the
+ * GSP-RM logs.
+ */
+NVIF_LOGS_DECLARE(gsp_logs);
+#endif
+
static u64
nouveau_pci_name(struct pci_dev *pdev)
{
@@ -1159,7 +1175,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[32], tmpname[TASK_COMM_LEN];
+ char name[32];
int ret;
/* need to bring up power immediately if opening device */
@@ -1169,10 +1185,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
return ret;
}
- get_task_comm(tmpname, current);
rcu_read_lock();
snprintf(name, sizeof(name), "%s[%d]",
- tmpname, pid_nr(rcu_dereference(fpriv->pid)));
+ current->comm, pid_nr(rcu_dereference(fpriv->pid)));
rcu_read_unlock();
if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
@@ -1326,11 +1341,6 @@ driver_stub = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-#ifdef GIT_REVISION
- .date = GIT_REVISION,
-#else
- .date = DRIVER_DATE,
-#endif
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -1423,6 +1433,8 @@ err_free:
static int __init
nouveau_drm_init(void)
{
+ int ret;
+
driver_pci = driver_stub;
driver_platform = driver_stub;
@@ -1436,6 +1448,10 @@ nouveau_drm_init(void)
if (!nouveau_modeset)
return 0;
+ ret = nouveau_module_debugfs_init();
+ if (ret)
+ return ret;
+
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_register(&nouveau_platform_driver);
#endif
@@ -1444,10 +1460,14 @@ nouveau_drm_init(void)
nouveau_backlight_ctor();
#ifdef CONFIG_PCI
- return pci_register_driver(&nouveau_drm_pci_driver);
-#else
- return 0;
+ ret = pci_register_driver(&nouveau_drm_pci_driver);
+ if (ret) {
+ nouveau_module_debugfs_fini();
+ return ret;
+ }
#endif
+
+ return 0;
}
static void __exit
@@ -1467,6 +1487,12 @@ nouveau_drm_exit(void)
#endif
if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
mmu_notifier_synchronize();
+
+#ifdef CONFIG_DEBUG_FS
+ nvif_log_shutdown(&gsp_logs);
+#endif
+
+ nouveau_module_debugfs_fini();
}
module_init(nouveau_drm_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 685d6ca3d8aa..55abc510067b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -7,7 +7,6 @@
#define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
-#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b4da82ddbb6b..8ea98f06d39a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
+ struct folio *folio;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = -EINVAL;
goto out;
}
+ folio = page_folio(page);
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(&notifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
+
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Map the page on the GPU. */
@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
mmu_interval_notifier_remove(&notifier->notifier);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index d586aea30898..58502102926b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -26,6 +26,7 @@
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>
+#include <nvif/log.h>
#include <nvfw/fw.h>
@@ -57,6 +58,8 @@
#include <linux/ctype.h>
#include <linux/parser.h>
+extern struct dentry *nouveau_debugfs_root;
+
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
@@ -121,6 +124,8 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
return mqe->data;
}
+ size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
msg = kvmalloc(repc, GFP_KERNEL);
if (!msg)
return ERR_PTR(-ENOMEM);
@@ -129,19 +134,15 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
len = min_t(u32, repc, len);
memcpy(msg, mqe->data, len);
- rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
- if (rptr == gsp->msgq.cnt)
- rptr = 0;
-
repc -= len;
if (repc) {
mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
memcpy(msg + len, mqe, repc);
-
- rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
}
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
mb();
(*gsp->msgq.rptr) = rptr;
return msg;
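
The rewritten pointer update derives the advance once from the page-aligned, header-inclusive size of the whole message, rather than accumulating per-chunk DIV_ROUND_UPs, and folds the wraparound into a single modulo. Reduced to a skeleton with illustrative parameter names:

static u32 msgq_advance(u32 rptr, u32 cnt, u32 payload, u32 hdr_size)
{
	/* full message size, header included, in whole pages */
	u32 size = ALIGN(payload + hdr_size, GSP_PAGE_SIZE);

	/* one modulo covers the wrap, however the copy was split */
	return (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % cnt;
}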
@@ -163,7 +164,7 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
u64 *end;
u64 csum = 0;
int free, time = 1000000;
- u32 wptr, size;
+ u32 wptr, size, step;
u32 off = 0;
argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
@@ -197,7 +198,9 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
}
cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, argc, step * GSP_PAGE_SIZE);
+
memcpy(cqe, (u8 *)cmd + off, size);
wptr += DIV_ROUND_UP(size, 0x1000);
@@ -1000,7 +1003,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
/*
@@ -1009,19 +1012,35 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
*/
memset(mem->data, 0xFF, mem->size);
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr);
+ put_device(mem->dev);
+
memset(mem, 0, sizeof(*mem));
}
}
+/**
+ * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects
+ * @gsp: gsp pointer
+ * @size: number of bytes to allocate
+ * @mem: nvkm_gsp_mem object to initialize
+ *
+ * Allocates a block of memory for use with GSP.
+ *
+ * This memory block can potentially out-live the driver's remove() callback,
+ * so we take a device reference to ensure its lifetime. The reference is
+ * dropped in the destructor.
+ */
static int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
- mem->size = size;
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
if (WARN_ON(!mem->data))
return -ENOMEM;
+ mem->size = size;
+ mem->dev = get_device(gsp->subdev.device->dev);
+
return 0;
}
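
Pinning the struct device with get_device() is what lets these buffers outlive the driver's remove() callback: dma_free_coherent() needs a live device, and the reference is only dropped in the destructor after the free. The pairing, reduced to a skeleton with illustrative names:

struct buf {
	struct device *dev;
	size_t size;
	void *data;
	dma_addr_t addr;
};

static int buf_alloc(struct device *dev, size_t size, struct buf *b)
{
	b->data = dma_alloc_coherent(dev, size, &b->addr, GFP_KERNEL);
	if (!b->data)
		return -ENOMEM;

	b->size = size;
	b->dev = get_device(dev);	/* pin for the buffer's lifetime */
	return 0;
}

static void buf_free(struct buf *b)
{
	dma_free_coherent(b->dev, b->size, b->data, b->addr);
	put_device(b->dev);		/* balance get_device() */
}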
@@ -1054,8 +1073,8 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_wr32(device, 0x110004, 0x00000040);
/* Release the DMA buffers that were needed only for boot and init */
- nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
- nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+ nvkm_gsp_mem_dtor(&gsp->boot.fw);
+ nvkm_gsp_mem_dtor(&gsp->libos);
return ret;
}
@@ -2060,6 +2079,215 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging
+ * debugfs entries will be deleted, and it will not be possible to debug the
+ * load failure. The keep_gsp_logging parameter tells Nouveau to copy the
+ * logging buffers to new debugfs entries, and these entries are retained
+ * until the driver unloads.
+ */
+static bool keep_gsp_logging;
+module_param(keep_gsp_logging, bool, 0444);
+MODULE_PARM_DESC(keep_gsp_logging,
+ "Migrate the GSP-RM logging debugfs entries upon exit");
+
+/*
+ * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
+ * data structures, and each engine has a "class ID" generated by a
+ * pre-processor. This is the class ID for the PMU.
+ */
+#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
+
+/**
+ * rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
+ * @ucode_eng_desc: the engine descriptor
+ * @libos_print_buf_size: the size of the libos_print_buf[]
+ * @libos_print_buf: the actual buffer
+ *
+ * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance
+ * ID". We only care about messages from PMU.
+ */
+struct rpc_ucode_libos_print_v1e_08 {
+ u32 ucode_eng_desc;
+ u32 libos_print_buf_size;
+ u8 libos_print_buf[];
+};
+
+/**
+ * r535_gsp_msg_libos_print - capture log message from the PMU
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
+ *
+ * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC
+ * contains the contents of the libos print buffer from PMU. It is typically
+ * only written to when PMU encounters an error.
+ *
+ * Technically this RPC can be used to pass print buffers from any number of
+ * GSP-RM engines, but we only expect to receive them for the PMU.
+ *
+ * For the PMU, the buffer is 4K in size and the RPC always contains the full
+ * contents.
+ */
+static int
+r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct rpc_ucode_libos_print_v1e_08 *rpc = repv;
+ unsigned int class = rpc->ucode_eng_desc >> 8;
+
+ nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n",
+ class, rpc->libos_print_buf_size);
+
+ if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
+ nvkm_warn(subdev,
+ "received libos print from unknown class 0x%x\n",
+ class);
+ return -ENOMSG;
+ }
+
+ if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
+ nvkm_error(subdev, "libos print is too large (%u bytes)\n",
+ rpc->libos_print_buf_size);
+ return -E2BIG;
+ }
+
+ memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size);
+
+ return 0;
+}
+
+/**
+ * create_debugfs - create a blob debugfs entry
+ * @gsp: gsp pointer
+ * @name: name of this dentry
+ * @blob: blob wrapper
+ *
+ * Creates a debugfs entry for a logging buffer with the name 'name'.
+ */
+static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name,
+ struct debugfs_blob_wrapper *blob)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob);
+ if (IS_ERR(dent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs entry\n", name);
+ return NULL;
+ }
+
+ /*
+ * For some reason, debugfs_create_blob doesn't set the size of the
+ * dentry, so do that here. See [1]
+ *
+ * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/
+ */
+ i_size_write(d_inode(dent), blob->size);
+
+ return dent;
+}
+
+/**
+ * r535_gsp_libos_debugfs_init - create logging debugfs entries
+ * @gsp: gsp pointer
+ *
+ * Create the debugfs entries. This exposes the log buffers to userspace so
+ * that an external tool can parse it.
+ *
+ * The 'logpmu' contains exception dumps from the PMU. It is written via an
+ * RPC sent from GSP-RM and must be only 4KB. We create it here because it's
+ * only useful if there is a debugfs entry to expose it. If we get the PMU
+ * logging RPC and there is no debugfs entry, the RPC is just ignored.
+ *
+ * The blob_init, blob_rm, and blob_pmu objects can't be transient
+ * because debugfs_create_blob doesn't copy them.
+ *
+ * NOTE: OpenRM loads the logging elf image and prints the log messages
+ * in real-time. We may add that capability in the future, but that
+ * requires loading ELF images that are not distributed with the driver and
+ * adding the parsing code to Nouveau.
+ *
+ * Ideally, this should be part of nouveau_debugfs_init(), but that function
+ * is called too late. We really want to create these debugfs entries before
+ * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize,
+ * there could still be a log to capture.
+ */
+static void
+r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+
+ /* Create a new debugfs directory with a name unique to this GPU. */
+ gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root);
+ if (IS_ERR(gsp->debugfs.parent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs root\n", dev_name(dev));
+ return;
+ }
+
+ gsp->blob_init.data = gsp->loginit.data;
+ gsp->blob_init.size = gsp->loginit.size;
+ gsp->blob_intr.data = gsp->logintr.data;
+ gsp->blob_intr.size = gsp->logintr.size;
+ gsp->blob_rm.data = gsp->logrm.data;
+ gsp->blob_rm.size = gsp->logrm.size;
+
+ gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init);
+ if (!gsp->debugfs.init)
+ goto error;
+
+ gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr);
+ if (!gsp->debugfs.intr)
+ goto error;
+
+ gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm);
+ if (!gsp->debugfs.rm)
+ goto error;
+
+ /*
+ * Since the PMU buffer is copied from an RPC, it doesn't need to be
+ * a DMA buffer.
+ */
+ gsp->blob_pmu.size = GSP_PAGE_SIZE;
+ gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL);
+ if (!gsp->blob_pmu.data)
+ goto error;
+
+ gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu);
+ if (!gsp->debugfs.pmu) {
+ kfree(gsp->blob_pmu.data);
+ goto error;
+ }
+
+ i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size);
+ i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size);
+ i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size);
+ i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size);
+
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
+ r535_gsp_msg_libos_print, gsp);
+
+ nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n");
+
+ if (keep_gsp_logging) {
+ nvkm_info(&gsp->subdev,
+ "logging buffers will be retained on failure\n");
+ }
+
+ return;
+
+error:
+ debugfs_remove(gsp->debugfs.parent);
+ gsp->debugfs.parent = NULL;
+}
+
+#endif
+
static inline u64
r535_gsp_libos_id8(const char *name)
{
@@ -2110,7 +2338,11 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
* written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
*
* The physical address map for the log buffer is stored in the buffer
- * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
+ * Initially, pp is equal to 0. If the buffer has valid logging data in it,
+ * then pp points to the index into the buffer where the next logging entry will
+ * be written. Therefore, the logging data is valid if:
+ * 1 <= pp < sizeof(buffer)/sizeof(u64)
*
* The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
* configured for a larger page size (e.g. 64K pages), we need to give
@@ -2181,6 +2413,11 @@ r535_gsp_libos_init(struct nvkm_gsp *gsp)
args[3].size = gsp->rmargs.size;
args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_libos_debugfs_init(gsp);
+#endif
+
return 0;
}
@@ -2234,8 +2471,8 @@ static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
/**
@@ -2323,9 +2560,9 @@ nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
if (ret) {
lvl2_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
lvl1_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
return ret;
@@ -2417,7 +2654,7 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
- nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
return ret;
@@ -2491,6 +2728,222 @@ r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
gsp->fws.rm = NULL;
}
+#ifdef CONFIG_DEBUG_FS
+
+struct r535_gsp_log {
+ struct nvif_log log;
+
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct dentry *debugfs_logging_dir;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+};
+
+/**
+ * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU
+ * @_log: nvif_log struct for this GPU
+ *
+ * Called when the driver is shutting down, to clean up the retained GSP-RM
+ * logging buffers.
+ */
+static void r535_debugfs_shutdown(struct nvif_log *_log)
+{
+ struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
+
+ debugfs_remove(log->debugfs_logging_dir);
+
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+
+ /* We also need to delete the list object */
+ kfree(log);
+}
+
+/**
+ * is_empty - return true if the logging buffer was never written to
+ * @b: blob wrapper with ->data field pointing to logging buffer
+ *
+ * The first 64-bit field of loginit, logintr, and logrm is the 'put'
+ * pointer, and it is initialized to 0. It's a dword-based index into the
+ * circular buffer, indicating where the next printf write will be made.
+ *
+ * If the pointer is still 0 when GSP-RM is shut down, that means that the
+ * buffer was never written to, so it can be ignored.
+ *
+ * This test also works for logpmu, even though it doesn't have a put pointer.
+ */
+static bool is_empty(const struct debugfs_blob_wrapper *b)
+{
+ u64 *put = b->data;
+
+ return put ? (*put == 0) : true;
+}
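
Together with the buffer-layout comment earlier (offset 0 holds the dword-index 'put' pointer), the full validity test described there amounts to the following sketch, with illustrative names:

/* A log buffer holds retainable data iff 1 <= put < size/sizeof(u64). */
static bool log_has_data(const u64 *buf, size_t size)
{
	u64 put = buf[0];

	return put >= 1 && put < size / sizeof(u64);
}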
+
+/**
+ * r535_gsp_copy_log - preserve the logging buffers in a blob
+ *
+ * When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
+ * To preserve the logging buffers, the buffers need to be copied, but only
+ * if they actually have data.
+ */
+static int r535_gsp_copy_log(struct dentry *parent,
+ const char *name,
+ const struct debugfs_blob_wrapper *s,
+ struct debugfs_blob_wrapper *t)
+{
+ struct dentry *dent;
+ void *p;
+
+ if (is_empty(s))
+ return 0;
+
+ /* The original buffers will be deleted */
+ p = kmemdup(s->data, s->size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ t->data = p;
+ t->size = s->size;
+
+ dent = debugfs_create_blob(name, 0444, parent, t);
+ if (IS_ERR(dent)) {
+ kfree(p);
+ memset(t, 0, sizeof(*t));
+ return PTR_ERR(dent);
+ }
+
+ i_size_write(d_inode(dent), t->size);
+
+ return 0;
+}
+
+/**
+ * r535_gsp_retain_logging - copy logging buffers to new debugfs root
+ * @gsp: gsp pointer
+ *
+ * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
+ * buffers and their debugfs entries, but all those objects would normally be
+ * deleted if GSP-RM fails to load.
+ *
+ * To preserve the logging buffers, we need to:
+ *
+ * 1) Allocate new buffers and copy the logs into them, so that the original
+ * DMA buffers can be released.
+ *
+ * 2) Preserve the directories. We don't need to save single dentries because
+ * we're going to delete the parent when the module unloads, which removes
+ * its children recursively.
+ *
+ * If anything fails in this process, then all the dentries need to be
+ * deleted. We don't need to deallocate the original logging buffers because
+ * the caller will do that regardless.
+ */
+static void r535_gsp_retain_logging(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+ struct r535_gsp_log *log = NULL;
+ int ret;
+
+ if (!keep_gsp_logging || !gsp->debugfs.parent) {
+ /* Nothing to do */
+ goto exit;
+ }
+
+ /* Check to make sure at least one buffer has data. */
+ if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
+ is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
+ nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
+ goto exit;
+ }
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto error;
+
+ /*
+ * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
+ * objects are also being deleted, which means the dentries will no
+ * longer be valid. Delete the existing entries so that we can create
+ * new ones with the same name.
+ */
+ debugfs_remove(gsp->debugfs.init);
+ debugfs_remove(gsp->debugfs.intr);
+ debugfs_remove(gsp->debugfs.rm);
+ debugfs_remove(gsp->debugfs.pmu);
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
+ if (ret)
+ goto error;
+
+ /* The nvkm_gsp object is going away, so save the dentry */
+ log->debugfs_logging_dir = gsp->debugfs.parent;
+
+ log->log.shutdown = r535_debugfs_shutdown;
+ list_add(&log->log.entry, &gsp_logs.head);
+
+ nvkm_warn(&gsp->subdev,
+ "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
+ dev_name(dev));
+
+ return;
+
+error:
+ nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");
+
+exit:
+ debugfs_remove(gsp->debugfs.parent);
+
+ if (log) {
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+ kfree(log);
+ }
+}
+
+#endif
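[Editor's note] The struct r535_gsp_log container used above is defined elsewhere in this file; the following sketch is inferred purely from the accesses in this hunk. The embedded log member is assumed to be the driver's struct nvif_log carrying the list entry and shutdown hook; names may differ in the real definition:

struct r535_gsp_log {
        struct nvif_log log;                    /* ->entry list node, ->shutdown hook */
        struct dentry *debugfs_logging_dir;     /* preserved debugfs parent */
        struct debugfs_blob_wrapper blob_init;
        struct debugfs_blob_wrapper blob_intr;
        struct debugfs_blob_wrapper blob_rm;
        struct debugfs_blob_wrapper blob_pmu;
};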
+
+/**
+ * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
+ * @gsp: gsp pointer
+ *
+ * If the log buffers are exposed via debugfs, the data for those entries
+ * needs to be cleaned up when the GSP device shuts down.
+ */
+static void
+r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
+{
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_retain_logging(gsp);
+
+ /*
+ * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
+ * exists only if the debugfs entries were created.
+ */
+ kfree(gsp->blob_pmu.data);
+ gsp->blob_pmu.data = NULL;
+#endif
+}
+
void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
@@ -2498,7 +2951,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->client_id.mutex);
nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
- nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+ nvkm_gsp_mem_dtor(&gsp->sig);
nvkm_firmware_dtor(&gsp->fw);
nvkm_falcon_fw_dtor(&gsp->booter.unload);
@@ -2509,12 +2962,15 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
r535_gsp_dtor_fws(gsp);
- nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
- nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
- nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
- nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
- nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
- nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
+ nvkm_gsp_mem_dtor(&gsp->rmargs);
+ nvkm_gsp_mem_dtor(&gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(&gsp->shm.mem);
+
+ r535_gsp_libos_debugfs_fini(gsp);
+
+ nvkm_gsp_mem_dtor(&gsp->loginit);
+ nvkm_gsp_mem_dtor(&gsp->logintr);
+ nvkm_gsp_mem_dtor(&gsp->logrm);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index a6f410ba60bc..d393bc540f86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e27376121606..054b71dba6a7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -28,7 +28,6 @@
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
-#define DRIVER_DATE "20110917"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -653,7 +652,6 @@ static const struct drm_driver omap_drm_driver = {
.fops = &omapdriver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index f4bd0c6e3f34..7b6396890681 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -6,7 +6,7 @@
#include <linux/fb.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 94a46241dece..f8511fe5fb0d 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1802,6 +1802,12 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
.powered_on_to_enable = 200,
};
+static const struct panel_delay delay_200_150_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 150,
+ .enable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1913,6 +1919,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
@@ -1963,6 +1970,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1118, &delay_200_500_e50, "KD116N29-30NK-A005"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 45d09e6fa667..7d68a8acfe2e 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
if (jadard->desc->lp11_to_reset_delay_ms)
msleep(jadard->desc->lp11_to_reset_delay_ms);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(5);
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
msleep(10);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(130);
ret = jadard->desc->init(jadard);
@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(jadard->reset)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
return PTR_ERR(jadard->reset);
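[Editor's note] The polarity flip above follows gpiolib's logical-value convention: 1 means "asserted", and any ACTIVE_LOW inversion declared in the device tree is applied by the GPIO core. Assuming the panel's reset line is marked GPIO_ACTIVE_LOW in DT, the corrected sequence reads (illustration, not from the patch):

reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);   /* request with reset asserted */
gpiod_set_value(reset, 0);                              /* release reset */
gpiod_set_value(reset, 1);                              /* assert reset pulse */
gpiod_set_value(reset, 0);                              /* release; panel boots */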
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index a9b5dad70bc1..87bbb25d119a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -9,6 +9,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 4618c892cdd6..e10e469aa7a6 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -400,7 +400,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
/* Look up the DSI host. It needs to probe before we do. */
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return -ENODEV;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index ed53787d1dea..364f1c9a16d9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -11,6 +11,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index a0e5698275a5..6917ffda5b2b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 222c170dde8b..9b2f128fd309 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3222,6 +3222,33 @@ static const struct panel_desc mitsubishi_aa084xe01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing multi_inno_mi0700a2t_30_timing = {
+ .pixelclock = { 26400000, 33000000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 204, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 6, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 3, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi0700a2t_30 = {
+ .timings = &multi_inno_mi0700a2t_30_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 153,
+ .height = 92,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing multi_inno_mi0700s4t_6_timing = {
.pixelclock = { 29000000, 33000000, 38000000 },
.hactive = { 800, 800, 800 },
@@ -3313,6 +3340,33 @@ static const struct panel_desc multi_inno_mi1010ait_1cp = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing multi_inno_mi1010z1t_1cp11_timing = {
+ .pixelclock = { 40800000, 51200000, 67200000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 30, 110, 130 },
+ .hback_porch = { 30, 110, 130 },
+ .hsync_len = { 30, 100, 116 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 4, 13, 80 },
+ .vback_porch = { 4, 13, 80 },
+ .vsync_len = { 2, 9, 40 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi1010z1t_1cp11 = {
+ .timings = &multi_inno_mi1010z1t_1cp11_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 162,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@@ -4280,6 +4334,45 @@ static const struct panel_desc tianma_tm070jvhg33 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
+/*
+ * The datasheet computes total blanking as back porch + front porch, not
+ * including sync pulse width. This is for both H and V. To make the total
+ * blanking and period correct, subtract the pulse width from the front
+ * porch.
+ *
+ * This works well for the Min and Typ values, but for Max values the sync
+ * pulse width is higher than back porch + front porch, so work around that
+ * by reducing the Max sync length value to 1 and then treating the Max
+ * porches as in the Min and Typ cases.
+ *
+ * Exact datasheet values are added as a comment where they differ from the
+ * ones implemented for the above reason.
+ */
+static const struct display_timing tianma_tm070jdhg34_00_timing = {
+ .pixelclock = { 68400000, 71900000, 78100000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 130, 138, 158 }, /* 131, 139, 159 */
+ .hback_porch = { 5, 5, 5 },
+ .hsync_len = { 1, 1, 1 }, /* 1, 1, 256 */
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 2, 39, 98 }, /* 3, 40, 99 */
+ .vback_porch = { 2, 2, 2 },
+ .vsync_len = { 1, 1, 1 }, /* 1, 1, 128 */
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
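[Editor's note] Checking the adjustment described in the comment against the quoted datasheet numbers: horizontally, the typical front porch is 139 minus the sync length of 1, giving the 138 used above; in the maximum column the sync (256) is first clamped to 1, so 159 - 1 = 158. Vertically, 40 - 1 = 39 for the typical value and, with the 128 maximum sync clamped to 1, 99 - 1 = 98.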
+
+static const struct panel_desc tianma_tm070jdhg34_00 = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing tianma_tm070rvhg71_timing = {
.pixelclock = { 27700000, 29200000, 39600000 },
.hactive = { 800, 800, 800 },
@@ -4361,6 +4454,37 @@ static const struct panel_desc ti_nspire_classic_lcd_panel = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing topland_tian_g07017_01_timing = {
+ .pixelclock = { 44900000, 51200000, 63000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 16, 160, 216 },
+ .hback_porch = { 160, 160, 160 },
+ .hsync_len = { 1, 1, 140 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 127 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 20 },
+};
+
+static const struct panel_desc topland_tian_g07017_01 = {
+ .timings = &topland_tian_g07017_01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 1, /* 6.5 - 150µs PLL wake-up time */
+ .enable = 100, /* 6.4 - Power on: 6 VSyncs */
+ .disable = 84, /* 6.4 - Power off: 5 Vsyncs */
+ .unprepare = 50, /* 6.4 - Power off: 3 Vsyncs */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
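[Editor's note] The millisecond values above are consistent with the datasheet's VSync counts at a nominal 60 Hz refresh rate (an inference from the numbers, not stated in the patch): 6 VSyncs = 6/60 s = 100 ms, 5 VSyncs ≈ 83.3 ms rounded up to 84, and 3 VSyncs = 50 ms.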
+
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.clock = 79500,
.hdisplay = 1280,
@@ -4906,6 +5030,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "mitsubishi,aa084xe01",
.data = &mitsubishi_aa084xe01,
}, {
+ .compatible = "multi-inno,mi0700a2t-30",
+ .data = &multi_inno_mi0700a2t_30,
+ }, {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
@@ -4915,6 +5042,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
+ .compatible = "multi-inno,mi1010z1t-1cp11",
+ .data = &multi_inno_mi1010z1t_1cp11,
+ }, {
.compatible = "nec,nl12880bc20-05",
.data = &nec_nl12880bc20_05,
}, {
@@ -5023,6 +5153,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "tianma,tm070jdhg34-00",
+ .data = &tianma_tm070jdhg34_00,
+ }, {
.compatible = "tianma,tm070jvhg33",
.data = &tianma_tm070jvhg33,
}, {
@@ -5038,6 +5171,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
+ .compatible = "topland,tian-g07017-01",
+ .data = &topland_tian_g07017_01,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 272490b9565b..be3a9797fbce 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -193,7 +193,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
- ctx->panel.dev = dev;
ctx->dsi = dsi;
ctx->supplies[0].supply = "vdda";
@@ -201,13 +200,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->supplies[1].supply = "vdd3p3";
ctx->supplies[1].init_load_uA = 13200;
- ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
- ctx->supplies);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
- "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
@@ -215,8 +212,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &visionox_rm69299_drm_funcs;
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ee3864476eb9..0f3935556ac7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -636,7 +636,6 @@ static const struct drm_driver panfrost_drm_driver = {
.fops = &panfrost_drm_driver_fops,
.name = "panfrost",
.desc = "panfrost DRM",
- .date = "20180908",
.major = 1,
.minor = 3,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f5abde3866fb..174e190ba40f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -236,6 +236,10 @@ static const struct panfrost_model gpu_models[] = {
*/
GPU_MODEL(g57, 0x9003,
GPU_REV(g57, 0, 0)),
+
+ /* MediaTek MT8188 Mali-G57 MC3 */
+ GPU_MODEL(g57, 0x9093,
+ GPU_REV(g57, 0, 0)),
};
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index ecc7a52bd688..3686515d368d 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -243,26 +243,26 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
return 0;
}
-int panthor_devfreq_resume(struct panthor_device *ptdev)
+void panthor_devfreq_resume(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
panthor_devfreq_reset(pdevfreq);
- return devfreq_resume_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_resume_device(pdevfreq->devfreq));
}
-int panthor_devfreq_suspend(struct panthor_device *ptdev)
+void panthor_devfreq_suspend(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
- return devfreq_suspend_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_suspend_device(pdevfreq->devfreq));
}
void panthor_devfreq_record_busy(struct panthor_device *ptdev)
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
index 83a5c9522493..b7631de695f7 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.h
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -12,8 +12,8 @@ struct panthor_devfreq;
int panthor_devfreq_init(struct panthor_device *ptdev);
-int panthor_devfreq_resume(struct panthor_device *ptdev);
-int panthor_devfreq_suspend(struct panthor_device *ptdev);
+void panthor_devfreq_resume(struct panthor_device *ptdev);
+void panthor_devfreq_suspend(struct panthor_device *ptdev);
void panthor_devfreq_record_busy(struct panthor_device *ptdev);
void panthor_devfreq_record_idle(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 6fbff516c1c1..0a37cfeeb181 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -22,6 +22,24 @@
#include "panthor_regs.h"
#include "panthor_sched.h"
+static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
+{
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ if (!ptdev->coherent)
+ return 0;
+
+ /* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
+ * ACE protocol has never been supported for command stream frontend GPUs.
+ */
+ if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
+ GPU_COHERENCY_PROT_BIT(ACE_LITE)))
+ return 0;
+
+ drm_err(&ptdev->base, "Coherency not supported by the device");
+ return -ENOTSUPP;
+}
+
static int panthor_clk_init(struct panthor_device *ptdev)
{
ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
@@ -156,7 +174,9 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ return ret;
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
@@ -415,6 +435,22 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
return 0;
}
+static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
+{
+ int ret;
+
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+
+ ret = panthor_fw_resume(ptdev);
+ if (!ret)
+ return 0;
+
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ return ret;
+}
+
int panthor_device_resume(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
@@ -437,22 +473,20 @@ int panthor_device_resume(struct device *dev)
if (ret)
goto err_disable_stacks_clk;
- ret = panthor_devfreq_resume(ptdev);
- if (ret)
- goto err_disable_coregroup_clk;
+ panthor_devfreq_resume(ptdev);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- if (!ret) {
- panthor_sched_resume(ptdev);
- } else {
- panthor_mmu_suspend(ptdev);
- panthor_gpu_suspend(ptdev);
+ ret = panthor_device_resume_hw_components(ptdev);
+ if (ret && ptdev->reset.fast) {
+ drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
+ ptdev->reset.fast = false;
+ ret = panthor_device_resume_hw_components(ptdev);
}
+ if (!ret)
+ panthor_sched_resume(ptdev);
+
drm_dev_exit(cookie);
if (ret)
@@ -476,8 +510,6 @@ int panthor_device_resume(struct device *dev)
err_suspend_devfreq:
panthor_devfreq_suspend(ptdev);
-
-err_disable_coregroup_clk:
clk_disable_unprepare(ptdev->clks.coregroup);
err_disable_stacks_clk:
@@ -488,13 +520,14 @@ err_disable_core_clk:
err_set_suspended:
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ atomic_set(&ptdev->pm.recovery_needed, 1);
return ret;
}
int panthor_device_suspend(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
- int ret, cookie;
+ int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return -EINVAL;
@@ -526,36 +559,11 @@ int panthor_device_suspend(struct device *dev)
drm_dev_exit(cookie);
}
- ret = panthor_devfreq_suspend(ptdev);
- if (ret) {
- if (panthor_device_is_initialized(ptdev) &&
- drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- panthor_sched_resume(ptdev);
- drm_dev_exit(cookie);
- }
-
- goto err_set_active;
- }
+ panthor_devfreq_suspend(ptdev);
clk_disable_unprepare(ptdev->clks.coregroup);
clk_disable_unprepare(ptdev->clks.stacks);
clk_disable_unprepare(ptdev->clks.core);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
return 0;
-
-err_set_active:
- /* If something failed and we have to revert back to an
- * active state, we also need to clear the MMIO userspace
- * mappings, so any dumb pages that were mapped while we
- * were trying to suspend gets invalidated.
- */
- mutex_lock(&ptdev->pm.mmio_lock);
- atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
- unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
- DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
- mutex_unlock(&ptdev->pm.mmio_lock);
- return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 0e68f5a70d20..da6574021664 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -156,6 +157,17 @@ struct panthor_device {
/** @pending: Set to true if a reset is pending. */
atomic_t pending;
+
+ /**
+ * @fast: True if the post_reset logic can proceed with a fast reset.
+ *
+ * A fast reset is just a reset where the driver doesn't reload the FW sections.
+ *
+ * Any time the firmware is properly suspended, a fast reset can take place.
+ * On the other hand, if the halt operation failed, the driver will reload
+ * all FW sections to make sure we start from a fresh state.
+ */
+ bool fast;
} reset;
/** @pm: Power management related data. */
@@ -180,6 +192,9 @@ struct panthor_device {
* is suspended.
*/
struct page *dummy_latest_flush;
+
+ /** @recovery_needed: True when a resume attempt failed. */
+ atomic_t recovery_needed;
} pm;
/** @profile_mask: User-set profiling flags for job accounting. */
@@ -243,6 +258,28 @@ int panthor_device_mmap_io(struct panthor_device *ptdev,
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
+static inline int panthor_device_resume_and_get(struct panthor_device *ptdev)
+{
+ int ret = pm_runtime_resume_and_get(ptdev->base.dev);
+
+ /* If the resume failed, we need to clear the runtime_error, which
+ * can be done by forcing the RPM state to suspended. If multiple
+ * threads called panthor_device_resume_and_get(), we only want
+ * one of them to update the state, hence the cmpxchg. Note that a
+ * thread might enter panthor_device_resume_and_get() and call
+ * pm_runtime_resume_and_get() after another thread had attempted
+ * to resume and failed. This means we will end up with an error
+ * without even attempting a resume ourselves. The only risk here
+ * is to report an error when the second resume attempt might have
+ * succeeded. Given resume errors are not expected, this is probably
+ * something we can live with.
+ */
+ if (ret && atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
+ pm_runtime_set_suspended(ptdev->base.dev);
+
+ return ret;
+}
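[Editor's note] A hypothetical caller sketch showing the intended usage of the new helper (the function name and body here are made up; the panthor_drv.c and panthor_sched.c hunks below convert the real call sites):

static int panthor_do_hw_access(struct panthor_device *ptdev)
{
        int ret = panthor_device_resume_and_get(ptdev);

        if (ret)
                return ret;     /* recovery state already reconciled by the helper */

        /* ... touch the hardware ... */

        pm_runtime_mark_last_busy(ptdev->base.dev);
        pm_runtime_put_autosuspend(ptdev->base.dev);
        return 0;
}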
+
enum drm_panthor_exception_type {
DRM_PANTHOR_EXCEPTION_OK = 0x00,
DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 0b3fbee3d37a..08136e790ca0 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -763,7 +763,7 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
{
int ret;
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (ret)
return ret;
@@ -802,6 +802,7 @@ static void panthor_query_group_priorities_info(struct drm_file *file,
{
int prio;
+ memset(arg, 0, sizeof(*arg));
for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
if (!group_priority_permit(file, prio))
arg->allowed_mask |= BIT(prio);
@@ -1493,6 +1494,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
+ * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1505,9 +1507,8 @@ static const struct drm_driver panthor_drm_driver = {
.fops = &panthor_drm_driver_fops,
.name = "panthor",
.desc = "Panthor DRM driver",
- .date = "20230801",
.major = 1,
- .minor = 2,
+ .minor = 3,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ecca5565ce41..68eb4fb4d3a8 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -12,6 +12,7 @@
#include <linux/iosys-map.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
@@ -91,26 +92,26 @@ enum panthor_fw_binary_entry_type {
#define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
#define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
-
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
- (CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS \
+ (CSF_FW_BINARY_IFACE_ENTRY_RD | \
+ CSF_FW_BINARY_IFACE_ENTRY_WR | \
+ CSF_FW_BINARY_IFACE_ENTRY_EX | \
+ CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK | \
+ CSF_FW_BINARY_IFACE_ENTRY_PROT | \
+ CSF_FW_BINARY_IFACE_ENTRY_SHARED | \
+ CSF_FW_BINARY_IFACE_ENTRY_ZERO)
/**
* struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
@@ -262,17 +263,6 @@ struct panthor_fw {
/** @booted: True if the FW is booted */
bool booted;
- /**
- * @fast_reset: True if the post_reset logic can proceed with a fast reset.
- *
- * A fast reset is just a reset where the driver doesn't reload the FW sections.
- *
- * Any time the firmware is properly suspended, a fast reset can take place.
- * On the other hand, if the halt operation failed, the driver will reload
- * all sections to make sure we start from a fresh state.
- */
- bool fast_reset;
-
/** @irq: Job irq data. */
struct panthor_irq irq;
};
@@ -413,7 +403,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
int ret;
if (!section->data.size &&
- !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+ !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO))
return;
ret = panthor_kernel_bo_vmap(section->mem);
@@ -421,7 +411,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
return;
memcpy(section->mem->kmap, section->data.buf, section->data.size);
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO) {
memset(section->mem->kmap + section->data.size, 0,
panthor_kernel_bo_size(section->mem) - section->data.size);
}
@@ -535,20 +525,20 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
- if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+ if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS) {
drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
hdr.flags);
return -EINVAL;
}
- if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+ if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_PROT) {
drm_warn(&ptdev->base,
"Firmware protected mode entry not be supported, ignoring");
return 0;
}
if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
- !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+ !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED)) {
drm_err(&ptdev->base,
"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
return -EINVAL;
@@ -587,26 +577,26 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section_size = hdr.va.end - hdr.va.start;
if (section_size) {
- u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+ u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
struct panthor_gem_object *bo;
u32 vm_map_flags = 0;
struct sg_table *sgt;
u64 va = hdr.va.start;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_EX))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
- /* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+ /* TODO: CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_*_COHERENT are mapped to
* non-cacheable for now. We might want to introduce a new
* IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
* memory and is currently not used by our driver) for
* AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
* of IO-coherent systems.
*/
- if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+ if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED)
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
@@ -619,7 +609,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
return -EINVAL;
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED) {
ret = panthor_kernel_bo_vmap(section->mem);
if (ret)
return ret;
@@ -689,7 +679,7 @@ panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
list_for_each_entry(section, &ptdev->fw->sections, node) {
struct sg_table *sgt;
- if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
continue;
panthor_fw_init_section_mem(ptdev, section);
@@ -1089,7 +1079,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
/* Make sure we won't be woken up by a ping. */
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- ptdev->fw->fast_reset = false;
+ ptdev->reset.fast = false;
if (!on_hang) {
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
@@ -1098,17 +1088,11 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000) &&
- glb_iface->output->halt_status == PANTHOR_FW_HALT_OK) {
- ptdev->fw->fast_reset = true;
+ status == MCU_STATUS_HALT, 10, 100000)) {
+ ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
}
-
- /* The FW detects 0 -> 1 transitions. Make sure we reset
- * the HALT bit before the FW is rebooted.
- */
- panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
panthor_job_irq_suspend(&ptdev->fw->irq);
@@ -1130,41 +1114,30 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
if (ret)
return ret;
- /* If this is a fast reset, try to start the MCU without reloading
- * the FW sections. If it fails, go for a full reset.
- */
- if (ptdev->fw->fast_reset) {
- ret = panthor_fw_start(ptdev);
- if (!ret)
- goto out;
-
- /* Forcibly reset the MCU and force a slow reset, so we get a
- * fresh boot on the next panthor_fw_start() call.
+ if (!ptdev->reset.fast) {
+ /* On a slow reset, reload all sections, including RO ones.
+ * We're not supposed to end up here anyway, so let's just assume
+ * the overhead of reloading everything is acceptable.
*/
- panthor_fw_stop(ptdev);
- ptdev->fw->fast_reset = false;
- drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+ panthor_reload_fw_sections(ptdev, true);
+ } else {
+ /* The FW detects 0 -> 1 transitions. Make sure we reset
+ * the HALT bit before the FW is rebooted.
+ * This is not needed on a slow reset because FW sections are
+ * re-initialized.
+ */
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
- ret = panthor_vm_flush_all(ptdev->fw->vm);
- if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
- return ret;
- }
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
- /* Reload all sections, including RO ones. We're not supposed
- * to end up here anyway, let's just assume the overhead of
- * reloading everything is acceptable.
- */
- panthor_reload_fw_sections(ptdev, true);
-
ret = panthor_fw_start(ptdev);
if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW )");
+ drm_err(&ptdev->base, "FW %s reset failed",
+ ptdev->reset.fast ? "fast" : "slow");
return ret;
}
-out:
/* We must re-initialize the global interface even on fast-reset. */
panthor_fw_init_global_iface(ptdev);
return 0;
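[Editor's note] For readability, the control flow that results from this hunk, as reconstructed from the added lines:

/*
 * panthor_fw_post_reset():
 *   slow reset -> reload every FW section, RO and RW alike;
 *   fast reset -> only clear GLB_HALT, so the FW sees the next
 *                 0 -> 1 transition of the HALT request;
 *   then start the MCU, reporting a "fast" or "slow" reset failure,
 *   and re-initialize the global interface in both cases.
 */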
@@ -1188,11 +1161,13 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- /* Make sure the IRQ handler can be called after that point. */
- if (ptdev->fw->irq.irq)
- panthor_job_irq_suspend(&ptdev->fw->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) {
+ /* Make sure the IRQ handler cannot be called after that point. */
+ if (ptdev->fw->irq.irq)
+ panthor_job_irq_suspend(&ptdev->fw->irq);
- panthor_fw_stop(ptdev);
+ panthor_fw_stop(ptdev);
+ }
list_for_each_entry(section, &ptdev->fw->sections, node)
panthor_kernel_bo_destroy(section->mem);
@@ -1205,7 +1180,8 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
panthor_vm_put(ptdev->fw->vm);
ptdev->fw->vm = NULL;
- panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 2d3529a0b156..671049020afa 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -77,6 +77,12 @@ static const struct panthor_model gpu_models[] = {
GPU_IRQ_RESET_COMPLETED | \
GPU_IRQ_CLEAN_CACHES_COMPLETED)
+static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
+{
+ gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
+ ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
+}
+
static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
const struct panthor_model *model;
@@ -174,7 +180,8 @@ void panthor_gpu_unplug(struct panthor_device *ptdev)
unsigned long flags;
/* Make sure the IRQ handler is not running after that point. */
- panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
/* Wake-up all waiters. */
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
@@ -365,6 +372,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
hweight64(ptdev->gpu_info.shader_present));
}
+ /* Set the desired coherency mode before the power up of L2 */
+ panthor_gpu_coherency_set(ptdev);
+
return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
@@ -460,11 +470,12 @@ int panthor_gpu_soft_reset(struct panthor_device *ptdev)
*/
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
- /*
- * It may be preferable to simply power down the L2, but for now just
- * soft-reset which will leave the L2 powered down.
- */
- panthor_gpu_soft_reset(ptdev);
+ /* On a fast reset, simply power down the L2. */
+ if (!ptdev->reset.fast)
+ panthor_gpu_soft_reset(ptdev);
+ else
+ panthor_gpu_power_off(ptdev, L2, 1, 20000);
+
panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index a49132f3778b..c39e3eb1c15d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1941,7 +1941,7 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
return pool;
}
-static u64 mair_to_memattr(u64 mair)
+static u64 mair_to_memattr(u64 mair, bool coherent)
{
u64 memattr = 0;
u32 i;
@@ -1960,14 +1960,21 @@ static u64 mair_to_memattr(u64 mair)
AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
} else {
- /* Use SH_CPU_INNER mode so SH_IS, which is used when
- * IOMMU_CACHE is set, actually maps to the standard
- * definition of inner-shareable and not Mali's
- * internal-shareable mode.
- */
out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
- AS_MEMATTR_AARCH64_SH_CPU_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
}
memattr |= (u64)out_attr << (8 * i);
@@ -2339,7 +2346,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
goto err_sched_fini;
mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
- vm->memattr = mair_to_memattr(mair);
+ vm->memattr = mair_to_memattr(mair, ptdev->coherent);
mutex_lock(&ptdev->mmu->vm.lock);
list_add_tail(&vm->node, &ptdev->mmu->vm.list);
@@ -2665,7 +2672,8 @@ int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm
*/
void panthor_mmu_unplug(struct panthor_device *ptdev)
{
- panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
mutex_lock(&ptdev->mmu->as.slots_lock);
for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ef4bec7ff9c7..77b184c3fb0c 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -611,6 +611,16 @@ struct panthor_group {
bool timedout;
/**
+ * @innocent: True when the group becomes unusable because the group suspension
+ * failed during a reset.
+ *
+ * Sometimes the FW is put in a bad state by other groups, causing the group
+ * suspension done in the reset path to fail. In that case, we consider the
+ * group innocent.
+ */
+ bool innocent;
+
+ /**
* @syncobjs: Pool of per-queue synchronization objects.
*
* One sync object per queue. The position of the sync object is
@@ -2354,7 +2364,7 @@ static void tick_work(struct work_struct *work)
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
goto out_dev_exit;
@@ -2690,6 +2700,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ /* If the group was still usable before that point, we consider
+ * it innocent.
+ */
+ if (group_can_run(csg_slot->group))
+ csg_slot->group->innocent = true;
+
/* We consider group suspension failures as fatal and flag the
* group as unusable by setting timedout=true.
*/
@@ -3115,7 +3131,7 @@ queue_run_job(struct drm_sched_job *sched_job)
return dma_fence_get(job->done_fence);
}
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
return ERR_PTR(ret);
@@ -3570,6 +3586,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
get_state->fatal_queues = group->fatal_queues;
}
+ if (group->innocent)
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
mutex_unlock(&sched->lock);
group_put(group);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 13362150b9c6..56ff6a3fb483 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -45,9 +45,9 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -220,7 +220,6 @@ static const struct drm_driver pl111_drm_driver = {
.fops = &drm_fops,
.name = "pl111",
.desc = DRIVER_DESC,
- .date = "20170317",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 98a148bea628..69427eb8bed2 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 21f752644242..417061ae59eb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,9 +34,9 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_file.h>
@@ -300,7 +300,6 @@ static struct drm_driver qxl_driver = {
.num_ioctls = ARRAY_SIZE(qxl_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = 0,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 32069acd93f8..cc02b5f10ad9 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,12 +38,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
@@ -54,7 +54,6 @@ struct iosys_map;
#define DRIVER_NAME "qxl"
#define DRIVER_DESC "RH QXL"
-#define DRIVER_DATE "20120117"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -101,7 +100,8 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct ttm_validate_buffer tv;
+ struct qxl_bo *bo;
+ struct list_head list;
};
struct qxl_crtc {
@@ -150,7 +150,7 @@ struct qxl_release {
struct qxl_bo *release_bo;
uint32_t release_offset;
uint32_t surface_release_id;
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
struct list_head bos;
};
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..05204a6a3fa8 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -121,13 +121,11 @@ qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
- struct qxl_bo *bo;
entry = container_of(release->bos.next,
- struct qxl_bo_list, tv.head);
- bo = to_qxl_bo(entry->tv.bo);
- qxl_bo_unref(&bo);
- list_del(&entry->tv.head);
+ struct qxl_bo_list, list);
+ qxl_bo_unref(&entry->bo);
+ list_del(&entry->list);
kfree(entry);
}
release->release_bo = NULL;
@@ -172,8 +170,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
- list_for_each_entry(entry, &release->bos, tv.head) {
- if (entry->tv.bo == &bo->tbo)
+ list_for_each_entry(entry, &release->bos, list) {
+ if (entry->bo == bo)
return 0;
}
@@ -182,9 +180,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
return -ENOMEM;
qxl_bo_ref(bo);
- entry->tv.bo = &bo->tbo;
- entry->tv.num_shared = 0;
- list_add_tail(&entry->tv.head, &release->bos);
+ entry->bo = bo;
+ list_add_tail(&entry->list, &release->bos);
return 0;
}
@@ -221,21 +218,28 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
- if (ret)
- return ret;
-
- list_for_each_entry(entry, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
- ret = qxl_release_validate_bo(bo);
- if (ret) {
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
- return ret;
+ drm_exec_init(&release->exec, no_intr ? 0 :
+ DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&release->exec) {
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = drm_exec_prepare_obj(&release->exec,
+ &entry->bo->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&release->exec);
+ if (ret)
+ goto error;
}
}
+
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = qxl_release_validate_bo(entry->bo);
+ if (ret)
+ goto error;
+ }
return 0;
+error:
+ drm_exec_fini(&release->exec);
+ return ret;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -245,7 +249,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
if (list_is_singular(&release->bos))
return;
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ drm_exec_fini(&release->exec);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -404,18 +408,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_buffer_object *bo;
struct ttm_device *bdev;
- struct ttm_validate_buffer *entry;
+ struct qxl_bo_list *entry;
struct qxl_device *qdev;
+ struct qxl_bo *bo;
/* if only one object is on the release, it's the release itself;
since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
- bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
+ bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+ bdev = bo->tbo.bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
@@ -426,14 +430,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- list_for_each_entry(entry, &release->bos, head) {
+ list_for_each_entry(entry, &release->bos, list) {
bo = entry->bo;
- dma_resv_add_fence(bo->base.resv, &release->base,
+ dma_resv_add_fence(bo->tbo.base.resv, &release->base,
DMA_RESV_USAGE_READ);
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
}
- ww_acquire_fini(&release->ticket);
+ drm_exec_fini(&release->exec);
}
-
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9c6c74a75778..f51bace9555d 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -13,6 +13,7 @@ config DRM_RADEON
select DRM_TTM
select DRM_TTM_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
+ select DRM_EXEC
select SND_HDA_COMPONENT if SND_HDA_CORE
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 05c13102a8cb..d22889fbfa9c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r300_gpu_init(struct radeon_device *rdev)
+/* rs400_gpu_init also calls this! */
+void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fd8a4513025f..8605c074d9f7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,8 +75,8 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_suballoc.h>
@@ -457,7 +457,8 @@ struct radeon_mman {
struct radeon_bo_list {
struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
+ struct list_head list;
+ bool shared;
uint64_t gpu_offset;
unsigned preferred_domains;
unsigned allowed_domains;
@@ -1030,6 +1031,7 @@ struct radeon_cs_parser {
struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
+ struct drm_exec exec;
/* indices of various chunks */
struct radeon_cs_chunk *chunk_ib;
struct radeon_cs_chunk *chunk_relocs;
@@ -1043,7 +1045,6 @@ struct radeon_cs_parser {
u32 cs_flags;
u32 ring;
s32 priority;
- struct ww_acquire_ctx ticket;
};
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 1e00f6b99f94..8f5e07834fcc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
*/
extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
+extern void r300_gpu_init(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 5b69cc8011b4..8d64ba18572e 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -775,8 +775,10 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
if (!dig->pin || dig->pin->id != port)
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a6700d7278bf..64b26bfeafc9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -182,11 +182,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
}
- p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.num_shared = !r->write_domain;
-
- radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
- priority);
+ p->relocs[i].shared = !r->write_domain;
+ radeon_cs_buckets_add(&buckets, &p->relocs[i].list, priority);
}
radeon_cs_buckets_get_list(&buckets, &p->validated);
@@ -197,7 +194,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (need_mmap_lock)
mmap_read_lock(current->mm);
- r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+ r = radeon_bo_list_validate(p->rdev, &p->exec, &p->validated, p->ring);
if (need_mmap_lock)
mmap_read_unlock(current->mm);
@@ -253,12 +250,11 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
struct radeon_bo_list *reloc;
int r;
- list_for_each_entry(reloc, &p->validated, tv.head) {
+ list_for_each_entry(reloc, &p->validated, list) {
struct dma_resv *resv;
resv = reloc->robj->tbo.base.resv;
- r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.num_shared);
+ r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->shared);
if (r)
return r;
}
@@ -276,6 +272,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
s32 priority = 0;
INIT_LIST_HEAD(&p->validated);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
if (!cs->num_chunks) {
return 0;
@@ -397,8 +394,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
- struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, list);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, list);
/* Sort A before B if A is smaller. */
if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
@@ -417,11 +414,13 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* If error is set than unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (!error) {
+ struct radeon_bo_list *reloc;
+
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
@@ -433,15 +432,17 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &parser->ib.fence->base);
- } else if (backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
+ list_for_each_entry(reloc, &parser->validated, list) {
+ dma_resv_add_fence(reloc->robj->tbo.base.resv,
+ &parser->ib.fence->base,
+ reloc->shared ?
+ DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
+ }
}
+ drm_exec_fini(&parser->exec);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
struct radeon_bo *bo = parser->relocs[i].robj;
@@ -693,7 +694,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -707,7 +708,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
if (r) {
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -724,7 +725,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
out:
- radeon_cs_parser_fini(&parser, r, true);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
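
The radeon_cs changes above replace the ttm_eu_* reservation helpers with the drm_exec ww-mutex helper: buffers are locked inside a drm_exec_until_all_locked() retry loop, and on success the job fence is attached to each reservation object explicitly. A minimal sketch of that shape, assuming a plain array of GEM objects (num_objs, objs and fence are illustrative):

	struct drm_exec exec;
	int i, r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < num_objs; i++) {
			/* Lock the object and reserve one fence slot. */
			r = drm_exec_prepare_obj(&exec, objs[i], 1);
			drm_exec_retry_on_contention(&exec);
			if (r && r != -EALREADY)
				goto out;
		}
	}

	/* ... submit the job and obtain 'fence' ... */

	for (i = 0; i < num_objs; i++)
		dma_resv_add_fence(objs[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
out:
	drm_exec_fini(&exec);	/* drops every lock taken above */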
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5e958cc223f4..267f082bc430 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,7 +37,7 @@
#include <linux/mmu_notifier.h>
#include <linux/pci.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -603,7 +603,6 @@ static const struct drm_driver kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 02a65971d140..0f3dbffc492d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20080528"
/* Interface history:
*
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bf2d4b16dc2a..f86773f3db20 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -605,33 +605,40 @@ out:
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
- struct ttm_validate_buffer tv, *entry;
- struct radeon_bo_list *vm_bos;
- struct ww_acquire_ctx ticket;
+ struct radeon_bo_list *vm_bos, *entry;
struct list_head list;
+ struct drm_exec exec;
unsigned domain;
int r;
INIT_LIST_HEAD(&list);
- tv.bo = &bo_va->bo->tbo;
- tv.num_shared = 1;
- list_add(&tv.head, &list);
-
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r)
- goto error_free;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ list_for_each_entry(entry, &list, list) {
+ r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
- list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
+ r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
+
+ list_for_each_entry(entry, &list, list) {
+ domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
- goto error_unreserve;
+ goto error_cleanup;
}
mutex_lock(&bo_va->vm->mutex);
@@ -645,10 +652,8 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
-error_unreserve:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_free:
+error_cleanup:
+ drm_exec_fini(&exec);
kvfree(vm_bos);
if (r && r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7672404fdb29..a0fc0801abb0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -464,23 +464,26 @@ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
}
int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring)
{
struct ttm_operation_ctx ctx = { true, false };
struct radeon_bo_list *lobj;
- struct list_head duplicates;
- int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
+ int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
+ drm_exec_until_all_locked(exec) {
+ list_for_each_entry(lobj, head, list) {
+ r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(exec);
+ if (unlikely(r && r != -EALREADY))
+ return r;
+ }
}
- list_for_each_entry(lobj, head, tv.head) {
+ list_for_each_entry(lobj, head, list) {
struct radeon_bo *bo = lobj->robj;
if (!bo->tbo.pin_count) {
u32 domain = lobj->preferred_domains;
@@ -519,7 +522,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(ticket, head);
return r;
}
}
@@ -527,11 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->tiling_flags = bo->tiling_flags;
}
- list_for_each_entry(lobj, &duplicates, tv.head) {
- lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
- lobj->tiling_flags = lobj->robj->tiling_flags;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 39cc87a59a9a..d7bbb52db546 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -152,7 +152,7 @@ extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d1871af967d4..2355a78e1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
+ uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index c38b4d5d6a14..21a5340aefdf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,10 +142,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].robj = vm->page_directory;
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.num_shared = 1;
+ list[0].shared = true;
list[0].tiling_flags = 0;
- list_add(&list[0].tv.head, head);
+ list_add(&list[0].list, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
@@ -154,10 +153,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].robj = vm->page_tables[i].bo;
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.num_shared = 1;
+ list[idx].shared = true;
list[idx].tiling_flags = 0;
- list_add(&list[idx++].tv.head, head);
+ list_add(&list[idx++].list, head);
}
return list;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d6c18fd740ec..13cd0a688a65 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
static void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: is this correct ? */
- r420_pipes_init(rdev);
+	/* Earlier code called r420_pipes_init() and then
+	 * rs400_mc_wait_for_idle(rdev). The problem is that, at least
+	 * on a Mobility Radeon Xpress 200M RC410 card that takes this
+	 * code path, the r420 pipe initialization method ends up with
+	 * num_gb_pipes == 3 while the card seems to have only one pipe.
+	 *
+	 * The problem showed up as HyperZ glitches, see:
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=110897
+	 *
+	 * Delegating initialization to the r300 code seems to work and
+	 * results in proper pipe numbers. The rs400 cards are said to
+	 * be not r400- but r300-class cards.
+	 */
+ r300_gpu_init(rdev);
+
if (rs400_mc_wait_for_idle(rdev)) {
pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
RREG32(RADEON_MC_STATUS));
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index f9ecc334c024..d948ff3594c4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -18,8 +18,8 @@
#include <linux/slab.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -546,6 +546,23 @@ static const struct rcar_du_device_info rcar_du_r8a779g0_info = {
.dsi_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779h0_info = {
+ .gen = 4,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
+ .channels_mask = BIT(0),
+ .routes = {
+ /* R8A779H0 has one MIPI DSI output. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ },
+ .num_rpf = 5,
+ .dsi_clk_mask = BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -572,6 +589,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ .compatible = "renesas,du-r8a779g0", .data = &rcar_du_r8a779g0_info },
+ { .compatible = "renesas,du-r8a779h0", .data = &rcar_du_r8a779h0_info },
{ }
};
@@ -611,7 +629,6 @@ static const struct drm_driver rcar_du_driver = {
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
- .date = "20130110",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
index 2ccd2581f544..068c106e586c 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
@@ -107,10 +107,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
*/
rcrtc = rcdu->crtcs;
num_crtcs = rcdu->num_crtcs;
- } else if (rcdu->info->gen >= 3 && rgrp->num_crtcs > 1) {
+ } else if ((rcdu->info->gen == 3 && rgrp->num_crtcs > 1) ||
+ rcdu->info->gen == 4) {
/*
* On Gen3, dot clocks are set up through per-group registers,
* only available when the group has two channels.
+ * On Gen4 the registers are there for single-channel groups too.
*/
rcrtc = &rcdu->crtcs[rgrp->index * 2];
num_crtcs = rgrp->num_crtcs;
@@ -185,11 +187,21 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
- /* Apply planes to CRTCs association. */
- mutex_lock(&rgrp->lock);
- rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
- rgrp->dptsr_planes);
- mutex_unlock(&rgrp->lock);
+ /*
+ * DPTSR is used to select the source for the planes of a group. The
+ * first source is chosen by writing 0 to the respective bits, and this
+ * is always the default value of the register. In other words, writing
+ * DPTSR is only needed if the SoC supports choosing the second source.
+ *
+ * The SoC documentation seems to confirm this, as the DPTSR register
+ * is not documented if only the first source exists on that SoC.
+ */
+ if (rgrp->channels_mask & BIT(1)) {
+ mutex_lock(&rgrp->lock);
+ rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
+ rgrp->dptsr_planes);
+ mutex_unlock(&rgrp->lock);
+ }
}
/*
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 8180625d5866..3c0c18d5249a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
- (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
break;
usleep_range(1000, 2000);
@@ -1081,6 +1081,8 @@ static const struct rcar_mipi_dsi_device_info v4h_data = {
static const struct of_device_id rcar_mipi_dsi_of_table[] = {
{ .compatible = "renesas,r8a779a0-dsi-csi2-tx", .data = &v3u_data },
{ .compatible = "renesas,r8a779g0-dsi-csi2-tx", .data = &v4h_data },
+ /* DSI in r8a779h0 is identical to r8a779g0 */
+ { .compatible = "renesas,r8a779h0-dsi-csi2-tx", .data = &v4h_data },
{ }
};
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index f8114d11f2d1..a6b276f1d6ee 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -142,7 +142,6 @@
#define CLOCKSET1 0x101c
#define CLOCKSET1_LOCK_PHY (1 << 17)
-#define CLOCKSET1_LOCK (1 << 16)
#define CLOCKSET1_CLKSEL (1 << 8)
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
index c4c1474d487e..6e7aac6219be 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -28,7 +28,6 @@
#include "rzg2l_du_vsp.h"
#define DU_MCR0 0x00
-#define DU_MCR0_DPI_OE BIT(0)
#define DU_MCR0_DI_EN BIT(8)
#define DU_DITR0 0x10
@@ -217,14 +216,9 @@ static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
{
- struct rzg2l_du_crtc_state *rstate = to_rzg2l_crtc_state(rcrtc->crtc.state);
struct rzg2l_du_device *rcdu = rcrtc->dev;
- u32 val = DU_MCR0_DI_EN;
- if (rstate->outputs & BIT(RZG2L_DU_OUTPUT_DPAD0))
- val |= DU_MCR0_DPI_OE;
-
- writel(start ? val : 0, rcdu->mmio + DU_MCR0);
+ writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
}
static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index b069efd8ffc3..cbd9b9841267 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -12,8 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -84,7 +84,6 @@ static const struct drm_driver rzg2l_du_driver = {
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
.desc = "Renesas RZ/G2L Display Unit",
- .date = "20230410",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 339cbaaea0b5..564ab4cb3d37 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_panel.h>
@@ -24,6 +25,22 @@
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
+static enum drm_mode_status
+rzg2l_du_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
+
+ if (renc->output == RZG2L_DU_OUTPUT_DPAD0 && mode->clock > 83500)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_encoder_helper_funcs rzg2l_du_encoder_helper_funcs = {
+ .mode_valid = rzg2l_du_encoder_mode_valid,
+};
+
int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
enum rzg2l_du_output output,
struct device_node *enc_node)
@@ -48,6 +65,7 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
return PTR_ERR(renc);
renc->output = output;
+ drm_encoder_helper_add(&renc->base, &rzg2l_du_encoder_helper_funcs);
/* Attach the bridge to the encoder. */
ret = drm_bridge_attach(&renc->base, bridge, NULL,
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index b99217b4e05d..90c6269ccd29 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -311,11 +311,11 @@ int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
/*
- * The RZ DU uses the VSP1 for memory access, and is limited
- * to frame sizes of 1920x1080.
+ * The RZ DU was designed to support a frame size of 1920x1200 (landscape)
+ * or 1200x1920 (portrait).
*/
dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 1080;
+ dev->mode_config.max_height = 1920;
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index 76ee3e16077c..2f31822b2245 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -17,8 +17,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -107,7 +107,6 @@ static const struct drm_driver shmob_drm_driver = {
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
- .date = "20120424",
.major = 1,
.minor = 0,
};
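
The .date removal seen here repeats across every driver in this series: the field was dropped from struct drm_driver, so only name/desc and the version numbers remain. A minimal sketch of the resulting initializer (all identifiers illustrative, not taken from any driver above):

static const struct drm_driver example_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &example_fops,
	.name			= "example",
	.desc			= "Example DRM driver",
	.major			= 1,
	.minor			= 0,
};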
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3ac579615749..26c4410b2407 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -11,6 +11,7 @@ config DRM_ROCKCHIP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_DW_MIPI_DSI2 if ROCKCHIP_DW_MIPI_DSI2
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
@@ -82,6 +83,15 @@ config ROCKCHIP_DW_MIPI_DSI
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
+config ROCKCHIP_DW_MIPI_DSI2
+ bool "Rockchip specific extensions for Synopsys DW MIPI DSI2"
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare DSI2 driver. If you want to
+ enable MIPI DSI on RK3576 or RK3588 based SoC, you should
+ select this option.
+
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 3eab662a5a1d..2b867cebbc12 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -13,6 +13,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 546d13f19f9b..0844175c37c5 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -2,7 +2,7 @@
/*
* Rockchip SoC DP (Display Port) interface driver.
*
- * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
* Jeff Chen <jeff.chen@rock-chips.com>
@@ -386,7 +386,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
return -ENODEV;
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
return ret;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index ff9d95e2c4d4..b17de83b988b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
@@ -885,7 +885,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.mute_stream = cdn_dp_audio_mute_stream,
.get_eld = cdn_dp_audio_get_eld,
.hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
@@ -896,6 +895,7 @@ static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
.spdif = 1,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dp->audio_pdev = platform_device_register_data(
@@ -947,9 +947,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
{
struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
event_work);
- struct drm_connector *connector = &dp->connector;
- enum drm_connector_status old_status;
-
int ret;
mutex_lock(&dp->lock);
@@ -1009,11 +1006,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
-
- old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
- if (old_status != connector->status)
- drm_kms_helper_hotplug_event(dp->drm_dev);
+ drm_connector_helper_hpd_irq_event(&dp->connector);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 8e6e95d269da..17498f576ce7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com>
- * Copyright (C) 2016 ROCKCHIP, Inc.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
*/
#ifndef _CDN_DP_CORE_H
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 33fb4d05c506..924fb1d3ece2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
index c7780ae3272a..13ed8cbdbafa 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 1b64b6e39cc8..3398160ad75e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Chris Zhong <zyw@rock-chips.com>
* Nickey Yang <nickey.yang@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
new file mode 100644
index 000000000000..cdd490778756
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Rockchip Electronics Co., Ltd.
+ * Author:
+ * Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <uapi/linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+
+#define PSEC_PER_SEC 1000000000000LL
+
+struct dsigrf_reg {
+ u16 offset;
+ u16 lsb;
+ u16 msb;
+};
+
+enum grf_reg_fields {
+ TXREQCLKHS_EN,
+ GATING_EN,
+ IPI_SHUTDN,
+ IPI_COLORM,
+ IPI_COLOR_DEPTH,
+ IPI_FORMAT,
+ MAX_FIELDS,
+};
+
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+
+struct rockchip_dw_dsi2_chip_data {
+ u32 reg;
+ const struct dsigrf_reg *grf_regs;
+ unsigned long long max_bit_rate_per_lane;
+};
+
+struct dw_mipi_dsi2_rockchip {
+ struct device *dev;
+ struct rockchip_encoder encoder;
+ struct regmap *regmap;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 format;
+
+ struct regmap *grf_regmap;
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+
+ struct dw_mipi_dsi2 *dmd;
+ struct dw_mipi_dsi2_plat_data pdata;
+ const struct rockchip_dw_dsi2_chip_data *cdata;
+};
+
+static inline struct dw_mipi_dsi2_rockchip *to_dsi2(struct drm_encoder *encoder)
+{
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ return container_of(rkencoder, struct dw_mipi_dsi2_rockchip, encoder);
+}
+
+static void grf_field_write(struct dw_mipi_dsi2_rockchip *dsi2, enum grf_reg_fields index,
+ unsigned int val)
+{
+ const struct dsigrf_reg *field = &dsi2->cdata->grf_regs[index];
+
+ if (!field)
+ return;
+
+ regmap_write(dsi2->grf_regmap, field->offset,
+ (val << field->lsb) | (GENMASK(field->msb, field->lsb) << 16));
+}
+
+static int dw_mipi_dsi2_phy_init(void *priv_data)
+{
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = phy_set_mode(dsi2->phy, PHY_MODE_MIPI_DPHY);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to set phy mode: %d\n", ret);
+ return;
+ }
+
+ phy_configure(dsi2->phy, &dsi2->phy_opts);
+ phy_power_on(dsi2->phy);
+}
+
+static void dw_mipi_dsi2_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ phy_power_off(dsi2->phy);
+}
+
+static int
+dw_mipi_dsi2_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ u64 max_lane_rate, target_phyclk;
+ unsigned int lane_rate_kbps;
+ int bpp;
+
+ max_lane_rate = dsi2->cdata->max_bit_rate_per_lane;
+
+ dsi2->format = format;
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ if (bpp < 0) {
+ dev_err(dsi2->dev, "failed to get bpp for pixel format %d\n", format);
+ return bpp;
+ }
+
+ lane_rate_kbps = mode->clock * bpp / lanes;
+
+ /*
+	 * In video burst mode only, set the bandwidth a little larger
+	 * to account for the protocol overhead and the HS-to-BLLP mode
+	 * switching; use a factor of 1 / 0.9, since the per-lane Mbps
+	 * must be larger than the RGB bandwidth.
+ */
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ lane_rate_kbps = (lane_rate_kbps * 10) / 9;
+
+ if (lane_rate_kbps > max_lane_rate) {
+ dev_err(dsi2->dev, "DPHY clock frequency is out of range\n");
+ return -ERANGE;
+ }
+
+ dsi2->lane_mbps = lane_rate_kbps / 1000;
+ *lane_mbps = dsi2->lane_mbps;
+
+ if (dsi2->phy) {
+ target_phyclk = DIV_ROUND_CLOSEST_ULL(lane_rate_kbps * lanes * 1000, bpp);
+ phy_mipi_dphy_get_default_config(target_phyclk, bpp, lanes,
+ &dsi2->phy_opts.mipi_dphy);
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_get_iface(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface)
+{
+ /* PPI width is fixed to 16 bits in DCPHY */
+ iface->ppi_width = 16;
+ iface->phy_type = DW_MIPI_DSI2_DPHY;
+}
+
+static int
+dw_mipi_dsi2_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi2->phy_opts.mipi_dphy;
+ unsigned long long tmp, ui;
+ unsigned long long hstx_clk;
+
+ hstx_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ ui = ALIGN(PSEC_PER_SEC, hstx_clk);
+ do_div(ui, hstx_clk);
+
+ /* PHY_LP2HS_TIME = (TLPX + THS-PREPARE + THS-ZERO) / Tphy_hstx_clk */
+ tmp = cfg->lpx + cfg->hs_prepare + cfg->hs_zero;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_lp2hs = tmp;
+
+ /* PHY_HS2LP_TIME = (THS-TRAIL + THS-EXIT) / Tphy_hstx_clk */
+ tmp = cfg->hs_trail + cfg->hs_exit;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_hs2lp = tmp;
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_phy_ops dw_mipi_dsi2_rockchip_phy_ops = {
+ .init = dw_mipi_dsi2_phy_init,
+ .power_on = dw_mipi_dsi2_phy_power_on,
+ .power_off = dw_mipi_dsi2_phy_power_off,
+ .get_interface = dw_mipi_dsi2_phy_get_iface,
+ .get_lane_mbps = dw_mipi_dsi2_get_lane_mbps,
+ .get_timing = dw_mipi_dsi2_phy_get_timing,
+};
+
+static void dw_mipi_dsi2_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ u32 color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ default:
+ /* Should've been caught by atomic_check */
+ WARN_ON(1);
+ return;
+ }
+
+ grf_field_write(dsi2, IPI_COLOR_DEPTH, color_depth);
+}
+
+static int
+dw_mipi_dsi2_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_display_info *info = &connector->display_info;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (info->num_bus_formats)
+ s->bus_format = info->bus_formats[0];
+ else
+ s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
+ s->bus_flags = info->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi2_encoder_helper_funcs = {
+ .atomic_enable = dw_mipi_dsi2_encoder_atomic_enable,
+ .atomic_check = dw_mipi_dsi2_encoder_atomic_check,
+};
+
+static int rockchip_dsi2_drm_create_encoder(struct dw_mipi_dsi2_rockchip *dsi2,
+ struct drm_device *drm_dev)
+{
+ struct drm_encoder *encoder = &dsi2->encoder.encoder;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dsi2->dev->of_node);
+
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_mipi_dsi2_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = rockchip_dsi2_drm_create_encoder(dsi2, drm_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create drm encoder\n");
+
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dsi2->encoder,
+ dev->of_node, 0, 0);
+
+ ret = dw_mipi_dsi2_bind(dsi2->dmd, &dsi2->encoder.encoder);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to bind\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+
+ dw_mipi_dsi2_unbind(dsi2->dmd);
+}
+
+static const struct component_ops dw_mipi_dsi2_rockchip_ops = {
+ .bind = dw_mipi_dsi2_rockchip_bind,
+ .unbind = dw_mipi_dsi2_rockchip_unbind,
+};
+
+static int dw_mipi_dsi2_rockchip_host_attach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = component_add(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+ if (ret)
+ return dev_err_probe(dsi2->dev, ret, "Failed to register component\n");
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_host_detach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ component_del(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_host_ops dw_mipi_dsi2_rockchip_host_ops = {
+ .attach = dw_mipi_dsi2_rockchip_host_attach,
+ .detach = dw_mipi_dsi2_rockchip_host_detach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_rockchip_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int dw_mipi_dsi2_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rockchip_dw_dsi2_chip_data *cdata =
+ of_device_get_match_data(dev);
+ struct dw_mipi_dsi2_rockchip *dsi2;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Unable to get dsi registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base, &dw_mipi_dsi2_rockchip_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_probe(dev, PTR_ERR(dsi2->regmap), "failed to init register map\n");
+
+ i = 0;
+ while (cdata[i].reg) {
+ if (cdata[i].reg == res->start) {
+ dsi2->cdata = &cdata[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dsi2->cdata)
+ return dev_err_probe(dev, -EINVAL, "No dsi-config for %s node\n", np->name);
+
+ dsi2->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(dsi2->grf_regmap))
+ return dev_err_probe(dsi2->dev, PTR_ERR(dsi2->grf_regmap), "Unable to get grf\n");
+
+ dsi2->phy = devm_phy_optional_get(dev, "dcphy");
+ if (IS_ERR(dsi2->phy))
+ return dev_err_probe(dev, PTR_ERR(dsi2->phy), "failed to get mipi phy\n");
+
+ dsi2->dev = dev;
+ dsi2->pdata.regmap = dsi2->regmap;
+ dsi2->pdata.max_data_lanes = 4;
+ dsi2->pdata.phy_ops = &dw_mipi_dsi2_rockchip_phy_ops;
+ dsi2->pdata.host_ops = &dw_mipi_dsi2_rockchip_host_ops;
+ dsi2->pdata.priv_data = dsi2;
+ platform_set_drvdata(pdev, dsi2);
+
+ dsi2->dmd = dw_mipi_dsi2_probe(pdev, &dsi2->pdata);
+ if (IS_ERR(dsi2->dmd))
+ return dev_err_probe(dev, PTR_ERR(dsi2->dmd), "Failed to probe dw_mipi_dsi2\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = platform_get_drvdata(pdev);
+
+ dw_mipi_dsi2_remove(dsi2->dmd);
+}
+
+static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0000, 11, 11 },
+ [GATING_EN] = { 0x0000, 10, 10 },
+ [IPI_SHUTDN] = { 0x0000, 9, 9 },
+ [IPI_COLORM] = { 0x0000, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0000, 4, 7 },
+ [IPI_FORMAT] = { 0x0000, 0, 3 },
+};
+
+static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0004, 11, 11 },
+ [GATING_EN] = { 0x0004, 10, 10 },
+ [IPI_SHUTDN] = { 0x0004, 9, 9 },
+ [IPI_COLORM] = { 0x0004, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0004, 4, 7 },
+ [IPI_FORMAT] = { 0x0004, 0, 3 },
+};
+
+static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
+ {
+ .reg = 0xfde20000,
+ .grf_regs = rk3588_dsi0_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ },
+ {
+ .reg = 0xfde30000,
+ .grf_regs = rk3588_dsi1_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ }
+};
+
+static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3588-mipi-dsi2",
+ .data = &rk3588_chip_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi2_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi2_rockchip_driver = {
+ .probe = dw_mipi_dsi2_rockchip_probe,
+ .remove = dw_mipi_dsi2_rockchip_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi2_rockchip_dt_ids,
+ .name = "dw-mipi-dsi2",
+ },
+};
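
grf_field_write() above relies on the Rockchip GRF write-mask convention: the upper 16 bits of the written word act as a write-enable mask for the lower 16 bits, so a field can be updated without a read-modify-write cycle. A standalone sketch of the idiom (register offset and field bounds are made up for illustration):

#include <linux/bits.h>
#include <linux/regmap.h>

/* Update bits [7:4] of a GRF register to 'val' without RMW. */
static void example_grf_field_write(struct regmap *grf, unsigned int val)
{
	const unsigned int offset = 0x0000, lsb = 4, msb = 7;

	regmap_write(grf, offset,
		     (val << lsb) | (GENMASK(msb, lsb) << 16));
}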
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 42bda4ffbbbd..e7a6669c46b0 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014, Rockchip Electronics Co., Ltd.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index c8b362cc2b95..e498767a0a66 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -28,20 +28,26 @@
#define RK3588_GRF_SOC_CON2 0x0308
#define RK3588_HDMI0_HPD_INT_MSK BIT(13)
#define RK3588_HDMI0_HPD_INT_CLR BIT(12)
+#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
+#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
#define RK3588_GRF_SOC_CON7 0x031c
#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
#define RK3588_GRF_SOC_STATUS1 0x0384
#define RK3588_HDMI0_LEVEL_INT BIT(16)
+#define RK3588_HDMI1_LEVEL_INT BIT(24)
#define RK3588_GRF_VO1_CON3 0x000c
+#define RK3588_GRF_VO1_CON6 0x0018
#define RK3588_SCLIN_MASK BIT(9)
#define RK3588_SDAIN_MASK BIT(10)
#define RK3588_MODE_MASK BIT(11)
#define RK3588_I2S_SEL_MASK BIT(13)
#define RK3588_GRF_VO1_CON9 0x0024
#define RK3588_HDMI0_GRANT_SEL BIT(10)
+#define RK3588_HDMI1_GRANT_SEL BIT(12)
#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define HOTPLUG_DEBOUNCE_MS 150
+#define MAX_HDMI_PORT_NUM 2
struct rockchip_hdmi_qp {
struct device *dev;
@@ -53,6 +59,7 @@ struct rockchip_hdmi_qp {
struct phy *phy;
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
+ int port_id;
};
static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder)
@@ -127,20 +134,24 @@ dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
u32 val;
regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val);
+ val &= hdmi->port_id ? RK3588_HDMI1_LEVEL_INT : RK3588_HDMI0_LEVEL_INT;
- return val & RK3588_HDMI0_LEVEL_INT ?
- connector_status_connected : connector_status_disconnected;
+ return val ? connector_status_connected : connector_status_disconnected;
}
static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
{
struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
- regmap_write(hdmi->regmap,
- RK3588_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR |
- RK3588_HDMI0_HPD_INT_MSK));
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
@@ -173,8 +184,12 @@ static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
if (intr_stat) {
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
- RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
+ RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
+ RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_WAKE_THREAD;
}
@@ -191,22 +206,44 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
if (!intr_stat)
return IRQ_NONE;
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
- val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_HANDLED;
}
+struct rockchip_hdmi_qp_cfg {
+ unsigned int num_ports;
+ unsigned int port_ids[MAX_HDMI_PORT_NUM];
+ const struct dw_hdmi_qp_phy_ops *phy_ops;
+};
+
+static const struct rockchip_hdmi_qp_cfg rk3588_hdmi_cfg = {
+ .num_ports = 2,
+ .port_ids = {
+ 0xfde80000,
+ 0xfdea0000,
+ },
+ .phy_ops = &rk3588_hdmi_phy_ops,
+};
+
static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = {
{ .compatible = "rockchip,rk3588-dw-hdmi-qp",
- .data = &rk3588_hdmi_phy_ops },
+ .data = &rk3588_hdmi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
@@ -214,17 +251,15 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
- static const char * const clk_names[] = {
- "pclk", "earc", "aud", "hdp", "hclk_vo1",
- "ref" /* keep "ref" last */
- };
struct platform_device *pdev = to_platform_device(dev);
+ const struct rockchip_hdmi_qp_cfg *cfg;
struct dw_hdmi_qp_plat_data plat_data;
struct drm_device *drm = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct rockchip_hdmi_qp *hdmi;
- struct clk *clk;
+ struct resource *res;
+ struct clk_bulk_data *clks;
int ret, irq, i;
u32 val;
@@ -235,12 +270,31 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (!hdmi)
return -ENOMEM;
- plat_data.phy_ops = of_device_get_match_data(dev);
- if (!plat_data.phy_ops)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
return -ENODEV;
- plat_data.phy_data = hdmi;
hdmi->dev = &pdev->dev;
+ hdmi->port_id = -ENODEV;
+
+ /* Identify port ID by matching base IO address */
+ for (i = 0; i < cfg->num_ports; i++) {
+ if (res->start == cfg->port_ids[i]) {
+ hdmi->port_id = i;
+ break;
+ }
+ }
+ if (hdmi->port_id < 0) {
+ drm_err(hdmi, "Failed to match HDMI port ID\n");
+ return hdmi->port_id;
+ }
+
+ plat_data.phy_ops = cfg->phy_ops;
+ plat_data.phy_data = hdmi;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -270,18 +324,22 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->vo_regmap);
}
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- clk = devm_clk_get_enabled(hdmi->dev, clk_names[i]);
+ ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
+ if (ret < 0) {
+ drm_err(hdmi, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "Failed to get %s clock: %d\n",
- clk_names[i], ret);
- return ret;
+ for (i = 0; i < ret; i++) {
+ if (!strcmp(clks[i].id, "ref")) {
+ hdmi->ref_clk = clks[i].clk;
+ break;
}
}
- hdmi->ref_clk = clk;
+ if (!hdmi->ref_clk) {
+ drm_err(hdmi, "Missing ref clock\n");
+ return -EINVAL;
+ }
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
@@ -303,17 +361,26 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
RK3588_SET_HPD_PATH_MASK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work);
@@ -391,14 +458,20 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
RK3588_SET_HPD_PATH_MASK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
dw_hdmi_qp_resume(dev, hdmi->hdmi);
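
Both this HDMI QP glue and the DSI2 glue above identify which on-SoC controller instance they drive by comparing the device's first MMIO base address against a per-SoC table. A condensed sketch of that probe-time match (base addresses copied from the rk3588 tables above; the helper name is illustrative):

static const resource_size_t example_port_bases[] = {
	0xfde80000,	/* port 0 */
	0xfdea0000,	/* port 1 */
};

static int example_match_port(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(example_port_bases); i++)
		if (res->start == example_port_bases[i])
			return i;	/* port id */

	return -ENODEV;
}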
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index b58e2a29294b..898d90155057 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
index a7edf3559e60..8b7ef3fac485 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index b0fc8ace2e41..403336397214 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
index 39a31c62a428..c3598ba7428c 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.h
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 32d8394c4c49..439edc165ff6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.c
@@ -17,7 +17,7 @@
#include <linux/console.h>
#include <linux/iommu.h>
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -39,7 +39,6 @@
#define DRIVER_NAME "rockchip"
#define DRIVER_DESC "RockChip Soc DRM"
-#define DRIVER_DATE "20140818"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -235,7 +234,6 @@ static const struct drm_driver rockchip_drm_driver = {
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -488,8 +486,7 @@ static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- if (drm)
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
@@ -536,6 +533,8 @@ static int __init rockchip_drm_init(void)
CONFIG_ROCKCHIP_DW_HDMI_QP);
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver,
+ CONFIG_ROCKCHIP_DW_MIPI_DSI2);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
CONFIG_ROCKCHIP_RK3066_HDMI);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 24b4ce5ceaf1..c183e82a42a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.h
@@ -90,6 +90,7 @@ extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
+extern struct platform_driver dw_mipi_dsi2_rockchip_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cfe8b793d344..dcc1f07632c3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index bae4e079dfb1..5179026b12d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 93ed841f5dce..6330b883efc3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 72f59ac6d258..cdeae36b91a1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 69900138295b..57747f1cff26 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0cf512cc1614..f04c9731ae7b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 9873172e3fd3..17a98845fd31 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -24,16 +24,17 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
#include <dt-bindings/soc/rockchip,vop2.h>
-#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
@@ -186,6 +187,7 @@ struct vop2 {
*/
u32 registered_num_wins;
+ struct resource *res;
void __iomem *regs;
struct regmap *map;
@@ -237,6 +239,37 @@ struct vop2 {
#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+/*
+ * bus-format types.
+ */
+struct drm_bus_format_enum_list {
+ int type;
+ const char *name;
+};
+
+static const struct drm_bus_format_enum_list drm_bus_format_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { MEDIA_BUS_FMT_RGB565_1X16, "RGB565_1X16" },
+ { MEDIA_BUS_FMT_RGB666_1X18, "RGB666_1X18" },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, "RGB666_1X24_CPADHI" },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, "RGB666_1X7X3_SPWG" },
+ { MEDIA_BUS_FMT_YUV8_1X24, "YUV8_1X24" },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, "UYYVYY8_0_5X24" },
+ { MEDIA_BUS_FMT_YUV10_1X30, "YUV10_1X30" },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, "UYYVYY10_0_5X30" },
+ { MEDIA_BUS_FMT_RGB888_3X8, "RGB888_3X8" },
+ { MEDIA_BUS_FMT_RGB888_1X24, "RGB888_1X24" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, "RGB888_1X7X4_SPWG" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, "RGB888_1X7X4_JEIDA" },
+ { MEDIA_BUS_FMT_UYVY8_2X8, "UYVY8_2X8" },
+ { MEDIA_BUS_FMT_YUYV8_1X16, "YUYV8_1X16" },
+ { MEDIA_BUS_FMT_UYVY8_1X16, "UYVY8_1X16" },
+ { MEDIA_BUS_FMT_RGB101010_1X30, "RGB101010_1X30" },
+ { MEDIA_BUS_FMT_YUYV10_1X20, "YUYV10_1X20" },
+};
+
+static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
+
static const struct regmap_config vop2_regmap_config;
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
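For reference, DRM_ENUM_NAME_FN() (from <drm/drm_crtc.h>) turns the table above into a name-lookup helper. Its expansion is approximately the following sketch (the exact macro body may differ slightly):

static const char *drm_get_bus_format_name(int val)
{
        int i;

        /* linear scan of the table; fall back to a placeholder string */
        for (i = 0; i < ARRAY_SIZE(drm_bus_format_enum_list); i++) {
                if (drm_bus_format_enum_list[i].type == val)
                        return drm_bus_format_enum_list[i].name;
        }

        return "(unknown)";
}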
@@ -278,6 +311,15 @@ static u32 vop2_readl(struct vop2 *vop2, u32 offset)
return val;
}
+static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
+
+ return val;
+}
+
static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
{
regmap_field_write(win->reg[reg], v);
@@ -550,6 +592,25 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
+ if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
+ if (vop2_cluster_window(win)) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(vop2->drm,
+ "Cluster window only supports format with afbc\n");
+ return false;
+ }
+ }
+ }
+
+ if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
+ if (vop2->data->soc_id == 3588) {
+ if (!rockchip_afbc(plane, modifier)) {
+ drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
+ return false;
+ }
+ }
+ }
+
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
@@ -998,6 +1059,67 @@ static void vop2_disable(struct vop2 *vop2)
clk_disable_unprepare(vop2->hclk);
}
+static bool vop2_vp_dsp_lut_is_enabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ return dsp_ctrl & RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+}
+
+static void vop2_vp_dsp_lut_disable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl &= ~RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static bool vop2_vp_dsp_lut_poll_disabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl;
+ int ret = readx_poll_timeout(vop2_vp_dsp_lut_is_enabled, vp, dsp_ctrl,
+ !dsp_ctrl, 5, 30 * 1000);
+ if (ret) {
+ drm_err(vp->vop2->drm, "display LUT RAM enable timeout!\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void vop2_vp_dsp_lut_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2)
+{
+ return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568);
+}
+
+static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp)
+{
+ const int nr_vps = vop2->data->nr_vps;
+ int gamma_en_vp_id;
+
+ for (gamma_en_vp_id = 0; gamma_en_vp_id < nr_vps; gamma_en_vp_id++)
+ if (vop2_vp_dsp_lut_is_enabled(&vop2->vps[gamma_en_vp_id]))
+ break;
+
+ return gamma_en_vp_id != nr_vps && gamma_en_vp_id != vp->id;
+}
+
static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
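The readx_poll_timeout() call in vop2_vp_dsp_lut_poll_disabled() above (from <linux/iopoll.h>) polls vop2_vp_dsp_lut_is_enabled() roughly every 5 us for up to 30 ms. A rough open-coded equivalent, for illustration only (hypothetical helper name, simplified timekeeping):

static int vop2_vp_dsp_lut_poll_disabled_sketch(struct vop2_video_port *vp)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(30 * 1000);

        for (;;) {
                if (!vop2_vp_dsp_lut_is_enabled(vp))
                        return 0;               /* LUT RAM released */
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;      /* reported via drm_err() above */
                usleep_range(5, 10);            /* the 5 us sleep argument */
        }
}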
@@ -1271,8 +1393,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_w = drm_rect_width(dest);
if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
- vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
+ vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
dsp_w = adjusted_mode->hdisplay - dest->x1;
if (dsp_w < 4)
dsp_w = 4;
@@ -1282,8 +1405,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_h = drm_rect_height(dest);
if (dest->y1 + dsp_h > adjusted_mode->vdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
- vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
+ vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
dsp_h = adjusted_mode->vdisplay - dest->y1;
if (dsp_h < 4)
dsp_h = 4;
@@ -1296,15 +1420,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
- drm_err(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
+ vp->id, win->data->name, actual_w);
actual_w -= 1;
}
}
if (afbc_en && actual_w % 4) {
- drm_err(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
+ vp->id, win->data->name, actual_w);
actual_w = ALIGN_DOWN(actual_w, 4);
}
@@ -1320,20 +1444,28 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
+ if (vop2->data->soc_id > 3568) {
+ vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
+ vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
+ vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
+ }
+
if (vop2_cluster_window(win))
vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
if (afbc_en) {
- u32 stride;
+ u32 stride, block_w;
+
+ /* the afbc superblock is 16 x 16 or 32 x 8 */
+ block_w = fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 ? 32 : 16;
- /* the afbc superblock is 16 x 16 */
afbc_format = vop2_convert_afbc_format(fb->format->format);
/* Enable color transform for YTR */
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
afbc_format |= (1 << 4);
- afbc_tile_num = ALIGN(actual_w, 16) >> 4;
+ afbc_tile_num = ALIGN(actual_w, block_w) / block_w;
/*
* AFBC pic_vir_width is count by pixel, this is different
@@ -1341,8 +1473,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
stride = (fb->pitches[0] << 3) / bpp;
if ((stride & 0x3f) && (xmirror || rotate_90 || rotate_270))
- drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
- vp->id, win->data->name, stride);
+ drm_dbg_kms(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
+ vp->id, win->data->name, stride);
+
+ /* This sets the AFBC header stride; each header entry is 16 bytes */
+ stride = ALIGN(stride, block_w) / block_w * 16;
uv_swap = vop2_afbc_uv_swap(fb->format->format);
/*
@@ -1374,7 +1509,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
else
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
- vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 1);
+ else
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+
transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
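A worked example for the AFBC hunks above, with hypothetical values: a 1920-pixel-wide XRGB8888 framebuffer (bpp = 32, fb->pitches[0] = 7680) gives stride = (7680 << 3) / 32 = 1920 pixels, and then:

/* Helper names are illustrative, not from the driver. */
static u32 afbc_tile_num_sketch(u32 actual_w, u32 block_w)
{
        return ALIGN(actual_w, block_w) / block_w;      /* 1920,16 -> 120; 1920,32 -> 60 */
}

static u32 afbc_hdr_stride_sketch(u32 stride_px, u32 block_w)
{
        /* one 16-byte header per superblock column: 1920,16 -> 1920 B; 1920,32 -> 960 B */
        return ALIGN(stride_px, block_w) / block_w * 16;
}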
@@ -1482,6 +1621,77 @@ static bool vop2_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static void vop2_crtc_write_gamma_lut(struct vop2 *vop2, struct drm_crtc *crtc)
+{
+ const struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ const struct vop2_video_port_data *vp_data = &vop2->data->vp[vp->id];
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i, bpc = ilog2(vp_data->gamma_lut_len);
+ u32 word;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ word = (drm_color_lut_extract(lut[i].blue, bpc) << (2 * bpc)) |
+ (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
+ drm_color_lut_extract(lut[i].red, bpc);
+
+ writel(word, vop2->lut_regs + i * 4);
+ }
+}
+
+static void vop2_crtc_atomic_set_gamma_seamless(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL,
+ FIELD_PREP(RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL, vp->id));
+ vop2_vp_dsp_lut_enable(vp);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_update_enable(vp);
+}
+
+static void vop2_crtc_atomic_set_gamma_rk356x(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_vp_dsp_lut_disable(vp);
+ vop2_cfg_done(vp);
+ if (!vop2_vp_dsp_lut_poll_disabled(vp))
+ return;
+
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL, vp->id);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_enable(vp);
+}
+
+static void vop2_crtc_atomic_try_set_gamma(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ if (!vop2->lut_regs)
+ return;
+
+ if (!crtc_state->gamma_lut) {
+ vop2_vp_dsp_lut_disable(vp);
+ return;
+ }
+
+ if (vop2_supports_seamless_gamma_lut_update(vop2))
+ vop2_crtc_atomic_set_gamma_seamless(vop2, vp, crtc);
+ else
+ vop2_crtc_atomic_set_gamma_rk356x(vop2, vp, crtc);
+}
+
+static inline void vop2_crtc_atomic_try_set_gamma_locked(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ vop2_lock(vop2);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+ vop2_unlock(vop2);
+}
+
static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
{
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
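To make the packing in vop2_crtc_write_gamma_lut() above concrete: with a 1024-entry LUT, bpc = ilog2(1024) = 10, so each 32-bit word carries B in bits 29:20, G in bits 19:10 and R in bits 9:0. A standalone sketch of that case (hypothetical helper name):

static u32 vop2_pack_lut_word_10bit(const struct drm_color_lut *e)
{
        /* drm_color_lut_extract() scales the 16-bit LUT entries down to 10 bits */
        return (drm_color_lut_extract(e->blue, 10) << 20) |
               (drm_color_lut_extract(e->green, 10) << 10) |
                drm_color_lut_extract(e->red, 10);
}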
@@ -1721,9 +1931,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
else
dclk_out_rate = v_pixclk >> 2;
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
@@ -1738,9 +1948,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
* dclk_rate = N * dclk_core_rate N = (1,2,4 ),
* we get a little factor here
*/
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
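The two hunks above fix a unit mismatch: dclk_out_rate is computed in Hz, but the ceiling passed to rk3588_calc_dclk() was written as 600000 (600 MHz expressed in kHz), so every candidate rate failed the range check and the function returned 0. A sketch of selection logic consistent with these call sites (an assumption; the real helper may differ in detail):

/* Pick the largest 1x/2x/4x multiple of the output clock that stays at or
 * below max_dclk (both in Hz); 0 tells the callers "out of range".
 */
static unsigned long calc_dclk_sketch(unsigned long dclk_out, unsigned long max_dclk)
{
        if (dclk_out * 4 <= max_dclk)
                return dclk_out * 4;
        if (dclk_out * 2 <= max_dclk)
                return dclk_out * 2;
        if (dclk_out <= max_dclk)
                return dclk_out;
        return 0;
}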
@@ -2057,11 +2267,40 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+
drm_crtc_vblank_on(crtc);
vop2_unlock(vop2);
}
+static int vop2_crtc_atomic_check_gamma(struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop2 *vop2 = vp->vop2;
+ unsigned int len;
+
+ if (!vp->vop2->lut_regs || !crtc_state->color_mgmt_changed ||
+ !crtc_state->gamma_lut)
+ return 0;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ drm_dbg(vop2->drm, "Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+
+ if (!vop2_supports_seamless_gamma_lut_update(vop2) && vop2_gamma_lut_in_use(vop2, vp)) {
+ drm_info(vop2->drm, "Gamma LUT can be enabled for only one CRTC at a time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -2069,6 +2308,11 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
int nplanes = 0;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ ret = vop2_crtc_atomic_check_gamma(vp, crtc, state, crtc_state);
+ if (ret)
+ return ret;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
nplanes++;
@@ -2159,7 +2403,6 @@ static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
{
- u32 offset = (main_win->data->phys_id * 0x10);
struct vop2_alpha_config alpha_config;
struct vop2_alpha alpha;
struct drm_plane_state *bottom_win_pstate;
@@ -2167,6 +2410,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
u16 src_glb_alpha_val, dst_glb_alpha_val;
bool premulti_en = false;
bool swap = false;
+ u32 offset = 0;
/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
bottom_win_pstate = main_win->base.state;
@@ -2185,6 +2429,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
vop2_parse_alpha(&alpha_config, &alpha);
alpha.src_color_ctrl.bits.src_dst_swap = swap;
+
+ switch (main_win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ offset = 0x0;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ offset = 0x10;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ offset = 0x20;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ offset = 0x30;
+ break;
+ }
+
vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
alpha.src_color_ctrl.val);
vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
@@ -2232,6 +2492,12 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
struct vop2_win *win = to_vop2_win(plane);
int zpos = plane->state->normalized_zpos;
+ /*
+ * Alpha blending only needs to be configured from the second
+ * layer up; the bottom layer has nothing beneath it to blend with.
+ */
+ if (zpos == 0)
+ continue;
+
if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
premulti_en = 1;
else
@@ -2308,7 +2574,10 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
- unsigned int nlayer, ofs;
+ u8 layer_id;
+ u8 old_layer_id;
+ u8 layer_sel_id;
+ unsigned int ofs;
u32 ovl_ctrl;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
@@ -2352,9 +2621,30 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
for (i = 0; i < vp->id; i++)
ofs += vop2->vps[i].nlayers;
- nlayer = 0;
drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
struct vop2_win *win = to_vop2_win(plane);
+ struct vop2_win *old_win;
+
+ layer_id = (u8)(plane->state->normalized_zpos + ofs);
+
+ /*
+ * Find the layer this window was bound to in the old state.
+ */
+ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
+ layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
+ if (layer_sel_id == win->data->layer_sel_id)
+ break;
+ }
+
+ /*
+ * Find the window that was bound to this layer in the old state.
+ */
+ for (i = 0; i < vop2->data->win_size; i++) {
+ old_win = &vop2->win[i];
+ layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
+ if (layer_sel_id == old_win->data->layer_sel_id)
+ break;
+ }
switch (win->data->phys_id) {
case ROCKCHIP_VOP2_CLUSTER0:
@@ -2399,17 +2689,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
break;
}
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- win->data->layer_sel_id);
- nlayer++;
- }
-
- /* configure unused layers to 0x5 (reserved) */
- for (; nlayer < vp->nlayers; nlayer++) {
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
+ /*
+ * When we move a window from layer M to layer N, we must also move the
+ * window previously on layer N to layer M, so that no window ends up
+ * selected by two or more layers.
+ */
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
}
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
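A minimal standalone sketch of the swap described in the comment above, assuming RK3568_OVL_LAYER_SEL__LAYER(layer, sel) places the selector in a 4-bit slot at bit layer * 4 (the 0xf shifts earlier in this hunk suggest that layout; the helper name is hypothetical):

static u32 ovl_layer_sel_swap_sketch(u32 layer_sel, u8 new_layer, u8 win_sel,
                                     u8 old_layer, u8 old_win_sel)
{
        /* bind the window to its new layer slot */
        layer_sel &= ~(0x7u << (new_layer * 4));
        layer_sel |= (u32)win_sel << (new_layer * 4);
        /* park the displaced window in the slot just vacated */
        layer_sel &= ~(0x7u << (old_layer * 4));
        layer_sel |= (u32)old_win_sel << (old_layer * 4);
        return layer_sel;
}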
@@ -2444,9 +2731,11 @@ static void vop2_setup_dly_for_windows(struct vop2 *vop2)
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
break;
case ROCKCHIP_VOP2_SMART0:
+ case ROCKCHIP_VOP2_ESMART2:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
break;
case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART3:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
break;
}
@@ -2487,7 +2776,13 @@ static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ /* On a modeset the gamma LUT update has already been done in atomic_enable */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->color_mgmt_changed)
+ vop2_crtc_atomic_try_set_gamma_locked(vop2, vp, crtc, crtc_state);
vop2_post_config(crtc);
@@ -2513,6 +2808,228 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
.atomic_disable = vop2_crtc_atomic_disable,
};
+static void vop2_dump_connector_on_crtc(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (crtc->state->connector_mask & drm_connector_mask(connector))
+ seq_printf(s, " Connector: %s\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static int vop2_plane_state_dump(struct seq_file *s, struct drm_plane *plane)
+{
+ struct vop2_win *win = to_vop2_win(plane);
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_rect *src, *dst;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ bool xmirror;
+ bool ymirror;
+ bool rotate_270;
+ bool rotate_90;
+ dma_addr_t fb_addr;
+ int i;
+
+ seq_printf(s, " %s: %s\n", win->data->name, !pstate ?
+ "DISABLED" : pstate->crtc ? "ACTIVE" : "DISABLED");
+
+ if (!pstate || !pstate->fb)
+ return 0;
+
+ fb = pstate->fb;
+ src = &pstate->src;
+ dst = &pstate->dst;
+ xmirror = pstate->rotation & DRM_MODE_REFLECT_X ? true : false;
+ ymirror = pstate->rotation & DRM_MODE_REFLECT_Y ? true : false;
+ rotate_270 = pstate->rotation & DRM_MODE_ROTATE_270;
+ rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ seq_printf(s, "\twin_id: %d\n", win->win_id);
+
+ seq_printf(s, "\tformat: %p4cc%s glb_alpha[0x%x]\n",
+ &fb->format->format,
+ drm_is_afbc(fb->modifier) ? "[AFBC]" : "",
+ pstate->alpha >> 8);
+ seq_printf(s, "\trotate: xmirror: %d ymirror: %d rotate_90: %d rotate_270: %d\n",
+ xmirror, ymirror, rotate_90, rotate_270);
+ seq_printf(s, "\tzpos: %d\n", pstate->normalized_zpos);
+ seq_printf(s, "\tsrc: pos[%d, %d] rect[%d x %d]\n", src->x1 >> 16,
+ src->y1 >> 16, drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+ seq_printf(s, "\tdst: pos[%d, %d] rect[%d x %d]\n", dst->x1, dst->y1,
+ drm_rect_width(dst), drm_rect_height(dst));
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ obj = fb->obj[i];
+ rk_obj = to_rockchip_obj(obj);
+ fb_addr = rk_obj->dma_addr + fb->offsets[i];
+
+ seq_printf(s, "\tbuf[%d]: addr: %pad pitch: %d offset: %d\n",
+ i, &fb_addr, fb->pitches[i], fb->offsets[i]);
+ }
+
+ return 0;
+}
+
+static int vop2_crtc_state_dump(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ struct rockchip_crtc_state *vcstate;
+ struct drm_display_mode *mode;
+ struct drm_plane *plane;
+ bool interlaced;
+
+ seq_printf(s, "Video Port%d: %s\n", vp->id, !cstate ?
+ "DISABLED" : cstate->active ? "ACTIVE" : "DISABLED");
+
+ if (!cstate || !cstate->active)
+ return 0;
+
+ mode = &crtc->state->adjusted_mode;
+ vcstate = to_rockchip_crtc_state(cstate);
+ interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ vop2_dump_connector_on_crtc(crtc, s);
+ seq_printf(s, "\tbus_format[%x]: %s\n", vcstate->bus_format,
+ drm_get_bus_format_name(vcstate->bus_format));
+ seq_printf(s, "\toutput_mode[%x]", vcstate->output_mode);
+ seq_printf(s, " color_space[%d]\n", vcstate->color_space);
+ seq_printf(s, " Display mode: %dx%d%s%d\n",
+ mode->hdisplay, mode->vdisplay, interlaced ? "i" : "p",
+ drm_mode_vrefresh(mode));
+ seq_printf(s, "\tclk[%d] real_clk[%d] type[%x] flag[%x]\n",
+ mode->clock, mode->crtc_clock, mode->type, mode->flags);
+ seq_printf(s, "\tH: %d %d %d %d\n", mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal);
+ seq_printf(s, "\tV: %d %d %d %d\n", mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ vop2_plane_state_dump(s, plane);
+ }
+
+ return 0;
+}
+
+static int vop2_summary_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+ drm_for_each_crtc(crtc, drm_dev) {
+ vop2_crtc_state_dump(crtc, s);
+ }
+ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
+}
+
+static void vop2_regs_print(struct vop2 *vop2, struct seq_file *s,
+ const struct vop2_regs_dump *dump, bool active_only)
+{
+ resource_size_t start;
+ u32 val;
+ int i;
+
+ if (dump->en_mask && active_only) {
+ val = vop2_readl(vop2, dump->base + dump->en_reg);
+ if ((val & dump->en_mask) != dump->en_val)
+ return;
+ }
+
+ seq_printf(s, "\n%s:\n", dump->name);
+
+ start = vop2->res->start + dump->base;
+ /* dump->size is in bytes; print four 32-bit registers per output line */
+ for (i = 0; i < dump->size >> 2; i += 4) {
+ seq_printf(s, "%08x: %08x %08x %08x %08x\n", (u32)start + i * 4,
+ vop2_readl(vop2, dump->base + (4 * i)),
+ vop2_readl(vop2, dump->base + (4 * (i + 1))),
+ vop2_readl(vop2, dump->base + (4 * (i + 2))),
+ vop2_readl(vop2, dump->base + (4 * (i + 3))));
+ }
+}
+
+static void __vop2_regs_dump(struct seq_file *s, bool active_only)
+{
+ struct drm_info_node *node = s->private;
+ struct vop2 *vop2 = node->info_ent->data;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ const struct vop2_regs_dump *dump;
+ unsigned int i;
+
+ drm_modeset_lock_all(drm_dev);
+
+ regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+
+ if (vop2->enable_count) {
+ for (i = 0; i < vop2->data->regs_dump_size; i++) {
+ dump = &vop2->data->regs_dump[i];
+ vop2_regs_print(vop2, s, dump, active_only);
+ }
+ } else {
+ seq_puts(s, "VOP disabled\n");
+ }
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static int vop2_regs_show(struct seq_file *s, void *arg)
+{
+ __vop2_regs_dump(s, false);
+
+ return 0;
+}
+
+static int vop2_active_regs_show(struct seq_file *s, void *data)
+{
+ __vop2_regs_dump(s, true);
+
+ return 0;
+}
+
+static struct drm_info_list vop2_debugfs_list[] = {
+ { "summary", vop2_summary_show, 0, NULL },
+ { "active_regs", vop2_active_regs_show, 0, NULL },
+ { "regs", vop2_regs_show, 0, NULL },
+};
+
+static void vop2_debugfs_init(struct vop2 *vop2, struct drm_minor *minor)
+{
+ struct dentry *root;
+ unsigned int i;
+
+ root = debugfs_create_dir("vop2", minor->debugfs_root);
+ if (!IS_ERR(root)) {
+ for (i = 0; i < ARRAY_SIZE(vop2_debugfs_list); i++)
+ vop2_debugfs_list[i].data = vop2;
+
+ drm_debugfs_create_files(vop2_debugfs_list,
+ ARRAY_SIZE(vop2_debugfs_list),
+ root, minor);
+ }
+}
+
+static int vop2_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ if (drm_crtc_index(crtc) == 0)
+ vop2_debugfs_init(vop2, crtc->dev->primary);
+
+ return 0;
+}
+
static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *vcstate;
@@ -2562,6 +3079,7 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = {
.atomic_destroy_state = vop2_crtc_destroy_state,
.enable_vblank = vop2_crtc_enable_vblank,
.disable_vblank = vop2_crtc_disable_vblank,
+ .late_register = vop2_crtc_late_register,
};
static irqreturn_t vop2_isr(int irq, void *data)
@@ -2790,7 +3308,12 @@ static int vop2_create_crtcs(struct vop2 *vop2)
}
drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);
+ if (vop2->lut_regs) {
+ const struct vop2_video_port_data *vp_data = &vop2_data->vp[vp->id];
+ drm_mode_crtc_set_gamma_size(&vp->crtc, vp_data->gamma_lut_len);
+ drm_crtc_enable_color_mgmt(&vp->crtc, 0, false, vp_data->gamma_lut_len);
+ }
init_completion(&vp->dsp_hold_completion);
}
@@ -2865,6 +3388,10 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
+ /* RK3588 only, reserved bit on RK3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
@@ -2957,6 +3484,10 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ /* RK3588 only, reserved register on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
@@ -3106,6 +3637,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return -EINVAL;
}
+ vop2->res = res;
vop2->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop2->regs))
return PTR_ERR(vop2->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 615a16196aff..29cc7fb8f6d8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
@@ -9,6 +9,7 @@
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
@@ -78,6 +79,9 @@ enum vop2_win_regs {
VOP2_WIN_COLOR_KEY,
VOP2_WIN_COLOR_KEY_EN,
VOP2_WIN_DITHER_UP,
+ VOP2_WIN_AXI_BUS_ID,
+ VOP2_WIN_AXI_YRGB_R_ID,
+ VOP2_WIN_AXI_UV_R_ID,
/* scale regs */
VOP2_WIN_SCALE_YRGB_X,
@@ -122,6 +126,15 @@ enum vop2_win_regs {
VOP2_WIN_MAX_REG,
};
+struct vop2_regs_dump {
+ const char *name;
+ u32 base;
+ u32 size;
+ u32 en_reg;
+ u32 en_val;
+ u32 en_mask;
+};
+
struct vop2_win_data {
const char *name;
unsigned int phys_id;
@@ -140,6 +153,10 @@ struct vop2_win_data {
unsigned int layer_sel_id;
uint64_t feature;
+ uint8_t axi_bus_id;
+ uint8_t axi_yrgb_r_id;
+ uint8_t axi_uv_r_id;
+
unsigned int max_upscale_factor;
unsigned int max_downscale_factor;
const u8 dly[VOP2_DLY_MODE_MAX];
@@ -160,10 +177,12 @@ struct vop2_data {
u64 feature;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
+ const struct vop2_regs_dump *regs_dump;
struct vop_rect max_input;
struct vop_rect max_output;
unsigned int win_size;
+ unsigned int regs_dump_size;
unsigned int soc_id;
};
@@ -308,6 +327,7 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_CTRL0 0x00
#define RK3568_CLUSTER_WIN_CTRL1 0x04
+#define RK3568_CLUSTER_WIN_CTRL2 0x08
#define RK3568_CLUSTER_WIN_YRGB_MST 0x10
#define RK3568_CLUSTER_WIN_CBR_MST 0x14
#define RK3568_CLUSTER_WIN_VIR 0x18
@@ -330,6 +350,7 @@ enum dst_factor_mode {
/* (E)smart register definition, offset relative to window base */
#define RK3568_SMART_CTRL0 0x00
#define RK3568_SMART_CTRL1 0x04
+#define RK3588_SMART_AXI_CTRL 0x08
#define RK3568_SMART_REGION0_CTRL 0x10
#define RK3568_SMART_REGION0_YRGB_MST 0x14
#define RK3568_SMART_REGION0_CBR_MST 0x18
@@ -394,6 +415,7 @@ enum dst_factor_mode {
#define RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN BIT(15)
#define RK3568_VP_DSP_CTRL__STANDBY BIT(31)
+#define RK3568_VP_DSP_CTRL__DSP_LUT_EN BIT(28)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_MODE BIT(20)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_SEL GENMASK(19, 18)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_EN BIT(17)
@@ -408,6 +430,8 @@ enum dst_factor_mode {
#define RK3568_VP_DSP_CTRL__CORE_DCLK_DIV BIT(4)
#define RK3568_VP_DSP_CTRL__OUT_MODE GENMASK(3, 0)
+#define RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN BIT(22)
+
#define RK3588_VP_CLK_CTRL__DCLK_OUT_DIV GENMASK(3, 2)
#define RK3588_VP_CLK_CTRL__DCLK_CORE_DIV GENMASK(1, 0)
@@ -460,6 +484,8 @@ enum dst_factor_mode {
#define RK3588_DSP_IF_POL__DP1_PIN_POL GENMASK(14, 12)
#define RK3588_DSP_IF_POL__DP0_PIN_POL GENMASK(10, 8)
+#define RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL GENMASK(13, 12)
+
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2_PHASE_LOCK BIT(5)
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2 BIT(4)
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index aba733736ff7..385cf6881504 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Mark Yao <mark.yao@rock-chips.com>
* Sandy Huang <hjc@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 4ce967d23813..ca83d7b6bea7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
* Mark Yao <mark.yao@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index dbfbde24698e..811020665120 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 1bd4e20e91eb..116f958b894d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index f9d87a0abc8b..65a88f489693 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
*/
@@ -258,6 +258,88 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
},
};
+static const struct vop2_regs_dump rk3568_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart0",
+ .base = RK3568_SMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart1",
+ .base = RK3568_SMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
{
.id = 0,
@@ -313,7 +395,7 @@ static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
* AXI1 is a read only bus.
*
* Every window on a AXI bus must assigned two unique
- * read id(yrgb_id/uv_id, valid id are 0x1~0xe).
+ * read id (yrgb_r_id/uv_r_id, valid ids are 0x1~0xe).
*
* AXI0:
* Cluster0/1, Esmart0/1, WriteBack
@@ -333,6 +415,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 0,
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -349,6 +434,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -364,6 +452,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -379,6 +470,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -393,6 +487,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 2,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -406,6 +503,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 3,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x01,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -419,6 +519,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 6,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -432,12 +535,118 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 7,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
},
};
+static const struct vop2_regs_dump rk3588_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+
+ }, {
+ .name = "VP3",
+ .base = RK3588_VP3_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster2",
+ .base = RK3588_CLUSTER2_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster3",
+ .base = RK3588_CLUSTER3_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart2",
+ .base = RK3588_ESMART2_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart3",
+ .base = RK3588_ESMART3_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_data rk3566_vop = {
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
@@ -446,6 +655,8 @@ static const struct vop2_data rk3566_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3566,
};
@@ -457,6 +668,8 @@ static const struct vop2_data rk3568_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3568,
};
@@ -469,6 +682,8 @@ static const struct vop2_data rk3588_vop = {
.vp = rk3588_vop_video_ports,
.win = rk3588_vop_win_data,
.win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .regs_dump = rk3588_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3588_regs_dump),
.soc_id = 3588,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 8998967f0c00..4e2099d86517 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index fbf1bcc68625..addf8ca085f6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c75302ca3427..f56e77e7f6d0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
#include <linux/stringify.h>
@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
__entry->seqno)
);
-#endif
+#endif /* _GPU_SCHED_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..da00572d7d42 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding the callback above failed (or there was no
+ * previous fence), so drop the reference taken for it and
+ * run the callback inline; dma_fence_put() is a no-op on NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
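The hunk above plugs a reference leak: dma_fence_get() takes a reference on behalf of the pending callback, and when dma_fence_add_callback() fails (typically because the fence has already signaled, or because prev is NULL here) that reference must be dropped before running the cleanup inline. The general shape of the pattern (sketch):

dma_fence_get(fence);                   /* reference now owned by the callback */
if (dma_fence_add_callback(fence, &cb, func)) {
        dma_fence_put(fence);           /* callback will never fire; drop its ref */
        func(NULL, &cb);                /* perform the deferred work inline */
}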
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 486d8f5282f9..b777690fd660 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -18,9 +18,9 @@
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
@@ -39,7 +39,6 @@
#define DRIVER_NAME "ssd130x"
#define DRIVER_DESC "DRM driver for Solomon SSD13xx OLED displays"
-#define DRIVER_DATE "20220131"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1784,7 +1783,6 @@ static const struct drm_driver ssd130x_drm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index bc1c747d3ea4..ceacdcb7c566 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -23,7 +23,6 @@
#define DRIVER_NAME "sprd"
#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver"
-#define DRIVER_DATE "20200201"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -59,7 +58,6 @@ static struct drm_driver sprd_drm_drv = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 61ceff9aee7e..5e9332df21df 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -13,9 +13,9 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -29,7 +29,6 @@
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
-#define DRIVER_DATE "20140601"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -143,7 +142,6 @@ static const struct drm_driver sti_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 21b46a6465f0..ca2fe17de4a5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1225,7 +1225,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size
struct drm_connector *connector = hdmi->drm_connector;
DRM_DEBUG_DRIVER("\n");
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
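connector->eld can be rewritten by the EDID parsing path while the audio callback copies it out, so readers now serialize on the eld_mutex added to struct drm_connector. Condensed, the locked-reader pattern used here is (hypothetical wrapper):

static void hdmi_copy_eld_locked(struct drm_connector *connector,
                                 u8 *buf, size_t len)
{
        mutex_lock(&connector->eld_mutex);
        memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
        mutex_unlock(&connector->eld_mutex);
}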
@@ -1235,7 +1237,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int sti_hdmi_register_audio_driver(struct device *dev,
@@ -1245,6 +1246,7 @@ static int sti_hdmi_register_audio_driver(struct device *dev,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
+ .no_capture_mute = 1,
};
DRM_DEBUG_DRIVER("\n");
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index bf090a354989..8ebcaf953782 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -16,9 +16,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -62,7 +62,6 @@ static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
- .date = "20170330",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5eccf58f2e17..c11dfb2739fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -15,8 +15,8 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -50,7 +50,6 @@ static const struct drm_driver sun4i_drv_driver = {
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
- .date = "20150629",
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 453f19f16ab7..ab0938ba61f7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -187,34 +187,6 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
return MODE_NOCLOCK;
}
-static int sun4i_hdmi_connector_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
-{
- struct drm_connector_state *conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_crtc *crtc = conn_state->crtc;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- enum drm_mode_status status;
-
- status = sun4i_hdmi_connector_clock_valid(connector, mode,
- conn_state->hdmi.tmds_char_rate);
- if (status != MODE_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static enum drm_mode_status
-sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- unsigned long long rate = drm_hdmi_compute_mode_clock(mode, 8,
- HDMI_COLORSPACE_RGB);
-
- return sun4i_hdmi_connector_clock_valid(connector, mode, rate);
-}
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -268,8 +240,8 @@ static const struct drm_connector_hdmi_funcs sun4i_hdmi_hdmi_connector_funcs = {
};
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
- .atomic_check = sun4i_hdmi_connector_atomic_check,
- .mode_valid = sun4i_hdmi_connector_mode_valid,
+ .atomic_check = drm_atomic_helper_connector_hdmi_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
.get_modes = sun4i_hdmi_get_modes,
};
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bf3421667ecc..4596073fe28f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -13,9 +13,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
@@ -35,7 +35,6 @@
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -901,7 +900,6 @@ static const struct drm_driver tegra_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 6bba97d0be88..22e2d959eb31 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
@@ -181,6 +182,465 @@ static struct kunit_suite drmm_connector_init_test_suite = {
.test_cases = drmm_connector_init_tests,
};
+static const struct drm_connector_funcs dummy_dynamic_init_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+};
+
+/*
+ * Test that the initialization of a bog-standard dynamic connector works
+ * as expected and doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+static void drm_test_connector_dynamic_init_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_cleanup(connector);
+}
+
+/*
+ * Test that the initialization of a dynamic connector without a DDC adapter
+ * doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init_null_ddc(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector doesn't add the
+ * connector to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_init_not_added(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &connector->head);
+}
+
+static void test_connector_property(struct kunit *test,
+ struct drm_connector *connector,
+ const struct drm_property *expected_prop)
+{
+ struct drm_property *prop;
+ uint64_t val;
+ int ret;
+
+ KUNIT_ASSERT_NOT_NULL(test, expected_prop);
+ prop = drm_mode_obj_find_prop_id(&connector->base, expected_prop->base.id);
+ KUNIT_ASSERT_PTR_EQ_MSG(test, prop, expected_prop,
+ "Can't find property %s", expected_prop->name);
+
+ ret = drm_object_property_get_default_value(&connector->base, prop, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, 0);
+
+ /* TODO: Check property value in the connector state. */
+}
+
+/*
+ * Test that the initialization of a dynamic connector adds all the expected
+ * properties to it.
+ */
+static void drm_test_drm_connector_dynamic_init_properties(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_mode_config *config = &priv->drm.mode_config;
+ const struct drm_property *props[] = {
+ config->edid_property,
+ config->dpms_property,
+ config->link_status_property,
+ config->non_desktop_property,
+ config->tile_property,
+ config->prop_crtc_id,
+ };
+ int ret;
+ int i;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for (i = 0; i < ARRAY_SIZE(props); i++)
+ test_connector_property(test, connector, props[i]);
+}
+
+/*
+ * Test that the initialization of a dynamic connector succeeds for all
+ * possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_type_valid(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector sets the expected name
+ * for it for all possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "%s-%d",
+ drm_get_connector_type_name(connector_type), connector->connector_type_id);
+ KUNIT_ASSERT_STREQ(test, connector->name, expected_name);
+}
+
+static struct kunit_case drm_connector_dynamic_init_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_null_ddc),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_not_added),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_properties),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_type_valid,
+ drm_connector_init_type_valid_gen_params),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_name,
+ drm_connector_init_type_valid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_connector_dynamic_init_test_suite = {
+ .name = "drm_connector_dynamic_init",
+ .init = drm_test_connector_init,
+ .exit = drm_test_connector_dynamic_init_cleanup,
+ .test_cases = drm_connector_dynamic_init_tests,
+};
+
+static int drm_test_connector_dynamic_register_early_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_connector_dynamic_init(&priv->drm, &priv->connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_early_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_unregister(connector);
+ drm_connector_put(connector);
+}
+
+/*
+ * Test that registration of a dynamic connector adds it to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_register_early_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&connector->head));
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device is
+ * registered results in deferring the connector's user interface registration.
+ */
+static void drm_test_drm_connector_dynamic_register_early_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, connector->registration_state, DRM_CONNECTOR_INITIALIZING);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if it is done
+ * before the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device is
+ * registered results in deferring adding a mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
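+ /* Adding the connector's mode object is deferred until the device is registered. */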
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+}
+
+static struct kunit_case drm_connector_dynamic_register_early_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_mode_object),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_early_test_suite = {
+ .name = "drm_connector_dynamic_register_early",
+ .init = drm_test_connector_dynamic_register_early_init,
+ .exit = drm_test_connector_dynamic_register_early_cleanup,
+ .test_cases = drm_connector_dynamic_register_early_tests,
+};
+
+static int drm_test_connector_dynamic_register_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_dynamic_register_early_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_dev_register(priv->connector.dev, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_device *dev = priv->connector.dev;
+
+ drm_connector_unregister(&priv->connector);
+ drm_connector_put(&priv->connector);
+
+ drm_dev_unregister(dev);
+
+ drm_test_connector_dynamic_register_early_cleanup(test);
+}
+
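+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to the connector list.
+ */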
+static void drm_test_drm_connector_dynamic_register_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&priv->connector.head));
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, priv->connector.head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector doesn't get deferred if
+ * this is done after the drm device is registered.
+ */
+static void drm_test_drm_connector_dynamic_register_no_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_INITIALIZING);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_REGISTERED);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if this is done after the
+ * drm device is registered, but before the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_PTR_EQ(test, tmp_connector, connector);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to sysfs.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, connector->kdev);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_NOT_NULL(test, connector->kdev);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered sets the connector's sysfs name as expected.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "card%d-%s",
+ connector->dev->primary->index, connector->name);
+
+ KUNIT_ASSERT_STREQ(test, dev_name(connector->kdev), expected_name);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to debugfs.
+ */
+static void drm_test_drm_connector_dynamic_register_debugfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ KUNIT_ASSERT_NOT_NULL(test, priv->connector.debugfs_entry);
+ else
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+}
+
+static struct kunit_case drm_connector_dynamic_register_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_mode_object),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs_name),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_debugfs),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_test_suite = {
+ .name = "drm_connector_dynamic_register",
+ .init = drm_test_connector_dynamic_register_init,
+ .exit = drm_test_connector_dynamic_register_cleanup,
+ .test_cases = drm_connector_dynamic_register_tests,
+};
+
/*
* Test that the registration of a bog standard connector works as
* expected and doesn't report any error.
@@ -635,6 +1095,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
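+/*
+ * RGB is included in every case, since drmm_connector_hdmi_init() requires
+ * RGB to be among the supported formats.
+ */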
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
+
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +1244,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
@@ -1283,6 +1803,9 @@ static struct kunit_suite drm_hdmi_compute_mode_clock_test_suite = {
kunit_test_suites(
&drmm_connector_hdmi_init_test_suite,
&drmm_connector_init_test_suite,
+ &drm_connector_dynamic_init_test_suite,
+ &drm_connector_dynamic_register_early_test_suite,
+ &drm_connector_dynamic_register_test_suite,
&drm_connector_attach_broadcast_rgb_property_test_suite,
&drm_get_tv_mode_from_name_test_suite,
&drm_hdmi_compute_mode_clock_test_suite,
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 89cd9e4f4d32..9e0e2fb65944 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -199,10 +199,8 @@ static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_c
static void drm_test_dp_mst_calc_pbn_div(struct kunit *test)
{
const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value;
- /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */
- struct drm_dp_mst_topology_mgr *mgr = test->priv;
- KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full,
+ KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(params->link_rate, params->lane_count).full,
params->expected.full);
}
@@ -568,21 +566,8 @@ static struct kunit_case drm_dp_mst_helper_tests[] = {
{ }
};
-static int drm_dp_mst_helper_tests_init(struct kunit *test)
-{
- struct drm_dp_mst_topology_mgr *mgr;
-
- mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr);
-
- test->priv = mgr;
-
- return 0;
-}
-
static struct kunit_suite drm_dp_mst_helper_test_suite = {
.name = "drm_dp_mst_helper",
- .init = drm_dp_mst_helper_tests_init,
.test_cases = drm_dp_mst_helper_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 294773342e71..23ecc00accb2 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -46,7 +46,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
struct drm_display_mode *mode, *preferred;
mutex_lock(&drm->mode_config.mutex);
- preferred = list_first_entry(&connector->modes, struct drm_display_mode, head);
+ preferred = list_first_entry_or_null(&connector->modes, struct drm_display_mode, head);
list_for_each_entry(mode, &connector->modes, head)
if (mode->type & DRM_MODE_TYPE_PREFERRED)
preferred = mode;
@@ -70,10 +70,17 @@ static int light_up_connector(struct kunit *test,
state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+retry:
conn_state = drm_atomic_get_connector_state(state, connector);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
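+ /*
+ * A -EDEADLK return means another context holds a lock this update
+ * needs: drop the acquired state and retry after backing off.
+ */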
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -105,9 +112,8 @@ static int set_connector_edid(struct kunit *test, struct drm_connector *connecto
mutex_lock(&drm->mode_config.mutex);
ret = connector->funcs->fill_modes(connector, 4096, 4096);
mutex_unlock(&drm->mode_config.mutex);
- KUNIT_ASSERT_GT(test, ret, 0);
- return 0;
+ return ret;
}
static const struct drm_connector_hdmi_funcs dummy_connector_hdmi_funcs = {
@@ -125,6 +131,18 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
.tmds_char_rate_valid = reject_connector_tmds_char_rate_valid,
};
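+/* Reject any mode whose TMDS character rate exceeds 100 MHz. */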
+static enum drm_mode_status
+reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
+}
+
+static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+};
+
static int dummy_connector_get_modes(struct drm_connector *connector)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
@@ -147,6 +165,7 @@ static int dummy_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs dummy_connector_helper_funcs = {
.atomic_check = drm_atomic_helper_connector_hdmi_check,
.get_modes = dummy_connector_get_modes,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static void dummy_hdmi_connector_reset(struct drm_connector *connector)
@@ -164,9 +183,10 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_atomic_helper_connector_hdmi_init(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc)
+drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -208,7 +228,7 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
- &dummy_connector_hdmi_funcs,
+ hdmi_funcs,
DRM_MODE_CONNECTOR_HDMIA,
NULL,
formats,
@@ -220,10 +240,27 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
drm_mode_config_reset(drm);
- ret = set_connector_edid(test, conn,
+ return priv;
+}
+
+static
+struct drm_atomic_helper_connector_hdmi_priv *
+drm_kunit_helper_connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ formats, max_bpc,
+ &dummy_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ ret = set_connector_edid(test, &priv->connector,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
return priv;
}
@@ -247,20 +284,21 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -310,20 +348,21 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -373,23 +412,23 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -429,9 +468,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -444,7 +483,6 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -485,23 +523,23 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -543,9 +581,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -558,7 +596,6 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -601,23 +638,23 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -659,9 +696,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -674,7 +711,6 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -719,25 +755,25 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ KUNIT_ASSERT_GT(test, ret, 0);
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -793,25 +829,25 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ KUNIT_ASSERT_GT(test, ret, 0);
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -862,30 +898,30 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_dvi_1080p,
ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -911,26 +947,26 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ KUNIT_ASSERT_GT(test, ret, 0);
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -958,26 +994,26 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ KUNIT_ASSERT_GT(test, ret, 0);
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1005,26 +1041,26 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ KUNIT_ASSERT_GT(test, ret, 0);
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1056,20 +1092,21 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1112,24 +1149,23 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1140,8 +1176,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1179,26 +1216,25 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1212,8 +1248,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1242,11 +1279,11 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -1254,15 +1291,12 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
@@ -1276,7 +1310,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
rate = mode->clock * 1500;
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1305,24 +1341,23 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1341,8 +1376,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1370,26 +1406,25 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1408,8 +1443,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1438,24 +1474,23 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1466,8 +1501,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1496,26 +1532,25 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1526,8 +1561,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1538,6 +1574,57 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
}
+/* Test that atomic check succeeds when disabling a connector. */
+static void drm_test_check_disable_connector(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *preferred;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
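+ /* Build a commit that disables the CRTC and detaches the connector. */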
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->active = false;
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode_vic_1),
@@ -1552,6 +1639,7 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
*/
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
+ KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
@@ -1593,9 +1681,9 @@ static void drm_test_check_broadcast_rgb_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1615,9 +1703,9 @@ static void drm_test_check_bpc_8_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1639,9 +1727,9 @@ static void drm_test_check_bpc_10_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1663,9 +1751,9 @@ static void drm_test_check_bpc_12_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1685,11 +1773,11 @@ static void drm_test_check_format_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1707,11 +1795,11 @@ static void drm_test_check_tmds_char_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1734,9 +1822,148 @@ static struct kunit_suite drm_atomic_helper_connector_hdmi_reset_test_suite = {
.test_cases = drm_atomic_helper_connector_hdmi_reset_tests,
};
+/*
+ * Test that the default behaviour for drm_hdmi_connector_mode_valid() is not
+ * to reject any modes. Pass a correct EDID and verify that the preferred
+ * mode matches the expectations (1080p).
+ */
+static void drm_test_check_mode_valid(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 1920);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 1080);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 148500);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes according to the
+ * .tmds_char_rate_valid() callback's behaviour.
+ * Pass a correct EDID and verify that high-rate modes are filtered.
+ */
+static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100_MHz_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ /*
+ * Unlike in drm_test_check_mode_valid(), 1080p is rejected here while
+ * 480p is allowed.
+ */
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() does not mark any modes as
+ * valid when .tmds_char_rate_valid() rejects all of them. Pass a correct
+ * EDID and verify that there is no preferred mode and that no modes were
+ * set for the connector.
+ */
+static void drm_test_check_mode_valid_reject(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ /* should reject all modes */
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NULL(test, preferred);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes that don't pass
+ * the info.max_tmds_clock filter. Pass a crafted EDID and verify that
+ * high-rate modes are filtered.
+ */
+static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_100mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
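+ /* The crafted EDID limits the sink's maximum TMDS clock to 100 MHz. */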
+ KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+static struct kunit_case drm_atomic_helper_connector_hdmi_mode_valid_tests[] = {
+ KUNIT_CASE(drm_test_check_mode_valid),
+ KUNIT_CASE(drm_test_check_mode_valid_reject),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_rate),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_max_clock),
+ { }
+};
+
+static struct kunit_suite drm_atomic_helper_connector_hdmi_mode_valid_test_suite = {
+ .name = "drm_atomic_helper_connector_hdmi_mode_valid",
+ .test_cases = drm_atomic_helper_connector_hdmi_mode_valid_tests,
+};
+
kunit_test_suites(
&drm_atomic_helper_connector_hdmi_check_test_suite,
&drm_atomic_helper_connector_hdmi_reset_test_suite,
+ &drm_atomic_helper_connector_hdmi_mode_valid_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 107559900e97..6358397a5d7a 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -74,6 +74,108 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
* 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
+ * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 42
+ * Made in: 2023
+ * Basic Display Parameters & Features:
+ * Digital display
+ * DFP 1.x compatible TMDS
+ * Maximum image size: 160 cm x 90 cm
+ * Gamma: 2.20
+ * Monochrome or grayscale display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.0000, 0.0000
+ * Green: 0.0000, 0.0000
+ * Blue : 0.0000, 0.0000
+ * White: 0.0000, 0.0000
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (1600 mm x 900 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0x92
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Underscans IT Video Formats by default
+ * Native detailed modes: 1
+ * Colorimetry Data Block:
+ * sRGB
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: No Data
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.2.3.4
+ * Maximum TMDS clock: 100 MHz
+ * Extended HDMI video details:
+ * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x01, 0x03, 0x81, 0xa0, 0x5a, 0x78,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
+ 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe4
+};
+
+/*
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
+ * 00 21 01 03 81 a0 5a 78 02 00 00 00 00 00 00 00
+ * 00 00 00 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 32
+ * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
+ *
+ * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
* 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 04a6b8cc62ac..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
- for a KUnit test
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 1ad711f8d2a8..cacb5f3d8085 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -700,7 +700,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
{
dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
dispc_k2g_vp_set_irqenable(dispc, 0, mask);
@@ -708,6 +708,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
+
/* flush posted write */
dispc_k2g_read_irqenable(dispc);
}
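The mask arithmetic in these set_irqenable paths is terse; a standalone sketch of what the two expressions select (plain C, example values only):

	u32 old_mask = 0x3, mask = 0x6;		/* illustrative */

	/* Bits being turned on: set in mask but not in old_mask. */
	u32 newly_enabled  = (mask ^ old_mask) & mask;		/* 0x4 */

	/* Bits being turned off: set in old_mask but not in mask. */
	u32 newly_disabled = (mask ^ old_mask) & old_mask;	/* 0x1 */

Clearing irqstatus for both sets means no stale status bit survives an enable/disable transition.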
@@ -780,24 +783,18 @@ static
void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
{
unsigned int i;
- u32 top_clear = 0;
for (i = 0; i < dispc->feat->num_vps; ++i) {
- if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(i);
- }
}
for (i = 0; i < dispc->feat->num_planes; ++i) {
- if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(4 + i);
- }
}
- if (dispc->feat->subrev == DISPC_K2G)
- return;
- dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQSTATUS);
@@ -843,7 +840,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
old_mask = dispc_k3_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
for (i = 0; i < dispc->feat->num_vps; ++i) {
@@ -868,6 +865,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
if (main_disable)
dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
+
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQENABLE_SET);
}
@@ -2767,8 +2767,12 @@ static void dispc_init_errata(struct dispc_device *dispc)
*/
static void dispc_softreset_k2g(struct dispc_device *dispc)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc->tidss->irq_lock, flags);
dispc_set_irqenable(dispc, 0);
dispc_read_and_clear_irqstatus(dispc);
+ spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 7c8fd6407d82..d4652e8cc28c 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -9,9 +9,9 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -113,7 +113,6 @@ static const struct drm_driver tidss_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "tidss",
.desc = "TI Keystone DSS",
- .date = "20180215",
.major = 1,
.minor = 0,
};
@@ -140,7 +139,7 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
- spin_lock_init(&tidss->wait_lock);
+ spin_lock_init(&tidss->irq_lock);
ret = dispc_init(tidss);
if (ret) {
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d7f27b0b0315..7f4f4282bc04 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,8 +29,9 @@ struct tidss_device {
unsigned int irq;
- spinlock_t wait_lock; /* protects the irq masks */
- dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+ /* protects the irq_mask field and the irqenable/irqstatus registers */
+ spinlock_t irq_lock;
+ dispc_irq_t irq_mask; /* enabled irqs */
};
#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 604334ef526a..5abc788781f4 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -15,10 +15,9 @@
#include "tidss_irq.h"
#include "tidss_plane.h"
-/* call with wait_lock and dispc runtime held */
static void tidss_irq_update(struct tidss_device *tidss)
{
- assert_spin_locked(&tidss->wait_lock);
+ assert_spin_locked(&tidss->irq_lock);
dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
}
@@ -31,11 +30,11 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
@@ -46,11 +45,11 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
static irqreturn_t tidss_irq_handler(int irq, void *arg)
@@ -60,7 +59,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
unsigned int id;
dispc_irq_t irqstatus;
+ spin_lock(&tidss->irq_lock);
irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+ spin_unlock(&tidss->irq_lock);
for (id = 0; id < tidss->num_crtcs; id++) {
struct drm_crtc *crtc = tidss->crtcs[id];
@@ -78,8 +79,13 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
tidss_crtc_error_irq(crtc, irqstatus);
}
- if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
- dev_err_ratelimited(tidss->dev, "OCP error\n");
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct drm_plane *plane = tidss->planes[i];
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ if (irqstatus & DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id))
+ tidss_plane_error_irq(plane, irqstatus);
+ }
return IRQ_HANDLED;
}
@@ -88,9 +94,9 @@ void tidss_irq_resume(struct tidss_device *tidss)
{
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
@@ -105,7 +111,7 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
if (ret)
return ret;
- tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+ tidss->irq_mask = 0;
for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
@@ -115,6 +121,12 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct tidss_plane *tplane = to_tidss_plane(tidss->planes[i]);
+
+ tidss->irq_mask |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
index b512614d5863..dd61f645f662 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.h
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -19,15 +19,13 @@
* bit use |D |fou|FEOL|FEOL|FEOL|FEOL| UUUU | |
* bit number|0 |1-3|4-7 |8-11| 12-19 | 20-23 | 24-31 |
*
- * device bits: D = OCP error
+ * device bits: D = Unused
* WB bits: f = frame done wb, o = wb buffer overflow,
* u = wb buffer uncomplete
* vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
* plane bits: U = fifo underflow
*/
-#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
-
#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index a5d86822c9e3..116de124bddb 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -18,6 +18,14 @@
#include "tidss_drv.h"
#include "tidss_plane.h"
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus)
+{
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_err_ratelimited(plane->dev->dev, "Plane%u underflow (irq %llx)\n",
+ tplane->hw_plane_id, irqstatus);
+}
+
/* drm_plane_helper_funcs */
static int tidss_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index e933e158b617..aecaf2728406 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -22,4 +22,6 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
u32 crtc_mask, const u32 *formats,
u32 num_formats);
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus);
+
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6f0df8d6b90c..7caec4d38ddf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -13,8 +13,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -481,7 +481,6 @@ static const struct drm_driver tilcdc_driver = {
.fops = &fops,
.name = "tilcdc",
.desc = "TI LCD Controller DRM",
- .date = "20121205",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 4aaf56f8707d..60816d2eb4ff 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 0cc68042a6d6..2748d1f21d86 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk.h>
+
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -289,7 +290,7 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
* There is only one output port inside each device. It is linked with
* encoder endpoint.
*/
- endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ endpoint_node = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 0, -1);
if (endpoint_node) {
encoder_node = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
@@ -366,7 +367,6 @@ static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
- .date = "20160219",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 6f91ff1dbf7e..8706763af8fb 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -5,9 +5,9 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_hw_blank(bochs, false);
-
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
@@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, false);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
@@ -680,7 +681,6 @@ static const struct drm_driver bochs_driver = {
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
- .date = "20130925",
.major = 1,
.minor = 0,
DRM_GEM_SHMEM_DRIVER_OPS,
@@ -758,7 +758,6 @@ static void bochs_pci_remove(struct pci_dev *pdev)
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_put(dev);
}
static void bochs_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 4d2adcaeaa60..52ec1e4ea9e5 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -24,10 +24,10 @@
#include <video/cirrus.h>
#include <video/vga.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -46,9 +46,8 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
-#define DRIVER_NAME "cirrus"
+#define DRIVER_NAME "cirrus-qemu"
#define DRIVER_DESC "qemu cirrus vga"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
@@ -589,14 +588,14 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus)
encoder = &cirrus->encoder;
ret = drm_encoder_init(dev, encoder, &cirrus_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = &cirrus->connector;
ret = drm_connector_init(dev, connector, &cirrus_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_VIRTUAL);
if (ret)
return ret;
drm_connector_helper_add(connector, &cirrus_connector_helper_funcs);
@@ -659,7 +658,6 @@ static const struct drm_driver cirrus_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 0c17ae532fb4..41e9bfb2e2ff 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -7,9 +7,9 @@
#include <linux/pm.h>
#include <linux/usb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -34,7 +34,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define DRIVER_NAME "gm12u320"
#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -626,7 +625,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 6b0d1846cfcf..df263818f45f 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -16,8 +16,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -199,7 +199,6 @@ static const struct drm_driver hx8357d_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
- .date = "20181023",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index 5eb39ca1a855..62cadf5e033d 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -7,8 +7,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -118,7 +118,6 @@ static struct drm_driver ili9163_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
- .date = "20210208",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 875e2d09729a..6de44ff69b51 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -16,8 +16,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
@@ -364,7 +364,6 @@ static const struct drm_driver ili9225_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
- .date = "20171106",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index c1dfdfbbd30c..e55029433509 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -15,8 +15,8 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -155,7 +155,6 @@ static const struct drm_driver ili9341_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
- .date = "20180514",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 7e46a720d5e2..093661c771a0 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -14,8 +14,8 @@
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -177,7 +177,6 @@ static const struct drm_driver ili9486_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
- .date = "20200118",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index f1461c55dba6..b6b4664908ae 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -13,8 +13,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -159,7 +159,6 @@ static const struct drm_driver mi0283qt_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
- .date = "20160614",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 9898eab5e9e2..13491c0e704a 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -5,9 +5,9 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -25,7 +25,6 @@
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
-#define DRIVER_DATE "20220501"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1348,7 +1347,6 @@ static struct drm_driver ofdrm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index e66729b31bd6..0460ecaef4bd 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -10,12 +10,13 @@
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -269,7 +270,6 @@ static const struct drm_driver panel_mipi_dbi_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
- .date = "20220103",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 77944eb17b3c..52ba6c699bc8 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -21,8 +21,8 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -917,7 +917,6 @@ static const struct drm_driver repaper_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
- .date = "20170405",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
index 2d2315bd6aef..03d2850310c4 100644
--- a/drivers/gpu/drm/tiny/sharp-memory.c
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -107,7 +107,6 @@ static const struct drm_driver sharp_memory_drm_driver = {
DRM_FBDEV_DMA_DRIVER_OPS,
.name = "sharp_memory_display",
.desc = "Sharp Display Memory LCD",
- .date = "20231129",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 4d4f05dee244..5d9ab8adf800 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -10,9 +10,9 @@
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
@@ -31,7 +31,6 @@
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
-#define DRIVER_DATE "20200625"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1015,7 +1014,6 @@ static struct drm_driver simpledrm_driver = {
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 97013685c62f..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -12,8 +12,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
@@ -295,7 +295,6 @@ static const struct drm_driver st7586_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
- .date = "20170801",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 0747ebd999cc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -16,8 +16,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -160,7 +160,6 @@ static const struct drm_driver st7735r_driver = {
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
- .date = "20171128",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 3139fd9128d8..f8f20d2f6174 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -258,13 +258,13 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->priority = bo_prio;
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to populate LRU */
- ttm_resource_alloc(bo, place, &res2);
+ ttm_resource_alloc(bo, place, &res2, NULL);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_unreserve(bo);
@@ -300,12 +300,12 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_pin(bo);
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to the pinned list */
- err = ttm_resource_alloc(bo, place, &res2);
+ err = ttm_resource_alloc(bo, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
@@ -355,7 +355,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
dma_resv_unlock(bo1->base.resv);
- err = ttm_resource_alloc(bo1, place, &res1);
+ err = ttm_resource_alloc(bo1, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo1->resource = res1;
@@ -363,7 +363,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
dma_resv_unlock(bo2->base.resv);
- err = ttm_resource_alloc(bo2, place, &res2);
+ err = ttm_resource_alloc(bo2, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo2->resource = res2;
@@ -401,7 +401,7 @@ static void ttm_bo_put_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -518,7 +518,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -569,7 +569,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1adf18481ea0..3148f5d3dbd6 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,7 +542,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, man->usage, size);
@@ -603,7 +603,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
bo->type = params->bo_type;
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index a9f4b81921c3..e6ea2bd01f07 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -302,7 +302,7 @@ static void ttm_sys_man_free_basic(struct kunit *test)
res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, res);
- ttm_resource_alloc(bo, place, &res);
+ ttm_resource_alloc(bo, place, &res, NULL);
man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
man->func->free(man, res);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 48c5365efca1..ea5e49858857 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,6 +42,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>
#include "ttm_module.h"
@@ -499,6 +500,13 @@ struct ttm_bo_evict_walk {
struct ttm_resource **res;
/** @evicted: Number of successful evictions. */
unsigned long evicted;
+
+ /** @limit_pool: Which pool limit we should test against */
+ struct dmem_cgroup_pool_state *limit_pool;
+ /** @try_low: Whether we should attempt to evict BOs under low-watermark protection */
+ bool try_low;
+ /** @hit_low: Set if we could not evict a BO because @try_low was false (first pass) */
+ bool hit_low;
};
static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
@@ -507,6 +515,10 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
container_of(walk, typeof(*evict_walk), walk);
s64 lret;
+ if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
+ evict_walk->try_low, &evict_walk->hit_low))
+ return 0;
+
if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
return 0;
@@ -524,7 +536,7 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
evict_walk->evicted++;
if (evict_walk->res)
lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
- evict_walk->res);
+ evict_walk->res, NULL);
if (lret == 0)
return 1;
out:
@@ -545,7 +557,8 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_buffer_object *evictor,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket,
- struct ttm_resource **res)
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state *limit_pool)
{
struct ttm_bo_evict_walk evict_walk = {
.walk = {
@@ -556,22 +569,39 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
.place = place,
.evictor = evictor,
.res = res,
+ .limit_pool = limit_pool,
};
s64 lret;
evict_walk.walk.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+
+ /* If we hit the low limit, make one more attempt, this time with @try_low set */
+ if (!lret && evict_walk.hit_low) {
+ evict_walk.try_low = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ }
if (lret || !ticket)
goto out;
+ /* Reset low limit */
+ evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
evict_walk.walk.trylock_only = false;
+
+retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
evict_walk.walk.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
+
+ /* We hit the low limit? Try once more, this time with @try_low set */
+ if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
+ evict_walk.try_low = true;
+ goto retry;
+ }
out:
if (lret < 0)
return lret;
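The try_low/hit_low dance above implements a two-pass eviction: the first pass refuses BOs that dmem cgroup protection marks as below their low watermark, and only if that pass made no progress while skipping such a BO is a second pass allowed to take them. A distilled sketch (hypothetical walk helper, not the driver code):

	bool try_low = false, hit_low = false;
	s64 lret;

	/* Pass 1: respect low-watermark protection. */
	lret = walk_lru_once(&hit_low, try_low);

	/* Pass 2: only if pass 1 failed but skipped a protected BO. */
	if (!lret && hit_low) {
		try_low = true;
		lret = walk_lru_once(&hit_low, try_low);
	}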
@@ -689,6 +719,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct dmem_cgroup_pool_state *limit_pool = NULL;
struct ttm_resource_manager *man;
bool may_evict;
@@ -701,15 +732,20 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
continue;
may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
- ret = ttm_resource_alloc(bo, place, res);
+ ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
if (ret) {
- if (ret != -ENOSPC)
+ if (ret != -ENOSPC && ret != -EAGAIN) {
+ dmem_cgroup_pool_state_put(limit_pool);
return ret;
- if (!may_evict)
+ }
+ if (!may_evict) {
+ dmem_cgroup_pool_state_put(limit_pool);
continue;
+ }
ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
- ticket, res);
+ ticket, res, limit_pool);
+ dmem_cgroup_pool_state_put(limit_pool);
if (ret == -EBUSY)
continue;
if (ret)
@@ -1056,6 +1092,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
+
+ bool hit_low, evict_low;
};
static s64
@@ -1106,7 +1144,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
- ret = ttm_resource_alloc(bo, &place, &evict_mem);
+ ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2c699ed1963a..a194db83421d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,13 +58,13 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
return VM_FAULT_RETRY;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
(void)dma_resv_wait_timeout(bo->base.resv,
DMA_RESV_USAGE_KERNEL, true,
MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
return VM_FAULT_RETRY;
}
@@ -130,12 +130,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
}
return VM_FAULT_RETRY;
@@ -353,7 +353,7 @@ void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
}
EXPORT_SYMBOL(ttm_bo_vm_open);
@@ -361,7 +361,7 @@ void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
@@ -405,13 +405,25 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when the buffer object
+ * cannot be easily mapped (non-contiguous, non-visible, etc...). Should not
+ * directly be exported to user space via a peek / poke interface.
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write)
{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- unsigned long offset = (addr) - vma->vm_start +
- ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
- << PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) > bo->base.size)
@@ -429,8 +441,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
break;
default:
if (bo->bdev->funcs->access_memory)
- ret = bo->bdev->funcs->access_memory(
- bo, offset, buf, len, write);
+ ret = bo->bdev->funcs->access_memory
+ (bo, offset, buf, len, write);
else
ret = -EIO;
}
@@ -439,6 +451,18 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_access);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
+
+ return ttm_bo_access(bo, offset, buf, len, write);
+}
EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
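A hedged usage sketch of the newly exported ttm_bo_access() (the BO, offset and size here are illustrative):

	u32 word;
	int ret;

	/* Read 4 bytes at byte offset 0x100 of @bo (write = false). */
	ret = ttm_bo_access(bo, 0x100, &word, sizeof(word), false);
	if (ret != sizeof(word))
		return ret < 0 ? ret : -EIO;	/* error or short access */

The vm_access path above is now just a thin wrapper that translates the faulting address into a BO offset before calling the same helper.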
@@ -462,7 +486,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
/*
* Drivers may want to override the vm_ops field. Otherwise we
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index a87665eb28a6..cc29bbf3eabb 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -26,6 +26,7 @@
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
+#include <linux/cgroup_dmem.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
@@ -350,15 +351,28 @@ EXPORT_SYMBOL(ttm_resource_fini);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res_ptr)
+ struct ttm_resource **res_ptr,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ struct dmem_cgroup_pool_state *pool = NULL;
int ret;
+ if (man->cg) {
+ ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
+ if (ret)
+ return ret;
+ }
+
ret = man->func->alloc(man, bo, place, res_ptr);
- if (ret)
+ if (ret) {
+ if (pool)
+ dmem_cgroup_uncharge(pool, bo->base.size);
return ret;
+ }
+
+ (*res_ptr)->css = pool;
spin_lock(&bo->bdev->lru_lock);
ttm_resource_add_bulk_move(*res_ptr, bo);
@@ -370,6 +384,7 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
struct ttm_resource_manager *man;
+ struct dmem_cgroup_pool_state *pool;
if (!*res)
return;
@@ -377,9 +392,13 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
spin_lock(&bo->bdev->lru_lock);
ttm_resource_del_bulk_move(*res, bo);
spin_unlock(&bo->bdev->lru_lock);
+
+ pool = (*res)->css;
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
+ if (man->cg)
+ dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
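For the charge/uncharge pairing to take effect, a resource manager only needs a cgroup region in man->cg; managers that leave it NULL are unaffected. A sketch of how a driver might opt in (region size and name are hypothetical, and the registration call is an assumption about the dmem cgroup API rather than something this patch shows):

	/* During VRAM manager init — illustrative, not from this patch: */
	man->cg = dmem_cgroup_register_region(vram_size,
					      "drm/%s/vram", dev->unique);
	if (IS_ERR(man->cg))
		return PTR_ERR(man->cg);

From then on every ttm_resource_alloc() charges the allocating process's dmem cgroup up front and every ttm_resource_free() uncharges it, with the pool pointer carried in the resource's css field.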
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index c341aee37dd9..a048e37f1c2c 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -37,9 +37,9 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -146,7 +146,6 @@ static const struct drm_driver tve200_drm_driver = {
.fops = &drm_fops,
.name = "tve200",
.desc = DRIVER_DESC,
- .date = "20170703",
.major = 1,
.minor = 0,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8d8ae40f945c..05b3a152cc33 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -5,8 +5,8 @@
#include <linux/module.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -78,7 +78,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1eb716d9dad5..be00dc1d87a1 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -26,7 +26,6 @@ struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
-#define DRIVER_DATE "20120220"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 73ab7dd31b17..bb7815599435 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -13,10 +13,6 @@
* Display engines requiring physically contiguous allocations should
* look into Mesa's "renderonly" support (as used by the Mesa pl111
* driver) for an example of how to integrate with V3D.
- *
- * Long term, we should support evicting pages from the MMU when under
- * memory pressure (thus the v3d_bo_get_pages() refcounting), but
- * that's not a high priority since our systems tend to not have swap.
*/
#include <linux/dma-buf.h>
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 19e3ee7ac897..76816f2551c1 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -237,8 +237,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
if (v3d->ver >= 40) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
- V3D_SET_FIELD(cycle_count_reg,
- V3D_PCTR_S0));
+ V3D_SET_FIELD_VER(cycle_count_reg,
+ V3D_PCTR_S0, v3d->ver));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index bee51c942a56..930737a9347b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -31,7 +31,6 @@
#define DRIVER_NAME "v3d"
#define DRIVER_DESC "Broadcom V3D graphics"
-#define DRIVER_DATE "20180419"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -224,6 +223,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_COUNTER, v3d_perfmon_get_counter_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_PERFMON_SET_GLOBAL, v3d_perfmon_set_global_ioctl, DRM_RENDER_ALLOW),
};
static const struct drm_driver v3d_drm_driver = {
@@ -248,7 +248,6 @@ static const struct drm_driver v3d_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index de73eefff9ac..dc1cfe2e14be 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -183,6 +183,12 @@ struct v3d_dev {
u32 num_allocated;
u32 pages_allocated;
} bo_stats;
+
+ /* To support a performance analysis tool in user space, we require
+ * a single, globally configured performance monitor (perfmon) for
+ * all jobs.
+ */
+ struct v3d_perfmon *global_perfmon;
};
static inline struct v3d_dev *
@@ -594,6 +600,8 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_sysfs.c */
int v3d_sysfs_init(struct device *dev);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 20bf33702c3c..72b6a119412f 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -107,7 +107,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+
+ v3d->bin_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -117,7 +120,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+
+ v3d->render_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -127,7 +133,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+
+ v3d->csd_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -164,7 +173,10 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+
+ v3d->tfu_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
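The reordering in these IRQ handlers is subtle: signaling the fence can let the scheduler run and install a new active job, after which a late NULL store would clobber the freshly assigned pointer. A generic sketch of the hazard (not driver code):

	/* Racy: a new job may be assigned to v3d->bin_job right after
	 * the signal, and the NULL store below would wipe it out. */
	dma_fence_signal(&fence->base);
	v3d->bin_job = NULL;

	/* Safe: retire the pointer before waiters can run. */
	v3d->bin_job = NULL;
	dma_fence_signal(&fence->base);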
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 0f564fd7160c..a25d25a8ae61 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -4,7 +4,7 @@
/**
* DOC: Broadcom V3D MMU
*
- * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
+ * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
* a single level of page tables for the V3D's 4GB address space to
* map to AXI bus addresses, thus it could need up to 4MB of
* physically contiguous memory to store the PTEs.
@@ -15,14 +15,14 @@
*
* To protect clients from each other, we should use the GMP to
* quickly mask out (at 128kb granularity) what pages are available to
- * each client. This is not yet implemented.
+ * each client. This is not yet implemented.
*/
#include "v3d_drv.h"
#include "v3d_regs.h"
-/* Note: All PTEs for the 1MB superpage must be filled with the
- * superpage bit set.
+/* Note: All PTEs for the 64KB bigpage or 1MB superpage must be filled
+ * with the bigpage/superpage bit set.
*/
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_BIGPAGE BIT(30)
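As the reworded note says, a huge mapping is not a single PTE: every 4KB slot covered by a 64KB big page (16 entries) or 1MB super page (256 entries) must carry the corresponding bit. An illustrative fill loop (page-table layout and PTE encoding simplified; names hypothetical):

	/* Map one 1MB superpage starting at page-table index 'page'. */
	for (i = 0; i < SZ_1M / SZ_4K; i++)
		pt[page + i] = V3D_PTE_SUPERPAGE | (pfn + i);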
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 924814cab46a..3ebda2fa46fc 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -240,17 +240,18 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
for (i = 0; i < ncounters; i++) {
u32 source = i / 4;
- u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);
+ u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0,
+ v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S1);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S1, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S2);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S2, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S3);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S3, v3d->ver);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
}
@@ -312,6 +313,9 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
if (perfmon == v3d->active_perfmon)
v3d_perfmon_stop(v3d, perfmon, false);
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
+
v3d_perfmon_put(perfmon);
return 0;
@@ -383,6 +387,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
{
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_perfmon_destroy *req = data;
+ struct v3d_dev *v3d = v3d_priv->v3d;
struct v3d_perfmon *perfmon;
mutex_lock(&v3d_priv->perfmon.lock);
@@ -392,6 +397,13 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
if (!perfmon)
return -EINVAL;
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == v3d->active_perfmon)
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
+
v3d_perfmon_put(perfmon);
return 0;
@@ -451,3 +463,34 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return 0;
}
+
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_perfmon_set_global *req = data;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_perfmon *perfmon;
+
+ if (req->flags & ~DRM_V3D_PERFMON_CLEAR_GLOBAL)
+ return -EINVAL;
+
+ perfmon = v3d_perfmon_find(v3d_priv, req->id);
+ if (!perfmon)
+ return -EINVAL;
+
+ /* If the request is to clear the global performance monitor */
+ if (req->flags & DRM_V3D_PERFMON_CLEAR_GLOBAL) {
+ if (!v3d->global_perfmon)
+ return -EINVAL;
+
+ xchg(&v3d->global_perfmon, NULL);
+
+ return 0;
+ }
+
+ if (cmpxchg(&v3d->global_perfmon, NULL, perfmon))
+ return -EBUSY;
+
+ return 0;
+}
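The ioctl publishes the global perfmon through a single shared pointer without taking a lock; the same atomics appear in the destroy paths above. The generic shape of the pattern (not driver code):

	static struct v3d_perfmon *global;	/* one shared slot */

	/* Claim: only succeeds if the slot is empty. */
	if (cmpxchg(&global, NULL, perfmon))
		return -EBUSY;

	/* Release unconditionally (explicit CLEAR_GLOBAL request). */
	xchg(&global, NULL);

	/* Release only if the slot still holds this perfmon
	 * (destroy/teardown paths). */
	cmpxchg(&global, perfmon, NULL);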
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index d919a2fc9449..2bc4cce0744a 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2024 Raspberry Pi
*/
+
#ifndef V3D_PERFORMANCE_COUNTERS_H
#define V3D_PERFORMANCE_COUNTERS_H
-/* Holds a description of a given performance counter. The index of performance
- * counter is given by the array on v3d_performance_counter.h
+/* Holds a description of a given performance counter. The index of
+ * performance counter is given by the array on `v3d_performance_counter.c`.
*/
struct v3d_perf_counter_desc {
/* Category of the counter */
@@ -20,15 +21,12 @@ struct v3d_perf_counter_desc {
};
struct v3d_perfmon_info {
- /*
- * Different revisions of V3D have different total number of
+ /* Different revisions of V3D have different total number of
* performance counters.
*/
unsigned int max_counters;
- /*
- * Array of counters valid for the platform.
- */
+ /* Array of counters valid for the platform. */
const struct v3d_perf_counter_desc *counters;
};
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 1b1a62ad9585..6da3c69082bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -15,6 +15,14 @@
fieldval & field##_MASK; \
})
+#define V3D_SET_FIELD_VER(value, field, ver) \
+ ({ \
+ typeof(ver) _ver = (ver); \
+ u32 fieldval = (value) << field##_SHIFT(_ver); \
+ WARN_ON((fieldval & ~field##_MASK(_ver)) != 0); \
+ fieldval & field##_MASK(_ver); \
+ })
+
#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \
field##_SHIFT)
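Usage note: unlike V3D_SET_FIELD(), the _VER variant takes masks and shifts that are themselves functions of the hardware version, so callers thread v3d->ver through, as the perfmon code below does:

	/* Source field is 8 bits wide on v7.1+, 7 bits before. */
	u32 channel = V3D_SET_FIELD_VER(counter, V3D_PCTR_S1, v3d->ver);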
@@ -354,18 +362,15 @@
#define V3D_V4_PCTR_0_SRC_28_31 0x0067c
#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \
4 * (x))
-# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
-# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0)
-# define V3D_PCTR_S0_SHIFT 0
-# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
-# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8)
-# define V3D_PCTR_S1_SHIFT 8
-# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
-# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16)
-# define V3D_PCTR_S2_SHIFT 16
-# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
-# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24)
-# define V3D_PCTR_S3_SHIFT 24
+# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
+# define V3D_PCTR_S0_SHIFT(ver) 0
+# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8))
+# define V3D_PCTR_S1_SHIFT(ver) 8
+# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? V3D_MASK(23, 16) : V3D_MASK(22, 16))
+# define V3D_PCTR_S2_SHIFT(ver) 16
+# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24))
+# define V3D_PCTR_S3_SHIFT(ver) 24
+
#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
/* Output values of the counters. */
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 99ac4995b5a1..05608c894ed9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -5,16 +5,16 @@
* DOC: Broadcom V3D scheduling
*
* The shared DRM GPU scheduler is used to coordinate submitting jobs
- * to the hardware. Each DRM fd (roughly a client process) gets its
- * own scheduler entity, which will process jobs in order. The GPU
- * scheduler will round-robin between clients to submit the next job.
+ * to the hardware. Each DRM fd (roughly a client process) gets its
+ * own scheduler entity, which will process jobs in order. The GPU
+ * scheduler will pick between clients using a FIFO algorithm.
*
* For simplicity, and in order to keep latency low for interactive
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
- * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * drm_sched_job_add_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs using the HW's
+ * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
+ * `drm_sched_job_add_dependency()` to manage the dependency between bin
+ * and render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@@ -120,11 +120,19 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job)
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
- if (job->perfmon != v3d->active_perfmon)
+ struct v3d_perfmon *perfmon = v3d->global_perfmon;
+
+ if (!perfmon)
+ perfmon = job->perfmon;
+
+ if (perfmon == v3d->active_perfmon)
+ return;
+
+ if (perfmon != v3d->active_perfmon)
v3d_perfmon_stop(v3d, v3d->active_perfmon, true);
- if (job->perfmon && v3d->active_perfmon != job->perfmon)
- v3d_perfmon_start(v3d, job->perfmon);
+ if (perfmon && v3d->active_perfmon != perfmon)
+ v3d_perfmon_start(v3d, perfmon);
}
static void
@@ -218,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ spin_lock_irqsave(&v3d->job_lock, irqflags);
+ v3d->bin_job = NULL;
+ spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
+ }
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
@@ -273,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->render_job = NULL;
return NULL;
+ }
v3d->render_job = job;
@@ -319,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->tfu_job = NULL;
+ return NULL;
+ }
+
+ v3d->tfu_job = job;
+
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
- v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@@ -361,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->csd_job = NULL;
+ return NULL;
+ }
+
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index d607aa9c4ec2..4ff5de46fb22 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -11,10 +11,11 @@
#include "v3d_trace.h"
/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
+ * we can attach fences and update the reservations after pushing the job
+ * to the queue.
*
* We don't lock the RCL, the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
+ * (all of which are on render->unref_list). They're entirely private
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
@@ -55,11 +56,11 @@ fail:
* @bo_count: Number of GEM handles passed in
*
* The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
+ * the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
*
* Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
+ * failure, because that will happen at v3d_job_free().
*/
static int
v3d_lookup_bos(struct drm_device *dev,
@@ -981,6 +982,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
render->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
@@ -1196,6 +1202,11 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
job->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
if (!job->base.perfmon) {
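
From userspace, both submit ioctls now return EAGAIN for a per-client perfmon while a global monitor holds the counters. A hedged sketch of a caller coping with that (drmIoctl is libdrm's wrapper; the retry-without-perfmon policy is ours, not mandated by the kernel):

	if (drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &args) == -1 &&
	    errno == EAGAIN && args.perfmon_id) {
		/* a global perfmon owns the counters: resubmit without ours */
		args.perfmon_id = 0;
		if (drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &args) == -1)
			return -errno;
	}
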
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index a536c467e2b2..bb861f0a0a31 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/vt_kern.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_file.h>
@@ -189,7 +189,6 @@ static const struct drm_driver driver = {
.fops = &vbox_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e77bd6512eb1..dfa935f381a6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
-#define DRIVER_DATE "20130823"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index c5f30b317698..6cc7b7e6294a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -10,6 +10,7 @@ config DRM_VC4
depends on COMMON_CLK
depends on PM
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index 6527fb1db71e..e276a957b01c 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -51,8 +51,8 @@ struct vc4_mock_desc {
static const struct vc4_mock_desc vc4_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
@@ -77,8 +77,8 @@ static const struct vc4_mock_desc vc4_mock =
static const struct vc4_mock_desc vc5_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 61622e951031..40a05869a50e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -90,7 +90,7 @@ static const struct encoder_constraint vc4_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2),
};
@@ -98,7 +98,7 @@ static const struct encoder_constraint vc5_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2),
@@ -207,7 +207,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("1 output: DSI1",
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("1 output: TXP",
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0),
@@ -219,7 +219,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0),
@@ -231,19 +231,19 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1",
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: VEC, TXP",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
@@ -251,7 +251,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -259,7 +259,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
@@ -267,7 +267,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -275,7 +275,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing,
@@ -287,7 +287,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_DSI0),
VC4_PV_MUXING_TEST("TXP/DSI1 Conflict",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("HDMI0/VEC Conflict",
VC4_ENCODER_TYPE_HDMI0,
@@ -296,22 +296,22 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid,
@@ -342,7 +342,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DPI, VEC",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC),
@@ -360,7 +360,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC),
@@ -372,7 +372,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP",
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
@@ -384,7 +384,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
@@ -393,14 +393,14 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI1, TXP",
VC4_ENCODER_TYPE_HDMI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: TXP, VEC",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -415,15 +415,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -440,7 +440,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -455,15 +455,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -490,17 +490,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -519,17 +519,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
@@ -540,19 +540,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
@@ -563,24 +563,24 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -599,17 +599,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
@@ -620,19 +620,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
@@ -643,27 +643,27 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ee82a959d279..cf40a53ad42e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,13 +83,22 @@ static unsigned int
vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
{
struct vc4_hvs *hvs = vc4->hvs;
- u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ u32 dispbase, top, base;
+
/* Top/base are supposed to be 4-pixel aligned, but the
* Raspberry Pi firmware fills the low bits (which are
* presumably ignored).
*/
- u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
- u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dispbase = HVS_READ(SCALER6_DISPX_COB(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_BASE) & ~3;
+ } else {
+ dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+ }
return top - base + 4;
}
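
The arithmetic is identical on both register layouts; only the register and field names change. A worked example under assumed raw values shows why the trailing "+ 4" is needed (top/base describe an inclusive range of 4-pixel-aligned entries):

	u32 top  = 0x1f4 & ~3;		/* firmware may fill the low bits */
	u32 base = 0x064 & ~3;
	u32 cob  = top - base + 4;	/* 0x194 pixels of COB space */
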
@@ -122,7 +131,10 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
* Read vertical scanline which is currently composed for our
* pixelvalve by the HVS, and also the scaler status.
*/
- val = HVS_READ(SCALER_DISPSTATX(channel));
+ if (vc4->gen >= VC4_GEN_6_C)
+ val = HVS_READ(SCALER6_DISPX_STATUS(channel));
+ else
+ val = HVS_READ(SCALER_DISPSTATX(channel));
/* Get optional system timestamp after query. */
if (etime)
@@ -131,7 +143,12 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Vertical position of hvs composed scanline. */
- *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ *vpos = VC4_GET_FIELD(val, SCALER6_DISPX_STATUS_YLINE);
+ else
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
*hpos = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -223,6 +240,11 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
+
+ /*
+ * NOTE: Could we use register 0x68 (PV_HW_CFG1) to get the FIFO
+ * size?
+ */
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -404,6 +426,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
(odd_field_first
@@ -415,6 +438,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
@@ -429,11 +453,17 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->gen == VC4_GEN_5)
+ if (vc4->gen >= VC4_GEN_5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
+ if (vc4->gen >= VC4_GEN_6_C)
+ CRTC_WRITE(PV_PIPE_INIT_CTRL,
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_WIDTH) |
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_IDLE) |
+ PV_PIPE_INIT_CTRL_PV_INIT_EN);
+
CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
@@ -459,8 +489,10 @@ static void require_hvs_enabled(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
- SCALER_DISPCTRL_ENABLE);
+ if (vc4->gen >= VC4_GEN_6_C)
+ WARN_ON_ONCE(!(HVS_READ(SCALER6_CONTROL) & SCALER6_CONTROL_HVS_EN));
+ else
+ WARN_ON_ONCE(!(HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE));
}
static int vc4_crtc_disable(struct drm_crtc *crtc,
@@ -530,7 +562,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve2") ||
of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
- "brcm,bcm2711-pixelvalve4")))
+ "brcm,bcm2711-pixelvalve4") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve0") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve1")))
return 0;
if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
@@ -789,14 +825,21 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int current_dlist;
u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock(&vc4_crtc->irq_lock);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ current_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(chan)),
+ SCALER6_DISPX_DL_LACT);
+ else
+ current_dlist = HVS_READ(SCALER_DISPLACTX(chan));
+
if (vc4_crtc->event &&
- (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
- vc4_crtc->feeds_txp)) {
+ (vc4_crtc->current_dlist == current_dlist || vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -807,7 +850,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
* the CRTC and encoder already reconfigured, leading to
* underruns. This can be seen when reconfiguring the CRTC.
*/
- vc4_hvs_unmask_underrun(hvs, chan);
+ if (vc4->gen < VC4_GEN_6_C)
+ vc4_hvs_unmask_underrun(hvs, chan);
}
spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
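
Reduced to its decision, the hunk keeps one comparison for every generation and only varies how current_dlist is fetched: the pending vblank event is released once the HVS reports it has latched the display list this CRTC last committed, so userspace never sees a flip signalled while the old buffer is still being scanned out (writeback through the TXP is exempt, as it does not free-run). As a sketch:

	bool flip_done = vc4_crtc->current_dlist == current_dlist ||
			 vc4_crtc->feeds_txp;

	if (vc4_crtc->event && flip_done) {
		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
		vc4_crtc->event = NULL;
		drm_crtc_vblank_put(crtc);
	}
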
@@ -1265,6 +1309,32 @@ const struct vc4_pv_data bcm2711_pv4_data = {
},
};
+const struct vc4_pv_data bcm2712_pv0_data = {
+ .base = {
+ .debugfs_name = "crtc0_regs",
+ .hvs_available_channels = BIT(0),
+ .hvs_output = 0,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI0,
+ },
+};
+
+const struct vc4_pv_data bcm2712_pv1_data = {
+ .base = {
+ .debugfs_name = "crtc1_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 1,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI1,
+ },
+};
+
static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
@@ -1274,6 +1344,8 @@ static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
{ .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
{ .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
+ { .compatible = "brcm,bcm2712-pixelvalve0", .data = &bcm2712_pv0_data },
+ { .compatible = "brcm,bcm2712-pixelvalve1", .data = &bcm2712_pv1_data },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2c60d37275b0..c7cb1e3a6434 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -31,8 +31,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -47,7 +47,6 @@
#define DRIVER_NAME "vc4"
#define DRIVER_DESC "Broadcom VC4 graphics"
-#define DRIVER_DATE "20140616"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -222,7 +221,6 @@ const struct drm_driver vc4_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -244,7 +242,6 @@ const struct drm_driver vc5_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -279,6 +276,7 @@ static void vc4_component_unbind_all(void *ptr)
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
@@ -300,16 +298,18 @@ static int vc4_drm_bind(struct device *dev)
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"))
- gen = VC4_GEN_5;
- else
- gen = VC4_GEN_4;
+ gen = (enum vc4_gen)of_device_get_match_data(dev);
if (gen > VC4_GEN_4)
driver = &vc5_drm_driver;
else
driver = &vc4_drm_driver;
+ if (gen >= VC4_GEN_6_C)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
NULL);
if (node) {
@@ -462,9 +462,11 @@ static void vc4_platform_drm_shutdown(struct platform_device *pdev)
}
static const struct of_device_id vc4_of_match[] = {
- { .compatible = "brcm,bcm2711-vc5", },
- { .compatible = "brcm,bcm2835-vc4", },
- { .compatible = "brcm,cygnus-vc4", },
+ { .compatible = "brcm,bcm2711-vc5", .data = (void *)VC4_GEN_5 },
+ /* NB GEN_6_C will be corrected on D0 hw to GEN_6_D via vc4_hvs_bind */
+ { .compatible = "brcm,bcm2712-vc6", .data = (void *)VC4_GEN_6_C },
+ { .compatible = "brcm,bcm2835-vc4", .data = (void *)VC4_GEN_4 },
+ { .compatible = "brcm,cygnus-vc4", .data = (void *)VC4_GEN_4 },
{},
};
MODULE_DEVICE_TABLE(of, vc4_of_match);
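
With the generation carried in the match table, the bind-time chain of of_device_is_compatible() calls collapses into a single lookup; a condensed sketch of the pattern (the helper name is hypothetical):

	static int vc4_set_dma_mask(struct device *dev)
	{
		enum vc4_gen gen =
			(enum vc4_gen)(uintptr_t)of_device_get_match_data(dev);

		/* BCM2712's HVS addresses 36 bits of DMA; older parts use 32 */
		return dma_set_mask_and_coherent(dev,
				DMA_BIT_MASK(gen >= VC4_GEN_6_C ? 36 : 32));
	}
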
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index c6be1997f1c7..4a078ffd9f82 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -84,6 +84,8 @@ struct vc4_perfmon {
enum vc4_gen {
VC4_GEN_4,
VC4_GEN_5,
+ VC4_GEN_6_C,
+ VC4_GEN_6_D,
};
struct vc4_dev {
@@ -316,6 +318,21 @@ struct vc4_v3d {
struct debugfs_regset32 regset;
};
+#define VC4_NUM_UPM_HANDLES 32
+struct vc4_upm_refcounts {
+ refcount_t refcount;
+
+ /* Allocation size */
+ size_t size;
+ /* Our allocation in UPM for prefetching. */
+ struct drm_mm_node upm;
+
+ /* Pointer back to the HVS structure */
+ struct vc4_hvs *hvs;
+};
+
+#define HVS_NUM_CHANNELS 3
+
struct vc4_hvs {
struct vc4_dev *vc4;
struct platform_device *pdev;
@@ -324,6 +341,7 @@ struct vc4_hvs {
unsigned int dlist_mem_size;
struct clk *core_clk;
+ struct clk *disp_clk;
unsigned long max_core_rate;
@@ -331,8 +349,15 @@ struct vc4_hvs {
* list. Units are dwords.
*/
struct drm_mm dlist_mm;
+
/* Memory manager for the LBM memory used by HVS scaling. */
struct drm_mm lbm_mm;
+
+ /* Memory manager for the UPM memory used for prefetching. */
+ struct drm_mm upm_mm;
+ struct ida upm_handles;
+ struct vc4_upm_refcounts upm_refcounts[VC4_NUM_UPM_HANDLES + 1];
+
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
@@ -355,6 +380,7 @@ struct vc4_hvs {
};
#define HVS_NUM_CHANNELS 3
+#define HVS_UBM_WORD_SIZE 256
struct vc4_hvs_state {
struct drm_private_state base;
@@ -424,6 +450,12 @@ struct vc4_plane_state {
/* Our allocation in LBM for temporary storage during scaling. */
struct drm_mm_node lbm;
+ /* The Unified Pre-Fetcher Handle */
+ unsigned int upm_handle[DRM_FORMAT_MAX_PLANES];
+
+ /* Number of lines to pre-fetch */
+ unsigned int upm_buffer_lines;
+
/* Set when the plane has per-pixel alpha content or does not cover
* the entire screen. This is a hint to the CRTC that it might need
* to enable background color fill.
@@ -458,7 +490,8 @@ enum vc4_encoder_type {
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_SMI,
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
+ VC4_ENCODER_TYPE_TXP1,
};
struct vc4_encoder {
@@ -505,7 +538,16 @@ struct vc4_crtc_data {
int hvs_output;
};
-extern const struct vc4_crtc_data vc4_txp_crtc_data;
+struct vc4_txp_data {
+ struct vc4_crtc_data base;
+ enum vc4_encoder_type encoder_type;
+ unsigned int high_addr_ptr_reg;
+ unsigned int has_byte_enable:1;
+ unsigned int size_minus_one:1;
+ unsigned int supports_40bit_addresses:1;
+};
+
+extern const struct vc4_txp_data bcm2835_txp_data;
struct vc4_pv_data {
struct vc4_crtc_data base;
@@ -527,6 +569,8 @@ extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;
+extern const struct vc4_pv_data bcm2712_pv0_data;
+extern const struct vc4_pv_data bcm2712_pv1_data;
struct vc4_crtc {
struct drm_crtc base;
@@ -637,6 +681,12 @@ struct vc4_crtc_state {
writel(val, hvs->regs + (offset)); \
} while (0)
+#define HVS_READ6(offset) \
+ HVS_READ(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset)
+
+#define HVS_WRITE6(offset, val) \
+ HVS_WRITE(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset, val)
+
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
struct vc4_exec_info {
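
A usage sketch for the two accessors: the token pasting resolves against the C0 register map when gen == VC4_GEN_6_C and the D0 map otherwise, so gen-6 code can stay register-map agnostic. Assuming the HVS_EN bit occupies the same position on both steppings, as the shared define used elsewhere in this series suggests:

	/* expands to SCALER6_CONTROL on C0 hardware, SCALER6D_CONTROL on D0 */
	HVS_WRITE6(CONTROL, HVS_READ6(CONTROL) | SCALER6_CONTROL_HVS_EN);
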
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index e3818c48c9b8..47d9ada98430 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -31,6 +31,7 @@
* encoder block has CEC support.
*/
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
@@ -383,7 +384,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
- const struct drm_edid *drm_edid;
int ret;
/*
@@ -405,17 +405,14 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
return;
}
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- drm_edid_connector_update(connector, drm_edid);
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
- if (!drm_edid)
+ if (status != connector_status_connected)
return;
- drm_edid_free(drm_edid);
-
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
if (ret == -EDEADLK) {
@@ -470,31 +467,10 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
- const struct drm_edid *drm_edid;
int ret = 0;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in reentrancy issues since cec_s_phys_addr() might call
- * .adap_enable, which leads to that funtion being called with our mutex
- * held.
- *
- * Concurrency isn't an issue at the moment since we don't share
- * any state with any of the other frameworks so we can ignore
- * the lock for now.
- */
-
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
- if (!drm_edid)
- return 0;
-
ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
@@ -570,6 +546,7 @@ static void vc4_hdmi_connector_reset(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
+ .force = drm_atomic_helper_connector_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -580,9 +557,11 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
.detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static const struct drm_connector_hdmi_funcs vc4_hdmi_hdmi_connector_funcs;
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs;
static int vc4_hdmi_connector_init(struct drm_device *dev,
struct vc4_hdmi *vc4_hdmi)
@@ -608,6 +587,12 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ &vc4_hdmi_audio_funcs,
+ 8, false, -1);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -845,6 +830,7 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned long flags;
int idx;
@@ -861,14 +847,25 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
+ if (vc4->gen >= VC4_GEN_6_C)
+ HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) |
+ VC4_HD_VID_CTL_BLANKPIX);
+
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mdelay(1);
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- HDMI_WRITE(HDMI_VID_CTL,
- HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ /*
+ * TODO: This should work on BCM2712, but doesn't for some
+ * reason and results in a system lockup.
+ */
+ if (vc4->gen < VC4_GEN_6_C) {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) &
+ ~VC4_HD_VID_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ }
vc4_hdmi_disable_scrambling(encoder);
@@ -1488,7 +1485,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
goto err_put_runtime_pm;
}
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (tmds_char_rate > 297000000)
@@ -1594,6 +1590,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
+ (HDMI_READ(HDMI_VID_CTL) &
+ ~(VC4_HD_VID_CTL_VSYNC_LOW | VC4_HD_VID_CTL_HSYNC_LOW)) |
VC4_HD_VID_CTL_ENABLE |
VC4_HD_VID_CTL_CLRRGB |
VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
@@ -1752,7 +1750,6 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long long rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
!(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
@@ -1760,8 +1757,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
- return vc4_hdmi_connector_clock_valid(&vc4_hdmi->connector, mode, rate);
+ return MODE_OK;
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
@@ -1909,9 +1905,9 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
return true;
}
-static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+static int vc4_hdmi_audio_startup(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret = 0;
@@ -1973,9 +1969,9 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
-static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
+static void vc4_hdmi_audio_shutdown(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2045,13 +2041,12 @@ static int sample_rate_to_mai_fmt(int samplerate)
}
/* HDMI audio codec callbacks */
-static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
+static int vc4_hdmi_audio_prepare(struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
- struct drm_connector *connector = &vc4_hdmi->connector;
struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
@@ -2063,7 +2058,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
int ret = 0;
int idx;
- dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ dev_dbg(&vc4_hdmi->pdev->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
@@ -2110,18 +2105,33 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold */
- if (vc4->gen >= VC4_GEN_5)
+ switch (vc4->gen) {
+ case VC4_GEN_6_D:
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQLOW));
+ break;
+ case VC4_GEN_6_C:
+ case VC4_GEN_5:
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) |
VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW));
- else
+ break;
+ case VC4_GEN_4:
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) |
VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) |
VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW));
+ break;
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ break;
+ }
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
@@ -2187,40 +2197,12 @@ static const struct snd_dmaengine_pcm_config pcm_conf = {
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
-static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
-
- mutex_lock(&vc4_hdmi->mutex);
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
- mutex_unlock(&vc4_hdmi->mutex);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
- .get_eld = vc4_hdmi_audio_get_eld,
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
+ .startup = vc4_hdmi_audio_startup,
.prepare = vc4_hdmi_audio_prepare,
- .audio_shutdown = vc4_hdmi_audio_shutdown,
- .audio_startup = vc4_hdmi_audio_startup,
-};
-
-static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
- .ops = &vc4_hdmi_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
+ .shutdown = vc4_hdmi_audio_shutdown,
};
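
Collecting the two far-apart hunks into one view (a recap of the patch, not new code): the ASoC glue no longer spawns its own hdmi-audio-codec platform device; drm_connector_hdmi_audio_init(), called at connector init with 8 i2s channels, no S/PDIF, and DAI port -1, registers the codec against the connector, and the three callbacks replace the old hdmi_codec_ops, with the ELD now served by the helper instead of the removed get_eld:

	static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
		.startup  = vc4_hdmi_audio_startup,
		.prepare  = vc4_hdmi_audio_prepare,
		.shutdown = vc4_hdmi_audio_shutdown,
	};

	ret = drm_connector_hdmi_audio_init(connector, dev->dev,
					    &vc4_hdmi_audio_funcs, 8, false, -1);
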
-static void vc4_hdmi_audio_codec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2228,7 +2210,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link;
struct snd_soc_card *card = &vc4_hdmi->audio.card;
struct device *dev = &vc4_hdmi->pdev->dev;
- struct platform_device *codec_pdev;
const __be32 *addr;
int index, len;
int ret;
@@ -2321,20 +2302,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
- codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &vc4_hdmi_codec_pdata,
- sizeof(vc4_hdmi_codec_pdata));
- if (IS_ERR(codec_pdev)) {
- dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
- return PTR_ERR(codec_pdev);
- }
- vc4_hdmi->audio.codec_pdev = codec_pdev;
-
- ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
- if (ret)
- return ret;
-
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2347,7 +2314,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->stream_name = "MAI PCM";
dai_link->codecs->dai_name = "i2s-hifi";
dai_link->cpus->dai_name = dev_name(dev);
- dai_link->codecs->name = dev_name(&codec_pdev->dev);
+ dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
@@ -3121,6 +3088,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ clk_disable_unprepare(vc4_hdmi->audio_clock);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
@@ -3153,6 +3121,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
goto err_disable_clk;
}
+ ret = clk_prepare_enable(vc4_hdmi->audio_clock);
+ if (ret)
+ goto err_disable_clk;
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -3273,7 +3245,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
- of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
clk_prepare_enable(vc4_hdmi->pixel_clock);
clk_prepare_enable(vc4_hdmi->hsm_clock);
@@ -3407,10 +3381,66 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.hp_detect = vc5_hdmi_hp_detect,
};
+static const struct vc4_hdmi_variant bcm2712_hdmi0_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI0,
+ .debugfs_name = "hdmi0_regs",
+ .card_name = "vc4-hdmi-0",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi0_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi0_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
+static const struct vc4_hdmi_variant bcm2712_hdmi1_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI1,
+ .debugfs_name = "hdmi1_regs",
+ .card_name = "vc4-hdmi-1",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi1_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi1_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
static const struct of_device_id vc4_hdmi_dt_match[] = {
{ .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant },
{ .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant },
{ .compatible = "brcm,bcm2711-hdmi1", .data = &bcm2711_hdmi1_variant },
+ { .compatible = "brcm,bcm2712-hdmi0", .data = &bcm2712_hdmi0_variant },
+ { .compatible = "brcm,bcm2712-hdmi1", .data = &bcm2712_hdmi1_variant },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index b37f1d2c3fe5..e3d989ca302b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -104,8 +104,6 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
- struct hdmi_audio_infoframe infoframe;
- struct platform_device *codec_pdev;
bool streaming;
};
@@ -237,4 +235,8 @@ void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state);
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
+
#endif /* _VC4_HDMI_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 1f5507fc7a03..56e6a35da357 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -125,6 +125,48 @@
#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24
#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP BIT(8)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP BIT(7)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP BIT(6)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_RNDGEN_PWRUP BIT(4)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP BIT(3)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP BIT(2)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP BIT(1)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS BIT(13)
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL_MASK VC4_MASK(3, 2)
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV_MASK VC4_MASK(1, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN BIT(10)
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL_MASK VC4_MASK(31, 28)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE_MASK VC4_MASK(27, 27)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL_MASK VC4_MASK(26, 26)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN_MASK VC4_MASK(25, 25)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL_MASK VC4_MASK(24, 23)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN_MASK VC4_MASK(22, 22)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL_MASK VC4_MASK(21, 21)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN_MASK VC4_MASK(20, 20)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL_MASK VC4_MASK(19, 18)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN_MASK VC4_MASK(17, 17)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN_MASK VC4_MASK(16, 16)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL_MASK VC4_MASK(15, 12)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN_MASK VC4_MASK(11, 11)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT_MASK VC4_MASK(10, 8)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT_MASK VC4_MASK(7, 5)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING_MASK VC4_MASK(4, 3)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING_MASK VC4_MASK(2, 1)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN_MASK VC4_MASK(0, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_PLLPOST_RESETB BIT(1)
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP BIT(0)
+
#define OSCILLATOR_FREQUENCY 54000000
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
@@ -558,3 +600,601 @@ void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
+
+#define VC6_VCO_MIN_FREQ (8ULL * 1000 * 1000 * 1000)
+#define VC6_VCO_MAX_FREQ (12ULL * 1000 * 1000 * 1000)
+
+static unsigned long long
+vc6_phy_get_vco_freq(unsigned long long tmds_rate, unsigned int *vco_div)
+{
+ unsigned int min_div;
+ unsigned int max_div;
+ unsigned int div;
+
+ div = 0;
+ while (tmds_rate * div * 10 < VC6_VCO_MIN_FREQ)
+ div++;
+ min_div = div;
+
+ while (tmds_rate * (div + 1) * 10 < VC6_VCO_MAX_FREQ)
+ div++;
+ max_div = div;
+
+ div = min_div + (max_div - min_div) / 2;
+
+ *vco_div = div;
+ return tmds_rate * div * 10;
+}
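+
+/*
+ * Editorial sketch: a worked example of the divider search above,
+ * against the 8-12 GHz VCO window, for a 148.5 MHz TMDS character
+ * rate (1080p60 at 8 bpc):
+ *
+ *   min_div = 6   (6 * 1.485 GHz =  8.91 GHz, first multiple >= 8 GHz)
+ *   max_div = 8   (9 * 1.485 GHz = 13.37 GHz would overshoot 12 GHz)
+ *   div     = 6 + (8 - 6) / 2 = 7
+ *   vco     = 148.5 MHz * 7 * 10 = 10.395 GHz, near mid-window
+ *
+ * so vc6_phy_get_vco_freq(148500000ULL, &vco_div) returns 10.395 GHz
+ * with vco_div == 7.
+ */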
+
+struct vc6_phy_lane_settings {
+ unsigned int ext_current_ctl:4;
+ unsigned int ffe_enable:1;
+ unsigned int slew_rate_ctl:1;
+ unsigned int ffe_post_tap_en:1;
+ unsigned int ldmos_bias_ctl:2;
+ unsigned int com_mode_ldmos_en:1;
+ unsigned int edge_sel:1;
+ unsigned int ext_current_src_hs_en:1;
+ unsigned int term_ctl:2;
+ unsigned int ext_current_src_en:1;
+ unsigned int int_current_src_en:1;
+ unsigned int int_current_ctl:4;
+ unsigned int int_current_src_hs_en:1;
+ unsigned int main_tap_current_select:3;
+ unsigned int post_tap_current_select:3;
+ unsigned int slew_ctl_slow_loading:2;
+ unsigned int slew_ctl_slow_driving:2;
+ unsigned int ffe_pre_tap_en:1;
+};
+
+struct vc6_phy_settings {
+ unsigned long long min_rate;
+ unsigned long long max_rate;
+ struct vc6_phy_lane_settings channel[3];
+ struct vc6_phy_lane_settings clock;
+};
+
+static const struct vc6_phy_settings vc6_hdmi_phy_settings[] = {
+ {
+ 0, 222000000,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ 222000001, 297000000,
+ {
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+ },
+ },
+ {
+ 297000001, 597000044,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* External Current Source Half Swing Enable */
+ .ext_current_src_hs_en = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+};
+
+static const struct vc6_phy_settings *
+vc6_phy_get_settings(unsigned long long tmds_rate)
+{
+ unsigned int count = ARRAY_SIZE(vc6_hdmi_phy_settings);
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct vc6_phy_settings *s = &vc6_hdmi_phy_settings[i];
+
+ if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate)
+ return s;
+ }
+
+ /*
+ * If the pixel clock exceeds our max setting, try the max
+ * setting anyway.
+ */
+ return &vc6_hdmi_phy_settings[count - 1];
+}
+
+static const struct vc6_phy_lane_settings *
+vc6_phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
+ unsigned long long tmds_rate)
+{
+ const struct vc6_phy_settings *settings = vc6_phy_get_settings(tmds_rate);
+
+ if (chan == PHY_LANE_CK)
+ return &settings->clock;
+
+ return &settings->channel[chan];
+}
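+
+/*
+ * Editorial usage sketch: a 594 MHz TMDS character rate (2160p60 at
+ * 8 bpc) falls in the 297000001..597000044 bucket above, so
+ * vc6_phy_get_channel_settings(PHY_LANE_CK, 594000000ULL) returns the
+ * clock-lane settings with 50 Ohm termination (term_ctl == 3) and,
+ * unlike the data lanes, both half-swing current sources enabled.
+ */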
+
+static void vc6_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
+{
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL, 0);
+}
+
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state)
+{
+ const struct vc6_phy_lane_settings *chan0_settings;
+ const struct vc6_phy_lane_settings *chan1_settings;
+ const struct vc6_phy_lane_settings *chan2_settings;
+ const struct vc6_phy_lane_settings *clock_settings;
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ unsigned long long pixel_freq = conn_state->hdmi.tmds_char_rate;
+ unsigned long long vco_freq;
+ unsigned char word_sel;
+ unsigned long flags;
+ unsigned int vco_div;
+
+ vco_freq = vc6_phy_get_vco_freq(pixel_freq, &vco_div);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
+ vc6_hdmi_reset_phy(vc4_hdmi);
+
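+ /*
+ * Opaque PLL configuration; these magic values are assumed to
+ * come from the vendor's reference settings and are written
+ * verbatim rather than derived at runtime.
+ */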
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_0, 0x810c6000);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_1, 0x00b8c451);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_2, 0x46402e31);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_3, 0x00b8c005);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_4, 0x42410261);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_5, 0xcc021001);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_6, 0xc8301c80);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_7, 0xb0804444);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_8, 0xf80f8000);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_REFCLK,
+ VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS |
+ VC4_SET_FIELD(54, VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ));
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x7f);
+
+ HDMI_WRITE(HDMI_RM_OFFSET,
+ VC4_HDMI_RM_OFFSET_ONLY |
+ VC4_SET_FIELD(phy_get_rm_offset(vco_freq),
+ VC4_HDMI_RM_OFFSET_OFFSET));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_VCOCLK_DIV,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN |
+ VC4_SET_FIELD(vco_div,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CFG,
+ VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CFG_PDIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POST_KDIV,
+ VC4_SET_FIELD(2, VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL) |
+ VC4_SET_FIELD(1, VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV));
+
+ chan0_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ VC4_SET_FIELD(chan0_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan0_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan0_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan0_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan0_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan0_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan1_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_1,
+ VC4_SET_FIELD(chan1_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan1_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan1_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan1_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan1_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan1_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan2_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_2,
+ VC4_SET_FIELD(chan2_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan2_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan2_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan2_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan2_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan2_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ clock_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_CK,
+ VC4_SET_FIELD(clock_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(clock_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(clock_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(clock_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(clock_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(clock_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
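+ /*
+ * 340 MHz is the HDMI 1.4 TMDS character rate limit; above it,
+ * HDMI 2.0 runs the TMDS clock at 1/4 of the character rate,
+ * which presumably requires a different clock word selection
+ * (the exact encoding of this field is undocumented here).
+ */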
+ if (pixel_freq >= 340000000)
+ word_sel = 3;
+ else
+ word_sel = 0;
+ HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel);
+
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) &
+ ~VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) |
+ VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+}
+
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
+{
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 68455ce513e7..59bfd69f54d9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -111,13 +111,30 @@ enum vc4_hdmi_field {
HDMI_TX_PHY_CTL_1,
HDMI_TX_PHY_CTL_2,
HDMI_TX_PHY_CTL_3,
+ HDMI_TX_PHY_CTL_CK,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
HDMI_TX_PHY_PLL_CFG,
+ HDMI_TX_PHY_PLL_CFG_PDIV,
HDMI_TX_PHY_PLL_CTL_0,
HDMI_TX_PHY_PLL_CTL_1,
+ HDMI_TX_PHY_PLL_MISC_0,
+ HDMI_TX_PHY_PLL_MISC_1,
+ HDMI_TX_PHY_PLL_MISC_2,
+ HDMI_TX_PHY_PLL_MISC_3,
+ HDMI_TX_PHY_PLL_MISC_4,
+ HDMI_TX_PHY_PLL_MISC_5,
+ HDMI_TX_PHY_PLL_MISC_6,
+ HDMI_TX_PHY_PLL_MISC_7,
+ HDMI_TX_PHY_PLL_MISC_8,
+ HDMI_TX_PHY_PLL_POST_KDIV,
+ HDMI_TX_PHY_PLL_POWERUP_CTL,
+ HDMI_TX_PHY_PLL_REFCLK,
+ HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_TX_PHY_PLL_VCOCLK_DIV,
HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_TX_PHY_POWERUP_CTL,
HDMI_TX_PHY_RESET_CTL,
HDMI_TX_PHY_TMDS_CLK_WORD_SEL,
HDMI_VEC_INTERFACE_CFG,
@@ -411,6 +428,206 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
};
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi0_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0014),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0018),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x001c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0020),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0044),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi1_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0034),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0038),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x003c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0040),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0048),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
static inline
void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi,
enum vc4_hdmi_regs reg)
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 70623e6b91e9..4811d794001f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -67,6 +67,140 @@ static const struct debugfs_reg32 vc4_hvs_regs[] = {
VC4_REG32(SCALER_OLEDCOEF2),
};
+static const struct debugfs_reg32 vc6_hvs_regs[] = {
+ VC4_REG32(SCALER6_VERSION),
+ VC4_REG32(SCALER6_CXM_SIZE),
+ VC4_REG32(SCALER6_LBM_SIZE),
+ VC4_REG32(SCALER6_UBM_SIZE),
+ VC4_REG32(SCALER6_COBA_SIZE),
+ VC4_REG32(SCALER6_COB_SIZE),
+ VC4_REG32(SCALER6_CONTROL),
+ VC4_REG32(SCALER6_FETCHER_STATUS),
+ VC4_REG32(SCALER6_FETCH_STATUS),
+ VC4_REG32(SCALER6_HANDLE_ERROR),
+ VC4_REG32(SCALER6_DISP0_CTRL0),
+ VC4_REG32(SCALER6_DISP0_CTRL1),
+ VC4_REG32(SCALER6_DISP0_BGND),
+ VC4_REG32(SCALER6_DISP0_LPTRS),
+ VC4_REG32(SCALER6_DISP0_COB),
+ VC4_REG32(SCALER6_DISP0_STATUS),
+ VC4_REG32(SCALER6_DISP0_DL),
+ VC4_REG32(SCALER6_DISP0_RUN),
+ VC4_REG32(SCALER6_DISP1_CTRL0),
+ VC4_REG32(SCALER6_DISP1_CTRL1),
+ VC4_REG32(SCALER6_DISP1_BGND),
+ VC4_REG32(SCALER6_DISP1_LPTRS),
+ VC4_REG32(SCALER6_DISP1_COB),
+ VC4_REG32(SCALER6_DISP1_STATUS),
+ VC4_REG32(SCALER6_DISP1_DL),
+ VC4_REG32(SCALER6_DISP1_RUN),
+ VC4_REG32(SCALER6_DISP2_CTRL0),
+ VC4_REG32(SCALER6_DISP2_CTRL1),
+ VC4_REG32(SCALER6_DISP2_BGND),
+ VC4_REG32(SCALER6_DISP2_LPTRS),
+ VC4_REG32(SCALER6_DISP2_COB),
+ VC4_REG32(SCALER6_DISP2_STATUS),
+ VC4_REG32(SCALER6_DISP2_DL),
+ VC4_REG32(SCALER6_DISP2_RUN),
+ VC4_REG32(SCALER6_EOLN),
+ VC4_REG32(SCALER6_DL_STATUS),
+ VC4_REG32(SCALER6_BFG_MISC),
+ VC4_REG32(SCALER6_QOS0),
+ VC4_REG32(SCALER6_PROF0),
+ VC4_REG32(SCALER6_QOS1),
+ VC4_REG32(SCALER6_PROF1),
+ VC4_REG32(SCALER6_QOS2),
+ VC4_REG32(SCALER6_PROF2),
+ VC4_REG32(SCALER6_PRI_MAP0),
+ VC4_REG32(SCALER6_PRI_MAP1),
+ VC4_REG32(SCALER6_HISTCTRL),
+ VC4_REG32(SCALER6_HISTBIN0),
+ VC4_REG32(SCALER6_HISTBIN1),
+ VC4_REG32(SCALER6_HISTBIN2),
+ VC4_REG32(SCALER6_HISTBIN3),
+ VC4_REG32(SCALER6_HISTBIN4),
+ VC4_REG32(SCALER6_HISTBIN5),
+ VC4_REG32(SCALER6_HISTBIN6),
+ VC4_REG32(SCALER6_HISTBIN7),
+ VC4_REG32(SCALER6_HDR_CFG_REMAP),
+ VC4_REG32(SCALER6_COL_SPACE),
+ VC4_REG32(SCALER6_HVS_ID),
+ VC4_REG32(SCALER6_CFC1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO0),
+ VC4_REG32(SCALER6_DISP_UPM_ISO1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO2),
+ VC4_REG32(SCALER6_DISP_LBM_ISO0),
+ VC4_REG32(SCALER6_DISP_LBM_ISO1),
+ VC4_REG32(SCALER6_DISP_LBM_ISO2),
+ VC4_REG32(SCALER6_DISP_COB_ISO0),
+ VC4_REG32(SCALER6_DISP_COB_ISO1),
+ VC4_REG32(SCALER6_DISP_COB_ISO2),
+ VC4_REG32(SCALER6_BAD_COB),
+ VC4_REG32(SCALER6_BAD_LBM),
+ VC4_REG32(SCALER6_BAD_UPM),
+ VC4_REG32(SCALER6_BAD_AXI),
+};
+
+static const struct debugfs_reg32 vc6_d_hvs_regs[] = {
+ VC4_REG32(SCALER6D_VERSION),
+ VC4_REG32(SCALER6D_CXM_SIZE),
+ VC4_REG32(SCALER6D_LBM_SIZE),
+ VC4_REG32(SCALER6D_UBM_SIZE),
+ VC4_REG32(SCALER6D_COBA_SIZE),
+ VC4_REG32(SCALER6D_COB_SIZE),
+ VC4_REG32(SCALER6D_CONTROL),
+ VC4_REG32(SCALER6D_FETCHER_STATUS),
+ VC4_REG32(SCALER6D_FETCH_STATUS),
+ VC4_REG32(SCALER6D_HANDLE_ERROR),
+ VC4_REG32(SCALER6D_DISP0_CTRL0),
+ VC4_REG32(SCALER6D_DISP0_CTRL1),
+ VC4_REG32(SCALER6D_DISP0_BGND0),
+ VC4_REG32(SCALER6D_DISP0_BGND1),
+ VC4_REG32(SCALER6D_DISP0_LPTRS),
+ VC4_REG32(SCALER6D_DISP0_COB),
+ VC4_REG32(SCALER6D_DISP0_STATUS),
+ VC4_REG32(SCALER6D_DISP0_DL),
+ VC4_REG32(SCALER6D_DISP0_RUN),
+ VC4_REG32(SCALER6D_DISP1_CTRL0),
+ VC4_REG32(SCALER6D_DISP1_CTRL1),
+ VC4_REG32(SCALER6D_DISP1_BGND0),
+ VC4_REG32(SCALER6D_DISP1_BGND1),
+ VC4_REG32(SCALER6D_DISP1_LPTRS),
+ VC4_REG32(SCALER6D_DISP1_COB),
+ VC4_REG32(SCALER6D_DISP1_STATUS),
+ VC4_REG32(SCALER6D_DISP1_DL),
+ VC4_REG32(SCALER6D_DISP1_RUN),
+ VC4_REG32(SCALER6D_DISP2_CTRL0),
+ VC4_REG32(SCALER6D_DISP2_CTRL1),
+ VC4_REG32(SCALER6D_DISP2_BGND0),
+ VC4_REG32(SCALER6D_DISP2_BGND1),
+ VC4_REG32(SCALER6D_DISP2_LPTRS),
+ VC4_REG32(SCALER6D_DISP2_COB),
+ VC4_REG32(SCALER6D_DISP2_STATUS),
+ VC4_REG32(SCALER6D_DISP2_DL),
+ VC4_REG32(SCALER6D_DISP2_RUN),
+ VC4_REG32(SCALER6D_EOLN),
+ VC4_REG32(SCALER6D_DL_STATUS),
+ VC4_REG32(SCALER6D_QOS0),
+ VC4_REG32(SCALER6D_PROF0),
+ VC4_REG32(SCALER6D_QOS1),
+ VC4_REG32(SCALER6D_PROF1),
+ VC4_REG32(SCALER6D_QOS2),
+ VC4_REG32(SCALER6D_PROF2),
+ VC4_REG32(SCALER6D_PRI_MAP0),
+ VC4_REG32(SCALER6D_PRI_MAP1),
+ VC4_REG32(SCALER6D_HISTCTRL),
+ VC4_REG32(SCALER6D_HISTBIN0),
+ VC4_REG32(SCALER6D_HISTBIN1),
+ VC4_REG32(SCALER6D_HISTBIN2),
+ VC4_REG32(SCALER6D_HISTBIN3),
+ VC4_REG32(SCALER6D_HISTBIN4),
+ VC4_REG32(SCALER6D_HISTBIN5),
+ VC4_REG32(SCALER6D_HISTBIN6),
+ VC4_REG32(SCALER6D_HISTBIN7),
+ VC4_REG32(SCALER6D_HVS_ID),
+};
+
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
struct drm_device *drm = &hvs->vc4->base;
@@ -145,6 +279,76 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
return 0;
}
+static int vc6_hvs_debugfs_dlist(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ unsigned int dlist_mem_size = hvs->dlist_mem_size;
+ unsigned int next_entry_start;
+ unsigned int i;
+
+ for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
+ unsigned int active_dlist, dispstat;
+ unsigned int j;
+
+ dispstat = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(i)),
+ SCALER6_DISPX_STATUS_MODE);
+ if (dispstat == SCALER6_DISPX_STATUS_MODE_DISABLED ||
+ dispstat == SCALER6_DISPX_STATUS_MODE_EOF) {
+ drm_printf(&p, "HVS chan %u disabled\n", i);
+ continue;
+ }
+
+ drm_printf(&p, "HVS chan %u:\n", i);
+
+ active_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(i)),
+ SCALER6_DISPX_DL_LACT);
+ next_entry_start = 0;
+
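+ /*
+ * Walk the display list from the active head: the first
+ * word of each entry encodes the entry size, which gives
+ * the offset of the next entry, until a word with the END
+ * bit set terminates the list.
+ */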
+ for (j = active_dlist; j < dlist_mem_size; j++) {
+ u32 dlist_word;
+
+ dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
+ drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
+ dlist_word);
+ if (!next_entry_start ||
+ next_entry_start == j) {
+ if (dlist_word & SCALER_CTL0_END)
+ break;
+ next_entry_start = j +
+ VC4_GET_FIELD(dlist_word,
+ SCALER_CTL0_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vc6_hvs_debugfs_upm_allocs(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct vc4_upm_refcounts *refcount;
+ unsigned int i;
+
+ drm_printf(&p, "UPM Handles:\n");
+ for (i = 1; i <= VC4_NUM_UPM_HANDLES; i++) {
+ refcount = &hvs->upm_refcounts[i];
+ drm_printf(&p, "handle %u: refcount %u, size %zu [%08llx + %08llx]\n",
+ i, refcount_read(&refcount->refcount), refcount->size,
+ refcount->upm.start, refcount->upm.size);
+ }
+
+ return 0;
+}
+
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
*/
@@ -215,12 +419,15 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
int idx;
u32 i;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -265,25 +472,56 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u8 field = 0;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(drm, &idx))
return 0;
- switch (fifo) {
- case 0:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT0);
+ switch (vc4->gen) {
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ field = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(fifo)),
+ SCALER6_DISPX_STATUS_FRCNT);
break;
- case 1:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT1);
+ case VC4_GEN_5:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER5_DISPSTAT2_FRCNT2);
+ break;
+ }
+ break;
+ case VC4_GEN_4:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER_DISPSTAT2_FRCNT2);
+ break;
+ }
break;
- case 2:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
- SCALER_DISPSTAT2_FRCNT2);
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
break;
}
@@ -297,6 +535,8 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
u32 reg;
int ret;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
switch (vc4->gen) {
case VC4_GEN_4:
return output;
@@ -352,6 +592,24 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
return -EPIPE;
}
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
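+ /*
+ * Outputs 0 and 2 are hardwired to FIFOs 0 and 2, while
+ * outputs 1, 3 and 4 all route through the muxable FIFO 1
+ * (see vc6_hvs_pv_muxing_commit()).
+ */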
+ switch (output) {
+ case 0:
+ return 0;
+
+ case 2:
+ return 2;
+
+ case 1:
+ case 3:
+ case 4:
+ return 1;
+
+ default:
+ return -EPIPE;
+ }
+
default:
return -EPIPE;
}
@@ -370,6 +628,8 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
u32 dispctrl;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
@@ -420,11 +680,50 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
return 0;
}
-void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+static int vc6_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ struct drm_display_mode *mode, bool oneshot)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int chan = vc4_crtc_state->assigned_channel;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 disp_ctrl1;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
+
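+ /* Put the channel in reset before reprogramming its geometry. */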
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan), SCALER6_DISPX_CTRL0_RESET);
+
+ disp_ctrl1 = HVS_READ(SCALER6_DISPX_CTRL1(chan));
+ disp_ctrl1 &= ~SCALER6_DISPX_CTRL1_INTLACE;
+ HVS_WRITE(SCALER6_DISPX_CTRL1(chan),
+ disp_ctrl1 | (interlace ? SCALER6_DISPX_CTRL1_INTLACE : 0));
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ SCALER6_DISPX_CTRL0_ENB |
+ VC4_SET_FIELD(mode->hdisplay - 1,
+ SCALER6_DISPX_CTRL0_FWIDTH) |
+ (oneshot ? SCALER6_DISPX_CTRL0_ONESHOT : 0) |
+ VC4_SET_FIELD(mode->vdisplay - 1,
+ SCALER6_DISPX_CTRL0_LINES));
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static void __vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -449,6 +748,44 @@ out:
drm_dev_exit(idx);
}
+static void __vc6_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ if (!(HVS_READ(SCALER6_DISPX_CTRL0(chan)) & SCALER6_DISPX_CTRL0_ENB))
+ goto out;
+
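+ /*
+ * Assert the channel reset, then drop the enable bit; the
+ * STATUS mode field should read back as disabled afterwards.
+ */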
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) | SCALER6_DISPX_CTRL0_RESET);
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) & ~SCALER6_DISPX_CTRL0_ENB);
+
+ WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(chan)),
+ SCALER6_DISPX_STATUS_MODE) !=
+ SCALER6_DISPX_STATUS_MODE_DISABLED);
+
+out:
+ drm_dev_exit(idx);
+}
+
+void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ __vc6_hvs_stop_channel(hvs, chan);
+ else
+ __vc4_hvs_stop_channel(hvs, chan);
+}
+
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
@@ -505,8 +842,13 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
if (!drm_dev_enter(dev, &idx))
return;
- HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
- vc4_state->mm.start);
+ if (vc4->gen >= VC4_GEN_6_C)
+ HVS_WRITE(SCALER6_DISPX_LPTRS(vc4_state->assigned_channel),
+ VC4_SET_FIELD(vc4_state->mm.start,
+ SCALER6_DISPX_LPTRS_HEADE));
+ else
+ HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
+ vc4_state->mm.start);
drm_dev_exit(idx);
}
@@ -561,7 +903,11 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
vc4_hvs_install_dlist(crtc);
vc4_hvs_update_dlist(crtc);
- vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ vc6_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+ else
+ vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
@@ -590,13 +936,15 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
struct drm_plane *plane;
struct vc4_plane_state *vc4_plane_state;
bool debug_dump_regs = false;
- bool enable_bg_fill = false;
+ bool enable_bg_fill = true;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
unsigned int zpos = 0;
bool found = false;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(dev, &idx)) {
vc4_crtc_send_vblank(crtc);
return;
@@ -645,13 +993,26 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (enable_bg_fill)
+ if (vc4->gen >= VC4_GEN_6_C) {
/* This sets a black background color fill, as is the case
* with other DRM drivers.
*/
+ if (enable_bg_fill)
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) |
+ SCALER6_DISPX_CTRL1_BGENB);
+ else
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) &
+ ~SCALER6_DISPX_CTRL1_BGENB);
+ } else {
+ /* We can actually run with a lower core clock when background
+ * fill is enabled on VC4_GEN_5, so leave it enabled always.
+ */
HVS_WRITE(SCALER_DISPBKGNDX(channel),
HVS_READ(SCALER_DISPBKGNDX(channel)) |
SCALER_DISPBKGND_FILL);
+ }
/* Only update DISPLIST if the CRTC was already running and is not
* being disabled.
@@ -668,6 +1029,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (crtc->state->gamma_lut) {
vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
dispbkgndx |= SCALER_DISPBKGND_GAMMA;
@@ -697,6 +1060,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -717,6 +1082,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
@@ -751,6 +1118,8 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 status;
u32 dspeislur;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
/*
* NOTE: We don't need to protect the register access using
* drm_dev_enter() there because the interrupt handler lifetime
@@ -802,7 +1171,12 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
minor->debugfs_root,
&vc4->load_tracker_enabled);
- drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ if (vc4->gen >= VC4_GEN_6_C) {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc6_hvs_debugfs_dlist, NULL);
+ drm_debugfs_add_file(drm, "hvs_upm", vc6_hvs_debugfs_upm_allocs, NULL);
+ } else {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ }
drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);
@@ -817,6 +1191,10 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
{
struct drm_device *drm = &vc4->base;
struct vc4_hvs *hvs;
+ unsigned int dlist_start;
+ size_t dlist_size;
+ size_t lbm_size;
+ unsigned int i;
hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -828,27 +1206,94 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
spin_lock_init(&hvs->mm_lock);
- /* Set up the HVS display list memory manager. We never
- * overwrite the setup from the bootloader (just 128b out of
- * our 16K), since we don't want to scramble the screen when
- * transitioning from the firmware's boot setup to runtime.
- */
- hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
- drm_mm_init(&hvs->dlist_mm,
- HVS_BOOTLOADER_DLIST_END,
- hvs->dlist_mem_size);
+ switch (vc4->gen) {
+ case VC4_GEN_4:
+ case VC4_GEN_5:
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128b
+ * out of our 16K), since we don't want to scramble the
+ * screen when transitioning from the firmware's boot
+ * setup to runtime.
+ */
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+ dlist_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+
+ /*
+ * When running under kunit we can't access hardware
+ * registers, so fall back to a plausible size.
+ */
+ if (!kunit_get_current_test())
+ dlist_size = HVS_READ(SCALER6_CXM_SIZE);
+ else
+ dlist_size = 4096;
+
+ for (i = 0; i < VC4_NUM_UPM_HANDLES; i++) {
+ refcount_set(&hvs->upm_refcounts[i].refcount, 0);
+ hvs->upm_refcounts[i].hvs = hvs;
+ }
+
+ break;
+
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
+
+ drm_mm_init(&hvs->dlist_mm, dlist_start, dlist_size);
+
+ hvs->dlist_mem_size = dlist_size;
/* Set up the HVS LBM memory manager. We could have some more
* complicated data structure that allowed reuse of LBM areas
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (vc4->gen == VC4_GEN_4)
+
+ switch (vc4->gen) {
+ case VC4_GEN_4:
/* 48k words of 2x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
- else
+ lbm_size = 48 * SZ_1K;
+ break;
+
+ case VC4_GEN_5:
/* 60k words of 4x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
+ lbm_size = 60 * SZ_1K;
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ /*
+ * The LBM size could be read from SCALER6_LBM_SIZE, but we
+ * can't access registers when running under kunit, so use a
+ * plausible fixed size.
+ */
+ lbm_size = 1024;
+ break;
+
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
+
+ drm_mm_init(&hvs->lbm_mm, 0, lbm_size);
+
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ida_init(&hvs->upm_handles);
+
+ /*
+ * NOTE: On BCM2712, the size can also be read through the
+ * SCALER6_UBM_SIZE register, but that would require a
+ * register access, which is not possible under kunit; the
+ * unit tests also use this function to create their mock
+ * device.
+ */
+ drm_mm_init(&hvs->upm_mm, 0, 1024 * HVS_UBM_WORD_SIZE);
+ }
+
vc4->hvs = hvs;
@@ -945,10 +1390,150 @@ static int vc4_hvs_hw_init(struct vc4_hvs *hvs)
return 0;
}
+#define CFC1_N_NL_CSC_CTRL(x) (0xa000 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C00(x) (0xa008 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C01(x) (0xa00c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C02(x) (0xa010 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C03(x) (0xa014 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C04(x) (0xa018 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C10(x) (0xa01c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C11(x) (0xa020 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C12(x) (0xa024 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C13(x) (0xa028 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C14(x) (0xa02c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C20(x) (0xa030 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C21(x) (0xa034 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C22(x) (0xa038 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C23(x) (0xa03c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C24(x) (0xa040 + ((x) * 0x3000))
+
+#define SCALER_PI_CMP_CSC_RED0(x) (0x200 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED1(x) (0x204 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED_CLAMP(x) (0x208 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_CFG(x) (0x20c + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN0(x) (0x210 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN1(x) (0x214 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN_CLAMP(x) (0x218 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE0(x) (0x220 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE1(x) (0x224 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE_CLAMP(x) (0x228 + ((x) * 0x40))
+
+/* 4 S2.22 multiplication factors and 1 S9.15 additive element for each of the
+ * 3 output components
+ */
+struct vc6_csc_coeff_entry {
+ u32 csc[3][5];
+};
+
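+/*
+ * Sanity check on the encoding (worked out from the fixed-point format
+ * above, not taken from documentation): 0x00400000 is exactly 1.0 in
+ * S2.22, matching the full-range Y gain below, and 0x004A8542 / 2^22 is
+ * ~1.1644, the usual 255/219 limited-range Y expansion factor.
+ */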
+static const struct vc6_csc_coeff_entry csc_coeffs[2][3] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0066254A, 0x0, 0xFF908A0D },
+ { 0x004A8542, 0xFFE6ED5D, 0xFFCBF856, 0x0, 0x0043C9A3 },
+ { 0x004A8542, 0x00811A54, 0x0, 0x0, 0xFF759502 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0072BC44, 0x0, 0xFF83F312 },
+ { 0x004A8542, 0xFFF25A22, 0xFFDDE4D0, 0x0, 0x00267064 },
+ { 0x004A8542, 0x00873197, 0x0, 0x0, 0xFF6F7DC0 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x006B4A17, 0x0, 0xFF8B653F },
+ { 0x004A8542, 0xFFF402D9, 0xFFDDE4D0, 0x0, 0x0024C7AE },
+ { 0x004A8542, 0x008912CC, 0x0, 0x0, 0xFF6D9C8B }
+ }
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0059BA5E, 0x0, 0xFFA645A1 },
+ { 0x00400000, 0xFFE9F9AC, 0xFFD24B97, 0x0, 0x0043BABB },
+ { 0x00400000, 0x00716872, 0x0, 0x0, 0xFF8E978D }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0064C985, 0x0, 0xFF9B367A },
+ { 0x00400000, 0xFFF402E1, 0xFFE20A40, 0x0, 0x0029F2DE },
+ { 0x00400000, 0x0076C226, 0x0, 0x0, 0xFF893DD9 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x005E3F14, 0x0, 0xFFA1C0EB },
+ { 0x00400000, 0xFFF577F6, 0xFFDB580F, 0x0, 0x002F2FFA },
+ { 0x00400000, 0x007868DB, 0x0, 0x0, 0xFF879724 }
+ }
+ }
+ }
+};
+
+static int vc6_hvs_hw_init(struct vc4_hvs *hvs)
+{
+ const struct vc6_csc_coeff_entry *coeffs;
+ unsigned int i;
+
+ HVS_WRITE(SCALER6_CONTROL,
+ SCALER6_CONTROL_HVS_EN |
+ VC4_SET_FIELD(8, SCALER6_CONTROL_PF_LINES) |
+ VC4_SET_FIELD(15, SCALER6_CONTROL_MAX_REQS));
+
+ /* Set HVS arbiter priority to max */
+ HVS_WRITE(SCALER6(PRI_MAP0), 0xffffffff);
+ HVS_WRITE(SCALER6(PRI_MAP1), 0xffffffff);
+
+ if (hvs->vc4->gen == VC4_GEN_6_C) {
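+ /*
+ * Program all six CFC instances; the index walks the two
+ * color ranges (limited, then full) times the three
+ * encodings, matching the csc_coeffs[][] layout above.
+ */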
+ for (i = 0; i < 6; i++) {
+ coeffs = &csc_coeffs[i / 3][i % 3];
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C00(i), coeffs->csc[0][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C01(i), coeffs->csc[0][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C02(i), coeffs->csc[0][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C03(i), coeffs->csc[0][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C04(i), coeffs->csc[0][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C10(i), coeffs->csc[1][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C11(i), coeffs->csc[1][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C12(i), coeffs->csc[1][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C13(i), coeffs->csc[1][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C14(i), coeffs->csc[1][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C20(i), coeffs->csc[2][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C21(i), coeffs->csc[2][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C22(i), coeffs->csc[2][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C23(i), coeffs->csc[2][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C24(i), coeffs->csc[2][4]);
+
+ HVS_WRITE(CFC1_N_NL_CSC_CTRL(i), BIT(15));
+ }
+ } else {
+ for (i = 0; i < 8; i++) {
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED0(i), 0x1f002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED1(i), 0x3994);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_CFG(i), 0x1);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN1(i), 0xf927eee2);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE1(i), 0x43d80000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE_CLAMP(i), 0xfff00000);
+ }
+ }
+
+ return 0;
+}
+
static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
{
struct vc4_dev *vc4 = hvs->vc4;
- u32 reg, top;
+ u32 reg, top, base;
/*
* Recompute Composite Output Buffer (COB) allocations for the
@@ -1009,6 +1594,32 @@ static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
HVS_WRITE(SCALER_DISPBASE0, reg);
break;
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
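+ /*
+ * Carve the COB into three per-display regions from the
+ * bottom up, with a 16-word guard gap between neighbours:
+ * display 2 gets a single line's worth, displays 1 and 0
+ * get four 3840-word lines each.
+ */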
+ #define VC6_COB_LINE_WIDTH 3840
+ #define VC6_COB_NUM_LINES 4
+ base = 0;
+ top = 3840;
+
+ HVS_WRITE(SCALER6_DISPX_COB(2),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(1),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(0),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+ break;
+
default:
return -EINVAL;
}
@@ -1034,10 +1645,23 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hvs);
hvs->regset.base = hvs->regs;
- hvs->regset.regs = vc4_hvs_regs;
- hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);
- if (vc4->gen == VC4_GEN_5) {
+ if (vc4->gen == VC4_GEN_6_C) {
+ hvs->regset.regs = vc6_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_hvs_regs);
+
+ if (VC4_GET_FIELD(HVS_READ(SCALER6_VERSION), SCALER6_VERSION) ==
+ SCALER6_VERSION_D0) {
+ vc4->gen = VC4_GEN_6_D;
+ hvs->regset.regs = vc6_d_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_d_hvs_regs);
+ }
+ } else {
+ hvs->regset.regs = vc4_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);
+ }
+
+ if (vc4->gen >= VC4_GEN_5) {
struct rpi_firmware *firmware;
struct device_node *node;
unsigned int max_rate;
@@ -1051,12 +1675,20 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (!firmware)
return -EPROBE_DEFER;
- hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
+ hvs->core_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "core" : NULL);
if (IS_ERR(hvs->core_clk)) {
dev_err(&pdev->dev, "Couldn't get core clock\n");
return PTR_ERR(hvs->core_clk);
}
+ hvs->disp_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "disp" : NULL);
+ if (IS_ERR(hvs->disp_clk)) {
+ dev_err(&pdev->dev, "Couldn't get disp clock\n");
+ return PTR_ERR(hvs->disp_clk);
+ }
+
max_rate = rpi_firmware_clk_get_max_rate(firmware,
RPI_FIRMWARE_CORE_CLK_ID);
rpi_firmware_put(firmware);
@@ -1073,14 +1705,23 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dev_err(&pdev->dev, "Couldn't enable the core clock\n");
return ret;
}
+
+ ret = clk_prepare_enable(hvs->disp_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the disp clock\n");
+ return ret;
+ }
}
- if (vc4->gen == VC4_GEN_4)
- hvs->dlist = hvs->regs + SCALER_DLIST_START;
- else
+ if (vc4->gen >= VC4_GEN_5)
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
+ else
+ hvs->dlist = hvs->regs + SCALER_DLIST_START;
- ret = vc4_hvs_hw_init(hvs);
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_hvs_hw_init(hvs);
+ else
+ ret = vc4_hvs_hw_init(hvs);
if (ret)
return ret;
@@ -1097,10 +1738,12 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
- if (ret)
- return ret;
+ if (vc4->gen < VC4_GEN_6_C) {
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1125,6 +1768,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
+ clk_disable_unprepare(hvs->disp_clk);
clk_disable_unprepare(hvs->core_clk);
vc4->hvs = NULL;
@@ -1147,6 +1791,7 @@ static void vc4_hvs_dev_remove(struct platform_device *pdev)
static const struct of_device_id vc4_hvs_dt_match[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 58bbb9efc2df..f5b167417428 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -138,6 +138,8 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
struct drm_color_ctm *ctm = ctm_state->ctm;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (ctm_state->fifo) {
HVS_WRITE(SCALER_OLEDCOEF2,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
@@ -213,6 +215,8 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_crtc *crtc;
unsigned int i;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
@@ -256,6 +260,8 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
unsigned int i;
u32 reg;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -320,17 +326,62 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
+static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ struct vc4_encoder *vc4_encoder;
+ struct drm_encoder *encoder;
+ unsigned char mux;
+ u32 reg;
+
+ if (!vc4_state->update_muxing)
+ continue;
+
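+ /*
+ * Channel 1 appears to be the only muxable channel on this
+ * generation: SCALER6_CONTROL only exposes a DSP1 target
+ * field, so skip everything else.
+ */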
+ if (vc4_state->assigned_channel != 1)
+ continue;
+
+ encoder = vc4_get_crtc_encoder(crtc, crtc_state);
+ vc4_encoder = to_vc4_encoder(encoder);
+ switch (vc4_encoder->type) {
+ case VC4_ENCODER_TYPE_HDMI1:
+ mux = 0;
+ break;
+
+ case VC4_ENCODER_TYPE_TXP1:
+ mux = 2;
+ break;
+
+ default:
+ drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
+ vc4_encoder->type);
+ mux = 0;
+ break;
+ }
+
+ reg = HVS_READ(SCALER6_CONTROL);
+ HVS_WRITE(SCALER6_CONTROL,
+ (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
+ VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
+ }
+}
+
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *new_crtc_state;
struct vc4_hvs_state *new_hvs_state;
- struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
- int i;
old_hvs_state = vc4_hvs_get_old_global_state(state);
if (WARN_ON(IS_ERR(old_hvs_state)))
@@ -340,14 +391,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (WARN_ON(IS_ERR(new_hvs_state)))
return;
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state;
+ if (vc4->gen < VC4_GEN_6_C) {
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- if (!new_crtc_state->commit)
- continue;
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state;
+
+ if (!new_crtc_state->commit)
+ continue;
- vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
- vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+ vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ }
}
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
@@ -382,16 +439,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* modeset.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
}
drm_atomic_helper_commit_modeset_disables(dev, state);
- vc4_ctm_commit(vc4, state);
+ if (vc4->gen <= VC4_GEN_5)
+ vc4_ctm_commit(vc4, state);
- if (vc4->gen == VC4_GEN_5)
- vc5_hvs_pv_muxing_commit(vc4, state);
- else
+ switch (vc4->gen) {
+ case VC4_GEN_4:
vc4_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_5:
+ vc5_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ vc6_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ default:
+ drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
+ break;
+ }
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
@@ -418,6 +491,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* requirements.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
@@ -1056,7 +1130,10 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- if (vc4->gen == VC4_GEN_5) {
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ } else if (vc4->gen >= VC4_GEN_5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ba6e86d62a77..d608860d525f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -278,7 +278,10 @@ static bool plane_enabled(struct drm_plane_state *state)
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state;
+ unsigned int i;
if (WARN_ON(!plane->state))
return NULL;
@@ -288,6 +291,12 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+
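+ /*
+ * The duplicated state keeps pointing at the same UPM
+ * allocations as the original, so take an extra reference on
+ * each handle; it is dropped in vc4_plane_destroy_state().
+ */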
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ if (vc4_state->upm_handle[i])
+ refcount_inc(&hvs->upm_refcounts[vc4_state->upm_handle[i]].refcount);
+ }
+
vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -306,18 +315,47 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return &vc4_state->base;
}
+static void vc4_plane_release_upm_ida(struct vc4_hvs *hvs, unsigned int upm_handle)
+{
+ struct vc4_upm_refcounts *refcount = &hvs->upm_refcounts[upm_handle];
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ drm_mm_remove_node(&refcount->upm);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ refcount->upm.start = 0;
+ refcount->upm.size = 0;
+ refcount->size = 0;
+
+ ida_free(&hvs->upm_handles, upm_handle);
+}
+
static void vc4_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
drm_mm_remove_node(&vc4_state->lbm);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ }
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ struct vc4_upm_refcounts *refcount;
+
+ if (!vc4_state->upm_handle[i])
+ continue;
+
+ refcount = &hvs->upm_refcounts[vc4_state->upm_handle[i]];
+
+ if (refcount_dec_and_test(&refcount->refcount))
+ vc4_plane_release_upm_ida(hvs, vc4_state->upm_handle[i]);
}
kfree(vc4_state->dlist);
@@ -330,7 +368,10 @@ static void vc4_plane_reset(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
- WARN_ON(plane->state);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
@@ -528,8 +569,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
u32 scale, recip;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
scale = src / dst;
/* The specs note that while the reciprocal would be defined
@@ -538,6 +582,11 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
recip = ~0 / scale;
vc4_dlist_write(vc4_state,
+ /*
+ * The BCM2712 lacks BIT(31) of this register compared
+ * to previous generations, but we don't use that bit
+ * anyway.
+ */
VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
vc4_dlist_write(vc4_state,
@@ -550,10 +599,13 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
u32 xy, int channel)
{
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
u32 scale = src / dst;
s32 offset, offset2;
s32 phase;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
/*
* Start the phase at 1/2 pixel from the 1st pixel at src_x.
* 1/4 pixel for YUV.
@@ -598,10 +650,15 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
vc4_dlist_write(vc4_state,
SCALER_PPF_AGC |
VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
+ /*
+ * The BCM2712 documentation describes a slightly
+ * different register layout for setting up the
+ * phase, but the two appear to be equivalent.
+ */
VC4_SET_FIELD(phase, SCALER_PPF_IPHASE));
}
-static u32 vc4_lbm_size(struct drm_plane_state *state)
+static u32 __vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -649,11 +706,139 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
return lbm;
}
+static unsigned int vc4_lbm_words_per_component(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ switch (vc4_state->y_scaling[channel]) {
+ case VC4_SCALING_PPF:
+ return 4;
+
+ case VC4_SCALING_TPZ:
+ return 2;
+
+ default:
+ return 0;
+ }
+}
+
+static unsigned int vc4_lbm_components(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ if (vc4_state->y_scaling[channel] == VC4_SCALING_NONE)
+ return 0;
+
+ if (info->is_yuv)
+ return channel ? 2 : 1;
+
+ if (info->has_alpha)
+ return 4;
+
+ return 3;
+}
+
+static unsigned int vc4_lbm_channel_size(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int channels_scaled = 0;
+ unsigned int components, words, wpc;
+ unsigned int width, lines;
+ unsigned int i;
+
+ /* LBM is meant to use the smaller of the source or dest width, but
+ * there is an issue with UV scaling in that the size required for
+ * the second channel is based on the source width only.
+ */
+ if (info->hsub > 1 && channel == 1)
+ width = state->src_w >> 16;
+ else
+ width = min(state->src_w >> 16, state->crtc_w);
+ width = round_up(width / info->hsub, 4);
+
+ wpc = vc4_lbm_words_per_component(state, channel);
+ if (!wpc)
+ return 0;
+
+ components = vc4_lbm_components(state, channel);
+ if (!components)
+ return 0;
+
+ if (state->alpha != DRM_BLEND_ALPHA_OPAQUE && info->has_alpha)
+ components -= 1;
+
+ words = width * wpc * components;
+
+ lines = DIV_ROUND_UP(words, 128 / info->hsub);
+
+ for (i = 0; i < 2; i++)
+ if (vc4_state->y_scaling[i] != VC4_SCALING_NONE)
+ channels_scaled++;
+
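+ /*
+ * When only one of the two channels is being vertically
+ * scaled, the buffer appears to be shared, so only half the
+ * lines are accounted to this channel.
+ */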
+ if (channels_scaled == 1)
+ lines = lines / 2;
+
+ return lines;
+}
+
+static unsigned int __vc6_lbm_size(const struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+
+ if (info->hsub > 1)
+ return max(vc4_lbm_channel_size(state, 0),
+ vc4_lbm_channel_size(state, 1));
+ else
+ return vc4_lbm_channel_size(state, 0);
+}
+
+static u32 vc4_lbm_size(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ return __vc6_lbm_size(state);
+ else
+ return __vc4_lbm_size(state);
+}
+
+static size_t vc6_upm_size(const struct drm_plane_state *state,
+ unsigned int plane)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int stride = state->fb->pitches[plane];
+
+ /*
+ * TODO: This only works for raster formats, and is sub-optimal
+ * for buffers with a stride aligned on 32 bytes.
+ */
+ unsigned int words_per_line = (stride + 62) / 32;
+ unsigned int fetch_region_size = words_per_line * 32;
+ unsigned int buffer_lines = 2 << vc4_state->upm_buffer_lines;
+ unsigned int buffer_size = fetch_region_size * buffer_lines;
+
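+ /*
+ * Worked example (illustrative values, not from the spec): a
+ * 1920-wide XRGB8888 plane has a 7680-byte stride, so
+ * words_per_line = 241 and fetch_region_size = 7712; with the
+ * default 2-line buffer that gives 15424 bytes before the
+ * UBM-word alignment below.
+ */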
+ return ALIGN(buffer_size, HVS_UBM_WORD_SIZE);
+}
+
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
int channel)
{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
/* Ch0 H-PPF Word 0: Scaling Parameters */
if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
vc4_write_ppf(vc4_state, vc4_state->src_w[channel],
@@ -750,6 +935,10 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
if (!lbm_size)
return 0;
+ /*
+ * NOTE: No alignment is needed on the BCM2712, since the
+ * size returned by vc4_lbm_size() is already in words.
+ */
if (vc4->gen == VC4_GEN_5)
lbm_size = ALIGN(lbm_size, 64);
else if (vc4->gen == VC4_GEN_4)
@@ -787,6 +976,108 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
return 0;
}
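+/*
+ * The BCM2712 fetches pixels through a shared Unified Pixel Mapper.
+ * Each plane gets an IDA handle (allocated from 1, so 0 means "no
+ * allocation") and a refcount, letting duplicated states reuse the
+ * same UPM region as long as the required size doesn't change.
+ */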
+static int vc6_plane_allocate_upm(struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
+ int ret;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ vc4_state->upm_buffer_lines = SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES;
+
+ for (i = 0; i < info->num_planes; i++) {
+ struct vc4_upm_refcounts *refcount;
+ int upm_handle;
+ unsigned long irqflags;
+ size_t upm_size;
+
+ upm_size = vc6_upm_size(state, i);
+ if (!upm_size)
+ return -EINVAL;
+ upm_handle = vc4_state->upm_handle[i];
+
+ if (upm_handle &&
+ hvs->upm_refcounts[upm_handle].size == upm_size) {
+ /* Allocation is the same size as the previous user of
+ * the plane. Keep the allocation.
+ */
+ vc4_state->upm_handle[i] = upm_handle;
+ } else {
+ if (upm_handle &&
+ refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount)) {
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+
+ upm_handle = ida_alloc_range(&hvs->upm_handles, 1,
+ VC4_NUM_UPM_HANDLES,
+ GFP_KERNEL);
+ if (upm_handle < 0) {
+ drm_dbg(drm, "Out of upm_handles\n");
+ return upm_handle;
+ }
+ vc4_state->upm_handle[i] = upm_handle;
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ refcount_set(&refcount->refcount, 1);
+ refcount->size = upm_size;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&hvs->upm_mm,
+ &refcount->upm,
+ upm_size, HVS_UBM_WORD_SIZE,
+ 0, 0);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ if (ret) {
+ drm_err(drm, "Failed to allocate UPM entry: %d\n", ret);
+ refcount_set(&refcount->refcount, 0);
+ ida_free(&hvs->upm_handles, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ return ret;
+ }
+ }
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ vc4_state->dlist[vc4_state->ptr0_offset[i]] |=
+ VC4_SET_FIELD(refcount->upm.start / HVS_UBM_WORD_SIZE,
+ SCALER6_PTR0_UPM_BASE) |
+ VC4_SET_FIELD(vc4_state->upm_handle[i] - 1,
+ SCALER6_PTR0_UPM_HANDLE) |
+ VC4_SET_FIELD(vc4_state->upm_buffer_lines,
+ SCALER6_PTR0_UPM_BUFF_SIZE);
+ }
+
+ return 0;
+}
+
+static void vc6_plane_free_upm(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ unsigned int upm_handle;
+
+ upm_handle = vc4_state->upm_handle[i];
+ if (!upm_handle)
+ continue;
+
+ if (refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount))
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+}
+
/*
* The colorspace conversion matrices are held in 3 entries in the dlist.
* Create an array of them, with entries for each full and limited mode, and
@@ -834,6 +1125,11 @@ static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
if (!state->fb->format->has_alpha)
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE);
@@ -855,25 +1151,56 @@ static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
{
- if (!state->fb->format->has_alpha)
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
- switch (state->pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5 && vc4->gen != VC4_GEN_6_C &&
+ vc4->gen != VC4_GEN_6_D);
+
+ switch (vc4->gen) {
default:
- case DRM_MODE_BLEND_PREMULTI:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE) |
- SCALER5_CTL2_ALPHA_PREMULT;
- case DRM_MODE_BLEND_COVERAGE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE);
+ case VC4_GEN_5:
+ case VC4_GEN_6_C:
+ if (!state->fb->format->has_alpha)
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+ default:
+ case DRM_MODE_BLEND_PREMULTI:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE) |
+ SCALER5_CTL2_ALPHA_PREMULT;
+ case DRM_MODE_BLEND_COVERAGE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE);
+ }
+ case VC4_GEN_6_D:
+ /* 2712-D configures fixed alpha mode in CTL0 */
+ return state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ?
+ SCALER5_CTL2_ALPHA_PREMULT : 0;
}
}
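+/*
+ * On the 2712 D-step the fixed-alpha case is signalled through the
+ * CTL0 alpha mask field instead of CTL2, hence this separate helper.
+ */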
+static u32 vc4_hvs6_get_alpha_mask_mode(struct drm_plane_state *state)
+{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ if (vc4->gen == VC4_GEN_6_D &&
+ (!state->fb->format->has_alpha ||
+ state->pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE))
+ return VC4_SET_FIELD(SCALER6D_CTL0_ALPHA_MASK_FIXED,
+ SCALER6_CTL0_ALPHA_MASK);
+
+ return VC4_SET_FIELD(SCALER6_CTL0_ALPHA_MASK_NONE, SCALER6_CTL0_ALPHA_MASK);
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -906,6 +1233,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
width = vc4_state->src_w[0] >> 16;
height = vc4_state->src_h[0] >> 16;
@@ -1363,6 +1697,427 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return 0;
}
+static u32 vc6_plane_get_csc_mode(struct vc4_plane_state *vc4_state)
+{
+ struct drm_plane_state *state = &vc4_state->base;
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ u32 ret = 0;
+
+ if (vc4_state->is_yuv) {
+ enum drm_color_encoding color_encoding = state->color_encoding;
+ enum drm_color_range color_range = state->color_range;
+
+ /* CSC pre-loaded with:
+ * 0 = BT601 limited range
+ * 1 = BT709 limited range
+ * 2 = BT2020 limited range
+ * 3 = BT601 full range
+ * 4 = BT709 full range
+ * 5 = BT2020 full range
+ */
+ if (color_encoding > DRM_COLOR_YCBCR_BT2020)
+ color_encoding = DRM_COLOR_YCBCR_BT601;
+ if (color_range > DRM_COLOR_YCBCR_FULL_RANGE)
+ color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
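+ /*
+ * Index into the table above: full range adds 3 to the
+ * encoding, e.g. BT709 full range is 1 + 1 * 3 = 4.
+ */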
+ if (vc4->gen == VC4_GEN_6_C) {
+ ret |= SCALER6C_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6C_CTL2_BRCM_CFC_CONTROL);
+ } else {
+ ret |= SCALER6D_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6D_CTL2_BRCM_CFC_CONTROL);
+ }
+ }
+
+ return ret;
+}
+
+static int vc6_plane_mode_set(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
+ const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
+ int num_planes = fb->format->num_planes;
+ u32 h_subsample = fb->format->hsub;
+ u32 v_subsample = fb->format->vsub;
+ bool mix_plane_alpha;
+ bool covers_screen;
+ u32 scl0, scl1, pitch0;
+ u32 tiling, src_x, src_y;
+ u32 width, height;
+ u32 hvs_format = format->hvs;
+ u32 offsets[3] = { 0 };
+ unsigned int rotation;
+ int ret, i;
+
+ if (vc4_state->dlist_initialized)
+ return 0;
+
+ ret = vc4_plane_setup_clipping_and_scaling(state);
+ if (ret)
+ return ret;
+
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen.
+ * 0 destination size is a redundant plane.
+ */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
+ width = vc4_state->src_w[0] >> 16;
+ height = vc4_state->src_h[0] >> 16;
+
+ /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
+ * and 4:4:4, scl1 should be set to scl0 so both channels of
+ * the scaler do the same thing. For YUV, the Y plane needs
+ * to be put in channel 1 and Cb/Cr in channel 0, so we swap
+ * the scl fields here.
+ */
+ if (num_planes == 1) {
+ scl0 = vc4_get_scl_field(state, 0);
+ scl1 = scl0;
+ } else {
+ scl0 = vc4_get_scl_field(state, 1);
+ scl1 = vc4_get_scl_field(state, 0);
+ }
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ /* We must point to the last line when Y reflection is enabled. */
+ src_y = vc4_state->src_y >> 16;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_y += height - 1;
+
+ src_x = vc4_state->src_x >> 16;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling = SCALER6_CTL0_ADDR_MODE_LINEAR;
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ */
+ for (i = 0; i < num_planes; i++) {
+ offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i];
+ offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i];
+ }
+
+ break;
+
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+ u32 components_per_word;
+ u32 starting_offset;
+ u32 fetch_count;
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n",
+ param);
+ return -EINVAL;
+ }
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ } else {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER6_CTL0_ADDR_MODE_256B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ *
+ * For P030, y_ptr [31:4] is the 128bit word for the start pixel
+ * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
+ * word that should be taken as the first pixel.
+ * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
+ * element within the 128bit word, eg for pixel 3 the value
+ * should be 6.
+ */
+ for (i = 0; i < num_planes; i++) {
+ u32 tile_w, tile, x_off, pix_per_tile;
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ /*
+ * Spec says: bits [31:4] of the given address
+ * should point to the 128-bit word containing
+ * the desired starting pixel, and bits [3:0]
+ * should be between 0 and 11, indicating which
+ * of the 12 pixels in that 128-bit word is the
+ * first pixel to be used.
+ */
+ u32 remaining_pixels = src_x % 96;
+ u32 aligned = remaining_pixels / 12;
+ u32 last_bits = remaining_pixels % 12;
+
+ x_off = aligned * 16 + last_bits;
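+ /*
+ * E.g. src_x % 96 == 13 lands in the second
+ * 128-bit word (+16 bytes) at element 1, so
+ * x_off = 17.
+ */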
+ tile_w = 128;
+ pix_per_tile = 96;
+ } else {
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tile_w = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tile_w = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pix_per_tile = tile_w / fb->format->cpp[0];
+ x_off = (src_x % pix_per_tile) /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
+ tile = src_x / pix_per_tile;
+
+ offsets[i] += param * tile_w * tile;
+ offsets[i] += src_y / (i ? v_subsample : 1) * tile_w;
+ offsets[i] += x_off & ~(i ? 1 : 0);
+ }
+
+ components_per_word = fb->format->format == DRM_FORMAT_P030 ? 24 : 32;
+ starting_offset = src_x % components_per_word;
+ fetch_count = (width + starting_offset + components_per_word - 1) /
+ components_per_word;
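+ /*
+ * E.g. with 32 components per word and src_x = 40, the fetch
+ * starts 8 components into a word, so a width of 100 needs
+ * (100 + 8 + 31) / 32 = 4 words per line.
+ */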
+
+ pitch0 = VC4_SET_FIELD(param, SCALER6_PTR2_PITCH) |
+ VC4_SET_FIELD(fetch_count - 1, SCALER6_PTR2_FETCH_COUNT);
+ break;
+ }
+
+ default:
+ DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
+ (long long)fb->modifier);
+ return -EINVAL;
+ }
+
+ /* fetch an extra pixel if we don't actually line up with the left edge. */
+ if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16))
+ width++;
+
+ /* same for the right side */
+ if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) &&
+ vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16))
+ width++;
+
+ /* now for the top */
+ if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16))
+ height++;
+
+ /* and the bottom */
+ if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) &&
+ vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16))
+ height++;
+
+ /* For YUV444 the hardware wants double the width, otherwise it
+ * doesn't fetch the full width of chroma.
+ */
+ if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444)
+ width <<= 1;
+
+ /* Don't waste cycles mixing with plane alpha if the set alpha
+ * is opaque or there is no per-pixel alpha information.
+ * In any case we use the alpha property value as the fixed alpha.
+ */
+ mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+ fb->format->has_alpha;
+
+ /* Control Word 0: Scaling Configuration & Element Validity */
+ vc4_dlist_write(vc4_state,
+ SCALER6_CTL0_VALID |
+ VC4_SET_FIELD(tiling, SCALER6_CTL0_ADDR_MODE) |
+ vc4_hvs6_get_alpha_mask_mode(state) |
+ (vc4_state->is_unity ? SCALER6_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(format->pixel_order_hvs5, SCALER6_CTL0_ORDERRGBA) |
+ VC4_SET_FIELD(scl1, SCALER6_CTL0_SCL1_MODE) |
+ VC4_SET_FIELD(scl0, SCALER6_CTL0_SCL0_MODE) |
+ VC4_SET_FIELD(hvs_format, SCALER6_CTL0_PIXEL_FORMAT));
+
+ /* Position Word 0: Image Position */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_y, SCALER6_POS0_START_Y) |
+ (rotation & DRM_MODE_REFLECT_X ? SCALER6_POS0_HFLIP : 0) |
+ VC4_SET_FIELD(vc4_state->crtc_x, SCALER6_POS0_START_X));
+
+ /* Control Word 2: Alpha Value & CSC */
+ vc4_dlist_write(vc4_state,
+ vc6_plane_get_csc_mode(vc4_state) |
+ vc4_hvs5_get_alpha_blend_mode(state) |
+ (mix_plane_alpha ? SCALER6_CTL2_ALPHA_MIX : 0) |
+ VC4_SET_FIELD(state->alpha >> 4, SCALER5_CTL2_ALPHA));
+
+ /* Position Word 1: Scaled Image Dimensions */
+ if (!vc4_state->is_unity)
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_h - 1,
+ SCALER6_POS1_SCL_LINES) |
+ VC4_SET_FIELD(vc4_state->crtc_w - 1,
+ SCALER6_POS1_SCL_WIDTH));
+
+ /* Position Word 2: Source Image Size */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(height - 1,
+ SCALER6_POS2_SRC_LINES) |
+ VC4_SET_FIELD(width - 1,
+ SCALER6_POS2_SRC_WIDTH));
+
+ /* Position Word 3: Context */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+
+ /*
+ * TODO: This only covers Raster Scan Order planes
+ */
+ for (i = 0; i < num_planes; i++) {
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i);
+ dma_addr_t paddr = bo->dma_addr + fb->offsets[i] + offsets[i];
+
+ /* Pointer Word 0 */
+ vc4_state->ptr0_offset[i] = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ (rotation & DRM_MODE_REFLECT_Y ? SCALER6_PTR0_VFLIP : 0) |
+ /*
+ * The UPM buffer will be allocated in
+ * vc6_plane_allocate_upm().
+ */
+ VC4_SET_FIELD(upper_32_bits(paddr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR));
+
+ /* Pointer Word 1 */
+ vc4_dlist_write(vc4_state, lower_32_bits(paddr));
+
+ /* Pointer Word 2 */
+ if (base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND128 &&
+ base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER6_PTR2_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
+ }
+
+ /*
+ * Palette Word 0
+ * TODO: We're not using the palette mode
+ */
+
+ /*
+ * Trans Word 0
+ * TODO: It's only relevant if we set the trans_rgb bit in the
+ * control word 0, and we don't at the moment.
+ */
+
+ vc4_state->lbm_offset = 0;
+
+ if (!vc4_state->is_unity || fb->format->is_yuv) {
+ /*
+ * Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
+ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
+
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ if (num_planes > 1)
+ /*
+ * Emit Cb/Cr as channel 0 and Y as channel
+ * 1. This matches how we set up scl0/scl1
+ * above.
+ */
+ vc4_write_scaling_parameters(state, 1);
+
+ vc4_write_scaling_parameters(state, 0);
+ }
+
+ /*
+ * If any PPF setup was done, then all the kernel
+ * pointers get uploaded.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
+ u32 kernel =
+ VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
+ SCALER_PPF_KERNEL_OFFSET);
+
+ /* HPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* HPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ }
+ }
+
+ vc4_dlist_write(vc4_state, SCALER6_CTL0_END);
+
+ vc4_state->dlist[0] |=
+ VC4_SET_FIELD(vc4_state->dlist_count, SCALER6_CTL0_NEXT);
+
+ /* crtc_* are already clipped coordinates. */
+ covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
+ vc4_state->crtc_w == state->crtc->mode.hdisplay &&
+ vc4_state->crtc_h == state->crtc->mode.vdisplay;
+
+ /*
+ * Background fill might be necessary when the plane has per-pixel
+ * alpha content or a non-opaque plane alpha and could blend from the
+ * background or does not cover the entire screen.
+ */
+ vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
+ state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+
+ /*
+ * Flag the dlist as initialized to avoid checking it twice in case
+ * the async update check already called vc6_plane_mode_set() and
+ * decided to fall back to a sync update because an async update
+ * was not possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
+ vc4_plane_calc_load(state);
+
+ drm_dbg_driver(drm, "[PLANE:%d:%s] Computed DLIST size: %u\n",
+ plane->base.id, plane->name, vc4_state->dlist_count);
+
+ return 0;
+}
+
/* If a modeset involves changing the setup of a plane, the atomic
* infrastructure will call this to validate a proposed plane setup.
* However, if a plane isn't getting updated, this (and the
@@ -1373,6 +2128,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
@@ -1380,17 +2136,38 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
vc4_state->dlist_count = 0;
- if (!plane_enabled(new_plane_state))
+ if (!plane_enabled(new_plane_state)) {
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+
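+ /*
+ * The plane is being disabled; drop the UPM references
+ * held by its state so the memory can be reused.
+ */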
+ if (vc4->gen >= VC4_GEN_6_C && old_plane_state &&
+ plane_enabled(old_plane_state)) {
+ vc6_plane_free_upm(new_plane_state);
+ }
return 0;
+ }
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h)
+ return 0;
+
ret = vc4_plane_allocate_lbm(new_plane_state);
if (ret)
return ret;
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ret = vc6_plane_allocate_upm(new_plane_state);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1439,7 +2216,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
- uint32_t addr;
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ dma_addr_t dma_addr = bo->dma_addr + fb->offsets[0];
int idx;
if (!drm_dev_enter(plane->dev, &idx))
@@ -1449,19 +2227,38 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->dma_addr + fb->offsets[0];
- /* Write the new address into the hardware immediately. The
- * scanout will start from this address as soon as the FIFO
- * needs to refill with pixels.
- */
- writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+ if (vc4->gen == VC4_GEN_6_C) {
+ u32 value;
- /* Also update the CPU-side dlist copy, so that any later
- * atomic updates that don't do a new modeset on our plane
- * also use our updated address.
- */
- vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr;
+ value = vc4_state->dlist[vc4_state->ptr0_offset[0]] &
+ ~SCALER6_PTR0_UPPER_ADDR_MASK;
+ value |= VC4_SET_FIELD(upper_32_bits(dma_addr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR);
+
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = value;
+
+ value = lower_32_bits(dma_addr);
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0] + 1]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0] + 1] = value;
+ } else {
+ u32 addr;
+
+ addr = (u32)dma_addr;
+
+ /* Write the new address into the hardware immediately. The
+ * scanout will start from this address as soon as the FIFO
+ * needs to refill with pixels.
+ */
+ writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+
+ /* Also update the CPU-side dlist copy, so that any later
+ * atomic updates that don't do a new modeset on our plane
+ * also use our updated address.
+ */
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr;
+ }
drm_dev_exit(idx);
}
@@ -1543,13 +2340,17 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *old_vc4_state, *new_vc4_state;
int ret;
u32 i;
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (vc4->gen <= VC4_GEN_5)
+ ret = vc4_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc6_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
@@ -1723,7 +2524,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
};
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) {
+ if (!hvs_formats[i].hvs5_only || vc4->gen >= VC4_GEN_5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
@@ -1738,7 +2539,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- if (vc4->gen == VC4_GEN_5)
+ if (vc4->gen >= VC4_GEN_5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
else
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index c55dec383929..27158be19952 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -19,6 +19,20 @@
#define VC4_GET_FIELD(word, field) FIELD_GET(field##_MASK, word)
+#define VC6_SET_FIELD(value, field) \
+ ({ \
+ WARN_ON(!FIELD_FIT(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value));\
+ FIELD_PREP(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value); \
+ })
+
+#define VC6_GET_FIELD(word, field) FIELD_GET(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, word)
+
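+/*
+ * NOTE: Both helpers expect an `hvs` pointer to be in scope at the
+ * call site, as do the SCALER6_DISPX_*() macros below.
+ */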
#define V3D_IDENT0 0x00000
# define V3D_EXPECTED_IDENT0 \
((2 << 24) | \
@@ -155,6 +169,7 @@
# define PV_CONTROL_EN BIT(0)
#define PV_V_CONTROL 0x04
+# define PV_VCONTROL_ODD_TIMING BIT(29)
# define PV_VCONTROL_ODD_DELAY_MASK VC4_MASK(22, 6)
# define PV_VCONTROL_ODD_DELAY_SHIFT 6
# define PV_VCONTROL_ODD_FIRST BIT(5)
@@ -215,6 +230,11 @@
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8
+#define PV_PIPE_INIT_CTRL 0x94
+# define PV_PIPE_INIT_CTRL_PV_INIT_WIDTH_MASK VC4_MASK(11, 8)
+# define PV_PIPE_INIT_CTRL_PV_INIT_IDLE_MASK VC4_MASK(7, 4)
+# define PV_PIPE_INIT_CTRL_PV_INIT_EN BIT(0)
+
#define SCALER_CHANNELS_COUNT 3
#define SCALER_DISPCTRL 0x00000000
@@ -418,6 +438,10 @@
# define SCALER_DISPSTAT1_FRCNT0_SHIFT 18
# define SCALER_DISPSTAT1_FRCNT1_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT1_FRCNT1_SHIFT 12
+# define SCALER5_DISPSTAT1_FRCNT0_MASK VC4_MASK(25, 20)
+# define SCALER5_DISPSTAT1_FRCNT0_SHIFT 20
+# define SCALER5_DISPSTAT1_FRCNT1_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT1_FRCNT1_SHIFT 14
#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
(x) * (SCALER_DISPSTAT1 - \
@@ -436,6 +460,8 @@
#define SCALER_DISPSTAT2 0x00000068
# define SCALER_DISPSTAT2_FRCNT2_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT2_FRCNT2_SHIFT 12
+# define SCALER5_DISPSTAT2_FRCNT2_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT2_FRCNT2_SHIFT 14
#define SCALER_DISPBASE2 0x0000006c
#define SCALER_DISPALPHA2 0x00000070
@@ -514,6 +540,206 @@
#define SCALER5_DLIST_START 0x00004000
+#define SCALER6_VERSION 0x00000000
+# define SCALER6_VERSION_MASK VC4_MASK(7, 0)
+# define SCALER6_VERSION_C0 0x00000053
+# define SCALER6_VERSION_D0 0x00000054
+#define SCALER6_CXM_SIZE 0x00000004
+#define SCALER6_LBM_SIZE 0x00000008
+#define SCALER6_UBM_SIZE 0x0000000c
+#define SCALER6_COBA_SIZE 0x00000010
+#define SCALER6_COB_SIZE 0x00000014
+
+#define SCALER6_CONTROL 0x00000020
+# define SCALER6_CONTROL_HVS_EN BIT(31)
+# define SCALER6_CONTROL_PF_LINES_MASK VC4_MASK(22, 18)
+# define SCALER6_CONTROL_ABORT_ON_EMPTY BIT(16)
+# define SCALER6_CONTROL_DSP1_TARGET_MASK VC4_MASK(13, 12)
+# define SCALER6_CONTROL_MAX_REQS_MASK VC4_MASK(7, 4)
+
+#define SCALER6_FETCHER_STATUS 0x00000024
+#define SCALER6_FETCH_STATUS 0x00000028
+#define SCALER6_HANDLE_ERROR 0x0000002c
+
+#define SCALER6_DISP0_CTRL0 0x00000030
+#define SCALER6_DISPX_CTRL0(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL0 + ((x) * (SCALER6_DISP1_CTRL0 - SCALER6_DISP0_CTRL0))) : \
+ (SCALER6D_DISP0_CTRL0 + ((x) * (SCALER6D_DISP1_CTRL0 - SCALER6D_DISP0_CTRL0))))
+# define SCALER6_DISPX_CTRL0_ENB BIT(31)
+# define SCALER6_DISPX_CTRL0_RESET BIT(30)
+# define SCALER6_DISPX_CTRL0_FWIDTH_MASK VC4_MASK(28, 16)
+# define SCALER6_DISPX_CTRL0_ONESHOT BIT(15)
+# define SCALER6_DISPX_CTRL0_ONECTX_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_CTRL0_LINES_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_CTRL1 0x00000034
+#define SCALER6_DISPX_CTRL1(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL1 + ((x) * (SCALER6_DISP1_CTRL1 - SCALER6_DISP0_CTRL1))) : \
+ (SCALER6D_DISP0_CTRL1 + ((x) * (SCALER6D_DISP1_CTRL1 - SCALER6D_DISP0_CTRL1))))
+# define SCALER6_DISPX_CTRL1_BGENB BIT(8)
+# define SCALER6_DISPX_CTRL1_INTLACE BIT(0)
+
+#define SCALER6_DISP0_BGND 0x00000038
+#define SCALER6_DISPX_BGND(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_BGND + ((x) * (SCALER6_DISP1_BGND - SCALER6_DISP0_BGND))) : \
+ (SCALER6D_DISP0_BGND + ((x) * (SCALER6D_DISP1_BGND - SCALER6D_DISP0_BGND))))
+
+#define SCALER6_DISP0_LPTRS 0x0000003c
+#define SCALER6_DISPX_LPTRS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_LPTRS + ((x) * (SCALER6_DISP1_LPTRS - SCALER6_DISP0_LPTRS))) : \
+ (SCALER6D_DISP0_LPTRS + ((x) * (SCALER6D_DISP1_LPTRS - SCALER6D_DISP0_LPTRS))))
+# define SCALER6_DISPX_LPTRS_HEADE_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_COB 0x00000040
+#define SCALER6_DISPX_COB(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_COB + ((x) * (SCALER6_DISP1_COB - SCALER6_DISP0_COB))) : \
+ (SCALER6D_DISP0_COB + ((x) * (SCALER6D_DISP1_COB - SCALER6D_DISP0_COB))))
+# define SCALER6_DISPX_COB_TOP_MASK VC4_MASK(31, 16)
+# define SCALER6_DISPX_COB_BASE_MASK VC4_MASK(15, 0)
+
+#define SCALER6_DISP0_STATUS 0x00000044
+#define SCALER6_DISPX_STATUS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_STATUS + ((x) * (SCALER6_DISP1_STATUS - SCALER6_DISP0_STATUS))) : \
+ (SCALER6D_DISP0_STATUS + ((x) * (SCALER6D_DISP1_STATUS - SCALER6D_DISP0_STATUS))))
+# define SCALER6_DISPX_STATUS_EMPTY BIT(22)
+# define SCALER6_DISPX_STATUS_FRCNT_MASK VC4_MASK(21, 16)
+# define SCALER6_DISPX_STATUS_OFIELD BIT(15)
+# define SCALER6_DISPX_STATUS_MODE_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_STATUS_MODE_DISABLED 0
+# define SCALER6_DISPX_STATUS_MODE_INIT 1
+# define SCALER6_DISPX_STATUS_MODE_RUN 2
+# define SCALER6_DISPX_STATUS_MODE_EOF 3
+# define SCALER6_DISPX_STATUS_YLINE_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_DL 0x00000048
+
+#define SCALER6_DISPX_DL(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_DL + ((x) * (SCALER6_DISP1_DL - SCALER6_DISP0_DL))) : \
+ (SCALER6D_DISP0_DL + ((x) * (SCALER6D_DISP1_DL - SCALER6D_DISP0_DL))))
+# define SCALER6_DISPX_DL_LACT_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_RUN 0x0000004c
+#define SCALER6_DISP1_CTRL0 0x00000050
+#define SCALER6_DISP1_CTRL1 0x00000054
+#define SCALER6_DISP1_BGND 0x00000058
+#define SCALER6_DISP1_LPTRS 0x0000005c
+#define SCALER6_DISP1_COB 0x00000060
+#define SCALER6_DISP1_STATUS 0x00000064
+#define SCALER6_DISP1_DL 0x00000068
+#define SCALER6_DISP1_RUN 0x0000006c
+#define SCALER6_DISP2_CTRL0 0x00000070
+#define SCALER6_DISP2_CTRL1 0x00000074
+#define SCALER6_DISP2_BGND 0x00000078
+#define SCALER6_DISP2_LPTRS 0x0000007c
+#define SCALER6_DISP2_COB 0x00000080
+#define SCALER6_DISP2_STATUS 0x00000084
+#define SCALER6_DISP2_DL 0x00000088
+#define SCALER6_DISP2_RUN 0x0000008c
+#define SCALER6_EOLN 0x00000090
+#define SCALER6_DL_STATUS 0x00000094
+#define SCALER6_BFG_MISC 0x0000009c
+#define SCALER6_QOS0 0x000000a0
+#define SCALER6_PROF0 0x000000a4
+#define SCALER6_QOS1 0x000000a8
+#define SCALER6_PROF1 0x000000ac
+#define SCALER6_QOS2 0x000000b0
+#define SCALER6_PROF2 0x000000b4
+#define SCALER6_PRI_MAP0 0x000000b8
+#define SCALER6_PRI_MAP1 0x000000bc
+#define SCALER6_HISTCTRL 0x000000c0
+#define SCALER6_HISTBIN0 0x000000c4
+#define SCALER6_HISTBIN1 0x000000c8
+#define SCALER6_HISTBIN2 0x000000cc
+#define SCALER6_HISTBIN3 0x000000d0
+#define SCALER6_HISTBIN4 0x000000d4
+#define SCALER6_HISTBIN5 0x000000d8
+#define SCALER6_HISTBIN6 0x000000dc
+#define SCALER6_HISTBIN7 0x000000e0
+#define SCALER6_HDR_CFG_REMAP 0x000000f4
+#define SCALER6_COL_SPACE 0x000000f8
+#define SCALER6_HVS_ID 0x000000fc
+#define SCALER6_CFC1 0x00000100
+#define SCALER6_DISP_UPM_ISO0 0x00000200
+#define SCALER6_DISP_UPM_ISO1 0x00000204
+#define SCALER6_DISP_UPM_ISO2 0x00000208
+#define SCALER6_DISP_LBM_ISO0 0x0000020c
+#define SCALER6_DISP_LBM_ISO1 0x00000210
+#define SCALER6_DISP_LBM_ISO2 0x00000214
+#define SCALER6_DISP_COB_ISO0 0x00000218
+#define SCALER6_DISP_COB_ISO1 0x0000021c
+#define SCALER6_DISP_COB_ISO2 0x00000220
+#define SCALER6_BAD_COB 0x00000224
+#define SCALER6_BAD_LBM 0x00000228
+#define SCALER6_BAD_UPM 0x0000022c
+#define SCALER6_BAD_AXI 0x00000230
+
+#define SCALER6D_VERSION 0x00000000
+#define SCALER6D_CXM_SIZE 0x00000004
+#define SCALER6D_LBM_SIZE 0x00000008
+#define SCALER6D_UBM_SIZE 0x0000000c
+#define SCALER6D_COBA_SIZE 0x00000010
+#define SCALER6D_COB_SIZE 0x00000014
+#define SCALER6D_CONTROL 0x00000020
+#define SCALER6D_FETCHER_STATUS 0x00000024
+#define SCALER6D_FETCH_STATUS 0x00000028
+#define SCALER6D_HANDLE_ERROR 0x0000002c
+#define SCALER6D_EOLN 0x00000030
+#define SCALER6D_DL_STATUS 0x00000034
+#define SCALER6D_PRI_MAP0 0x00000038
+#define SCALER6D_PRI_MAP1 0x0000003c
+#define SCALER6D_HISTCTRL 0x000000d0
+#define SCALER6D_HISTBIN0 0x000000d4
+#define SCALER6D_HISTBIN1 0x000000d8
+#define SCALER6D_HISTBIN2 0x000000dc
+#define SCALER6D_HISTBIN3 0x000000e0
+#define SCALER6D_HISTBIN4 0x000000e4
+#define SCALER6D_HISTBIN5 0x000000e8
+#define SCALER6D_HISTBIN6 0x000000ec
+#define SCALER6D_HISTBIN7 0x000000f0
+#define SCALER6D_HVS_ID 0x000000fc
+
+#define SCALER6D_DISP0_CTRL0 0x00000100
+#define SCALER6D_DISP0_CTRL1 0x00000104
+#define SCALER6D_DISP0_BGND0 0x00000108
+#define SCALER6D_DISP0_BGND1 0x0000010c
+#define SCALER6D_DISP0_LPTRS 0x00000110
+#define SCALER6D_DISP0_COB 0x00000114
+#define SCALER6D_DISP0_STATUS 0x00000118
+#define SCALER6D_DISP0_DL 0x0000011c
+#define SCALER6D_DISP0_RUN 0x00000120
+#define SCALER6D_QOS0 0x00000124
+#define SCALER6D_PROF0 0x00000128
+#define SCALER6D_DISP1_CTRL0 0x00000140
+#define SCALER6D_DISP1_CTRL1 0x00000144
+#define SCALER6D_DISP1_BGND0 0x00000148
+#define SCALER6D_DISP1_BGND1 0x0000014c
+#define SCALER6D_DISP1_LPTRS 0x00000150
+#define SCALER6D_DISP1_COB 0x00000154
+#define SCALER6D_DISP1_STATUS 0x00000158
+#define SCALER6D_DISP1_DL 0x0000015c
+#define SCALER6D_DISP1_RUN 0x00000160
+#define SCALER6D_QOS1 0x00000164
+#define SCALER6D_PROF1 0x00000168
+#define SCALER6D_DISP2_CTRL0 0x00000180
+#define SCALER6D_DISP2_CTRL1 0x00000184
+#define SCALER6D_DISP2_BGND0 0x00000188
+#define SCALER6D_DISP2_BGND1 0x0000018c
+#define SCALER6D_DISP2_LPTRS 0x00000190
+#define SCALER6D_DISP2_COB 0x00000194
+#define SCALER6D_DISP2_STATUS 0x00000198
+#define SCALER6D_DISP2_DL 0x0000019c
+#define SCALER6D_DISP2_RUN 0x000001a0
+#define SCALER6D_QOS2 0x000001a4
+#define SCALER6D_PROF2 0x000001a8
+
+#define SCALER6(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? SCALER6_ ## x : SCALER6D_ ## x)
+
# define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1)
# define VC4_HDMI_SW_RESET_HDMI BIT(0)
@@ -761,6 +987,15 @@ enum {
# define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0)
# define VC4_HD_MAI_THR_DREQLOW_SHIFT 0
+# define VC6_D_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 23)
+# define VC6_D_HD_MAI_THR_PANICHIGH_SHIFT 23
+# define VC6_D_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 15)
+# define VC6_D_HD_MAI_THR_PANICLOW_SHIFT 15
+# define VC6_D_HD_MAI_THR_DREQHIGH_MASK VC4_MASK(13, 7)
+# define VC6_D_HD_MAI_THR_DREQHIGH_SHIFT 7
+# define VC6_D_HD_MAI_THR_DREQLOW_MASK VC4_MASK(6, 0)
+# define VC6_D_HD_MAI_THR_DREQLOW_SHIFT 0
+
/* Divider from HDMI HSM clock to MAI serial clock. Sampling period
* converges to N / (M + 1) cycles.
*/
@@ -968,6 +1203,9 @@ enum hvs_pixel_format {
#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4)
#define SCALER5_CTL2_ALPHA_SHIFT 4
+#define SCALER6D_CTL2_CSC_ENABLE BIT(19)
+#define SCALER6D_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(22, 20)
+
#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS1_SCL_HEIGHT_SHIFT 16
@@ -1109,4 +1347,63 @@ enum hvs_pixel_format {
#define SCALER_PITCH0_TILE_WIDTH_R_MASK VC4_MASK(6, 0)
#define SCALER_PITCH0_TILE_WIDTH_R_SHIFT 0
+#define SCALER6_CTL0_END BIT(31)
+#define SCALER6_CTL0_VALID BIT(30)
+#define SCALER6_CTL0_NEXT_MASK VC4_MASK(29, 24)
+#define SCALER6_CTL0_RGB_TRANS BIT(23)
+#define SCALER6_CTL0_ADDR_MODE_MASK VC4_MASK(22, 20)
+#define SCALER6_CTL0_ADDR_MODE_LINEAR 0
+#define SCALER6_CTL0_ADDR_MODE_128B 1
+#define SCALER6_CTL0_ADDR_MODE_256B 2
+#define SCALER6_CTL0_ADDR_MODE_MAP8 3
+#define SCALER6_CTL0_ADDR_MODE_UIF 4
+
+#define SCALER6_CTL0_ALPHA_MASK_MASK VC4_MASK(19, 18)
+#define SCALER6_CTL0_ALPHA_MASK_NONE 0
+#define SCALER6D_CTL0_ALPHA_MASK_FIXED 3
+#define SCALER6_CTL0_UNITY BIT(15)
+#define SCALER6_CTL0_ORDERRGBA_MASK VC4_MASK(14, 13)
+#define SCALER6_CTL0_SCL1_MODE_MASK VC4_MASK(10, 8)
+#define SCALER6_CTL0_SCL0_MODE_MASK VC4_MASK(7, 5)
+#define SCALER6_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0)
+
+#define SCALER6_POS0_START_Y_MASK VC4_MASK(28, 16)
+#define SCALER6_POS0_HFLIP BIT(15)
+#define SCALER6_POS0_START_X_MASK VC4_MASK(12, 0)
+
+#define SCALER6_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30)
+#define SCALER6_CTL2_ALPHA_PREMULT BIT(29)
+#define SCALER6_CTL2_ALPHA_MIX BIT(28)
+#define SCALER6_CTL2_BFG BIT(26)
+#define SCALER6C_CTL2_CSC_ENABLE BIT(25)
+#define SCALER6C_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(18, 16)
+#define SCALER6_CTL2_ALPHA_MASK VC4_MASK(15, 4)
+
+#define SCALER6_POS1_SCL_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_POS2_SRC_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS2_SRC_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_PTR0_VFLIP BIT(31)
+#define SCALER6_PTR0_UPM_BASE_MASK VC4_MASK(28, 16)
+#define SCALER6_PTR0_UPM_HANDLE_MASK VC4_MASK(14, 10)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_MASK VC4_MASK(9, 8)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_16_LINES 3
+#define SCALER6_PTR0_UPM_BUFF_SIZE_8_LINES 2
+#define SCALER6_PTR0_UPM_BUFF_SIZE_4_LINES 1
+#define SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES 0
+#define SCALER6_PTR0_UPPER_ADDR_MASK VC4_MASK(7, 0)
+
+#define SCALER6_PTR2_ALPHA_BPP_MASK VC4_MASK(31, 31)
+#define SCALER6_PTR2_ALPHA_BPP_1BPP 1
+#define SCALER6_PTR2_ALPHA_BPP_8BPP 0
+#define SCALER6_PTR2_ALPHA_ORDER_MASK VC4_MASK(30, 30)
+#define SCALER6_PTR2_ALPHA_ORDER_MSB_TO_LSB 1
+#define SCALER6_PTR2_ALPHA_ORDER_LSB_TO_MSB 0
+#define SCALER6_PTR2_ALPHA_OFFS_MASK VC4_MASK(29, 27)
+#define SCALER6_PTR2_LSKIP_MASK VC4_MASK(26, 24)
+#define SCALER6_PTR2_PITCH_MASK VC4_MASK(16, 0)
+#define SCALER6_PTR2_FETCH_COUNT_MASK VC4_MASK(26, 16)
+
#endif /* VC4_REGS_H */
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 3e38a1d2d55e..4eab069cda75 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -145,6 +145,9 @@
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
+#define TXP_DST_PTR_HIGH_MOPLET 0x1c
+#define TXP_DST_PTR_HIGH_MOP 0x24
+
#define TXP_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
@@ -159,6 +162,7 @@
struct vc4_txp {
struct vc4_crtc base;
+ const struct vc4_txp_data *data;
struct platform_device *pdev;
@@ -286,9 +290,13 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ const struct vc4_txp_data *txp_data = txp->data;
struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ dma_addr_t addr;
u32 ctrl;
int idx;
int i;
@@ -308,9 +316,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
ctrl = TXP_GO | TXP_EI |
- VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+ if (txp_data->has_byte_enable)
+ ctrl |= VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE);
+
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
else
@@ -324,11 +334,25 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
gem = drm_fb_dma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
+ addr = gem->dma_addr + fb->offsets[0];
+
+ TXP_WRITE(TXP_DST_PTR, lower_32_bits(addr));
+
+ if (txp_data->supports_40bit_addresses)
+ TXP_WRITE(txp_data->high_addr_ptr_reg, upper_32_bits(addr) & 0xff);
+
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+
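+ /*
+ * The BCM2712 MOP/MOPLET blocks program the destination size
+ * minus one, while the older TXP takes the raw size.
+ */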
+ hdisplay = mode->hdisplay ?: 1;
+ vdisplay = mode->vdisplay ?: 1;
+ if (txp_data->size_minus_one) {
+ hdisplay -= 1;
+ vdisplay -= 1;
+ }
+
TXP_WRITE(TXP_DIM,
- VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
- VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+ VC4_SET_FIELD(hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(vdisplay, TXP_HEIGHT));
TXP_WRITE(TXP_DST_CTRL, ctrl);
@@ -362,6 +386,7 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
int idx;
@@ -380,7 +405,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
}
- TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+ if (vc4->gen < VC4_GEN_6_C)
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
drm_dev_exit(idx);
}
@@ -484,17 +510,49 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-const struct vc4_crtc_data vc4_txp_crtc_data = {
- .name = "txp",
- .debugfs_name = "txp_regs",
- .hvs_available_channels = BIT(2),
- .hvs_output = 2,
+static const struct vc4_txp_data bcm2712_mop_data = {
+ .base = {
+ .name = "mop",
+ .debugfs_name = "mop_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOP,
+ .has_byte_enable = true,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+static const struct vc4_txp_data bcm2712_moplet_data = {
+ .base = {
+ .name = "moplet",
+ .debugfs_name = "moplet_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 4,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP1,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOPLET,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+const struct vc4_txp_data bcm2835_txp_data = {
+ .base = {
+ .name = "txp",
+ .debugfs_name = "txp_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .has_byte_enable = true,
};
static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ const struct vc4_txp_data *txp_data;
struct vc4_encoder *vc4_encoder;
struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
@@ -509,6 +567,11 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (!txp)
return -ENOMEM;
+ txp_data = of_device_get_match_data(dev);
+ if (!txp_data)
+ return -ENODEV;
+
+ txp->data = txp_data;
txp->pdev = pdev;
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
@@ -519,13 +582,13 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
- ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &txp_data->base,
&vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
vc4_encoder = &txp->encoder;
- txp->encoder.type = VC4_ENCODER_TYPE_TXP;
+ txp->encoder.type = txp_data->encoder_type;
encoder = &vc4_encoder->base;
encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
@@ -579,7 +642,9 @@ static void vc4_txp_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_txp_dt_match[] = {
- { .compatible = "brcm,bcm2835-txp" },
+ { .compatible = "brcm,bcm2712-mop", .data = &bcm2712_mop_data },
+ { .compatible = "brcm,bcm2712-moplet", .data = &bcm2712_moplet_data },
+ { .compatible = "brcm,bcm2835-txp", .data = &bcm2835_txp_data },
{ /* sentinel */ },
};
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c5e3e5457737..2752ab4f1c97 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -47,7 +47,6 @@
#define DRIVER_NAME "vgem"
#define DRIVER_DESC "Virtual GEM provider"
-#define DRIVER_DATE "20120112"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -121,7 +120,6 @@ static const struct drm_driver vgem_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ffca6e2e1c9a..6a67c6297d58 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -32,9 +32,9 @@
#include <linux/poll.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -184,7 +184,6 @@ static const struct drm_driver driver = {
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
- .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
DRM_FBDEV_SHMEM_DRIVER_OPS,
@@ -202,7 +201,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 64c236169db8..f42ca9d8ed10 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -89,9 +88,11 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool attached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -194,6 +195,13 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_plane_state {
+ struct drm_plane_state base;
+ struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+ container_of(x, struct virtio_gpu_plane_state, base)
+
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -301,9 +309,6 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
@@ -349,6 +354,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_mem_entry *ents,
unsigned int nents);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -468,6 +477,10 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach);
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..5aab588fc400 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put(gobj);
- return 0;
-}
-
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file)
{
@@ -127,15 +112,17 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
/* the context might still be missing when the first ioctl is
* DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
*/
- virtio_gpu_create_context(obj->dev, file);
+ if (!vgdev->has_context_init)
+ virtio_gpu_create_context(obj->dev, file);
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
virtio_gpu_array_add_obj(objs, obj);
- virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- objs);
+ if (vfpriv->ctx_id)
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+
out_notify:
virtio_gpu_notify(vgdev);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index e4f76f315550..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -80,9 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
- virtio_gpu_map->handle,
- &virtio_gpu_map->offset);
+ return drm_gem_dumb_map_offset(file, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..5517cff8715c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -80,6 +80,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -97,6 +100,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
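+/*
+ * Detach the backing memory from the host resource and synchronously
+ * wait for the fence, so the pages can safely be released afterwards.
+ */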
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_fence *fence;
+
+ if (!bo->attached)
+ return 0;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence)
+ return -ENOMEM;
+
+ virtio_gpu_object_detach(vgdev, bo, fence);
+ virtio_gpu_notify(vgdev);
+
+ dma_fence_wait(&fence->f, false);
+ dma_fence_put(&fence->f);
+
+ return 0;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..42aa554eca9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
@@ -66,11 +68,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -138,11 +157,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (vgfb->fence) {
+ if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -151,13 +172,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
- dma_fence_wait_timeout(&vgfb->fence->f, true,
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
msecs_to_jiffies(50));
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
} else {
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
width, height, NULL, NULL);
@@ -241,45 +260,113 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y2 - rect.y1);
}
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
- if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ obj = new_state->fb->obj[0];
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ return ret;
+ }
+
+ if (bo->dumb || obj->import_attach) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgplane_st->fence)
return -ENOMEM;
}
return 0;
}
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+}
+
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (obj->import_attach)
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -291,6 +378,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -303,6 +391,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -322,11 +411,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..f92133a01195 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
#include "virtgpu_drv.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
uuid_t *uuid)
{
@@ -142,10 +144,160 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
return buf;
}
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct scatterlist *sl;
+ struct sg_table *sgt;
+ long i, ret;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ *ents = kvmalloc_array(sgt->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+
+ *nents = sgt->nents;
+ for_each_sgtable_dma_sg(sgt, sl, i) {
+ (*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+ (*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+ (*ents)[i].padding = 0;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (attach) {
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ dma_buf_put(dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+ bo->attached = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+ .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (bo->created && kref_read(&obj->refcount)) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
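
Editor's note: move_notify is invoked by the exporter while the dma-buf reservation lock is held, which is what the importer callback above relies on. A minimal exporter-side sketch (hypothetical driver code):

/* After migrating the backing storage, tell every dynamic importer to
 * drop its mappings. */
static void example_exporter_migrate(struct dma_buf *dmabuf)
{
	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_move_notify(dmabuf); /* ends up in virtgpu_dma_buf_move_notify() */
	dma_resv_unlock(dmabuf->resv);
}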
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = virtgpu_dma_buf_move_notify
+};
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct dma_buf_attachment *attach;
+ struct virtio_gpu_object *bo;
struct drm_gem_object *obj;
+ int ret;
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
obj = buf->priv;
@@ -159,7 +311,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, buf);
+ if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ return drm_gem_prime_import(dev, buf);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &bo->base.base;
+ obj->funcs = &virtgpu_gem_dma_buf_funcs;
+ drm_gem_private_object_init(dev, obj, buf->size);
+
+ attach = dma_buf_dynamic_attach(buf, dev->dev,
+ &virtgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ kfree(bo);
+ return ERR_CAST(attach);
+ }
+
+ obj->import_attach = attach;
+ get_dma_buf(buf);
+
+ ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return obj;
}
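
Editor's note: from userspace this path is reached through the standard PRIME import ioctl. A hedged usage sketch (error handling elided):

/* Hypothetical userspace usage: import an external dma-buf fd into
 * virtio-gpu. On a non-virgl device with blob support this now creates
 * a guest blob resource instead of falling back to the generic path. */
struct drm_prime_handle args = { .fd = dmabuf_fd, .flags = 0 };

if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) == 0)
	printf("imported as GEM handle %u\n", args.handle);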
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 0d3d0d09f39b..ad91624df42d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -645,6 +645,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -1103,8 +1120,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
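
Editor's note: the attached flag turns both helpers into idempotent state transitions; a sketch of the intended lifecycle for a dma-buf backed BO:

/* Illustrative lifecycle (not driver code):
 *   import      -> virtio_gpu_object_attach()  : attached = true
 *   move_notify -> virtio_gpu_object_detach()  : attached = false, fenced
 *   next use    -> virtio_gpu_object_attach()  : backing re-attached
 * Repeated attach/detach calls in the same state are no-ops, so callers
 * need not track whether the host currently holds the backing pages. */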
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 3f0977d746be..fa269d279e25 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -24,64 +24,33 @@ static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
/**
* pre_mul_alpha_blend - alpha blending equation
- * @frame_info: Source framebuffer's metadata
* @stage_buffer: The line with the pixels from src_plane
 * @output_buffer: A line buffer that receives all the blended output
+ * @x_start: The start offset
+ * @pixel_count: The number of pixels to blend
*
- * Using the information from the `frame_info`, this blends only the
- * necessary pixels from the `stage_buffer` to the `output_buffer`
- * using premultiplied blend formula.
+ * The pixels [@x_start;@x_start+@pixel_count) in stage_buffer are blended at
+ * [@x_start;@x_start+@pixel_count) in output_buffer.
*
* The current DRM assumption is that pixel color values have been already
* pre-multiplied with the alpha channel values. See more
* drm_plane_create_blend_mode_property(). Also, this formula assumes a
* completely opaque background.
*/
-static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
- struct line_buffer *stage_buffer,
- struct line_buffer *output_buffer)
+static void pre_mul_alpha_blend(const struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, int x_start, int pixel_count)
{
- int x_dst = frame_info->dst.x1;
- struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
- struct pixel_argb_u16 *in = stage_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- stage_buffer->n_pixels);
-
- for (int x = 0; x < x_limit; x++) {
- out[x].a = (u16)0xffff;
- out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
- out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
- out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
+ struct pixel_argb_u16 *out = &output_buffer->pixels[x_start];
+ const struct pixel_argb_u16 *in = &stage_buffer->pixels[x_start];
+
+ for (int i = 0; i < pixel_count; i++) {
+ out[i].a = (u16)0xffff;
+ out[i].r = pre_mul_blend_channel(in[i].r, out[i].r, in[i].a);
+ out[i].g = pre_mul_blend_channel(in[i].g, out[i].g, in[i].a);
+ out[i].b = pre_mul_blend_channel(in[i].b, out[i].b, in[i].a);
}
}
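
Editor's note: a quick numeric check of the blend, assuming the usual premultiplied definition of pre_mul_blend_channel(), i.e. out = src + dst * (1 - alpha):

/* Worked example (u16 values, alpha 0x8000 ~ 50%):
 * in.r = 0x4000 (premultiplied), out.r = 0x8000 before the blend.
 * pre_mul_blend_channel(0x4000, 0x8000, 0x8000)
 *   = 0x4000 + 0x8000 * (0xffff - 0x8000) / 0xffff
 *   = 0x4000 + 0x3fff
 *   = 0x7fff, i.e. half of the source plus half of the background. */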
-static int get_y_pos(struct vkms_frame_info *frame_info, int y)
-{
- if (frame_info->rotation & DRM_MODE_REFLECT_Y)
- return drm_rect_height(&frame_info->rotated) - y - 1;
-
- switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_90:
- return frame_info->rotated.x2 - y - 1;
- case DRM_MODE_ROTATE_270:
- return y + frame_info->rotated.x1;
- default:
- return y;
- }
-}
-
-static bool check_limit(struct vkms_frame_info *frame_info, int pos)
-{
- if (drm_rotation_90_or_270(frame_info->rotation)) {
- if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
- return true;
- } else {
- if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
- return true;
- }
-
- return false;
-}
static void fill_background(const struct pixel_argb_u16 *background_color,
struct line_buffer *output_buffer)
@@ -96,9 +65,9 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 a_fp = drm_int2fixp(a);
s64 b_fp = drm_int2fixp(b);
- s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+ s64 delta = drm_fixp_mul(b_fp - a_fp, t);
- return drm_fixp2int(a_fp + delta);
+ return drm_fixp2int_round(a_fp + delta);
}
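
Editor's note: the switch from drm_fixp2int() to drm_fixp2int_round() matters for values that land between integers; a small illustration in 32.32 fixed point (assumed semantics of the drm_fixp helpers):

/* lerp_u16(0, 3, 0.5) computes a_fp + (b_fp - a_fp) * t = 1.5 in 32.32
 * fixed point:
 *   drm_fixp2int(1.5)        -> 1   (truncates)
 *   drm_fixp2int_round(1.5)  -> 2   (rounds to nearest)
 * so interpolated LUT entries no longer bias toward zero. */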
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
@@ -164,6 +133,226 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff
}
/**
+ * direction_for_rotation() - Get the correct reading direction for a given rotation
+ *
+ * @rotation: Rotation to analyze. It corresponds to the field @frame_info.rotation.
+ *
+ * This function uses the @rotation setting of a source plane to compute the reading
+ * direction in this plane which corresponds to a "left to right" writing on the CRTC.
+ * For example, if the buffer is reflected on the X axis, the pixels must be read from
+ * right to left to be written from left to right on the CRTC.
+ */
+static enum pixel_read_direction direction_for_rotation(unsigned int rotation)
+{
+ struct drm_rect tmp_a, tmp_b;
+ int x, y;
+
+ /*
+ * Points A and B are depicted as zero-size rectangles on the CRTC.
+ * The CRTC writing direction is from A to B. The plane reading direction
+ * is discovered by inverse-transforming the vector AB (top-left to
+ * top-right) inside a 1x1 square.
+ */
+
+ tmp_a = DRM_RECT_INIT(0, 0, 0, 0);
+ tmp_b = DRM_RECT_INIT(1, 0, 0, 0);
+ drm_rect_rotate_inv(&tmp_a, 1, 1, rotation);
+ drm_rect_rotate_inv(&tmp_b, 1, 1, rotation);
+
+ x = tmp_b.x1 - tmp_a.x1;
+ y = tmp_b.y1 - tmp_a.y1;
+
+ if (x == 1 && y == 0)
+ return READ_LEFT_TO_RIGHT;
+ else if (x == -1 && y == 0)
+ return READ_RIGHT_TO_LEFT;
+ else if (y == 1 && x == 0)
+ return READ_TOP_TO_BOTTOM;
+ else if (y == -1 && x == 0)
+ return READ_BOTTOM_TO_TOP;
+
+ WARN_ONCE(true, "The inverse of the rotation gives an incorrect direction.");
+ return READ_LEFT_TO_RIGHT;
+}
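
Editor's note: a worked case for the reflection mentioned in the kernel-doc above. With rotation = DRM_MODE_REFLECT_X (its own inverse), the two probe points transform as follows:

/* A = (0,0), B = (1,0) on the CRTC. Reflecting on the X axis inside the
 * 1x1 square maps x -> 1 - x, so:
 *   A' = (1,0), B' = (0,0)  =>  x = B'.x - A'.x = -1, y = 0
 * which selects READ_RIGHT_TO_LEFT, as the kernel-doc promises. */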
+
+/**
+ * clamp_line_coordinates() - Compute and clamp the coordinate to read and write during the blend
+ * process.
+ *
+ * @direction: direction of the reading
+ * @current_plane: current plane blended
+ * @src_line: source line of the reading. Only the top-left coordinate is used. This rectangle
+ * must be rotated and have a shape of 1*pixel_count if @direction is vertical and a shape of
+ * pixel_count*1 if @direction is horizontal.
+ * @src_x_start: x start coordinate for the line reading
+ * @src_y_start: y start coordinate for the line reading
+ * @dst_x_start: x coordinate to blend the read line
+ * @pixel_count: number of pixels to blend
+ *
+ * This function is mainly a safety net to avoid reading outside the source buffer. As the
+ * userspace should never ask to read outside the source plane, all the cases covered here should
+ * be dead code.
+ */
+static void clamp_line_coordinates(enum pixel_read_direction direction,
+ const struct vkms_plane_state *current_plane,
+ const struct drm_rect *src_line, int *src_x_start,
+ int *src_y_start, int *dst_x_start, int *pixel_count)
+{
+ /* By default the start points are correct */
+ *src_x_start = src_line->x1;
+ *src_y_start = src_line->y1;
+ *dst_x_start = current_plane->frame_info->dst.x1;
+
+ /* Get the correct number of pixels to blend; it depends on the direction */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ *pixel_count = drm_rect_width(src_line);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ *pixel_count = drm_rect_height(src_line);
+ break;
+ }
+
+ /*
+ * Clamp the coordinates to avoid reading outside the buffer.
+ *
+ * This is mainly a safety check: userspace should never request a read
+ * outside the source buffer.
+ */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ if (*src_x_start < 0) {
+ *pixel_count += *src_x_start;
+ *dst_x_start -= *src_x_start;
+ *src_x_start = 0;
+ }
+ if (*src_x_start + *pixel_count > current_plane->frame_info->fb->width)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->width -
+ *src_x_start);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ if (*src_y_start < 0) {
+ *pixel_count += *src_y_start;
+ *dst_x_start -= *src_y_start;
+ *src_y_start = 0;
+ }
+ if (*src_y_start + *pixel_count > current_plane->frame_info->fb->height)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->height -
+ *src_y_start);
+ break;
+ }
+}
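
Editor's note: a numeric example of the left-edge clamp, assuming a horizontal read:

/* Suppose src_x_start = -3, pixel_count = 10, dst_x_start = 0.
 * The first clamp branch yields:
 *   pixel_count = 10 + (-3) = 7
 *   dst_x_start = 0 - (-3)  = 3
 *   src_x_start = 0
 * i.e. the three off-buffer pixels are skipped in both the source read
 * and the destination write. */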
+
+/**
+ * blend_line() - Blend a line from a plane to the output buffer
+ *
+ * @current_plane: current plane to work on
+ * @y: line to write in the output buffer
+ * @crtc_x_limit: width of the output buffer
+ * @stage_buffer: temporary buffer to convert the pixel line from the source buffer
+ * @output_buffer: buffer to blend the read line into.
+ */
+static void blend_line(struct vkms_plane_state *current_plane, int y,
+ int crtc_x_limit, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
+{
+ int src_x_start, src_y_start, dst_x_start, pixel_count;
+ struct drm_rect dst_line, tmp_src, src_line;
+
+ /* Avoid rendering useless lines */
+ if (y < current_plane->frame_info->dst.y1 ||
+ y >= current_plane->frame_info->dst.y2)
+ return;
+
+ /*
+ * dst_line is the line to copy. The initial coordinates are inside the
+ * destination framebuffer, and then drm_rect_* helpers are used to
+ * compute the correct position into the source framebuffer.
+ */
+ dst_line = DRM_RECT_INIT(current_plane->frame_info->dst.x1, y,
+ drm_rect_width(&current_plane->frame_info->dst),
+ 1);
+
+ drm_rect_fp_to_int(&tmp_src, &current_plane->frame_info->src);
+
+ /*
+ * [1]: Clamp dst_line to crtc_x_limit to avoid writing outside of
+ * the destination buffer
+ */
+ dst_line.x1 = max_t(int, dst_line.x1, 0);
+ dst_line.x2 = min_t(int, dst_line.x2, crtc_x_limit);
+ /* The destination is completely outside of the crtc. */
+ if (dst_line.x2 <= dst_line.x1)
+ return;
+
+ src_line = dst_line;
+
+ /*
+ * Transform the x/y coordinates from the CRTC into coordinates for
+ * the src buffer.
+ *
+ * - Cancel the offset of the dst buffer.
+ * - Invert the rotation. This assumes that
+ * dst = drm_rect_rotate(src, rotation) (dst and src have the
+ * same size, but can be rotated).
+ * - Apply the offset of the source rectangle to the coordinate.
+ */
+ drm_rect_translate(&src_line, -current_plane->frame_info->dst.x1,
+ -current_plane->frame_info->dst.y1);
+ drm_rect_rotate_inv(&src_line, drm_rect_width(&tmp_src),
+ drm_rect_height(&tmp_src),
+ current_plane->frame_info->rotation);
+ drm_rect_translate(&src_line, tmp_src.x1, tmp_src.y1);
+
+ /* Get the correct reading direction in the source buffer. */
+
+ enum pixel_read_direction direction =
+ direction_for_rotation(current_plane->frame_info->rotation);
+
+ /* [2]: Compute and clamp the number of pixels to read */
+ clamp_line_coordinates(direction, current_plane, &src_line, &src_x_start, &src_y_start,
+ &dst_x_start, &pixel_count);
+
+ if (pixel_count <= 0) {
+ /* Nothing to read, so avoid multiple function calls */
+ return;
+ }
+
+ /*
+ * Modify the starting point to take into account the rotation
+ *
+ * src_line is the top-left corner, so when reading READ_RIGHT_TO_LEFT or
+ * READ_BOTTOM_TO_TOP, it must be changed to the top-right/bottom-left
+ * corner.
+ */
+ if (direction == READ_RIGHT_TO_LEFT) {
+ // src_x_start is now the right point
+ src_x_start += pixel_count - 1;
+ } else if (direction == READ_BOTTOM_TO_TOP) {
+ // src_y_start is now the bottom point
+ src_y_start += pixel_count - 1;
+ }
+
+ /*
+ * Perform the conversion and the blending
+ *
+ * Here we know that the read line (x_start, y_start, pixel_count) is
+ * inside the source buffer [2] and we don't write outside the stage
+ * buffer [1].
+ */
+ current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction,
+ pixel_count, &stage_buffer->pixels[dst_x_start]);
+
+ pre_mul_alpha_blend(stage_buffer, output_buffer,
+ dst_x_start, pixel_count);
+}
+
+/**
* blend - blend the pixels from all planes and compute crc
* @wb: The writeback frame buffer metadata
* @crtc_state: The crtc state
@@ -183,32 +372,25 @@ static void blend(struct vkms_writeback_job *wb,
{
struct vkms_plane_state **plane = crtc_state->active_planes;
u32 n_active_planes = crtc_state->num_active_planes;
- int y_pos;
const struct pixel_argb_u16 background_color = { .a = 0xffff };
- size_t crtc_y_limit = crtc_state->base.mode.vdisplay;
+ int crtc_y_limit = crtc_state->base.mode.vdisplay;
+ int crtc_x_limit = crtc_state->base.mode.hdisplay;
/*
* The planes are composed line-by-line to avoid heavy memory usage. It is a necessary
* complexity to avoid poor blending performance.
*
- * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging
- * buffer.
+ * The function pixel_read_line callback is used to read a line, using an efficient
+ * algorithm for a specific format, into the staging buffer.
*/
- for (size_t y = 0; y < crtc_y_limit; y++) {
+ for (int y = 0; y < crtc_y_limit; y++) {
fill_background(&background_color, output_buffer);
/* The active planes are composed associatively in z-order. */
for (size_t i = 0; i < n_active_planes; i++) {
- y_pos = get_y_pos(plane[i]->frame_info, y);
-
- if (!check_limit(plane[i]->frame_info, y_pos))
- continue;
-
- vkms_compose_row(stage_buffer, plane[i], y_pos);
- pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
- output_buffer);
+ blend_line(plane[i], y, crtc_x_limit, stage_buffer, output_buffer);
}
apply_lut(crtc_state, output_buffer);
@@ -216,7 +398,7 @@ static void blend(struct vkms_writeback_job *wb,
*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
if (wb)
- vkms_writeback_row(wb, output_buffer, y_pos);
+ vkms_writeback_row(wb, output_buffer, y);
}
}
@@ -227,7 +409,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
u32 n_active_planes = crtc_state->num_active_planes;
for (size_t i = 0; i < n_active_planes; i++)
- if (!planes[i]->pixel_read)
+ if (!planes[i]->pixel_read_line)
return -1;
if (active_wb && !active_wb->pixel_write)
@@ -309,8 +491,8 @@ free_stage_buffer:
void vkms_composer_worker(struct work_struct *work)
{
struct vkms_crtc_state *crtc_state = container_of(work,
- struct vkms_crtc_state,
- composer_work);
+ struct vkms_crtc_state,
+ composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
@@ -335,7 +517,7 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
crtc_state->gamma_lut.lut_length =
crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
- max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+ max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
u16_max_fp);
@@ -374,7 +556,7 @@ void vkms_composer_worker(struct work_struct *work)
drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
-static const char * const pipe_crc_sources[] = {"auto"};
+static const char *const pipe_crc_sources[] = { "auto" };
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count)
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index bbf080d32d2c..ae4e36bc337c 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -64,8 +64,8 @@ static int vkms_enable_vblank(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
out->period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
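
Editor's note: hrtimer_setup() folds the former two-step initialization into one call; the equivalence, roughly:

/* Old pattern (removed above): */
hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
out->vblank_hrtimer.function = &vkms_vblank_simulate;

/* New pattern: the callback is supplied at setup time, so the core can
 * guarantee it is set before the timer is ever started. */
hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
	      HRTIMER_MODE_REL);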
@@ -186,8 +186,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
WARN_ON(!plane_state);
if (!plane_state->visible)
@@ -203,8 +202,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i = 0;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
if (!plane_state->visible)
continue;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 2d1e95cb66e5..e0409aba9349 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -13,10 +13,10 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -34,7 +34,6 @@
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
-#define DRIVER_DATE "20180514"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -82,8 +81,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- struct vkms_crtc_state *vkms_state =
- to_vkms_crtc_state(old_crtc_state);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(old_crtc_state);
flush_work(&vkms_state->composer_work);
}
@@ -117,7 +115,6 @@ static const struct drm_driver vkms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -174,7 +171,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
- return vkms_output_init(vkmsdev, 0);
+ return vkms_output_init(vkmsdev);
}
static int vkms_create(struct vkms_config *config)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 672fe191e239..00541eff3d1b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -39,12 +39,8 @@
struct vkms_frame_info {
struct drm_framebuffer *fb;
struct drm_rect src, dst;
- struct drm_rect rotated;
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
unsigned int rotation;
- unsigned int offset;
- unsigned int pitch;
- unsigned int cpp;
};
struct pixel_argb_u16 {
@@ -56,23 +52,65 @@ struct line_buffer {
struct pixel_argb_u16 *pixels;
};
+/**
+ * typedef pixel_write_t - These functions are used to read a pixel from a
+ * &struct pixel_argb_u16, convert it to a specific format and write it to the @out_pixel
+ * buffer.
+ *
+ * @out_pixel: destination address to write the pixel
+ * @in_pixel: pixel to write
+ */
+typedef void (*pixel_write_t)(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel);
+
struct vkms_writeback_job {
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct vkms_frame_info wb_frame_info;
- void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
+ pixel_write_t pixel_write;
};
/**
+ * enum pixel_read_direction - Enum used internally by VKMS to represent a reading direction in a
+ * plane.
+ */
+enum pixel_read_direction {
+ READ_BOTTOM_TO_TOP,
+ READ_TOP_TO_BOTTOM,
+ READ_RIGHT_TO_LEFT,
+ READ_LEFT_TO_RIGHT
+};
+
+struct vkms_plane_state;
+
+/**
+ * typedef pixel_read_line_t - These functions are used to read a pixel line in the source frame,
+ * convert it to `struct pixel_argb_u16` and write it to @out_pixel.
+ *
+ * @plane: plane used as source for the pixel value
+ * @x_start: X (width) coordinate of the first pixel to copy. The caller must ensure that x_start
+ * is non-negative and smaller than @plane->frame_info->fb->width.
+ * @y_start: Y (height) coordinate of the first pixel to copy. The caller must ensure that y_start
+ * is non-negative and smaller than @plane->frame_info->fb->height.
+ * @direction: direction to use for the copy, starting at @x_start/@y_start
+ * @count: number of pixels to copy
+ * @out_pixel: pointer where to write the pixel values. They will be written from @out_pixel[0]
+ * (included) to @out_pixel[@count] (excluded). The caller must ensure that @out_pixel has a
+ * length of at least @count.
+ */
+typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[]);
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
- * @pixel_read: function to read a pixel in this plane. The creator of a struct vkms_plane_state
- * must ensure that this pointer is valid
+ * @pixel_read_line: function to read a pixel line in this plane. The creator of a
+ * struct vkms_plane_state must ensure that this pointer is valid
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
- void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+ pixel_read_line_t pixel_read_line;
};
struct vkms_plane {
@@ -212,21 +250,17 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
* vkms_output_init() - Initialize all sub-components needed for a VKMS device.
*
* @vkmsdev: VKMS device to initialize
- * @index: CRTC which can be attached to the planes. The caller must ensure that
- * @index is positive and less or equals to 31.
*/
-int vkms_output_init(struct vkms_device *vkmsdev, int index);
+int vkms_output_init(struct vkms_device *vkmsdev);
/**
* vkms_plane_init() - Initialize a plane
*
* @vkmsdev: VKMS device containing the plane
* @type: type of plane to initialize
- * @index: CRTC which can be attached to the plane. The caller must ensure that
- * @index is positive and less or equals to 31.
*/
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index);
+ enum drm_plane_type type);
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
@@ -238,7 +272,6 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index e8a5cc235ebb..39b1d7c97d45 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -10,21 +10,46 @@
#include "vkms_formats.h"
/**
- * pixel_offset() - Get the offset of the pixel at coordinates x/y in the first plane
+ * packed_pixels_offset() - Get the offset of the block containing the pixel at coordinates x/y
*
* @frame_info: Buffer metadata
* @x: The x coordinate of the wanted pixel in the buffer
* @y: The y coordinate of the wanted pixel in the buffer
+ * @plane_index: The index of the plane to use
+ * @offset: The returned offset inside the buffer of the block
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
*
- * The caller must ensure that the framebuffer associated with this request uses a pixel format
- * where block_h == block_w == 1.
- * If this requirement is not fulfilled, the resulting offset can point to an other pixel or
- * outside of the buffer.
+ * As some pixel formats store multiple pixels in a block (DRM_FORMAT_R* for example), some
+ * pixels are not individually addressable. This function returns three values: the offset of the
+ * whole block, and the coordinates of the requested pixel inside this block.
+ * For example, if the format is DRM_FORMAT_R1 and the requested coordinate is (13, 5), the offset
+ * will point to the byte 5*pitches + 13/8 (second byte of the 5th line), and the rem_x/rem_y
+ * coordinates will be (13 % 8, 5 % 1) = (5, 0)
+ *
+ * With this function, the caller just has to extract the correct pixel from the block.
*/
-static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+static void packed_pixels_offset(const struct vkms_frame_info *frame_info, int x, int y,
+ int plane_index, int *offset, int *rem_x, int *rem_y)
{
- return frame_info->offset + (y * frame_info->pitch)
- + (x * frame_info->cpp);
+ struct drm_framebuffer *fb = frame_info->fb;
+ const struct drm_format_info *format = frame_info->fb->format;
+ /* Directly using x and y to multiply pitches and format->cpp is not sufficient because
+ * in some formats a block can represent multiple pixels.
+ *
+ * Dividing x and y by the block size allows extracting the correct offset of the block
+ * containing the pixel.
+ */
+
+ int block_x = x / drm_format_info_block_width(format, plane_index);
+ int block_y = y / drm_format_info_block_height(format, plane_index);
+ int block_pitch = fb->pitches[plane_index] * drm_format_info_block_height(format,
+ plane_index);
+ *rem_x = x % drm_format_info_block_width(format, plane_index);
+ *rem_y = y % drm_format_info_block_height(format, plane_index);
+ *offset = fb->offsets[plane_index] +
+ block_y * block_pitch +
+ block_x * format->char_per_block[plane_index];
}
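
Editor's note: plugging the kernel-doc's DRM_FORMAT_R1 example into the code above (block_w = 8, block_h = 1, char_per_block = 1, pitch value illustrative):

/* x = 13, y = 5:
 *   block_x      = 13 / 8 = 1
 *   block_y      = 5 / 1  = 5
 *   block_pitch  = pitches[0] * 1
 *   offset       = offsets[0] + 5 * pitches[0] + 1  (second byte of line 5)
 *   rem_x, rem_y = 13 % 8, 5 % 1 = (5, 0)
 */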
/**
@@ -34,145 +59,266 @@ static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int
* @frame_info: Buffer metadata
* @x: The x (width) coordinate inside the plane
* @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
*
- * Takes the information stored in the frame_info, a pair of coordinates, and
- * returns the address of the first color channel.
- * This function assumes the channels are packed together, i.e. a color channel
- * comes immediately after another in the memory. And therefore, this function
- * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
+ * Takes the information stored in the frame_info, a pair of coordinates, and returns the address
+ * of the block containing this pixel and the pixel position inside this block.
*
- * The caller must ensure that the framebuffer associated with this request uses a pixel format
- * where block_h == block_w == 1, otherwise the returned pointer can be outside the buffer.
+ * See @packed_pixels_offset for details about rem_x/rem_y behavior.
*/
-static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
- int x, int y)
+static void packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr, int *rem_x,
+ int *rem_y)
{
- size_t offset = pixel_offset(frame_info, x, y);
+ int offset;
- return (u8 *)frame_info->map[0].vaddr + offset;
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, rem_x, rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
-static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+/**
+ * get_block_step_bytes() - Common helper to compute the correct step value between each pixel block
+ * to read in a certain direction.
+ *
+ * @fb: Framebuffer to iterate on
+ * @direction: Direction of the reading
+ * @plane_index: Plane to get the step from
+ *
+ * As the returned value is the number of bytes between two consecutive blocks in a direction,
+ * the caller may have to read multiple pixels before using the next one (for example, to read from
+ * left to right in a DRM_FORMAT_R1 plane, each block contains 8 pixels, so the step must be used
+ * only every 8 pixels).
+ */
+static int get_block_step_bytes(struct drm_framebuffer *fb, enum pixel_read_direction direction,
+ int plane_index)
{
- int x_src = frame_info->src.x1 >> 16;
- int y_src = y - frame_info->rotated.y1 + (frame_info->src.y1 >> 16);
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ return fb->format->char_per_block[plane_index];
+ case READ_RIGHT_TO_LEFT:
+ return -fb->format->char_per_block[plane_index];
+ case READ_TOP_TO_BOTTOM:
+ return (int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ case READ_BOTTOM_TO_TOP:
+ return -(int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ }
- return packed_pixels_addr(frame_info, x_src, y_src);
+ return 0;
}
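
Editor's note: concrete step values, for intuition (char_per_block and pitch values are illustrative):

/* XRGB8888 (4 bytes per pixel, 1x1 blocks), pitch = 4096:
 *   READ_LEFT_TO_RIGHT ->  4      READ_RIGHT_TO_LEFT -> -4
 *   READ_TOP_TO_BOTTOM ->  4096   READ_BOTTOM_TO_TOP -> -4096
 * DRM_FORMAT_R1 (8 pixels per 1-byte block):
 *   READ_LEFT_TO_RIGHT ->  1, to be applied once every 8 pixels. */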
-static int get_x_position(const struct vkms_frame_info *frame_info, int limit, int x)
+/**
+ * packed_pixels_addr_1x1() - Get the pointer to the block containing the pixel at the given
+ * coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x (width) coordinate inside the plane
+ * @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ *
+ * This function can only be used with formats where block_h == block_w == 1.
+ */
+static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr)
{
- if (frame_info->rotation & (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270))
- return limit - x - 1;
- return x;
+ int offset, rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_width(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only support formats with block_w == 1", __func__);
+ WARN_ONCE(drm_format_info_block_height(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only support formats with block_h == 1", __func__);
+
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, &rem_x,
+ &rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
/*
- * The following functions take pixel data from the buffer and convert them to the format
- * ARGB16161616 in @out_pixel.
+ * The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
+ * &struct pixel_argb_u16
*
- * They are used in the vkms_compose_row() function to handle multiple formats.
+ * They are used in the read_line functions to avoid duplicate work for some pixel formats.
*/
-static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_u8888(u8 a, u8 r, u8 g, u8 b)
{
+ struct pixel_argb_u16 out_pixel;
/*
* The 257 is the "conversion ratio". This number is obtained by the
* (2^16 - 1) / (2^8 - 1) division, which spreads the 8-bit values evenly
* over the full 16-bit range.
* A similar idea applies to other RGB color conversions.
*/
- out_pixel->a = (u16)src_pixels[3] * 257;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
-}
+ out_pixel.a = (u16)a * 257;
+ out_pixel.r = (u16)r * 257;
+ out_pixel.g = (u16)g * 257;
+ out_pixel.b = (u16)b * 257;
-static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
-{
- out_pixel->a = (u16)0xffff;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
+ return out_pixel;
}
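
Editor's note: the 257 ratio is exact at the endpoints, which is the property the comment above is after:

/* (2^16 - 1) / (2^8 - 1) = 65535 / 255 = 257 exactly, so:
 *   0x00 * 257 = 0x0000   and   0xff * 257 = 0xffff
 * i.e. black stays black and full intensity maps to full intensity. */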
-static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_u16161616(u16 a, u16 r, u16 g, u16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
- out_pixel->a = le16_to_cpu(pixels[3]);
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ out_pixel.a = a;
+ out_pixel.r = r;
+ out_pixel.g = g;
+ out_pixel.b = b;
+
+ return out_pixel;
}
-static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_le16161616(__le16 a, __le16 r, __le16 g, __le16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
-
- out_pixel->a = (u16)0xffff;
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ return argb_u16_from_u16161616(le16_to_cpu(a), le16_to_cpu(r), le16_to_cpu(g),
+ le16_to_cpu(b));
}
-static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
- u16 rgb_565 = le16_to_cpu(*pixels);
+ u16 rgb_565 = le16_to_cpu(*pixel);
s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
- out_pixel->a = (u16)0xffff;
- out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
- out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
- out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+ out_pixel.a = (u16)0xffff;
+ out_pixel.r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixel.g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixel.b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+
+ return out_pixel;
}
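
Editor's note: the same endpoint check holds for the 5/6-bit channels, using the fixed-point ratios above:

/* fp_rb_ratio = 65535 / 31, fp_g_ratio = 65535 / 63, so:
 *   r = 31 (max 5-bit) -> 31 * 65535 / 31 = 0xffff
 *   g = 63 (max 6-bit) -> 63 * 65535 / 63 = 0xffff
 * and drm_fixp2int_round() keeps the in-between values unbiased. */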
-/**
- * vkms_compose_row - compose a single row of a plane
- * @stage_buffer: output line with the composed pixels
- * @plane: state of the plane that is being composed
- * @y: y coordinate of the row
+/*
+ * The following functions are the read_line functions for each pixel format supported by VKMS.
+ *
+ * They read a line starting at the point @x_start,@y_start following the @direction. The result
+ * is stored in @out_pixel in the ARGB16161616 format.
+ *
+ * These functions are very repetitive, but the innermost pixel loops must be kept inside these
+ * functions for performance reasons. Some benchmarking was done in [1] where having the innermost
+ * loop factored out of these functions showed a slowdown by a factor of three.
*
- * This function composes a single row of a plane. It gets the source pixels
- * through the y coordinate (see get_packed_src_addr()) and goes linearly
- * through the source pixel, reading the pixels and converting it to
- * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270,
- * the source pixels are not traversed linearly. The source pixels are queried
- * on each iteration in order to traverse the pixels vertically.
+ * [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
+
+static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
{
- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
- struct vkms_frame_info *frame_info = plane->frame_info;
- u8 *src_pixels = get_packed_src_addr(frame_info, y);
- int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
- for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp) {
- int x_pos = get_x_position(frame_info, limit, x);
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
- if (drm_rotation_90_or_270(frame_info->rotation))
- src_pixels = get_packed_src_addr(frame_info, x + frame_info->rotated.y1)
- + frame_info->cpp * y;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- plane->pixel_read(src_pixels, &out_pixels[x_pos]);
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u16 *px = (u16 *)src_pixels;
+ *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+ *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+
+ *out_pixel = argb_u16_from_RGB565(px);
+ out_pixel += 1;
+ src_pixels += step;
}
}
/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
- * The result is stored in @dst_pixels.
+ * The result is stored in @out_pixel.
*
* They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to
* the writeback buffer.
*/
-static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
/*
* This sequence below is important because the format's byte order is
@@ -184,43 +330,43 @@ static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
* | Addr + 2 | = Red channel
* | Addr + 3 | = Alpha channel
*/
- dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- dst_pixels[3] = 0xff;
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = 0xff;
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(in_pixel->a);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(in_pixel->a);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(0xffff);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(0xffff);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_RGB565(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -233,7 +379,7 @@ static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
- *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ *pixel = cpu_to_le16(r << 11 | g << 5 | b);
}
/**
@@ -249,36 +395,47 @@ void vkms_writeback_row(struct vkms_writeback_job *wb,
{
struct vkms_frame_info *frame_info = &wb->wb_frame_info;
int x_dst = frame_info->dst.x1;
- u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ u8 *dst_pixels;
+ int rem_x, rem_y;
+
+ packed_pixels_addr(frame_info, x_dst, y, 0, &dst_pixels, &rem_x, &rem_y);
struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
- for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->fb->format->cpp[0])
wb->pixel_write(dst_pixels, &in_pixels[x]);
}
/**
- * get_pixel_conversion_function() - Retrieve the correct read_pixel function for a specific
+ * get_pixel_read_line_function() - Retrieve the correct read_line function for a specific
* format. Unsupported pixel formats are a kernel bug: the function BUG()s instead of returning,
* so the caller always receives a valid pointer for formats accepted by atomic check.
*
* @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
*/
-void *get_pixel_conversion_function(u32 format)
+pixel_read_line_t get_pixel_read_line_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
- return &ARGB8888_to_argb_u16;
+ return &ARGB8888_read_line;
case DRM_FORMAT_XRGB8888:
- return &XRGB8888_to_argb_u16;
+ return &XRGB8888_read_line;
case DRM_FORMAT_ARGB16161616:
- return &ARGB16161616_to_argb_u16;
+ return &ARGB16161616_read_line;
case DRM_FORMAT_XRGB16161616:
- return &XRGB16161616_to_argb_u16;
+ return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
- return &RGB565_to_argb_u16;
+ return &RGB565_read_line;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_plane_atomic_check(). All the supported
+ * formats must:
+ * - Be listed in vkms_formats in vkms_plane.c
+ * - Have a pixel_read_line callback defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS planes. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
@@ -289,7 +446,7 @@ void *get_pixel_conversion_function(u32 format)
*
* @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
*/
-void *get_pixel_write_function(u32 format)
+pixel_write_t get_pixel_write_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
@@ -303,6 +460,14 @@ void *get_pixel_write_function(u32 format)
case DRM_FORMAT_RGB565:
return &argb_u16_to_RGB565;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_writeback_atomic_check(). All the supported
+ * formats must:
+ * - Be listed in vkms_wb_formats in vkms_writeback.c
+ * - Have a pixel_write callback defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS writeback. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index cf59c2ed8e9a..8d2bef95ff79 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -5,8 +5,8 @@
#include "vkms_drv.h"
-void *get_pixel_conversion_function(u32 format);
+pixel_read_line_t get_pixel_read_line_function(u32 format);
-void *get_pixel_write_function(u32 format);
+pixel_write_t get_pixel_write_function(u32 format);
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 25a99fde126c..8f4bd5aef087 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -32,29 +32,14 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
-static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
- struct drm_crtc *crtc)
-{
- struct vkms_plane *overlay;
-
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
- if (IS_ERR(overlay))
- return PTR_ERR(overlay);
-
- if (!overlay->base.possible_crtcs)
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
-
- return 0;
-}
-
-int vkms_output_init(struct vkms_device *vkmsdev, int index)
+int vkms_output_init(struct vkms_device *vkmsdev)
{
struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct vkms_plane *primary, *cursor = NULL;
+ struct vkms_plane *primary, *overlay, *cursor = NULL;
int ret;
int writeback;
unsigned int n;
@@ -65,29 +50,31 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
* The overlay and cursor planes are not mandatory, but can be used to perform complex
* composition.
*/
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
- if (ret)
- return ret;
- }
- }
-
if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
}
- /* [1]: Allocation of a CRTC, its index will be BIT(0) = 1 */
ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
if (ret)
return ret;
+ if (vkmsdev->config->overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(overlay)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(overlay);
+ }
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ }
+ }
+
ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
@@ -103,11 +90,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
}
- /*
- * This is a hardcoded value to select crtc for the encoder.
- * BIT(0) here designate the first registered CRTC, the one allocated in [1]
- */
- encoder->possible_crtcs = BIT(0);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
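For reference, drm_crtc_mask() expands to BIT(drm_crtc_index(crtc)), so after this change the masks track whichever index the CRTC was actually registered with instead of assuming the first slot:

/* Equivalent to the old hardcoded BIT(0), but robust to registration order. */
encoder->possible_crtcs = drm_crtc_mask(crtc);
overlay->base.possible_crtcs = drm_crtc_mask(crtc);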
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e5c625ab8e3e..e2fce471870f 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -112,23 +112,12 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info = vkms_plane_state->frame_info;
memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
- memcpy(&frame_info->rotated, &new_state->dst, sizeof(struct drm_rect));
frame_info->fb = fb;
memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
drm_framebuffer_get(frame_info->fb);
- frame_info->rotation = drm_rotation_simplify(new_state->rotation, DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
-
- drm_rect_rotate(&frame_info->rotated, drm_rect_width(&frame_info->rotated),
- drm_rect_height(&frame_info->rotated), frame_info->rotation);
-
- frame_info->offset = fb->offsets[0];
- frame_info->pitch = fb->pitches[0];
- frame_info->cpp = fb->format->cpp[0];
- vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+ frame_info->rotation = new_state->rotation;
+
+ vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -198,12 +187,12 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index)
+ enum drm_plane_type type)
{
struct drm_device *dev = &vkmsdev->drm;
struct vkms_plane *plane;
- plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 1 << index,
+ plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0,
&vkms_plane_funcs,
vkms_formats, ARRAY_SIZE(vkms_formats),
NULL, type, NULL);
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 999d5c01ea81..79918b44fedd 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -149,11 +149,6 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
crtc_state->active_writeback = active_wb;
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
-
- wb_frame_info->offset = fb->offsets[0];
- wb_frame_info->pitch = fb->pitches[0];
- wb_frame_info->cpp = fb->format->cpp[0];
-
drm_writeback_queue_job(wb_conn, connector_state);
active_wb->pixel_write = get_pixel_write_function(wb_format);
drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
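The new keep_resv flag lets callers that previously open-coded ttm_bo_reserve() right after creation receive the BO already reserved. A hedged sketch of the intended call flow, modeled on the converted call sites in this series (error handling elided):

struct vmw_bo_params params = {
	.domain      = VMW_BO_DOMAIN_SYS,
	.busy_domain = VMW_BO_DOMAIN_SYS,
	.bo_type     = ttm_bo_type_kernel,
	.size        = PAGE_SIZE,
	.pin         = true,
	.keep_resv   = true,	/* vmw_bo_init() skips ttm_bo_unreserve() */
};
struct ttm_bo_kmap_obj map;
struct vmw_bo *vbo;
int ret;

ret = vmw_bo_create(dev_priv, &params, &vbo);
if (ret)
	return ret;

/* The BO is still reserved here, so it can be mapped immediately... */
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);

/* ...and the reservation is dropped explicitly once done. */
ttm_bo_unreserve(&vbo->tbo);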
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c46897876dd..0f32471c8533 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -35,7 +35,7 @@
#include "vmwgfx_vkms.h"
#include "ttm_object.h"
-#include <drm/drm_client_setup.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
@@ -1634,7 +1631,6 @@ static const struct drm_driver driver = {
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
- .date = VMWGFX_DRIVER_DATE,
.major = VMWGFX_DRIVER_MAJOR,
.minor = VMWGFX_DRIVER_MINOR,
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b21831ef214a..5275ef632d4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -57,7 +57,6 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8db38927729b..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
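Reserving both the old and the new cursor BO under one ww_acquire_ctx ties the two dma-resv locks into a single acquire context, which is what lockdep and the wound/wait machinery expect whenever more than one reservation is held at a time. A reduced sketch of the locking shape added above:

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);

	ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
	if (ret)
		goto out_fini;

	ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
	if (ret)
		goto out_unreserve_old;

	/* ... update cursor state with both BOs held ... */

	ttm_bo_unreserve(&new_bo->tbo);
out_unreserve_old:
	ttm_bo_unreserve(&old_bo->tbo);
out_fini:
	ww_acquire_fini(&ctx);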
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 39949e0a493f..f0b429525467 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -479,7 +479,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 0f4bfd98480a..32029d80b72b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -868,7 +868,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 82d18b88f4a7..114a75069e1c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1593,7 +1593,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, false);
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 8651b788e98b..aec774fa4d7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -290,8 +290,8 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc)
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
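hrtimer_setup() folds the callback assignment into initialization, replacing the hrtimer_init() plus timer.function pair shown removed above. A minimal self-contained sketch of the new idiom (the 16 ms period and the demo_* names are illustrative):

static enum hrtimer_restart demo_vblank_tick(struct hrtimer *t)
{
	/* Re-arm relative to now to simulate a periodic vblank. */
	hrtimer_forward_now(t, ms_to_ktime(16));
	return HRTIMER_RESTART;
}

static void demo_vblank_start(struct hrtimer *t)
{
	hrtimer_setup(t, demo_vblank_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(t, ms_to_ktime(16), HRTIMER_MODE_REL);
}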
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 2de0de41b8dd..0d749ed44878 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -66,7 +66,7 @@ config DRM_XE_DEBUG_MEM
bool "Enable passing SYS/VRAM addresses to user space"
default n
help
- Pass object location trough uapi. Intended for extended
+ Pass object location through uapi. Intended for extended
testing and development only.
Recommended for driver developers only.
@@ -104,5 +104,5 @@ config DRM_XE_USERPTR_INVAL_INJECT
Choose this option when debugging error paths that
are hit during checks for userptr invalidations.
- Recomended for driver developers only.
+ Recommended for driver developers only.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index bc7a04ce69fd..5c97ad6ed738 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -101,6 +101,7 @@ xe-y += xe_bb.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
+ xe_trace_lrc.o \
xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
xe_ttm_vram_mgr.o \
@@ -110,6 +111,7 @@ xe-y += xe_bb.o \
xe_vm.o \
xe_vram.o \
xe_vram_freq.o \
+ xe_vsec.o \
xe_wait_user_fence.o \
xe_wa.o \
xe_wopcm.o
@@ -124,7 +126,8 @@ xe-y += \
xe_gt_sriov_vf.o \
xe_guc_relay.o \
xe_memirq.o \
- xe_sriov.o
+ xe_sriov.o \
+ xe_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
@@ -206,6 +209,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
+ i915-display/intel_display_conversion.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index b54fe40fc5a9..fee385532fb0 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -134,6 +134,8 @@ enum xe_guc_action {
XE_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
XE_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
XE_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ XE_GUC_ACTION_REGISTER_G2G = 0x4507,
+ XE_GUC_ACTION_DEREGISTER_G2G = 0x4508,
XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
@@ -218,4 +220,22 @@ enum xe_guc_tlb_inval_mode {
XE_GUC_TLB_INVAL_MODE_LITE = 0x1,
};
+/*
+ * GuC to GuC communication (de-)registration fields:
+ */
+enum xe_guc_g2g_type {
+ XE_G2G_TYPE_IN = 0x0,
+ XE_G2G_TYPE_OUT,
+ XE_G2G_TYPE_LIMIT,
+};
+
+#define XE_G2G_REGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_REGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_REGISTER_TYPE REG_GENMASK(11, 8)
+#define XE_G2G_REGISTER_SIZE REG_GENMASK(7, 0)
+
+#define XE_G2G_DEREGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+
#endif
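These masks describe how a REGISTER_G2G / DEREGISTER_G2G action dword is packed. A hedged sketch of composing the register descriptor with the REG_FIELD_PREP helper; the dev/tile/size variables and the 4K size granularity are assumptions for illustration, not taken from the ABI header:

u32 desc = REG_FIELD_PREP(XE_G2G_REGISTER_DEVICE, dev) |
	   REG_FIELD_PREP(XE_G2G_REGISTER_TILE, tile) |
	   REG_FIELD_PREP(XE_G2G_REGISTER_TYPE, XE_G2G_TYPE_IN) |
	   REG_FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K);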
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index b6a1852749dd..0b28659d94e9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -502,6 +502,44 @@
#define VF2GUC_VF_RESET_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/**
+ * DOC: VF2GUC_NOTIFY_RESFIX_DONE
+ *
+ * This action is used by VF to notify the GuC that the VF KMD has completed
+ * post-migration recovery steps.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE` = 0x5508 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE 0x5508u
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN GUC_HXG_REQUEST_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
* DOC: VF2GUC_QUERY_SINGLE_KLV
*
* This action is used by VF to query value of the single KLV data.
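Per the request layout in the VF2GUC_NOTIFY_RESFIX_DONE table above, the whole request is a single dword. A sketch of encoding it with the generic HXG field macros from guc_messages_abi.h (DATA0 is MBZ, so it is simply left zero):

u32 request[VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN] = {
	FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
	FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
	FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
		   GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
};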
diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
index e7898edc6236..dd4117553739 100644
--- a/drivers/gpu/drm/xe/abi/guc_capture_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
@@ -25,7 +25,7 @@ enum guc_state_capture_type {
#define GUC_STATE_CAPTURE_TYPE_MAX (GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)
-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
enum guc_capture_list_class_type {
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 37606cf8cc5e..d633f1c739e4 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -132,7 +132,7 @@ enum {
* _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
* This config sets whether strict scheduling is enabled whereby any VF
* that doesn’t have work to submit is still allocated a fixed execution
- * time-slice to ensure active VFs execution is always consitent even
+ * time-slice to ensure active VFs execution is always consistent even
 * during other VF reprovisioning / rebooting events. Changing this KLV
* impacts all VFs and takes effect on the next VF-Switch event.
*
@@ -207,7 +207,7 @@ enum {
* of and this will never be perfectly-exact (accumulated nano-second
 * granularity) since the GPU's clock time runs off a different crystal
 * from the CPU's clock. Changing this KLV on a VF that is currently
- * running a context wont take effect until a new context is scheduled in.
+ * running a context won't take effect until a new context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
* infinitely long compute or shader kernel. In such a scenario, the
@@ -227,7 +227,7 @@ enum {
* HW is capable and this will never be perfectly-exact (accumulated
 * nano-second granularity) since the GPU's clock time runs off a
 * different crystal from the CPU's clock. Changing this KLV on a VF
- * that is currently running a context wont take effect until a new
+ * that is currently running a context won't take effect until a new
* context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
@@ -291,6 +291,14 @@ enum {
*
* :0: (default)
* :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_SCHED_PRIORITY` : 0x8A0C
+ * This config controls VF’s scheduling priority.
+ *
+ * :0: LOW = schedule VF only if it has active work (default)
+ * :1: NORMAL = schedule VF always, irrespective of whether it has work or not
+ * :2: HIGH = schedule VF in the next time-slice after current active
+ * time-slice completes if it has active work
*/
#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001
@@ -343,6 +351,12 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY 0x8a0c
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN 1u
+#define GUC_SCHED_PRIORITY_LOW 0u
+#define GUC_SCHED_PRIORITY_NORMAL 1u
+#define GUC_SCHED_PRIORITY_HIGH 2u
+
/*
* Workaround keys:
*/
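A KLV is streamed as a 32-bit header (key in bits 31:16, value length in dwords in bits 15:0) followed by the value dwords, so raising a VF to HIGH priority with the new key would look roughly as below; GUC_KLV_0_KEY and GUC_KLV_0_LEN are assumed to be the generic header masks defined earlier in this file:

u32 klv[] = {
	FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY) |
	FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN),
	GUC_SCHED_PRIORITY_HIGH,	/* one value dword, per _LEN = 1u */
};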
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 0382beb4035b..4fc3e535de91 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,6 +10,11 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
+static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_xe_device(drm)->uncore;
+}
+
static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
@@ -117,10 +122,19 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+ bool atomic;
+
+ /*
+ * Replicate the behavior from i915 here, in which sleep is not
+ * performed if slow_timeout_ms == 0. This is necessary because
+ * of some paths in display code where waits are done in atomic
+ * context.
+ */
+ atomic = !slow_timeout_ms && fast_timeout_us > 0;
return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
- out_value, false);
+ out_value, atomic);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
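The rule encoded above is driven entirely by the arguments at the call site: slow_timeout_ms == 0 means the caller may be in atomic context, so xe_mmio_wait32() busy-waits instead of sleeping. A hedged usage sketch with placeholder register, mask and value:

/* May sleep: poll fast for 100 us, then sleep-wait up to 10 ms. */
ret = __intel_wait_for_register(uncore, reg, mask, value, 100, 10, &val);

/* Atomic context: 500 us of pure busy-waiting, no sleeping allowed. */
ret = __intel_wait_for_register(uncore, reg, mask, value, 500, 0, &val);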
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
index d429d421ac70..d429d421ac70 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index a7dbc6554d69..ac4cda2d81c7 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -53,18 +53,7 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
bool intel_irqs_enabled(struct xe_device *xe)
{
- /*
- * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
- * lock its transitions. Because of that, the irq.enabled sometimes
- * is not read with the irq.lock in place.
- * However, the most critical cases like vblank and page flips are
- * properly using the locks.
- * We cannot take the lock in here or run any kind of assert because
- * of i915 inconsistency.
- * But at this point the xe irq is better protected against races,
- * although the full solution would be protecting the i915 side.
- */
- return xe->irq.enabled;
+ return atomic_read(&xe->irq.enabled);
}
void intel_synchronize_irq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 9f54fad0f1c0..b463f5bd4eed 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -40,31 +40,8 @@ int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
- struct ttm_bo_kmap_obj map;
- void *src;
- bool is_iomem;
- int ret;
- ret = xe_bo_lock(bo, true);
- if (ret)
- return ret;
-
- ret = ttm_bo_kmap(&bo->ttm, offset >> PAGE_SHIFT, 1, &map);
- if (ret)
- goto out_unlock;
-
- offset &= ~PAGE_MASK;
- src = ttm_kmap_obj_virtual(&map, &is_iomem);
- src += offset;
- if (is_iomem)
- memcpy_fromio(dst, (void __iomem *)src, size);
- else
- memcpy(dst, src, size);
-
- ttm_bo_kunmap(&map);
-out_unlock:
- xe_bo_unlock(bo);
- return ret;
+ return xe_bo_read(bo, offset, dst, size);
}
struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index b5502f335f53..b3921dbc52ff 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -22,6 +22,7 @@
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
@@ -103,11 +104,12 @@ int xe_display_create(struct xe_device *xe)
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_power_domains_cleanup(xe);
+ intel_power_domains_cleanup(display);
}
int xe_display_init_nommio(struct xe_device *xe)
@@ -132,7 +134,7 @@ static void xe_display_fini_noirq(void *arg)
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_noirq(xe);
+ intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
}
@@ -144,7 +146,7 @@ int xe_display_init_noirq(struct xe_device *xe)
if (!xe->info.probe_display)
return 0;
- intel_display_driver_early_probe(xe);
+ intel_display_driver_early_probe(display);
/* Early display init.. */
intel_opregion_setup(display);
@@ -157,9 +159,9 @@ int xe_display_init_noirq(struct xe_device *xe)
intel_bw_init_hw(xe);
- intel_display_device_info_runtime_init(xe);
+ intel_display_device_info_runtime_init(display);
- err = intel_display_driver_probe_noirq(xe);
+ err = intel_display_driver_probe_noirq(display);
if (err) {
intel_opregion_cleanup(display);
return err;
@@ -171,21 +173,23 @@ int xe_display_init_noirq(struct xe_device *xe)
static void xe_display_fini_noaccel(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_nogem(xe);
+ intel_display_driver_remove_nogem(display);
}
int xe_display_init_noaccel(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
if (!xe->info.probe_display)
return 0;
- err = intel_display_driver_probe_nogem(xe);
+ err = intel_display_driver_probe_nogem(display);
if (err)
return err;
@@ -194,10 +198,12 @@ int xe_display_init_noaccel(struct xe_device *xe)
int xe_display_init(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return 0;
- return intel_display_driver_probe(xe);
+ return intel_display_driver_probe(display);
}
void xe_display_fini(struct xe_device *xe)
@@ -215,30 +221,36 @@ void xe_display_fini(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_register(xe);
+ intel_display_driver_register(display);
+ intel_power_domains_enable(display);
intel_register_dsm_handler();
- intel_power_domains_enable(xe);
}
void xe_display_unregister(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
intel_unregister_dsm_handler();
- intel_power_domains_disable(xe);
- intel_display_driver_unregister(xe);
+ intel_power_domains_disable(display);
+ intel_display_driver_unregister(display);
}
void xe_display_driver_remove(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_remove(xe);
+ intel_display_driver_remove(display);
}
/* IRQ-related functions */
@@ -322,25 +334,22 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
* We do a lot of poking in a lot of registers, make sure they work
* properly.
*/
- intel_power_domains_disable(xe);
+ intel_power_domains_disable(display);
if (!runtime)
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
if (!runtime && has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- if (!runtime)
- intel_dp_mst_suspend(xe);
-
intel_hpd_cancel_work(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_suspend_access(xe);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&xe->display);
}
@@ -364,20 +373,20 @@ void xe_display_pm_shutdown(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- intel_power_domains_disable(xe);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- intel_dp_mst_suspend(xe);
+ intel_dp_mst_suspend(display);
intel_hpd_cancel_work(xe);
if (has_display(xe))
- intel_display_driver_suspend_access(xe);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
intel_encoder_shutdown_all(display);
@@ -402,17 +411,37 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_power_suspend_late(display, s2idle);
+}
+
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ if (xe->d3cold.allowed)
+ xe_display_pm_suspend_late(xe);
- intel_display_power_suspend_late(xe);
+ /*
+ * If xe_display_pm_suspend_late() is not called, it is likely
+ * that we will be on dynamic DC states with DMC wakelock enabled. We
+ * need to flush the release work in that case.
+ */
+ intel_dmc_wl_flush_release_work(display);
}
void xe_display_pm_shutdown_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -421,17 +450,17 @@ void xe_display_pm_shutdown_late(struct xe_device *xe)
* for now leaving all display power wells in the INIT power domain
* enabled.
*/
- intel_power_domains_driver_remove(xe);
+ intel_power_domains_driver_remove(display);
}
void xe_display_pm_resume_early(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_power_resume_early(xe);
-
- intel_power_domains_resume(xe);
+ intel_display_power_resume_early(display);
}
static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
@@ -446,20 +475,17 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
if (has_display(xe))
drm_mode_config_reset(&xe->drm);
- intel_display_driver_init_hw(xe);
- intel_hpd_init(xe);
+ intel_display_driver_init_hw(display);
if (!runtime && has_display(xe))
- intel_display_driver_resume_access(xe);
+ intel_display_driver_resume_access(display);
- /* MST sideband requires HPD interrupts enabled */
- if (!runtime)
- intel_dp_mst_resume(xe);
+ intel_hpd_init(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_resume(xe);
+ intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
- intel_display_driver_enable_user_access(xe);
+ intel_display_driver_enable_user_access(display);
}
if (has_display(xe))
@@ -470,7 +496,7 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
if (!runtime)
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(xe);
+ intel_power_domains_enable(display);
}
void xe_display_pm_resume(struct xe_device *xe)
@@ -495,21 +521,23 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
static void display_device_remove(struct drm_device *dev, void *arg)
{
- struct xe_device *xe = arg;
+ struct intel_display *display = arg;
- intel_display_device_remove(xe);
+ intel_display_device_remove(display);
}
int xe_display_probe(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct intel_display *display;
int err;
if (!xe->info.probe_display)
goto no_display;
- intel_display_device_probe(xe);
+ display = intel_display_device_probe(pdev);
- err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
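drmm_add_action_or_reset() registers a cleanup action on the drm_device's managed-resource list and, if registration fails, runs the action immediately, so the probed display object can never leak. A minimal sketch of the pattern (demo_display_remove is an illustrative name):

static void demo_display_remove(struct drm_device *dev, void *arg)
{
	/* 'arg' is the pointer handed to drmm_add_action_or_reset(). */
	intel_display_device_remove(arg);
}

/* Tear down 'display' automatically on the final drm_dev_put()... */
err = drmm_add_action_or_reset(&xe->drm, demo_display_remove, display);
if (err)
	return err;	/* ...or right now, since registration failed. */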
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 17afa537aee5..233f81a26c25 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -41,6 +41,7 @@ void xe_display_pm_shutdown_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -74,6 +75,7 @@ static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 761510ae0690..9fa51b84737c 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -161,7 +161,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
}
vma->dpt = dpt;
- vma->node = dpt->ggtt_node;
+ vma->node = dpt->ggtt_node[tile0->id];
return 0;
}
@@ -213,8 +213,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node;
+ if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[ggtt->tile->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
@@ -345,10 +345,12 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
+ u8 tile_id = vma->node->ggtt->tile->id;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
- vma->bo->ggtt_node->base.start != vma->node->base.start)
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+ vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 8c113463a3d5..2a2f250fa495 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
-
- /*
- * Flip to the newly created mapping ASAP, so we can re-use the
- * first part of GGTT for WOPCM, prevent flickering, and prevent
- * the lookup of sysmem scratch pages.
- */
- plane->check_plane(crtc_state, plane_state);
- plane->async_flip(NULL, plane, crtc_state, plane_state, true);
return;
nofb:
@@ -275,12 +265,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -294,7 +284,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -302,7 +292,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 7c78496e6213..b732c89816df 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -53,7 +53,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)
@@ -83,6 +82,8 @@
#define RING_IMR(base) XE_REG((base) + 0xa8)
#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
+#define CS_INT_VEC(base) XE_REG((base) + 0x1b8)
+
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
@@ -138,6 +139,7 @@
#define RING_MODE(base) XE_REG((base) + 0x29c)
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
+#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 0c9e4b2fafab..162f18e975da 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -445,6 +445,8 @@
#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SMP_WAIT_FETCH_MERGING_COUNTER REG_GENMASK(11, 10)
+#define SMP_FORCE_128B_OVERFETCH REG_FIELD_PREP(SMP_WAIT_FETCH_MERGING_COUNTER, 1)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 045dfd09db99..57944f90bbf6 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -25,6 +25,9 @@
#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+#define CTX_CS_INT_VEC_REG 0x5a
+#define CTX_CS_INT_VEC_DATA (CTX_CS_INT_VEC_REG + 1)
+
#define INDIRECT_CTX_RING_HEAD (0x02 + 1)
#define INDIRECT_CTX_RING_TAIL (0x04 + 1)
#define INDIRECT_CTX_RING_START (0x06 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a9b0091cb7ee..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -41,14 +41,6 @@
#define OAG_OABUFFER XE_REG(0xdb08)
#define OABUFFER_SIZE_MASK REG_GENMASK(5, 3)
-#define OABUFFER_SIZE_128K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
-#define OABUFFER_SIZE_256K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
-#define OABUFFER_SIZE_512K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
-#define OABUFFER_SIZE_1M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
-#define OABUFFER_SIZE_2M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
-#define OABUFFER_SIZE_4M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
-#define OABUFFER_SIZE_8M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
-#define OABUFFER_SIZE_16M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
@@ -59,10 +51,15 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
#define OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL REG_BIT(13)
+#define OAG_OA_DEBUG_BUF_SIZE_SELECT REG_BIT(12)
#define OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL REG_BIT(8)
#define OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL REG_BIT(7)
#define OAG_OA_DEBUG_INCLUDE_CLK_RATIO REG_BIT(6)
@@ -85,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
new file mode 100644
index 000000000000..f45abcd96ba8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_PMT_H_
+#define _XE_PMT_H_
+
+#define SOC_BASE 0x280000
+
+#define BMG_PMT_BASE_OFFSET 0xDB000
+#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+
+#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
+#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
+
+#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
+#define SG_REMAP_BITS REG_GENMASK(31, 24)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index 51fd40ffafcb..0eedd6c26b1b 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -13,7 +13,7 @@
/**
* struct xe_reg - Register definition
*
- * Register defintion to be used by the individual register. Although the same
+ * Register definition to be used by the individual register. Although the same
* definition is used for xe_reg and xe_reg_mcr, they use different internal
* APIs for accesses.
*/
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 3293172b0128..6cf282618836 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -44,12 +44,16 @@
#define MTL_RP_STATE_CAP XE_REG(0x138000)
+#define MTL_GT_RPA_FREQUENCY XE_REG(0x138008)
#define MTL_GT_RPE_FREQUENCY XE_REG(0x13800c)
#define MTL_MEDIAP_STATE_CAP XE_REG(0x138020)
#define MTL_RPN_CAP_MASK REG_GENMASK(24, 16)
#define MTL_RP0_CAP_MASK REG_GENMASK(8, 0)
+#define MTL_MPA_FREQUENCY XE_REG(0x138028)
+#define MTL_RPA_MASK REG_GENMASK(8, 0)
+
#define MTL_MPE_FREQUENCY XE_REG(0x13802c)
#define MTL_RPE_MASK REG_GENMASK(8, 0)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3e0ae40ebbd2..6795d1d916e4 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -49,6 +49,13 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
}
+
+ if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
+ dma_fence_put(fence);
+ KUNIT_FAIL(test, "Timeout while clearing bo.\n");
+ return -ETIME;
+ }
+
dma_fence_put(fence);
}
@@ -257,10 +264,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
@@ -599,8 +605,6 @@ static void xe_bo_shrink_kunit(struct kunit *test)
static struct kunit_case xe_bo_tests[] = {
KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
- KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
- {.speed = KUNIT_SPEED_SLOW}),
{}
};
@@ -611,3 +615,17 @@ struct kunit_suite xe_bo_test_suite = {
.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
+
+static struct kunit_case xe_bo_shrink_test[] = {
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_shrink_test_suite = {
+ .name = "xe_bo_shrink",
+ .test_cases = xe_bo_shrink_test,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
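The bounded wait added to ccs_test_migrate() above follows the standard dma_fence idiom: dma_fence_wait_timeout() returns the remaining jiffies on success, 0 on timeout, or a negative error, and the fence reference must be dropped on every path. A compact sketch:

long timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);

dma_fence_put(fence);
if (timeout < 0)
	return (int)timeout;	/* wait machinery reported an error */
if (!timeout)
	return -ETIME;		/* the 5 second budget expired */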
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 0d36ab864ec0..81277c77016d 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -6,11 +6,13 @@
#include <kunit/test.h>
extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 3bbdb362d6f0..d5fe0ea889ad 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -83,7 +83,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -642,7 +643,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -666,7 +669,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -690,7 +694,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 6f9b7a266b41..ef1e5256c56a 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -58,7 +58,7 @@ static void read_l3cc_table(struct xe_gt *gt,
mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
} else {
- /* Just re-use value read on previous iteration */
+ /* Just reuse value read on previous iteration */
reg_val >>= 16;
}
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 04d6b95c6d87..68fe70ce2be3 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -14,7 +14,7 @@
#include "xe_step.h"
/**
- * DOC: Xe ASSERTs
+ * DOC: Xe Asserts
*
 * While the Xe driver aims to be simpler than the legacy i915 driver, it is still
* complex enough that some changes introduced while adding new functionality
@@ -103,7 +103,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")
#define xe_assert_msg(xe, condition, msg, arg...) ({ \
@@ -138,7 +138,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_tile_assert(tile, condition) xe_tile_assert_msg((tile), condition, "")
#define xe_tile_assert_msg(tile, condition, msg, arg...) ({ \
@@ -162,7 +162,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_gt_assert(gt, condition) xe_gt_assert_msg((gt), condition, "")
#define xe_gt_assert_msg(gt, condition, msg, arg...) ({ \
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index ef777dbdf4ec..9570672fce33 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -41,7 +41,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
/*
* We need to allocate space for the requested number of dwords,
* one additional MI_BATCH_BUFFER_END dword, and additional buffer
- * space to accomodate the platform-specific hardware prefetch
+ * space to accommodate the platform-specific hardware prefetch
* requirements.
*/
bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index f61a8ef38094..3f5391d416d4 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -162,6 +162,15 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
}
}
+static bool force_contiguous(u32 bo_flags)
+{
+ /*
+ * For eviction / restore on suspend / resume, objects pinned in VRAM
+ * must be contiguous; also, only contiguous BOs support xe_bo_vmap.
+ */
+ return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
@@ -175,12 +184,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
- /*
- * For eviction / restore on suspend / resume objects
- * pinned in VRAM must be contiguous
- */
- if (bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT))
+ if (force_contiguous(bo_flags))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
@@ -212,8 +216,7 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT) ?
+ .flags = force_contiguous(bo_flags) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -442,6 +445,14 @@ static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
kfree(tt);
}
+static bool xe_ttm_resource_visible(struct ttm_resource *mem)
+{
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(mem);
+
+ return vres->used_visible_size == mem->size;
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -453,11 +464,9 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_ttm_vram_mgr_resource *vres =
- to_xe_ttm_vram_mgr_resource(mem);
struct xe_mem_region *vram = res_to_mem_region(mem);
- if (vres->used_visible_size < mem->size)
+ if (!xe_ttm_resource_visible(mem))
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -777,7 +786,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
* / resume, some of the pinned memory is required for the
* device to resume / use the GPU to move other evicted memory
* (user memory) around. This likely could be optimized a bit
- * futher where we find the minimum set of pinned memory
+ * further where we find the minimum set of pinned memory
 * required for resume, but for simplicity we do a memcpy for all
* pinned memory.
*/
@@ -866,7 +875,7 @@ out:
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
- * On successful completion, the object memory will be moved to sytem memory.
+ * On successful completion, the object memory will be moved to system memory.
*
 * This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -884,6 +893,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
};
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_resource *new_mem;
int ret;
@@ -945,6 +955,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = false,
};
struct ttm_resource *new_mem;
struct ttm_place *place = &bo->placements[0];
@@ -1114,7 +1125,8 @@ static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operati
static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
{
struct ttm_operation_ctx ctx = {
- .interruptible = false
+ .interruptible = false,
+ .gfp_retry_mayfail = false,
};
if (ttm_bo->ttm) {
@@ -1126,6 +1138,52 @@ static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
}
}
+static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct iosys_map vmap;
+ struct xe_res_cursor cursor;
+ struct xe_mem_region *vram;
+ int bytes_left = len;
+
+ xe_bo_assert_held(bo);
+ xe_device_assert_mem_access(xe);
+
+ if (!mem_type_is_vram(ttm_bo->resource->mem_type))
+ return -EIO;
+
+ /* FIXME: Use GPU for non-visible VRAM */
+ if (!xe_ttm_resource_visible(ttm_bo->resource))
+ return -EIO;
+
+ vram = res_to_mem_region(ttm_bo->resource);
+ xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
+ bo->size - (offset & PAGE_MASK), &cursor);
+
+ do {
+ unsigned long page_offset = (offset & ~PAGE_MASK);
+ int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
+
+ iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
+ cursor.start);
+ if (write)
+ xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
+ else
+ xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
+
+ buf += byte_count;
+ offset += byte_count;
+ bytes_left -= byte_count;
+ if (bytes_left)
+ xe_res_next(&cursor, PAGE_SIZE);
+ } while (bytes_left);
+
+ return len;
+}
+
const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
@@ -1135,6 +1193,7 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.move = xe_bo_move,
.io_mem_reserve = xe_ttm_io_mem_reserve,
.io_mem_pfn = xe_ttm_io_mem_pfn,
+ .access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
.eviction_valuable = ttm_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
@@ -1145,6 +1204,8 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct xe_tile *tile;
+ u8 id;
if (bo->ttm.base.import_attach)
drm_prime_gem_destroy(&bo->ttm.base, NULL);
@@ -1152,8 +1213,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node && bo->ggtt_node->base.size)
- xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
+ for_each_tile(tile, xe, id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
+ xe_ggtt_remove_bo(tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
if (bo->client)
@@ -1251,11 +1313,50 @@ out:
return ret;
}
+static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = ttm_bo_vm_access(vma, addr, buf, len, write);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+/**
+ * xe_bo_read() - Read from an xe_bo
+ * @bo: The buffer object to read from.
+ * @offset: The byte offset to start reading from.
+ * @dst: Location to store the read.
+ * @size: Size in bytes for the read.
+ *
+ * Read @size bytes from the @bo, starting from @offset, storing into @dst.
+ *
+ * Return: Zero on success, or negative error.
+ */
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
+{
+ int ret;
+
+ ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
+ if (ret >= 0 && ret != size)
+ ret = -EIO;
+ else if (ret == size)
+ ret = 0;
+
+ return ret;
+}
+
static const struct vm_operations_struct xe_gem_vm_ops = {
.fault = xe_gem_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = xe_bo_vm_access,
};
static const struct drm_gem_object_funcs xe_gem_object_funcs = {
@@ -1269,7 +1370,7 @@ static const struct drm_gem_object_funcs xe_gem_object_funcs = {
/**
* xe_bo_alloc - Allocate storage for a struct xe_bo
*
- * This funcition is intended to allocate storage to be used for input
+ * This function is intended to allocate storage to be used for input
* to __xe_bo_create_locked(), in case a pointer to the bo to be
* created is needed before the call to __xe_bo_create_locked().
* If __xe_bo_create_locked ends up never being called, then the
@@ -1309,6 +1410,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement *placement;
uint32_t alignment;
@@ -1323,6 +1425,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
+ /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
+ if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
+ return ERR_PTR(-EINVAL);
+
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
@@ -1513,19 +1619,29 @@ __xe_bo_create_locked(struct xe_device *xe,
bo->vm = vm;
if (bo->flags & XE_BO_FLAG_GGTT) {
- if (!tile && flags & XE_BO_FLAG_STOLEN)
- tile = xe_device_get_root_tile(xe);
+ struct xe_tile *t;
+ u8 id;
- xe_assert(xe, tile);
+ if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
+ tile = xe_device_get_root_tile(xe);
- if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
- err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
- start + bo->size, U64_MAX);
- } else {
- err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
+ xe_assert(xe, tile);
+ }
+
+ for_each_tile(t, xe, id) {
+ if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
+ continue;
+
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
+ err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
+ start + bo->size, U64_MAX);
+ } else {
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ }
+ if (err)
+ goto err_unlock_put_bo;
}
- if (err)
- goto err_unlock_put_bo;
}
return bo;
@@ -1908,6 +2024,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
if (vm) {
@@ -1918,6 +2035,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
+ trace_xe_bo_validate(bo);
return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}
@@ -1969,13 +2087,15 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
int xe_bo_vmap(struct xe_bo *bo)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
void *virtual;
bool is_iomem;
int ret;
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
+ !force_contiguous(bo->flags)))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -2251,6 +2371,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
struct ttm_place requested;
@@ -2291,7 +2412,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
* @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
- * placement. Ths function blocks until the object has been fully moved.
+ * placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
@@ -2301,6 +2422,7 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
.interruptible = false,
.no_wait_gpu = false,
.force_alloc = force_alloc,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
int ret;
@@ -2380,14 +2502,18 @@ void xe_bo_put_commit(struct llist_head *deferred)
void xe_bo_put(struct xe_bo *bo)
{
+ struct xe_tile *tile;
+ u8 id;
+
might_sleep();
if (bo) {
#ifdef CONFIG_PROC_FS
if (bo->client)
might_lock(&bo->client->bos_lock);
#endif
- if (bo->ggtt_node && bo->ggtt_node->ggtt)
- might_lock(&bo->ggtt_node->ggtt->lock);
+ for_each_tile(tile, xe_bo_device(bo), id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
+ might_lock(&bo->ggtt_node[id]->ggtt->lock);
drm_gem_object_put(&bo->ttm.base);
}
}
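The copy loop in xe_ttm_access_memory() above advances in page-bounded chunks: each iteration copies at most up to the next page boundary before stepping the resource cursor. A minimal sketch of just that chunking arithmetic, in isolation (PAGE_SIZE and the function name are stand-ins, not driver code):

#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Walk [offset, offset + len) one page-bounded chunk at a time, the
 * same split the xe_ttm_access_memory() loop performs per memcpy. */
static void for_each_page_chunk(unsigned long offset, int len)
{
	int bytes_left = len;

	do {
		unsigned long page_offset = offset & ~PAGE_MASK;
		int byte_count = (int)(PAGE_SIZE - page_offset);

		if (byte_count > bytes_left)
			byte_count = bytes_left;

		/* copy byte_count bytes at page_offset here */

		offset += byte_count;
		bytes_left -= byte_count;
	} while (bytes_left);
}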
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 7fa44a0138b0..43bf6f140d40 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,10 +39,22 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
+#define XE_BO_FLAG_GGTT0 BIT(18)
+#define XE_BO_FLAG_GGTT1 BIT(19)
+#define XE_BO_FLAG_GGTT2 BIT(20)
+#define XE_BO_FLAG_GGTT3 BIT(21)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
/* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTTx(tile) \
+ (XE_BO_FLAG_GGTT0 << (tile)->id)
+
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
@@ -194,18 +206,29 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
}
static inline u32
-xe_bo_ggtt_addr(struct xe_bo *bo)
+__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
+
+ if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
- XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
- return bo->ggtt_node->base.start;
+ XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
+ return ggtt_node->base.start;
+}
+
+static inline u32
+xe_bo_ggtt_addr(struct xe_bo *bo)
+{
+ xe_assert(xe_bo_device(bo), bo->tile);
+
+ return __xe_bo_ggtt_addr(bo, bo->tile->id);
}
int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
@@ -318,7 +341,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -333,4 +355,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
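XE_BO_FLAG_GGTTx() derives the per-tile GGTT flag by shifting XE_BO_FLAG_GGTT0 left by the tile id, so tiles 0..3 map to bits 18..21 and OR-ing all four gives XE_BO_FLAG_GGTT_ALL. A quick compile-time check of that arithmetic (a sketch, assuming at most four tiles):

#define BIT(n)			(1u << (n))
#define FLAG_GGTT0		BIT(18)
#define FLAG_GGTTx(tile_id)	(FLAG_GGTT0 << (tile_id))

/* tile 3 lands on BIT(21), the last bit covered by GGTT_ALL */
_Static_assert(FLAG_GGTTx(3) == BIT(21), "per-tile flag shift");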
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index f57d440cc95a..25a884c64bf1 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -41,7 +41,7 @@
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
* user BOs are evictable and user BOs are never pinned by XE. The allocation of
- * the backing store can be defered from creation time until first use which is
+ * the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
* Private BOs
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 8fb2be061003..6a40eedd9db1 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -152,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe)
}
if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile = bo->tile;
+ struct xe_tile *tile;
+ u8 id;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ for_each_tile(tile, xe, id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
/*
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 13c6d8a69e91..46dc9e4e3e46 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -10,9 +10,9 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_device_types.h"
#include "xe_ggtt_types.h"
struct xe_device;
@@ -39,8 +39,8 @@ struct xe_bo {
struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
/** @placement: current placement for this BO */
struct ttm_placement placement;
- /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct xe_ggtt_node *ggtt_node;
+ /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */
+ struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 21a50d539426..39fe485d2085 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -30,30 +30,39 @@
/**
* DOC: Xe device coredump
*
- * Devices overview:
* Xe uses dev_coredump infrastructure for exposing the crash errors in a
- * standardized way.
- * devcoredump exposes a temporary device under /sys/class/devcoredump/
- * which is linked with our card device directly.
- * The core dump can be accessed either from
- * /sys/class/drm/card<n>/device/devcoredump/ or from
- * /sys/class/devcoredump/devcd<m> where
- * /sys/class/devcoredump/devcd<m>/failing_device is a link to
- * /sys/class/drm/card<n>/device/.
+ * standardized way. Once a crash occurs, devcoredump exposes a temporary
+ * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
+ * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
+ * ``failing_device`` symlink points to the device that crashed and created the
+ * coredump.
*
- * Snapshot at hang:
- * The 'data' file is printed with a drm_printer pointer at devcoredump read
- * time. For this reason, we need to take snapshots from when the hang has
- * happened, and not only when the user is reading the file. Otherwise the
- * information is outdated since the resets might have happened in between.
+ * The following characteristics are observed by xe when creating a device
+ * coredump:
*
- * 'First' failure snapshot:
- * In general, the first hang is the most critical one since the following hangs
- * can be a consequence of the initial hang. For this reason we only take the
- * snapshot of the 'first' failure and ignore subsequent calls of this function,
- * at least while the coredump device is alive. Dev_coredump has a delayed work
- * queue that will eventually delete the device and free all the dump
- * information.
+ * **Snapshot at hang**:
+ * The 'data' file contains a snapshot of the HW and driver states at the time
+ * the hang happened. Due to the driver recovering from resets/crashes, it may
+ * not correspond to the state of the system when the file is read by
+ * userspace.
+ *
+ * **Coredump release**:
+ * After a coredump is generated, it stays in kernel memory until released by
+ * userspace by writing anything to it, or after an internal timer expires. The
+ * exact timeout may vary and should not be relied upon. Example to release
+ * a coredump:
+ *
+ * .. code-block:: shell
+ *
+ * $ > /sys/class/drm/card0/device/devcoredump/data
+ *
+ * **First failure only**:
+ * In general, the first hang is the most critical one since the following
+ * hangs can be a consequence of the initial hang. For this reason a snapshot
+ * is taken only for the first failure. Until the devcoredump is released by
+ * userspace or kernel, all subsequent hangs do not overwrite the snapshot nor
+ * create new ones. Devcoredump has a delayed work queue that will eventually
+ * delete the file node and free all the dump information.
*/
#ifdef CONFIG_DEV_COREDUMP
@@ -91,6 +100,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
p = drm_coredump_printer(&iter);
drm_puts(&p, "**** Xe Device Coredump ****\n");
+ drm_printf(&p, "Reason: %s\n", ss->reason);
drm_puts(&p, "kernel: " UTS_RELEASE "\n");
drm_puts(&p, "module: " KBUILD_MODNAME "\n");
@@ -98,7 +108,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
- drm_printf(&p, "Process: %s\n", ss->process_name);
+ drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
xe_device_snapshot_print(xe, &p);
drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
@@ -109,11 +119,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- /*
- * Don't add a new section header here because the mesa debug decoder
- * tool expects the context information to be in the 'GuC CT' section.
- */
- /* drm_puts(&p, "\n**** Contexts ****\n"); */
+ drm_puts(&p, "\n**** Contexts ****\n");
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -134,6 +140,9 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
int i;
+ kfree(ss->reason);
+ ss->reason = NULL;
+
xe_guc_log_snapshot_free(ss->guc.log);
ss->guc.log = NULL;
@@ -174,16 +183,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
- if (!ss->read.buffer)
+ mutex_lock(&coredump->lock);
+
+ if (!ss->read.buffer) {
+ mutex_unlock(&coredump->lock);
return -ENODEV;
+ }
- if (offset >= ss->read.size)
+ if (offset >= ss->read.size) {
+ mutex_unlock(&coredump->lock);
return 0;
+ }
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ mutex_unlock(&coredump->lock);
+
return byte_copied;
}
@@ -197,15 +214,18 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
+ mutex_lock(&coredump->lock);
+
xe_devcoredump_snapshot_free(&coredump->snapshot);
kvfree(coredump->snapshot.read.buffer);
/* To prevent stale data on next snapshot, clear everything */
memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
coredump->captured = false;
- coredump->job = NULL;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
+
+ mutex_unlock(&coredump->lock);
}
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
@@ -248,10 +268,10 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ struct xe_exec_queue *q,
struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
- struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
@@ -264,12 +284,14 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef)
+ if (q->vm && q->vm->xef) {
process_name = q->vm->xef->process_name;
+ ss->pid = q->vm->xef->pid;
+ }
+
strscpy(ss->process_name, process_name);
ss->gt = q->gt;
- coredump->job = job;
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
cookie = dma_fence_begin_signalling();
@@ -288,10 +310,11 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
ss->ge = xe_guc_exec_queue_snapshot_capture(q);
- ss->job = xe_sched_job_snapshot_capture(job);
+ if (job)
+ ss->job = xe_sched_job_snapshot_capture(job);
ss->vm = xe_vm_snapshot_capture(q->vm);
- xe_engine_snapshot_capture_for_job(job);
+ xe_engine_snapshot_capture_for_queue(q);
queue_work(system_unbound_wq, &ss->work);
@@ -301,28 +324,42 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
* @job: The faulty xe_sched_job, where the issue was detected.
+ * @fmt: Printf format + args to describe the reason for the core dump
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_sched_job *job)
+__printf(3, 4)
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
{
- struct xe_device *xe = gt_to_xe(job->q->gt);
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
+ va_list varg;
+
+ mutex_lock(&coredump->lock);
if (coredump->captured) {
drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
+ mutex_unlock(&coredump->lock);
return;
}
coredump->captured = true;
- devcoredump_snapshot(coredump, job);
+
+ va_start(varg, fmt);
+ coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
+ va_end(varg);
+
+ devcoredump_snapshot(coredump, q, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
+
+ mutex_unlock(&coredump->lock);
}
static void xe_driver_devcoredump_fini(void *arg)
@@ -334,6 +371,18 @@ static void xe_driver_devcoredump_fini(void *arg)
int xe_devcoredump_init(struct xe_device *xe)
{
+ int err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->devcoredump.lock);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xe->devcoredump.lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
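The CONFIG_LOCKDEP block teaches lockdep, once at init, that devcoredump.lock can nest inside fs_reclaim; any later GFP_KERNEL allocation made while holding the lock then trips a lockdep splat immediately instead of only on a rare crash path. The same trick in isolation (a sketch; demo_lock is hypothetical):

#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

static DEFINE_MUTEX(demo_lock);

/* Record the fs_reclaim -> demo_lock ordering up front, without doing
 * any reclaim: allocating under demo_lock later closes a lock cycle
 * that lockdep reports right away. */
static void prime_reclaim_ordering(void)
{
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&demo_lock);
		fs_reclaim_release(GFP_KERNEL);
	}
}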
@@ -342,42 +391,34 @@ int xe_devcoredump_init(struct xe_device *xe)
/**
* xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
*
- * The output is split to multiple lines because some print targets, e.g. dmesg
- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
- * piece-meal fashion is not possible, each separate call to drm_puts() has a
- * line-feed automatically added! Therefore, the entire output line must be
- * constructed in a local buffer first, then printed in one atomic output call.
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
*
* There is also a scheduler yield call to prevent the 'task has been stuck for
* 120s' kernel hang check feature from firing when printing to a slow target
* such as dmesg over a serial port.
*
- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
- *
* @p: the printer object to output to
* @prefix: optional prefix to add to output string
+ * @suffix: optional suffix to add at the end. 0 disables it and is
+ * not added to the output, which is useful when using multiple calls
+ * to dump data to @p
* @blob: the Binary Large OBject to dump out
* @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
* @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
*/
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size)
{
const u32 *blob32 = (const u32 *)blob;
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
- /*
- * Splitting blobs across multiple lines is not compatible with the mesa
- * debug decoder tool. Note that even dropping the explicit '\n' below
- * doesn't help because the GuC log is so big some underlying implementation
- * still splits the lines at 512K characters. So just bail completely for
- * the moment.
- */
- return;
-
#define DMESG_MAX_LINE_LEN 800
-#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
@@ -409,7 +450,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
line_pos += strlen(line_buff + line_pos);
if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
drm_puts(p, line_buff);
@@ -421,10 +461,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
}
}
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
if (line_pos) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
-
drm_puts(p, line_buff);
}
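With the hard-coded '\n' gone, the caller decides how each call is terminated via @suffix; passing 0 lets several calls contribute to one logical line. A hedged usage sketch against the new signature (p, hdr and payload are hypothetical caller state):

/* Two buffers encoded as one continuous ASCII85 stream: no suffix on
 * the first call, a terminating newline on the second. */
xe_print_blob_ascii85(p, "[log].data: ", 0, hdr, 0, hdr_len);
xe_print_blob_ascii85(p, "", '\n', payload, 0, payload_len);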
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index a4eebc285fc8..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -10,13 +10,16 @@
struct drm_printer;
struct xe_device;
+struct xe_exec_queue;
struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_sched_job *job);
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
int xe_devcoredump_init(struct xe_device *xe);
#else
-static inline void xe_devcoredump(struct xe_sched_job *job)
+static inline void xe_devcoredump(struct xe_exec_queue *q,
+ struct xe_sched_job *job,
+ const char *fmt, ...)
{
}
@@ -26,7 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size);
#endif
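Callers now pass the queue, an optional job and a printf-style reason that ends up in the dump's new "Reason:" line. A hypothetical call site matching the (q, job, fmt, ...) signature (the seqno argument is illustrative):

/* Capture a coredump for a timed-out queue, recording why. */
xe_devcoredump(q, job, "Timedout job: seqno=%u", seqno);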
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 3703ddea1252..1a1d16a96b2d 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -28,6 +28,10 @@ struct xe_devcoredump_snapshot {
ktime_t boot_time;
/** @process_name: Name of process that triggered this gpu hang */
char process_name[TASK_COMM_LEN];
+ /** @pid: Process id of process that triggered this gpu hang */
+ pid_t pid;
+ /** @reason: The reason the coredump was triggered */
+ char *reason;
/** @gt: Affected GT, used by forcewake for delayed capture */
struct xe_gt *gt;
@@ -76,12 +80,12 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @captured: The snapshot of the first hang has already been taken. */
+ /** @lock: protects access to entire structure */
+ struct mutex lock;
+ /** @captured: The snapshot of the first hang has already been taken */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
struct xe_devcoredump_snapshot snapshot;
- /** @job: Point to the faulting job */
- struct xe_sched_job *job;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 06d6db8b50f9..4e1839b483a0 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -44,6 +44,7 @@
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
@@ -55,6 +56,7 @@
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vram.h"
+#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -269,7 +271,6 @@ static struct drm_driver driver = {
.fops = &xe_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -324,7 +325,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
- spin_lock_init(&xe->irq.lock);
+ err = xe_irq_init(xe);
+ if (err)
+ goto err;
init_waitqueue_head(&xe->ufence_wq);
@@ -366,6 +369,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
goto err;
}
+ err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
+ if (err)
+ goto err;
+
err = xe_display_create(xe);
if (WARN_ON(err))
goto err;
@@ -514,7 +521,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
start = jiffies;
- timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+ timeout = start + secs_to_jiffies(60); /* 60 sec! */
do {
if (signal_pending(current))
@@ -599,7 +606,7 @@ static int probe_has_flat_ccs(struct xe_device *xe)
u32 reg;
/* Always enabled/disabled, no runtime check to do */
- if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+ if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
return 0;
gt = xe_root_mmio_gt(xe);
@@ -760,6 +767,8 @@ int xe_device_probe(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
+ xe_vsec_init(xe);
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_fini_display:
@@ -990,7 +999,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a mudule
+ * This is a final state that can only be cleared with a module
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index f1fbfe916867..fc3c2af3fb7f 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -157,8 +157,7 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
static inline bool xe_device_has_msix(struct xe_device *xe)
{
- /* TODO: change this when MSI-X support is fully integrated */
- return false;
+ return xe->irq.msix.nvec > 0;
}
static inline bool xe_device_has_memirq(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index b9ea455d6f59..8a7b15972413 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -16,7 +16,7 @@
#include "xe_heci_gsc.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -42,8 +42,6 @@ struct xe_pat_ops;
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
-#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
-#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -296,14 +294,24 @@ struct xe_device {
/** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @info.is_dgfx: is discrete device */
- u8 is_dgfx:1;
- /** @info.has_asid: Has address space ID */
- u8 has_asid:1;
+ /*
+ * Keep all flags below alphabetically sorted
+ */
+
/** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
+ /** @info.has_asid: Has address space ID */
+ u8 has_asid:1;
+ /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
+ u8 has_atomic_enable_pte_bit:1;
+ /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
+ u8 has_device_atomics_on_smem:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
+ /** @info.has_heci_gscfi: device has heci gscfi */
+ u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mmio_ext: Device has extra MMIO address range */
@@ -314,6 +322,8 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.is_dgfx: is discrete device */
+ u8 is_dgfx:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -323,20 +333,12 @@ struct xe_device {
* state the firmware or bootloader left it in.
*/
u8 probe_display:1;
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
+ u8 skip_guc_pc:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @info.has_heci_gscfi: device has heci gscfi */
- u8 has_heci_gscfi:1;
- /** @info.has_heci_cscfi: device has heci cscfi */
- u8 has_heci_cscfi:1;
- /** @info.skip_guc_pc: Skip GuC based PM feature init */
- u8 skip_guc_pc:1;
- /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
- u8 has_atomic_enable_pte_bit:1;
- /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
- u8 has_device_atomics_on_smem:1;
} info;
/** @irq: device interrupt state */
@@ -345,7 +347,15 @@ struct xe_device {
spinlock_t lock;
/** @irq.enabled: interrupts enabled on this device */
- bool enabled;
+ atomic_t enabled;
+
+ /** @irq.msix: irq info for platforms that support MSI-X */
+ struct {
+ /** @irq.msix.nvec: number of MSI-X interrupts */
+ u16 nvec;
+ /** @irq.msix.indexes: used to allocate MSI-X indexes */
+ struct xarray indexes;
+ } msix;
} irq;
/** @ttm: ttm device */
@@ -374,6 +384,8 @@ struct xe_device {
/** @sriov.pf: PF specific data */
struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
@@ -481,6 +493,12 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pmt: Support the PMT driver callback interface */
+ struct {
+ /** @pmt.lock: protect access for telemetry data */
+ struct mutex lock;
+ } pmt;
+
/**
* @pm_callback_task: Track the active task that is running in either
* the runtime_suspend or runtime_resume callbacks.
@@ -588,7 +606,7 @@ struct xe_file {
/** @vm.xe: xarray to store VMs */
struct xarray xa;
/**
- * @vm.lock: Protects VM lookup + reference and removal a from
+ * @vm.lock: Protects VM lookup + reference and removal from
* file xarray. Not intended to be an outer lock which does
* things while being held.
*/
@@ -601,10 +619,15 @@ struct xe_file {
struct xarray xa;
/**
* @exec_queue.lock: Protects exec queue lookup + reference and
- * removal a frommfile xarray. Not an intended to be an outer
- * lock which does thing while being held.
+ * removal from file xarray. Not intended to be an outer lock
+ * which does things while being held.
*/
struct mutex lock;
+ /**
+	 * @exec_queue.pending_removal: number of items pending removal, used
+	 * to synchronize GPU state updates with ongoing queries.
+ */
+ atomic_t pending_removal;
} exec_queue;
/** @run_ticks: hw engine class run time in ticks for this drm client */
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index c5b95470fa32..f67803e15a0e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 22f0f1a6dfd5..2d4874d2b922 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
XE_WARN_ON(bo->client);
XE_WARN_ON(!list_empty(&bo->client_link));
- spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
+ spin_lock(&client->bos_lock);
list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
@@ -261,6 +261,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
if (man) {
drm_print_memory_stats(p,
&stats[mem_type],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
@@ -269,6 +270,49 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
}
}
+static struct xe_hw_engine *any_engine(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned long gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
+
+ if (hwe)
+ return hwe;
+ }
+
+ return NULL;
+}
+
+static bool force_wake_get_any_engine(struct xe_device *xe,
+ struct xe_hw_engine **phwe,
+ unsigned int *pfw_ref)
+{
+ enum xe_force_wake_domains domain;
+ unsigned int fw_ref;
+ struct xe_hw_engine *hwe;
+ struct xe_force_wake *fw;
+
+ hwe = any_engine(xe);
+ if (!hwe)
+ return false;
+
+ domain = xe_hw_engine_to_fw_domain(hwe);
+ fw = gt_to_fw(hwe->gt);
+
+ fw_ref = xe_force_wake_get(fw, domain);
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ xe_force_wake_put(fw, fw_ref);
+ return false;
+ }
+
+ *phwe = hwe;
+ *pfw_ref = fw_ref;
+
+ return true;
+}
+
static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
@@ -280,7 +324,18 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
u64 gpu_timestamp;
unsigned int fw_ref;
+ /*
+	 * Wait for any exec queues that are going away: their cycles will get
+	 * updated on context switch out, so wait for that to happen.
+ */
+ wait_var_event(&xef->exec_queue.pending_removal,
+ !atomic_read(&xef->exec_queue.pending_removal));
+
xe_pm_runtime_get(xe);
+ if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
+ xe_pm_runtime_put(xe);
+ return;
+ }
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
@@ -295,33 +350,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
}
mutex_unlock(&xef->exec_queue.lock);
- /* Get the total GPU cycles */
- for_each_gt(gt, xe, gt_id) {
- enum xe_force_wake_domains fw;
-
- hwe = xe_gt_any_hw_engine(gt);
- if (!hwe)
- continue;
-
- fw = xe_hw_engine_to_fw_domain(hwe);
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), fw);
- if (!xe_force_wake_ref_has_domain(fw_ref, fw)) {
- hwe = NULL;
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
-
- gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
+ gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+ xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
xe_pm_runtime_put(xe);
- if (unlikely(!hwe))
- return;
-
for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
const char *class_name;
@@ -352,7 +385,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
* @p: The drm_printer ptr
* @file: The drm_file ptr
*
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is a callback for the drm fdinfo interface. Register this callback
* in drm driver ops for show_fdinfo.
*
* Return: void
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d45b71426cc8..d61650d4aa0b 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -10,7 +10,6 @@
#define DRIVER_NAME "xe"
#define DRIVER_DESC "Intel Xe Graphics"
-#define DRIVER_DATE "20201103"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 31cca938956f..df8ce550deb4 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -33,7 +33,7 @@
*
* In XE we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
- * seperate operations, and using the DRM scheduler to flow control the ring.
+ * separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
*
* We can get away from a BO list by forcing the user to use in / out fences on
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 268cd3123be9..7e1abbbfba12 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -17,6 +17,7 @@
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
@@ -69,6 +70,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->gt = gt;
q->class = hwe->class;
q->width = width;
+ q->msix_vec = XE_IRQ_DEFAULT_MSIX;
q->logical_mask = logical_mask;
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
@@ -118,7 +120,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
goto err_unlock;
@@ -241,6 +243,7 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
return q;
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
void xe_exec_queue_destroy(struct kref *ref)
{
@@ -263,8 +266,11 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
+ * and wakeup any waiters.
*/
xe_exec_queue_update_run_ticks(q);
+ if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
+ wake_up_var(&q->xef->exec_queue.pending_removal);
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
@@ -764,25 +770,20 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
- struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
int idx;
/*
- * Jobs that are run during driver load may use an exec_queue, but are
- * not associated with a user xe file, so avoid accumulating busyness
- * for kernel specific work.
+	 * Jobs that are executed by the kernel don't have a corresponding
+	 * xe_file and thus are not accounted.
*/
- if (!q->vm || !q->vm->xef)
+ if (!q->xef)
return;
/* Synchronize with unbind while holding the xe file open */
if (!drm_dev_enter(&xe->drm, &idx))
return;
-
- xef = q->vm->xef;
-
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -793,7 +794,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+ q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
drm_dev_exit(idx);
}
@@ -835,7 +836,10 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->exec_queue.lock);
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+ if (q)
+ atomic_inc(&xef->exec_queue.pending_removal);
mutex_unlock(&xef->exec_queue.lock);
+
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
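The pending_removal counter pairs atomic_dec_and_test() on the exec-queue release side with wait_var_event() on the fdinfo query side, so run-tick sampling never races a teardown in flight. The bare pattern, with generic names in place of the xe_file fields:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t pending;

/* Teardown side: drop one in-flight removal, wake waiters on the last. */
static void removal_done(void)
{
	if (atomic_dec_and_test(&pending))
		wake_up_var(&pending);
}

/* Query side: don't sample shared state while removals are in flight. */
static void wait_for_removals(void)
{
	wait_var_event(&pending, !atomic_read(&pending));
}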
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 1158b6062a6c..5af5419cec7a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -41,7 +41,7 @@ struct xe_exec_queue {
/** @xef: Back pointer to xe file if this is user created exec queue */
struct xe_file *xef;
- /** @gt: graphics tile this exec queue can submit to */
+ /** @gt: GT structure this exec queue can submit to */
struct xe_gt *gt;
/**
* @hwe: A hardware of the same class. May (physical engine) or may not
@@ -63,6 +63,8 @@ struct xe_exec_queue {
char name[MAX_FENCE_NAME_LEN];
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
+ /** @msix_vec: MSI-X vector (for platforms that support it) */
+ u16 msix_vec;
/** @fence_irq: fence IRQ used to signal job completion */
struct xe_hw_fence_irq *fence_irq;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index a8c416a48812..5ef96deaa881 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -17,6 +17,7 @@
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
@@ -47,6 +48,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -80,8 +82,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base),
xe_bo_ggtt_addr(hwe->hwsp));
xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
lower_32_bits(lrc_desc));
@@ -265,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
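RING_MODE is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect, which is why both enables can be accumulated into a single ring_mode value and written once. A sketch of the helper's expansion (assuming the usual _MASKED_BIT_ENABLE definition; the bit positions are illustrative):

/* Masked write: bit b set in the value half, and again in the mask
 * half so the hardware applies it. Two such enables OR cleanly. */
#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))

unsigned int ring_mode = MASKED_BIT_ENABLE(1u << 3) |
			 MASKED_BIT_ENABLE(1u << 13);	/* hypothetical bits */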
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 558fac8bb6fb..5fcb2b4c2c13 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -362,7 +362,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
/*
* So we don't need to worry about 64K GGTT layout when dealing with
- * scratch entires, rather keep the scratch page in system memory on
+ * scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
flags = XE_BO_FLAG_PINNED;
@@ -598,10 +598,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
u64 start;
u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node))
+ if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
return;
- start = bo->ggtt_node->base.start;
+ start = bo->ggtt_node[ggtt->tile->id]->base.start;
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
@@ -612,15 +612,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
- int err;
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
+ u8 tile_id = ggtt->tile->id;
+ int err;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node)) {
+ if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
return 0;
}
@@ -630,19 +631,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
- bo->ggtt_node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(bo->ggtt_node)) {
- err = PTR_ERR(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node[tile_id])) {
+ err = PTR_ERR(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
goto out;
}
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
- alignment, 0, start, end, 0);
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
+ bo->size, alignment, 0, start, end, 0);
if (err) {
- xe_ggtt_node_fini(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
} else {
xe_ggtt_map_bo(ggtt, bo);
}
@@ -691,13 +692,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
*/
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ u8 tile_id = ggtt->tile->id;
+
+ if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
- xe_ggtt_node_remove(bo->ggtt_node,
+ xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 64b2ae6839db..c250ea773491 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -71,8 +71,14 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- return list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ struct xe_sched_job *job;
+
+ spin_lock(&sched->base.job_list_lock);
+ job = list_first_entry_or_null(&sched->base.pending_list,
+ struct xe_sched_job, drm.list);
+ spin_unlock(&sched->base.job_list_lock);
+
+ return job;
}
static inline int
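Peeking at pending_list without the scheduler's job_list_lock could observe a half-updated list head; the rework takes the lock just for the lookup. The same locked-peek shape with generic types (the caller must still guarantee the returned item outlives the unlock):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
};

/* Return the head of a concurrently modified list, or NULL if empty. */
static struct item *first_item(spinlock_t *lock, struct list_head *head)
{
	struct item *it;

	spin_lock(lock);
	it = list_first_entry_or_null(head, struct item, link);
	spin_unlock(lock);

	return it;
}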
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index fc64b45d324b..24cc6a4f9a96 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -139,17 +139,29 @@ static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
return 0;
}
-static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+static int validate_proxy_header(struct xe_gt *gt,
+ struct xe_gsc_proxy_header *header,
u32 source, u32 dest, u32 max_size)
{
u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+ int ret = 0;
- if (header->destination != dest || header->source != source)
- return -ENOEXEC;
+ if (header->destination != dest || header->source != source) {
+ ret = -ENOEXEC;
+ goto out;
+ }
- if (length + PROXY_HDR_SIZE > max_size)
- return -E2BIG;
+ if (length + PROXY_HDR_SIZE > max_size) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* We only care about the status if this is a message for the driver */
+ if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
+ ret = -EIO;
+ goto out;
+ }
switch (type) {
case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
@@ -157,12 +169,20 @@ static int validate_proxy_header(struct xe_gsc_proxy_header *header,
break;
fallthrough;
case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
- return -EIO;
+ ret = -EIO;
+ break;
default:
break;
}
- return 0;
+out:
+ if (ret)
+ xe_gt_err(gt,
+ "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
+ header->source, source, header->destination, dest,
+ type, length, header->status);
+
+ return ret;
}
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
@@ -228,12 +248,17 @@ static int proxy_query(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
reply_offset, PROXY_HDR_SIZE);
- /* stop if this was the last message */
- if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ /* Check the status and stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
+ ret = validate_proxy_header(gt, to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_KMD,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
break;
+ }
/* make sure the GSC-to-CSME proxy header is sane */
- ret = validate_proxy_header(to_csme_hdr,
+ ret = validate_proxy_header(gt, to_csme_hdr,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -262,7 +287,7 @@ static int proxy_query(struct xe_gsc *gsc)
}
/* make sure the CSME-to-GSC proxy header is sane */
- ret = validate_proxy_header(gsc->proxy.from_csme,
+ ret = validate_proxy_header(gt, gsc->proxy.from_csme,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_BUFFER_SIZE - reply_offset);
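Converting the early returns in validate_proxy_header() to goto out lets a single xe_gt_err() report the header fields for whichever check failed. The shape of that pattern, reduced to a generic validator (names and message are illustrative):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

static int validate(u32 len, u32 max, u32 status)
{
	int ret = 0;

	if (len > max) {
		ret = -E2BIG;
		goto out;
	}

	if (status != 0) {
		ret = -EIO;
		goto out;
	}

out:
	/* One funnel point: log every failure with full context. */
	if (ret)
		pr_err("validation failed: l=0x%x st=0x%x (%d)\n",
		       len, status, ret);
	return ret;
}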
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d6744be01a68..9f4f27d1ef4a 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -380,13 +380,15 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_gt(gt);
xe_wa_process_oob(gt);
- xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
+ err = xe_gt_tlb_invalidation_init_early(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -470,6 +472,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
xe_gt_mcr_set_implicit_defaults(gt);
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
err = xe_gt_clock_init(gt);
@@ -528,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
@@ -588,10 +594,6 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
- err = xe_gt_tlb_invalidation_init(gt);
- if (err)
- return err;
-
err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -748,10 +750,8 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- for_each_hw_engine(hwe, gt, id) {
+ for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
- }
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 82b9b7f82fca..e504cc33ade4 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -37,7 +37,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
* xe_gt_record_user_engines - save data related to engines available to
- * usersapce
+ * userspace
* @gt: GT structure
*
* Walk the available HW engines from gt->info.engine_mask and calculate data
@@ -57,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it had already completed.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index b6adfb9f2030..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
mutex_unlock(&xe->drm.filelist_mutex);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 3e8c351a0eab..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 6bd39b2c5003..604bdc7c8173 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -115,6 +115,20 @@ static ssize_t rpe_freq_show(struct device *dev,
}
static DEVICE_ATTR_RO(rpe_freq);
+static ssize_t rpa_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpa_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+static DEVICE_ATTR_RO(rpa_freq);
+
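
Once the attribute is added to freq_attrs below, it surfaces next to the existing rp0/rpe/rpn files; on a typical single-card system that would be a path like /sys/class/drm/card0/device/tile0/gt0/freq0/rpa_freq (the exact path is an assumption based on where the other GT frequency attributes are exposed), reporting the achievable frequency in MHz.
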
static ssize_t rpn_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -202,6 +216,7 @@ static const struct attribute *freq_attrs[] = {
&dev_attr_act_freq.attr,
&dev_attr_cur_freq.attr,
&dev_attr_rp0_freq.attr,
+ &dev_attr_rpa_freq.attr,
&dev_attr_rpe_freq.attr,
&dev_attr_rpn_freq.attr,
&dev_attr_min_freq.attr,
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fd80afeef56a..ffd3ba7f6656 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -122,10 +122,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
- for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
- if ((gt->info.engine_mask & BIT(i)))
- gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
- VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ if (xe->info.platform != XE_DG1) {
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if ((gt->info.engine_mask & BIT(i)))
+ gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ }
}
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 5013d674e17d..a1676b787fdc 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -371,7 +371,7 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
* @group: steering group ID
* @instance: steering instance ID
*
- * Return: the coverted DSS id.
+ * Return: the converted DSS id.
*/
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance)
{
@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index c0cd36021c24..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 79c426dc2505..2606cd396df5 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 5dc71394372d..11da0228cea7 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -60,6 +60,21 @@ static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format
xe_gt_info(gt, "%pV", vaf);
}
+static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_gt_dbg() callsite annotations are useless here;
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+}
+
/**
* xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
* @gt: the &xe_gt pointer to use in xe_gt_err()
@@ -90,4 +105,20 @@ static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
return p;
}
+/**
+ * xe_gt_dbg_printer - Construct a &drm_printer that outputs like xe_gt_dbg()
+ * @gt: the &xe_gt pointer to use in xe_gt_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_dbg_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_dbg,
+ .arg = gt,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
#endif
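
A hypothetical usage sketch for the new helper (not part of the patch): code that already accepts a generic &drm_printer can be handed GT-prefixed debug output without knowing about xe_gt_dbg():

static void example_dump(struct xe_gt *gt)
{
	struct drm_printer p = xe_gt_dbg_printer(gt);

	/* emitted through drm_dbg_printer() as "GT0: 3 entries" on gt 0 */
	drm_printf(&p, "%d entries\n", 3);
}
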
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index e71fc3d2bda2..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
pf_enable_ggtt_guest_update(gt);
xe_gt_sriov_pf_service_update(gt);
- xe_gt_sriov_pf_migration_init(gt);
}
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 96fab779a906..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,6 +10,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index ca49860168f6..878e96281c03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -207,6 +207,11 @@ static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u
return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
+static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
+}
+
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
@@ -1540,8 +1545,6 @@ static u64 pf_query_max_lmem(struct xe_gt *gt)
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
-#else
-#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
#endif
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
@@ -1767,6 +1770,77 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
return preempt_timeout;
}
+static const char *sched_priority_unit(u32 priority)
+{
+ return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
+ priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
+ priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
+ "(?)";
+}
+
+static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
+ if (unlikely(err))
+ return err;
+
+ config->sched_priority = priority;
+ return 0;
+}
+
+static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->sched_priority;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @priority: requested scheduling priority
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_priority(gt, vfid, priority);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, priority,
+ xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
+ "scheduling priority", sched_priority_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) scheduling priority.
+ */
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 priority;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ priority = pf_get_sched_priority(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return priority;
+}
+
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -2087,7 +2161,7 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
*
* This function can only be called on PF.
*
- * Return: mininum size of the buffer or the number of bytes saved,
+ * Return: minimum size of the buffer or the number of bytes saved,
* or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index 0c55aa40a1a7..f894e9d4abba 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -44,6 +44,9 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout);
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority);
+
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index);
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 2d3b73d78f14..686c7b3b6d7a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -33,6 +33,8 @@ struct xe_gt_sriov_config {
u32 exec_quantum;
/** @preempt_timeout: preemption timeout in microseconds. */
u32 preempt_timeout;
+ /** @sched_priority: scheduling priority. */
+ u32 sched_priority;
/** @thresholds: GuC thresholds for adverse events notifications. */
u32 thresholds[XE_GUC_KLV_NUM_THRESHOLDS];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 05df4ab3514b..b2521dd6ec42 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -164,6 +164,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_spare
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
* │   ├── vf1
* │   │   ├── ggtt_quota
* │   │   ├── lmem_quota
@@ -171,6 +172,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_quota
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
*/
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -209,6 +211,7 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
* /sys/kernel/debug/dri/0/
@@ -295,6 +298,8 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
&exec_quantum_fops);
debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
&preempt_timeout_fops);
+ debugfs_create_file_unsafe("sched_priority", 0644, parent, parent,
+ &sched_priority_fops);
/* register all threshold attributes */
#define register_threshold_attribute(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
index 0bf12d89ceb2..6af219d93c3b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -18,7 +18,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index fae5be5a2a11..c00fb354705f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -135,14 +135,33 @@ static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 valu
return 0;
}
+static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
+{
+ unsigned int total_vfs = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n < total_vfs; n++)
+ gt->sriov.pf.vfs[n].config.sched_priority = priority;
+}
+
static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
+ int err;
+
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
- return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
- &gt->sriov.pf.policy.guc.sched_if_idle,
- enable);
+ err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+
+ if (!err)
+ pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
+ GUC_SCHED_PRIORITY_LOW);
+ return err;
}
static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index d3baba50f085..cca5d5732802 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -27,6 +27,7 @@
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -223,6 +224,44 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
+{
+ u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
+ };
+ int ret;
+
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+/**
+ * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ err = guc_action_vf_notify_resfix_done(guc);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
+ ERR_PTR(err));
+ else
+ xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");
+
+ return err;
+}
+
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
u32 *value, u32 value_len)
{
@@ -692,6 +731,30 @@ failed:
return err;
}
+/**
+ * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
+ * or just mark that a GuC is ready for it.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * This function shall be called only by VF.
+ */
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
+ /*
+ * We need to be certain that if all flags were set, at least one
+ * thread will notice that and schedule the recovery.
+ */
+ smp_mb__after_atomic();
+
+ xe_gt_sriov_info(gt, "ready for recovery after migration\n");
+ xe_sriov_vf_start_migration_recovery(xe);
+}
+
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e541ce57bec2..912d20814261 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -17,6 +17,8 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index c7364a5aef8f..7a6c1d808e41 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -12,7 +12,7 @@
/**
* xe_gt_stats_incr - Increments the specified stats counter
- * @gt: graphics tile
+ * @gt: GT structure
* @id: xe_gt_stats_id type id that needs to be incremented
* @incr: value to be incremented with
*
@@ -32,7 +32,7 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
/**
* xe_gt_stats_print_info - Print the GT stats
- * @gt: graphics tile
+ * @gt: GT structure
* @p: drm_printer where it will be printed out.
*
* This prints out all the available GT stats.
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 91d944f6c4e4..38325ef53617 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -6,15 +6,11 @@
#ifndef _XE_GT_STATS_H_
#define _XE_GT_STATS_H_
+#include "xe_gt_stats_types.h"
+
struct xe_gt;
struct drm_printer;
-enum xe_gt_stats_id {
- XE_GT_STATS_ID_TLB_INVAL,
- /* must be the last entry */
- __XE_GT_STATS_NUM_IDS,
-};
-
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
new file mode 100644
index 000000000000..2fc055e39f27
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_TYPES_H_
+#define _XE_GT_STATS_TYPES_H_
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_TLB_INVAL,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#endif
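
Moving the enum into its own header follows the driver's existing *_types.h convention; as the xe_gt_types.h hunk further below shows, headers that only need enum xe_gt_stats_id can now include this lightweight header instead of pulling in the full xe_gt_stats.h interface.
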
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 03b225364101..8db78d616b6f 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -8,6 +8,7 @@
#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
#include "xe_mmio.h"
@@ -53,6 +54,7 @@ static u32 read_status(struct xe_gt *gt)
{
u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
+ xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status);
return status;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 6146d1776bda..0a93831c0a02 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -106,15 +106,15 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
}
/**
- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
- * @gt: graphics tile
+ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
+ * @gt: GT structure
*
* Initialize GT TLB invalidation state, purely software initialization, should
* be called once during driver load.
*
* Return: 0 on success, negative error code on error.
*/
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
@@ -128,7 +128,7 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: graphics tile
+ * @gt: GT structure
*
* Signal any pending invalidation fences, should be called during a GT reset
*/
@@ -244,7 +244,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
*
@@ -261,14 +261,23 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
0, /* seqno, replaced in send_tlb_invalidation */
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
+ int ret;
+
+ ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
+ ARRAY_SIZE(action));
+ /*
+ * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
+ * should be nuked on a GT reset so this error can be ignored.
+ */
+ if (ret == -ECANCELED)
+ return 0;
- return send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
+ return ret;
}
/**
* xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: graphics tile
+ * @gt: GT structure
*
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
* synchronous.
@@ -317,7 +326,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
*
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
* @start: start address
@@ -403,7 +412,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
* @vma: VMA to invalidate
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 00b1c6c01e8d..672acfcdf0d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -14,7 +14,8 @@ struct xe_gt;
struct xe_guc;
struct xe_vma;
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index a287b98ee70b..6e66bf0e8b3f 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -11,10 +11,10 @@
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
-#include "xe_gt_stats.h"
+#include "xe_gt_stats_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 7f704346a8f4..408365dfe4ee 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -44,7 +44,15 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
- u32 addr = xe_bo_ggtt_addr(bo);
+ u32 addr;
+
+ /*
+ * For most BOs, the address on the allocating tile is fine. However for
+ * some, e.g. G2G CTB, the address on a specific tile is required as it
+ * might be different for each tile. So, just always ask for the address
+ * on the target GuC.
+ */
+ addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
@@ -139,6 +147,34 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+static bool needs_wa_dual_queue(struct xe_gt *gt)
+{
+ /*
+ * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
+ * on RCS and CCSes with different address spaces, which on DG2 is
+ * required as a WA for an HW bug.
+ */
+ if (XE_WA(gt, 22011391025))
+ return true;
+
+ /*
+ * On newer platforms, the HW has been updated to not allow parallel
+ * execution of different address spaces, so the RCS/CCS will stall the
+ * context switch if one of the other RCS/CCSes is busy with a different
+ * address space. While functionally correct, having a submission
+ * stalled on the HW limits the GuC ability to shuffle things around and
+ * can cause complications if the non-stalled submission runs for a long
+ * time, because the GuC doesn't know that the stalled submission isn't
+ * actually running and might declare it as hung. Therefore, we enable
+ * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines
+ * to move management back to the GuC.
+ */
+ if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
+ return true;
+
+ return false;
+}
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -151,7 +187,7 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025))
+ if (needs_wa_dual_queue(gt))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -244,6 +280,293 @@ static void guc_write_params(struct xe_guc *guc)
xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}
+static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
+ u32 desc_addr, u32 buff_addr, u32 size)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_REGISTER_G2G,
+ FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
+ FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
+ desc_addr,
+ buff_addr,
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+ xe_assert(xe, !(size % SZ_4K));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_DEREGISTER_G2G,
+ FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+#define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
+
+#define G2G_BUFFER_SIZE (SZ_4K)
+#define G2G_DESC_SIZE (64)
+#define G2G_DESC_AREA_SIZE (SZ_4K)
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Where each entry is Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+	/* No need to send to oneself */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
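
To sanity-check the table in the comment above, here is a small, hypothetical host-side sketch (not part of the patch) that reproduces the Rx/Tx channel ids for two tiles with two devices each, i.e. max_inst = 4 with tile/dev already folded into a single index:

#include <stdio.h>

/* Same pairing logic as g2g_slot() above, trimmed for a standalone test. */
static int slot(int near, int far, int type, int max_inst)
{
	int x, y, dir, idx = 0, i;

	if (near == far)
		return -1;		/* the --/-- diagonal */
	if (far > near) {
		x = far; y = near; dir = type;		/* top right half */
	} else {
		x = near; y = far; dir = 1 - type;	/* bottom left half */
	}
	for (i = y; i > 0; i--)
		idx += max_inst - i;	/* rows prior to the target */
	idx += x - 1 - y;		/* this row up to the target */
	return idx * 2 + dir;		/* slots are in Rx/Tx pairs */
}

int main(void)
{
	for (int near = 0; near < 4; near++) {
		for (int far = 0; far < 4; far++)
			printf("%02d/%02d  ", slot(near, far, 0, 4),
			       slot(near, far, 1, 4));
		printf("\n");	/* rows print in the Near 0.0 .. 1.1 order above */
	}
	return 0;
}

Running it reproduces the 00/01 .. 11/10 entries of the table, with -1/-1 on the diagonal.
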
+
+static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
+{
+ struct xe_gt *near_gt = guc_to_gt(near_guc);
+ struct xe_device *xe = gt_to_xe(near_gt);
+ struct xe_bo *g2g_bo;
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ u32 max = xe->info.gt_count;
+ u32 base, desc, buf;
+ int slot;
+
+ /* G2G is not allowed between different cards */
+ xe_assert(xe, xe == gt_to_xe(far_gt));
+
+ g2g_bo = near_guc->g2g.bo;
+ xe_assert(xe, g2g_bo);
+
+ slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, slot >= 0);
+
+ base = guc_bo_ggtt_addr(near_guc, g2g_bo);
+ desc = base + slot * G2G_DESC_SIZE;
+ buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+
+ return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
+{
+ guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
+}
+
+static u32 guc_g2g_size(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
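
As a worked example of the sizing (assuming XE_G2G_TYPE_LIMIT is 2, i.e. one IN and one OUT channel per pair, matching the Rx/Tx pairing in the table above): with gt_count = 4, num_channels = 4 * 3 / 2 = 6, so the allocation is 6 * 2 * SZ_4K + SZ_4K = 52K, while the 6 * 2 = 12 descriptors need only 12 * 64 = 768 bytes of the 4K descriptor area, satisfying the assert.
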
+
+static bool xe_guc_g2g_wanted(struct xe_device *xe)
+{
+ /* Can't do GuC to GuC communication if there is only one GuC */
+ if (xe->info.gt_count <= 1)
+ return false;
+
+ /* No current user */
+ return false;
+}
+
+static int guc_g2g_alloc(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ if (guc->g2g.bo)
+ return 0;
+
+ if (gt->info.id != 0) {
+ struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
+ struct xe_guc *root_guc = &root_gt->uc.guc;
+ struct xe_bo *bo;
+
+ bo = xe_bo_get(root_guc->g2g.bo);
+ if (!bo)
+ return -ENODEV;
+
+ guc->g2g.bo = bo;
+ guc->g2g.owned = false;
+ return 0;
+ }
+
+ g2g_size = guc_g2g_size(guc);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+ guc->g2g.bo = bo;
+ guc->g2g.owned = true;
+
+ return 0;
+}
+
+static void guc_g2g_fini(struct xe_guc *guc)
+{
+ if (!guc->g2g.bo)
+ return;
+
+ /* Unpinning the owned object is handled by generic shutdown */
+ if (!guc->g2g.owned)
+ xe_bo_put(guc->g2g.bo);
+
+ guc->g2g.bo = NULL;
+}
+
+static int guc_g2g_start(struct xe_guc *guc)
+{
+ struct xe_gt *far_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i, j;
+ int t, err;
+ bool have_dev;
+
+ if (!guc->g2g.bo) {
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ if (ret)
+ return ret;
+ }
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(far_gt, xe, i) {
+ u32 far_tile, far_dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ far_tile = gt_to_tile(far_gt)->id;
+ far_dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ err = guc_g2g_register(guc, far_gt, t, have_dev);
+ if (err) {
+ while (--t >= 0)
+ guc_g2g_deregister(guc, far_tile, far_dev, t);
+ goto err_deregister;
+ }
+ }
+ }
+
+ return 0;
+
+err_deregister:
+ for_each_gt(far_gt, xe, j) {
+ u32 tile, dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ if (j >= i)
+ break;
+
+ tile = gt_to_tile(far_gt)->id;
+ dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+
+ return err;
+}
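
Note the two-level unwind in the error path above: the inner while loop deregisters the channel types already registered for the GT that failed, and the err_deregister loop then deregisters both types for every GT that had been fully registered earlier (the j >= i check stops the walk once it reaches the failing GT).
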
+
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
@@ -253,6 +576,8 @@ static void guc_fini_hw(void *arg)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ guc_g2g_fini(guc);
}
/**
@@ -423,7 +748,16 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
int xe_guc_post_load_init(struct xe_guc *guc)
{
+ int ret;
+
xe_guc_ads_populate_post_load(&guc->ads);
+
+ if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
+ ret = guc_g2g_start(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.enabled = true;
return 0;
@@ -945,7 +1279,6 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -1099,10 +1432,21 @@ int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
return guc_self_cfg(guc, key, 2, val);
}
+static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_migrated_event_handler(gt);
+}
+
void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
if (iir & GUC_INTR_GUC2HOST)
xe_guc_ct_irq_handler(&guc->ct);
+
+ if (iir & GUC_INTR_SW_INT_0)
+ xe_guc_sw_0_irq_handler(guc);
}
void xe_guc_sanitize(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 4e746ae98888..fab259adc380 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -29,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -231,11 +232,6 @@ static size_t guc_ads_size(struct xe_guc_ads *ads)
guc_ads_private_data_size(ads);
}
-static bool needs_wa_1607983814(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) < 1250;
-}
-
static size_t calculate_regset_size(struct xe_gt *gt)
{
struct xe_reg_sr_entry *sr_entry;
@@ -250,7 +246,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (needs_wa_1607983814(gt_to_xe(gt)))
+ if (XE_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -701,6 +697,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
@@ -709,7 +719,6 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
struct iosys_map *regset_map,
struct xe_hw_engine *hwe)
{
- struct xe_device *xe = ads_to_xe(ads);
struct xe_hw_engine *hwe_rcs_reset_domain =
xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
struct xe_reg_sr_entry *entry;
@@ -740,8 +749,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- /* Wa_1607983814 */
- if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index d63912d28246..f6d523e4c5fe 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -1806,7 +1806,6 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
if (!devcore_snapshot->matched_node)
return;
- xe_gt_assert(gt, snapshot->source <= XE_ENGINE_CAPTURE_SOURCE_GUC);
xe_gt_assert(gt, snapshot->hwe);
capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class);
@@ -1815,7 +1814,8 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
snapshot->name ? snapshot->name : "",
snapshot->logical_instance);
drm_printf(p, "\tCapture_source: %s\n",
- snapshot->source == XE_ENGINE_CAPTURE_SOURCE_GUC ? "GuC" : "Manual");
+ devcore_snapshot->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC ?
+ "GuC" : "Manual");
drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]);
drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
snapshot->forcewake.domain, snapshot->forcewake.ref);
@@ -1840,29 +1840,24 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
}
/**
- * xe_guc_capture_get_matching_and_lock - Matching GuC capture for the job.
- * @job: The job object.
+ * xe_guc_capture_get_matching_and_lock - Find and lock the matching GuC capture for the queue.
+ * @q: The exec queue object
*
- * Search within the capture outlist for the job, could be used for check if
- * GuC capture is ready for the job.
+ * Search within the capture outlist for the queue; can be used to check
+ * whether GuC capture is ready for the queue.
* If found, the locked boolean of the node will be flagged.
*
* Returns: found guc-capture node ptr else NULL
*/
struct __guc_capture_parsed_output *
-xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
+xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- struct xe_exec_queue *q;
struct xe_device *xe;
u16 guc_class = GUC_LAST_ENGINE_CLASS + 1;
struct xe_devcoredump_snapshot *ss;
- if (!job)
- return NULL;
-
- q = job->q;
if (!q || !q->gt)
return NULL;
@@ -1874,7 +1869,7 @@ xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
return ss->matched_node;
- /* Find hwe for the job */
+ /* Find hwe for the queue */
for_each_hw_engine(hwe, q->gt, id) {
if (hwe != q->hwe)
continue;
@@ -1906,17 +1901,16 @@ xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
}
/**
- * xe_engine_snapshot_capture_for_job - Take snapshot of associated engine
- * @job: The job object
+ * xe_engine_snapshot_capture_for_queue - Take snapshot of associated engine
+ * @q: The exec queue object
*
* Take snapshot of associated HW Engine
*
* Returns: None.
*/
void
-xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
+xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
{
- struct xe_exec_queue *q = job->q;
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
struct xe_hw_engine *hwe;
@@ -1934,11 +1928,12 @@ xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
}
if (!coredump->snapshot.hwe[id]) {
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe, job);
+ coredump->snapshot.hwe[id] =
+ xe_hw_engine_snapshot_capture(hwe, q);
} else {
struct __guc_capture_parsed_output *new;
- new = xe_guc_capture_get_matching_and_lock(job);
+ new = xe_guc_capture_get_matching_and_lock(q);
if (new) {
struct xe_guc *guc = &q->gt->uc.guc;
@@ -1960,7 +1955,7 @@ xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
}
/*
- * xe_guc_capture_put_matched_nodes - Cleanup macthed nodes
+ * xe_guc_capture_put_matched_nodes - Cleanup matched nodes
* @guc: The GuC object
*
* Free matched node and all nodes with the equal guc_id from
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h
index 97a795d13dd1..20a078dc4b85 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture.h
@@ -11,10 +11,10 @@
#include "xe_guc.h"
#include "xe_guc_fwif.h"
+struct xe_exec_queue;
struct xe_guc;
struct xe_hw_engine;
struct xe_hw_engine_snapshot;
-struct xe_sched_job;
static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class)
{
@@ -50,10 +50,10 @@ size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc);
const struct __guc_mmio_reg_descr_group *
xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
enum guc_capture_list_class_type capture_class, bool is_ext);
-struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job);
+struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q);
void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot);
void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p);
-void xe_engine_snapshot_capture_for_job(struct xe_sched_job *job);
+void xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q);
void xe_guc_capture_steered_list_init(struct xe_guc *guc);
void xe_guc_capture_put_matched_nodes(struct xe_guc *guc);
int xe_guc_capture_init(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index 2057125b1bfa..ca2d390ccbee 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -22,7 +22,7 @@ enum capture_register_data_type {
* struct __guc_mmio_reg_descr - GuC mmio register descriptor
*
* xe_guc_capture module uses these structures to define a register
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
@@ -48,7 +48,7 @@ struct __guc_mmio_reg_descr {
*
* xe_guc_capture module uses these structures to maintain static
* tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 8aeb1789805c..72ad576fc18e 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -54,6 +54,7 @@ enum {
CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+ CT_DEAD_CRASH, /* 0x8000 */
};
static void ct_dead_worker_func(struct work_struct *w);
@@ -469,8 +470,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
* after any existing dead state has been dumped.
*/
spin_lock_irq(&ct->dead.lock);
- if (ct->dead.reason)
+ if (ct->dead.reason) {
ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ queue_work(system_unbound_wq, &ct->dead.worker);
+ }
spin_unlock_irq(&ct->dead.lock);
#endif
@@ -707,7 +710,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
--len;
++action;
- /* Write H2G ensuring visable before descriptor update */
+ /* Write H2G ensuring visible before descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
xe_device_wmb(xe);
@@ -1017,7 +1020,6 @@ retry_same_fence:
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
-
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
@@ -1121,6 +1123,24 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
+static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+
+ if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+ xe_gt_err(gt, "GuC Crash dump notification\n");
+ else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
+ xe_gt_err(gt, "GuC Exception notification\n");
+ else
+ xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
+
+ CT_DEAD(ct, NULL, CRASH);
+
+ kick_reset(ct);
+
+ return 0;
+}
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1295,13 +1315,17 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
break;
+ case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ case XE_GUC_ACTION_NOTIFY_EXCEPTION:
+ ret = guc_crash_process_msg(ct, action);
+ break;
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
if (ret) {
- xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
- action, ERR_PTR(ret));
+ xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
+ action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
CT_DEAD(ct, NULL, PROCESS_FAILED);
}
@@ -1359,7 +1383,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
* this function and nowhere else. Hence, they cannot be different
* unless two g2h_read calls are running concurrently. Which is not
* possible because it is guarded by ct->fast_lock. And yet, some
- * discrete platforms are reguarly hitting this error :(.
+ * discrete platforms are regularly hitting this error :(.
*
* desc_head rolling backwards shouldn't cause any noticeable
* problems - just a delay in GuC being allowed to proceed past that
@@ -1699,8 +1723,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
- if (snapshot->ctb)
- xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
+ if (snapshot->ctb) {
+ drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
+ xe_print_blob_ascii85(p, "[CTB].data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
+ }
} else {
drm_puts(p, "CT disabled\n");
}
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 08ffe59f22fa..057153f89b30 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -17,6 +17,7 @@
#define G2H_LEN_DW_TLB_INVALIDATE 3
#define GUC_ID_MAX 65535
+#define GUC_ID_UNKNOWN 0xffffffff
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
index 9d99fe266d97..146a6eda9e06 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -49,6 +49,8 @@ const char *xe_guc_klv_key_to_string(u16 key)
return "begin_db_id";
case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
return "begin_ctx_id";
+ case GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY:
+ return "sched_priority";
/* VF CFG threshold keys */
#define define_threshold_key_to_string_case(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index df4cfb698cdb..0ca3056d8bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -208,11 +208,14 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
drm_printf(p, "Log level: %u\n", snapshot->level);
+ drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size);
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "[LOG].data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
- xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
remain -= size;
}
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index e8b9faeaef64..b995d1d51aed 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -6,6 +6,7 @@
#include "xe_guc_pc.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -19,6 +20,7 @@
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
@@ -38,6 +40,7 @@
#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define RPE_MASK REG_GENMASK(15, 8)
+#define RPA_MASK REG_GENMASK(31, 16)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define CAGF_MASK REG_GENMASK(19, 11)
@@ -48,6 +51,9 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
/**
* DOC: GuC Power Conservation (PC)
*
@@ -112,9 +118,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state)
+ enum slpc_global_state state,
+ int timeout_ms)
{
- int timeout_us = 5000; /* rought 5ms, but no need for precision */
+ int timeout_us = 1000 * timeout_ms;
int slept, wait = 10;
xe_device_assert_mem_access(pc_to_xe(pc));
@@ -163,7 +170,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
/* Blocking here to ensure the results are ready before reading them */
@@ -186,7 +194,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -207,7 +216,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -328,6 +338,19 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
freq);
}
+static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
+ else
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
+
+ pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+}
+
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -341,6 +364,25 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
+static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ /*
+ * For PVC we still need to use the fused value as the approximation
+ * for RPa. For platforms other than PVC we get the resolved RPa
+ * directly from PCODE at a different register
+ */
+ if (xe->info.platform == XE_PVC)
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -365,10 +407,13 @@ static void pc_update_rp_values(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1270)
+ if (GRAPHICS_VERx100(xe) >= 1270) {
+ mtl_update_rpa_value(pc);
mtl_update_rpe_value(pc);
- else
+ } else {
+ tgl_update_rpa_value(pc);
tgl_update_rpe_value(pc);
+ }
/*
* RPe is decided at runtime by PCODE. In the rare case where that's
@@ -404,6 +449,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
return freq;
}
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+ u32 freq;
+
+ freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+ freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+ return decode_freq(freq);
+}
+
/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
@@ -421,16 +475,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -ETIMEDOUT;
}
- *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
-
- *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
- *freq = decode_freq(*freq);
+ *freq = get_cur_freq(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
@@ -448,6 +499,19 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
}
/**
+ * xe_guc_pc_get_rpa_freq - Get the RPa freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPa freq.
+ */
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
+{
+ pc_update_rp_values(pc);
+
+ return pc->rpa_freq;
+}
+
+/**
* xe_guc_pc_get_rpe_freq - Get the RPe freq
* @pc: The GuC PC
*
@@ -481,10 +545,10 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
*/
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
- struct xe_gt *gt = pc_to_gt(pc);
- unsigned int fw_ref;
int ret;
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -492,24 +556,12 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
goto out;
}
- /*
- * GuC SLPC plays with min freq request when GuCRC is enabled
- * Block RC6 for a more reliable read.
- */
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- ret = -ETIMEDOUT;
- goto fw;
- }
-
ret = pc_action_query_task_state(pc);
if (ret)
- goto fw;
+ goto out;
*freq = pc_get_min_freq(pc);
-fw:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
out:
mutex_unlock(&pc->freq_lock);
return ret;
@@ -965,12 +1017,13 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
unsigned int fw_ref;
+ ktime_t earlier;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -ETIMEDOUT;
}
@@ -989,14 +1042,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
memset(pc->bo->vmap.vaddr, 0, size);
slpc_shared_data_write(pc, header.size, size);
+ earlier = ktime_get();
ret = pc_action_reset(pc);
if (ret)
goto out;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
- xe_gt_err(gt, "GuC PC Start failed\n");
- ret = -EIO;
- goto out;
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS)) {
+ xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+ xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+ xe_gt_throttle_get_limit_reasons(gt));
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+ xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ goto out;
+ }
+
+ xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+ ktime_ms_delta(ktime_get(), earlier));
}
ret = pc_init_freqs(pc);
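The start path now waits twice: a short window that only warns, then an extended window that fails hard. A self-contained sketch of that pattern, with a stubbed poll standing in for wait_for_pc_state() (timeouts and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stub poll: pretends the controller comes up on the second check. */
static int polls_left = 2;

static int wait_for_running(unsigned int timeout_ms)
{
	(void)timeout_ms;             /* real code polls until this expires */
	return --polls_left > 0 ? -1 : 0;
}

static int start_with_retry(void)
{
	if (!wait_for_running(5))     /* normal window */
		return 0;

	fprintf(stderr, "start taking longer than normal\n");

	if (wait_for_running(1000)) { /* extended window */
		fprintf(stderr, "start failed\n");
		return -1;            /* -EIO in the driver */
	}
	return 0;                     /* slow but successful */
}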
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index efda432fadfc..619f59cd633c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -21,6 +21,7 @@ int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index 13810be015db..2978ac9a249b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -17,6 +17,8 @@ struct xe_guc_pc {
struct xe_bo *bo;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
+ /** @rpa_freq: HW RPa frequency - The Achievable one */
+ u32 rpa_freq;
/** @rpe_freq: HW RPe frequency - The Efficient one */
u32 rpe_freq;
/** @rpn_freq: HW RPN frequency - The Minimum one */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 6f4a9812b4f4..1a5fe4822a62 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -412,12 +412,11 @@ static const int xe_exec_queue_prio_to_guc[] = {
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
- struct xe_device *xe = guc_to_xe(guc);
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
@@ -451,12 +450,11 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
- struct xe_device *xe = guc_to_xe(guc);
u32 action[MAX_MLRC_REG_SIZE];
int len = 0;
int i;
- xe_assert(xe, xe_exec_queue_is_parallel(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
@@ -479,7 +477,7 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
- xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
+ xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
@@ -513,7 +511,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct xe_lrc *lrc = q->lrc[0];
struct guc_ctxt_registration_info info;
- xe_assert(xe, !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -603,7 +601,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wq_space_until_wrap(q)))
return -ENODEV;
- xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
+ xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
@@ -643,13 +641,13 @@ static void wq_item_append(struct xe_exec_queue *q)
wqi[i++] = lrc->ring.tail / sizeof(u64);
}
- xe_assert(xe, i == wqi_size / sizeof(u32));
+ xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[q->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
q->guc->wqi_tail += wqi_size;
- xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
+ xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
xe_device_wmb(xe);
@@ -661,7 +659,6 @@ static void wq_item_append(struct xe_exec_queue *q)
static void submit_exec_queue(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_lrc *lrc = q->lrc[0];
u32 action[3];
u32 g2h_len = 0;
@@ -669,7 +666,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
int len = 0;
bool extra_submit = false;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
@@ -716,12 +713,11 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
- xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
- exec_queue_banned(q) || exec_queue_suspended(q));
+ xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
+ exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
@@ -823,7 +819,7 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
*/
void xe_guc_submit_wedge(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
int err;
@@ -833,7 +829,8 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
- drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+ xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
+ "Although device is wedged.\n");
return;
}
@@ -865,11 +862,10 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
container_of(w, struct xe_guc_exec_queue, lr_tdr);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched;
bool wedged;
- xe_assert(xe, xe_exec_queue_is_lr(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
@@ -903,13 +899,19 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
!exec_queue_pending_disable(q) ||
xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
- drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
+ xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
return;
}
}
+ if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
+ xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+
xe_sched_submission_start(sched);
}
@@ -1068,13 +1070,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* do manual capture first and decide later if we need to use it
*/
if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
- !xe_guc_capture_get_matching_and_lock(job)) {
+ !xe_guc_capture_get_matching_and_lock(q)) {
/* take force wake before engine register manual capture */
fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
- xe_engine_snapshot_capture_for_job(job);
+ xe_engine_snapshot_capture_for_queue(q);
xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
}
@@ -1132,7 +1134,12 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
- xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
+ xe_gt_warn(guc_to_gt(guc),
+ "Schedule disable failed to respond, guc_id=%d",
+ q->guc->id);
+ xe_devcoredump(q, job,
+ "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
+ q->guc->id, ret, xe_guc_read_stopped(guc));
set_exec_queue_extra_ref(q);
xe_exec_queue_get(q); /* GT reset owns this */
set_exec_queue_banned(q);
@@ -1162,7 +1169,10 @@ trigger_reset:
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
- xe_devcoredump(job);
+ xe_devcoredump(q, job,
+ "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags);
/*
* Kernel jobs should never fail, nor should VM jobs if they do
@@ -1216,7 +1226,7 @@ sched_enable:
enable_scheduling(q);
rearm:
/*
- * XXX: Ideally want to adjust timeout based on current exection time
+ * XXX: Ideally want to adjust timeout based on current execution time
* but there is not currently an easy way to do in DRM scheduler. With
* some thought, do this in a follow up.
*/
@@ -1236,9 +1246,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
+ release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- release_guc_id(guc, q);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
@@ -1277,9 +1289,8 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
struct xe_exec_queue *q = msg->private_data;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
if (exec_queue_registered(q))
@@ -1315,11 +1326,10 @@ static void __suspend_fence_signal(struct xe_exec_queue *q)
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
- xe_guc_read_stopped(guc));
- xe_assert(xe, q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
+ xe_guc_read_stopped(guc));
+ xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
__suspend_fence_signal(q);
}
@@ -1415,12 +1425,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
int err, i;
- xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
+ xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge)
@@ -1633,9 +1642,8 @@ static void guc_exec_queue_resume(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
xe_sched_msg_lock(sched);
guc_exec_queue_try_add_msg(q, msg, RESUME);
@@ -1708,7 +1716,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
ban = true;
}
} else if (xe_exec_queue_is_lr(q) &&
- (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
+ !xe_lrc_ring_is_idle(q->lrc[0])) {
ban = true;
}
@@ -1747,9 +1755,8 @@ void xe_guc_submit_stop(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, xe_guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
@@ -1791,9 +1798,8 @@ int xe_guc_submit_start(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, xe_guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
@@ -1814,22 +1820,22 @@ int xe_guc_submit_start(struct xe_guc *guc)
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
if (unlikely(guc_id >= GUC_ID_MAX)) {
- drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
+ xe_gt_err(gt, "Invalid guc_id %u\n", guc_id);
return NULL;
}
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
+ xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
return NULL;
}
- xe_assert(xe, guc_id >= q->guc->id);
- xe_assert(xe, guc_id < (q->guc->id + q->width));
+ xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
+ xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
return q;
}
@@ -1898,15 +1904,14 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
- u32 runnable_state = msg[1];
+ u32 guc_id, runnable_state;
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 2))
return -EPROTO;
- }
+
+ guc_id = msg[0];
+ runnable_state = msg[1];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1940,14 +1945,13 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1969,14 +1973,13 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -2016,10 +2019,8 @@ int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
u32 status;
- if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN)) {
- xe_gt_dbg(guc_to_gt(guc), "Invalid length %u", len);
+ if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN))
return -EPROTO;
- }
status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
@@ -2034,13 +2035,21 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
+
+ guc_id = msg[0];
+
+ if (guc_id == GUC_ID_UNKNOWN) {
+ /*
+ * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
+ * context. In that case only the PF is notified about the fault.
+ */
+ xe_gt_err_ratelimited(gt, "Memory CAT error reported by GuC!\n");
+ return 0;
}
q = g2h_exec_queue_lookup(guc, guc_id);
@@ -2062,24 +2071,22 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
- if (unlikely(len != 3)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len != 3))
return -EPROTO;
- }
guc_class = msg[0];
instance = msg[1];
reason = msg[2];
/* Unexpected failure of a hardware feature, log an actual error */
- drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
- guc_class, instance, reason);
+ xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X",
+ guc_class, instance, reason);
- xe_gt_reset_async(guc_to_gt(guc));
+ xe_gt_reset_async(gt);
return 0;
}
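All of the G2H handlers above now share the same shape: reject a short message with -EPROTO before reading any payload word, then look the queue up by its first dword. A condensed sketch of that validation pattern (handler name and payload layout are illustrative):

#include <errno.h>
#include <stdint.h>

static int g2h_handler(const uint32_t *msg, uint32_t len)
{
	uint32_t guc_id, payload;

	if (len < 2)
		return -EPROTO;   /* malformed: payload never touched */

	guc_id  = msg[0];
	payload = msg[1];

	/* ... g2h_exec_queue_lookup(guc, guc_id) and state update ... */
	(void)guc_id;
	(void)payload;
	return 0;
}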
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index fa75f57bf5da..83a41ebcdc91 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -64,6 +64,15 @@ struct xe_guc {
struct xe_guc_pc pc;
/** @dbm: GuC Doorbell Manager */
struct xe_guc_db_mgr dbm;
+
+ /** @g2g: GuC to GuC communication state */
+ struct {
+ /** @g2g.bo: Storage for GuC to GuC communication channels */
+ struct xe_bo *bo;
+ /** @g2g.owned: Is the BO owned by this GT or just mapped in */
+ bool owned;
+ } g2g;
+
/** @submission_state: GuC submission state */
struct {
/** @submission_state.idm: GuC context ID Manager */
@@ -79,6 +88,7 @@ struct xe_guc {
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
+
/** @hwconfig: Hardware config state */
struct {
/** @hwconfig.bo: buffer object of the hardware config */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 65b2e147c4b9..d765bfd3636b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
if (heci_gsc->adev) {
@@ -177,7 +177,7 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
heci_gsc->irq = -1;
@@ -222,7 +222,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & GSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_GSCFI(xe)) {
+ if (!xe->info.has_heci_gscfi) {
drm_warn_once(&xe->drm, "GSC irq: not supported");
return;
}
@@ -242,7 +242,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & CSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_CSCFI(xe)) {
+ if (!xe->info.has_heci_cscfi) {
drm_warn_once(&xe->drm, "CSC irq: not supported");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..c3cc0fa105e8 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/*
+/**
 * xe_mark_range_accessed() - mark a range as accessed, so the core mm
 * has the information it needs for memory eviction or writeback to
 * disk
- *
* @range: the range to mark
* @write: if write to this range, we mark pages in this range
* as dirty
@@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
}
}
-/*
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+ unsigned long i, npages, hmm_pfn;
+ unsigned long num_chunks = 0;
+ int ret;
+
+ /* HMM docs say this is needed. */
+ ret = down_read_interruptible(notifier_sem);
+ if (ret)
+ return ret;
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ up_read(notifier_sem);
+ return -EAGAIN;
+ }
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages;) {
+ unsigned long len;
+
+ hmm_pfn = range->hmm_pfns[i];
+ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+ /* If order > 0 the page may extend beyond range->start */
+ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+ i += len;
+ num_chunks++;
+ }
+ up_read(notifier_sem);
+
+ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
+/**
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
 * in a hmm_range. dma-map pages if necessary. The dma-address is saved in
 * the sg table and will be used to program the GPU page table later.
- *
* @xe: the xe device who will access the dma-address in sg table
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
* has the pfn numbers of pages that back up this hmm address range.
* @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
* @write: whether we write to this range. This decides dma map direction
 * for system pages. If write we map it bi-directional; otherwise
* DMA_TO_DEVICE
@@ -78,43 +113,88 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st, bool write)
+ struct sg_table *st,
+ struct rw_semaphore *notifier_sem,
+ bool write)
{
+ unsigned long npages = xe_npages_in_range(range->start, range->end);
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
- int ret;
+ struct scatterlist *sgl;
+ struct page *page;
+ unsigned long i, j;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ lockdep_assert_held(notifier_sem);
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
+ i = 0;
+ for_each_sg(st->sgl, sgl, st->nents, j) {
+ unsigned long hmm_pfn, size;
+
+ hmm_pfn = range->hmm_pfns[i];
+ page = hmm_pfn_to_page(hmm_pfn);
+ xe_assert(xe, !is_device_private_page(page));
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= page_to_pfn(page) & (size - 1);
+ i += size;
+
+ if (unlikely(j == st->nents - 1)) {
+ xe_assert(xe, i >= npages);
+ if (i > npages)
+ size -= (i - npages);
+
+ sg_mark_end(sgl);
+ } else {
+ xe_assert(xe, i < npages);
+ }
+
+ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
- xe_sg_segment_size(dev), GFP_KERNEL);
- if (ret)
- goto free_pages;
+ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
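The chunk arithmetic in xe_alloc_sg()/xe_build_sg() deserves a closer look: a PFN of map order k covers 2^k contiguous pages, but the mapped range may start partway into that block, so the usable length is trimmed down to the block boundary. A simplified sketch, with an assumed order encoding (the real HMM_PFN_ORDER_SHIFT/HMM_PFN_FLAGS come from linux/hmm.h):

#include <stdint.h>

#define ORDER_SHIFT 56                        /* assumed encoding */
#define PFN_FLAGS   (0xffull << ORDER_SHIFT)  /* flag/order bits  */

static unsigned long chunk_pages(uint64_t hmm_pfn)
{
	unsigned int order = (hmm_pfn >> ORDER_SHIFT) & 0x1f;
	unsigned long len = 1ul << order;

	/* Trim by this pfn's offset inside its naturally aligned block. */
	len -= (hmm_pfn & ~PFN_FLAGS) & (len - 1);
	return len;
}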
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(st);
- st = NULL;
+ lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+
+ mutex_lock(&userptr->unmap_mutex);
+ xe_assert(vm->xe, !userptr->mapped);
+ userptr->mapped = true;
+ mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+
+ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+ !lockdep_is_held_type(&vm->lock, 0) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ /* Don't unmap in exec critical section. */
+ xe_vm_assert_held(vm);
+ /* Don't unmap while mapping the sg. */
+ lockdep_assert_held(&vm->lock);
}
-free_pages:
- kvfree(pages);
- return ret;
+ mutex_lock(&userptr->unmap_mutex);
+ if (userptr->sg && userptr->mapped)
+ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ userptr->mapped = false;
+ mutex_unlock(&userptr->unmap_mutex);
}
-/*
+/**
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- *
* @uvma: the userptr vma which hold the scatter gather table
*
* With function xe_userptr_populate_range, we allocate storage of
@@ -124,16 +204,9 @@ free_pages:
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- struct device *dev = xe->drm.dev;
-
- xe_assert(xe, userptr->sg);
- dma_unmap_sgtable(dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+ xe_hmm_userptr_unmap(uvma);
sg_free_table(userptr->sg);
userptr->sg = NULL;
}
@@ -159,20 +232,27 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
* This function allocates the storage of the userptr sg table.
* It is caller's responsibility to free it calling sg_free_table.
*
- * returns: 0 for succuss; negative error no on failure
+ * returns: 0 for success; negative errno on failure
*/
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
bool is_mm_mmap_locked)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ unsigned long *pfns;
struct xe_userptr *userptr;
struct xe_vma *vma = &uvma->vma;
u64 userptr_start = xe_vma_userptr(vma);
u64 userptr_end = userptr_start + xe_vma_size(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range;
+ struct hmm_range hmm_range = {
+ .pfn_flags_mask = 0, /* ignore pfns */
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .start = userptr_start,
+ .end = userptr_end,
+ .notifier = &uvma->userptr.notifier,
+ .dev_private_owner = vm->xe,
+ };
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
u64 npages;
@@ -199,19 +279,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
return -ENOMEM;
if (write)
- flags |= HMM_PFN_REQ_WRITE;
+ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto free_pfns;
}
- hmm_range.default_flags = flags;
hmm_range.hmm_pfns = pfns;
- hmm_range.notifier = &userptr->notifier;
- hmm_range.start = userptr_start;
- hmm_range.end = userptr_end;
- hmm_range.dev_private_owner = vm->xe;
while (true) {
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
@@ -238,16 +313,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
if (ret)
goto free_pfns;
+ ret = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (ret)
+ goto free_st;
+
+ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+ &vm->userptr.notifier_lock, write);
+ if (ret)
+ goto out_unlock;
+
xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
+ xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+ return 0;
+out_unlock:
+ up_read(&vm->userptr.notifier_lock);
+free_st:
+ sg_free_table(&userptr->sgt);
free_pfns:
kvfree(pfns);
return ret;
}
-
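The populate path is built around the mmu interval notifier protocol: sample a sequence number, fault the pages with no lock held, then recheck the sequence under the notifier lock before publishing the sg table; any invalidation in between forces a re-fault. A stripped-down sketch of that loop (types and helpers simplified):

#include <stdbool.h>

struct notifier { unsigned long seq; };

static unsigned long read_begin(struct notifier *n) { return n->seq; }

static bool read_retry(struct notifier *n, unsigned long s)
{
	return n->seq != s;   /* range invalidated since read_begin() */
}

static int populate(struct notifier *n)
{
	unsigned long seq;

	do {
		seq = read_begin(n);
		/* fault pages here: may sleep, no lock held */
		/* then take the notifier lock in the real code */
	} while (read_retry(n, seq));   /* invalidated: fault again */

	/* publish the sg table while the lock is still held */
	return 0;
}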
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..0ea98d8e7bbc 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -3,9 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#ifndef _XE_HMM_H_
+#define _XE_HMM_H_
+
#include <linux/types.h>
struct xe_userptr_vma;
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1557acee3523..fc447751fe78 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -324,6 +324,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
@@ -332,8 +333,10 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
_MASKED_BIT_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
@@ -419,7 +422,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
@@ -574,7 +577,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
@@ -773,7 +775,7 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
+ drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
}
}
@@ -829,7 +831,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
/**
* xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
* @hwe: Xe HW Engine.
- * @job: The job object.
+ * @q: The exec queue object.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -838,7 +840,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
* caller, using `xe_hw_engine_snapshot_free`.
*/
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job)
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
struct xe_hw_engine_snapshot *snapshot;
struct __guc_capture_parsed_output *node;
@@ -864,15 +866,14 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job
if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
return snapshot;
- if (job) {
+ if (q) {
/* If got guc capture, set source to GuC */
- node = xe_guc_capture_get_matching_and_lock(job);
+ node = xe_guc_capture_get_matching_and_lock(q);
if (node) {
struct xe_device *xe = gt_to_xe(hwe->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
coredump->snapshot.matched_node = node;
- snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
return snapshot;
}
@@ -880,7 +881,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job
/* otherwise, do manual capture */
xe_engine_manual_capture(hwe, snapshot);
- snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
return snapshot;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index da0a6922a26f..6b5f9fa2a594 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -11,7 +11,7 @@
struct drm_printer;
struct drm_xe_engine_class_instance;
struct xe_device;
-struct xe_sched_job;
+struct xe_exec_queue;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -56,7 +56,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe);
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
enum xe_engine_class engine_class);
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job);
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q);
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot);
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 719f27ef00a5..e4191a7a2c31 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -106,7 +106,7 @@ struct xe_hw_engine_class_intf {
* Contains all the hardware engine state for physical instances.
*/
struct xe_hw_engine {
- /** @gt: graphics tile this hw engine belongs to */
+ /** @gt: GT structure this hw engine belongs to */
struct xe_gt *gt;
/** @name: name of this hw engine */
const char *name;
@@ -165,8 +165,6 @@ enum xe_hw_engine_snapshot_source_id {
struct xe_hw_engine_snapshot {
/** @name: name of the hw engine */
char *name;
- /** @source: Data source, either manual or GuC */
- enum xe_hw_engine_snapshot_source_id source;
/** @hwe: hw engine */
struct xe_hw_engine *hwe;
/** @logical_instance: logical instance of this hw engine */
diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
index 364a61f4bfda..58a8d09afe5c 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
@@ -41,7 +41,7 @@ struct xe_hw_fence_irq {
* to a xe_hw_fence_irq, maintains serial seqno.
*/
struct xe_hw_fence_ctx {
- /** @gt: graphics tile of hardware fence context */
+ /** @gt: GT structure of hardware fence context */
struct xe_gt *gt;
/** @irq: fence irq handler */
struct xe_hw_fence_irq *irq;
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index b7995ebd54ab..08552ee3fb94 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include "display/xe_display.h"
+#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
@@ -29,6 +30,11 @@
#define IIR(offset) XE_REG(offset + 0x8)
#define IER(offset) XE_REG(offset + 0xc)
+static int xe_irq_msix_init(struct xe_device *xe);
+static void xe_irq_msix_free(struct xe_device *xe);
+static int xe_irq_msix_request_irqs(struct xe_device *xe);
+static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
+
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 val = xe_mmio_read32(mmio, reg);
@@ -192,7 +198,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
- } else if (HAS_HECI_GSCFI(xe)) {
+ } else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
}
@@ -325,7 +331,7 @@ static void gt_irq_handler(struct xe_tile *tile,
if (class == XE_ENGINE_CLASS_OTHER) {
/* HECI GSCFI interrupts come from outside of GT */
- if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
+ if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
@@ -348,12 +354,8 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
unsigned long intr_dw[2];
u32 identity[32];
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_ctl = xelp_intr_disable(xe);
if (!master_ctl) {
@@ -417,12 +419,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
/* TODO: This really shouldn't be copied+pasted */
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_tile_ctl = dg1_intr_disable(xe);
if (!master_tile_ctl) {
@@ -459,7 +457,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
- if (HAS_HECI_CSCFI(xe))
+ if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
@@ -508,7 +506,7 @@ static void gt_irq_reset(struct xe_tile *tile)
if ((tile->media_gt &&
xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
- HAS_HECI_GSCFI(tile_to_xe(tile))) {
+ tile_to_xe(tile)->info.has_heci_gscfi) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
@@ -580,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_reset(xe);
+ if (xe_device_uses_memirq(xe)) {
+ for_each_tile(tile, xe, id)
+ xe_memirq_reset(&tile->memirq);
+ }
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -622,6 +625,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_postinstall(xe);
+ if (xe_device_uses_memirq(xe)) {
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_postinstall(&tile->memirq);
+ }
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -644,12 +655,8 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
struct xe_tile *tile;
unsigned int id;
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
for_each_tile(tile, xe, id)
xe_memirq_handler(&tile->memirq);
@@ -668,87 +675,105 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
return xelp_irq_handler;
}
-static void irq_uninstall(void *arg)
+static int xe_irq_msi_request_irqs(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ irq_handler_t irq_handler;
+ int irq, err;
+
+ irq_handler = xe_irq_handler(xe);
+ if (!irq_handler) {
+ drm_err(&xe->drm, "No supported interrupt handler");
+ return -EINVAL;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xe_irq_msi_free(struct xe_device *xe)
{
- struct xe_device *xe = arg;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int irq;
- if (!xe->irq.enabled)
+ irq = pci_irq_vector(pdev, 0);
+ free_irq(irq, xe);
+}
+
+static void irq_uninstall(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!atomic_xchg(&xe->irq.enabled, 0))
return;
- xe->irq.enabled = false;
xe_irq_reset(xe);
- irq = pci_irq_vector(pdev, 0);
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
+}
+
+int xe_irq_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->irq.lock);
+
+ return xe_irq_msix_init(xe);
}
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- unsigned int irq_flags = PCI_IRQ_MSIX;
- irq_handler_t irq_handler;
- int err, irq, nvec;
-
- irq_handler = xe_irq_handler(xe);
- if (!irq_handler) {
- drm_err(&xe->drm, "No supported interrupt handler");
- return -EINVAL;
- }
+ unsigned int irq_flags = PCI_IRQ_MSI;
+ int nvec = 1;
+ int err;
xe_irq_reset(xe);
- nvec = pci_msix_vec_count(pdev);
- if (nvec <= 0) {
- if (nvec == -EINVAL) {
- /* MSIX capability is not supported in the device, using MSI */
- irq_flags = PCI_IRQ_MSI;
- nvec = 1;
- } else {
- drm_err(&xe->drm, "MSIX: Failed getting count\n");
- return nvec;
- }
+ if (xe_device_has_msix(xe)) {
+ nvec = xe->irq.msix.nvec;
+ irq_flags = PCI_IRQ_MSIX;
}
err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
- drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
+ drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
return err;
}
- irq = pci_irq_vector(pdev, 0);
- err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
- if (err < 0) {
- drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
+ err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
+ xe_irq_msi_request_irqs(xe);
+ if (err)
return err;
- }
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_postinstall(xe);
- err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
- if (err)
- goto free_irq_handler;
-
- return 0;
-
-free_irq_handler:
- free_irq(irq, xe);
+ return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
+}
- return err;
+static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
+{
+ synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}
void xe_irq_suspend(struct xe_device *xe)
{
- int irq = to_pci_dev(xe->drm.dev)->irq;
+ atomic_set(&xe->irq.enabled, 0); /* no new irqs */
- spin_lock_irq(&xe->irq.lock);
- xe->irq.enabled = false; /* no new irqs */
- spin_unlock_irq(&xe->irq.lock);
-
- synchronize_irq(irq); /* flush irqs */
+ /* flush irqs */
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_synchronize_irq(xe);
+ else
+ xe_irq_msi_synchronize_irq(xe);
xe_irq_reset(xe); /* turn irqs off */
}
@@ -762,10 +787,205 @@ void xe_irq_resume(struct xe_device *xe)
* 1. no irq will arrive before the postinstall
* 2. display is not yet resumed
*/
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_reset(xe);
xe_irq_postinstall(xe); /* turn irqs on */
for_each_gt(gt, xe, id)
xe_irq_enable_hwe(gt);
}
+
+/* MSI-X related definitions and functions below. */
+
+enum xe_irq_msix_static {
+ GUC2HOST_MSIX = 0,
+ DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
+ /* Must be last */
+ NUM_OF_STATIC_MSIX,
+};
+
+static int xe_irq_msix_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int nvec = pci_msix_vec_count(pdev);
+
+ if (nvec == -EINVAL)
+ return 0; /* MSI */
+
+ if (nvec < 0) {
+ drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
+ return nvec;
+ }
+
+ xe->irq.msix.nvec = nvec;
+ xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
+ return 0;
+}
+
+static irqreturn_t guc2host_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u8 id;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, id)
+ xe_guc_irq_handler(&tile->primary_gt->uc.guc,
+ GUC_INTR_GUC2HOST);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
+{
+ unsigned int tile_id, gt_id;
+ struct xe_device *xe = arg;
+ struct xe_memirq *memirq;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, tile_id) {
+ memirq = &tile->memirq;
+ if (!memirq->bo)
+ continue;
+
+ for_each_gt(gt, xe, gt_id) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
+ bool dynamic_msix, u16 *msix)
+{
+ struct xa_limit limit;
+ int ret;
+ u32 id;
+
+ limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
+ XA_LIMIT(*msix, *msix);
+ ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (dynamic_msix)
+ *msix = id;
+
+ return 0;
+}
+
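Vector bookkeeping above uses one XArray for both cases: a static vector pins XA_LIMIT to a single index, while a dynamic one searches the space above the static entries. A bitmap stand-in for the same policy (NVEC and the static count are illustrative):

#include <stdint.h>

#define NVEC          32
#define FIRST_DYNAMIC  2      /* vectors below this are statically owned */

static uint32_t used;         /* bit i set == vector i allocated */

static int alloc_vector(int dynamic, uint16_t *msix)
{
	unsigned int lo = dynamic ? FIRST_DYNAMIC : *msix;
	unsigned int hi = dynamic ? NVEC - 1 : *msix;

	for (unsigned int id = lo; id <= hi; id++) {
		if (!(used & (1u << id))) {
			used |= 1u << id;
			*msix = (uint16_t)id;
			return 0;
		}
	}
	return -1;            /* range exhausted (-EBUSY in the driver) */
}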
+static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
+{
+ xa_erase(&xe->irq.msix.indexes, msix);
+}
+
+static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
+ void *irq_buf, const char *name, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix)
+{
+ int ret;
+
+ ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
+ if (ret)
+ return ret;
+
+ ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
+ xe_irq_msix_release_vector(xe, *msix);
+ return ret;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int irq;
+ void *irq_buf;
+
+ irq_buf = xa_load(&xe->irq.msix.indexes, msix);
+ if (!irq_buf)
+ return;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0) {
+ drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
+ return;
+ }
+
+ free_irq(irq, irq_buf);
+ xe_irq_msix_release_vector(xe, msix);
+}
+
+int xe_irq_msix_request_irqs(struct xe_device *xe)
+{
+ int err;
+ u16 msix;
+
+ msix = GUC2HOST_MSIX;
+ err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ DRIVER_NAME "-guc2host", false, &msix);
+ if (err)
+ return err;
+
+ msix = DEFAULT_MSIX;
+ err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
+ DRIVER_NAME "-default-msix", false, &msix);
+ if (err) {
+ xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
+ return err;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free(struct xe_device *xe)
+{
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ xe_irq_msix_free_irq(xe, msix);
+ xa_destroy(&xe->irq.msix.indexes);
+}
+
+void xe_irq_msix_synchronize_irq(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ synchronize_irq(pci_irq_vector(pdev, msix));
+}
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
index 067514e13675..a28bd577ba52 100644
--- a/drivers/gpu/drm/xe/xe_irq.h
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -6,13 +6,21 @@
#ifndef _XE_IRQ_H_
#define _XE_IRQ_H_
+#include <linux/interrupt.h>
+
+#define XE_IRQ_DEFAULT_MSIX 1
+
struct xe_device;
struct xe_tile;
struct xe_gt;
+int xe_irq_init(struct xe_device *xe);
int xe_irq_install(struct xe_device *xe);
void xe_irq_suspend(struct xe_device *xe);
void xe_irq_resume(struct xe_device *xe);
void xe_irq_enable_hwe(struct xe_gt *gt);
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix);
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 4f64c7f4e68d..bbb9ffbf6367 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -25,6 +25,7 @@
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
+#include "xe_trace_lrc.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -583,6 +584,7 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
{
struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq;
struct xe_device *xe = gt_to_xe(hwe->gt);
+ u8 num_regs;
if (!xe_device_uses_memirq(xe))
return;
@@ -592,12 +594,18 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
- regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ num_regs = xe_device_has_msix(xe) ? 3 : 2;
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(num_regs) |
MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe);
regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe);
+
+ if (xe_device_has_msix(xe)) {
+ regs[CTX_CS_INT_VEC_REG] = CS_INT_VEC(0).addr;
+ /* CTX_CS_INT_VEC_DATA will be set in xe_lrc_init */
+ }
}
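When MSI-X is present the LRI above grows to three registers, and xe_lrc_init() later fills CTX_CS_INT_VEC_DATA with the vector number packed into both halves of one dword. A one-line sketch of that packing:

#include <stdint.h>

static uint32_t cs_int_vec_data(uint16_t msix_vec)
{
	/* same vector in the high and low 16 bits, as written above */
	return ((uint32_t)msix_vec << 16) | msix_vec;
}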
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
@@ -875,7 +883,7 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size)
+ struct xe_vm *vm, u32 ring_size, u16 msix_vec)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
@@ -944,6 +952,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_drm_client_add_bo(vm->xef->client, lrc->bo);
}
+ if (xe_device_has_msix(xe)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_STATUS_REPORT_PTR,
+ xe_memirq_status_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_SRC_REPORT_PTR,
+ xe_memirq_source_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_CS_INT_VEC_DATA, msix_vec << 16 | msix_vec);
+ }
+
if (xe_gt_has_indirect_ring_state(gt)) {
xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
__xe_lrc_indirect_ring_ggtt_addr(lrc));
@@ -1004,6 +1020,7 @@ err_lrc_finish:
* @hwe: Hardware Engine
* @vm: The VM (address space)
* @ring_size: LRC ring size
+ * @msix_vec: MSI-X interrupt vector (for platforms that support it)
*
* Allocate and initialize the Logical Ring Context (LRC).
*
@@ -1011,7 +1028,7 @@ err_lrc_finish:
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size)
+ u32 ring_size, u16 msix_vec)
{
struct xe_lrc *lrc;
int err;
@@ -1020,7 +1037,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size);
+ err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
if (err) {
kfree(lrc);
return ERR_PTR(err);
@@ -1060,6 +1077,14 @@ u32 xe_lrc_ring_tail(struct xe_lrc *lrc)
return xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL) & TAIL_ADDR;
}
+static u32 xe_lrc_ring_start(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc))
+ return xe_lrc_read_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START);
+ else
+ return xe_lrc_read_ctx_reg(lrc, CTX_RING_START);
+}
+
void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1635,10 +1660,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
xe_vm_get(lrc->bo->vm);
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
+ snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
snapshot->head = xe_lrc_ring_head(lrc);
snapshot->tail.internal = lrc->ring.tail;
snapshot->tail.memory = xe_lrc_ring_tail(lrc);
+ snapshot->start = xe_lrc_ring_start(lrc);
snapshot->start_seqno = xe_lrc_start_seqno(lrc);
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
@@ -1692,11 +1719,14 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
return;
drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tHW Ring address: 0x%08x\n",
+ snapshot->ring_addr);
drm_printf(p, "\tHW Indirect Ring State: 0x%08x\n",
snapshot->indirect_context_desc);
drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tRing start: (memory) 0x%08x\n", snapshot->start);
drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
drm_printf(p, "\tTimestamp: 0x%08x\n", snapshot->ctx_timestamp);
@@ -1758,5 +1788,20 @@ u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ trace_xe_lrc_update_timestamp(lrc, *old_ts);
+
return lrc->ctx_timestamp;
}
+
+/**
+ * xe_lrc_ring_is_idle() - Check whether the LRC ring is idle
+ * @lrc: Pointer to the lrc.
+ *
+ * Compare LRC ring head and tail to determine if idle.
+ *
+ * Return: True if the ring is idle, False otherwise
+ */
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc)
+{
+ return xe_lrc_ring_head(lrc) == xe_lrc_ring_tail(lrc);
+}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 40d8f6906d3e..4206e6a8b50a 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -25,8 +25,10 @@ struct xe_lrc_snapshot {
unsigned long lrc_size, lrc_offset;
u32 context_desc;
+ u32 ring_addr;
u32 indirect_context_desc;
u32 head;
+ u32 start;
struct {
u32 internal;
u32 memory;
@@ -40,7 +42,7 @@ struct xe_lrc_snapshot {
#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size);
+ u32 ring_size, u16 msix_vec);
void xe_lrc_destroy(struct kref *ref);
/**
@@ -78,6 +80,8 @@ u32 xe_lrc_ring_head(struct xe_lrc *lrc);
u32 xe_lrc_ring_space(struct xe_lrc *lrc);
void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
+
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h
index daf56c846d03..8a77c2423555 100644
--- a/drivers/gpu/drm/xe/xe_macros.h
+++ b/drivers/gpu/drm/xe/xe_macros.h
@@ -10,9 +10,13 @@
#define XE_WARN_ON WARN_ON
-#define XE_IOCTL_DBG(xe, cond) \
- ((cond) && (drm_dbg(&(xe)->drm, \
- "Ioctl argument check failed at %s:%d: %s", \
- __FILE__, __LINE__, #cond), 1))
+#define XE_IOCTL_DBG(xe, cond) ({ \
+ int cond__ = !!(cond); \
+ if (cond__) \
+ drm_dbg(&(xe)->drm, \
+ "Ioctl argument check failed at %s:%d: %s", \
+ __FILE__, __LINE__, #cond); \
+ cond__; \
+})
#endif
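The rewrite replaces the comma-operator form with a GNU statement expression so the condition is evaluated exactly once into cond__; side effects in cond can no longer fire twice, and the block still yields a usable truth value. A standalone sketch of the same construct (requires GCC/Clang):

#include <stdio.h>

#define CHECK_DBG(cond) ({                                    \
	int cond__ = !!(cond);                                \
	if (cond__)                                           \
		fprintf(stderr, "check failed: %s\n", #cond); \
	cond__;                                               \
})

static int calls;
static int bump(void) { return ++calls; }

int main(void)
{
	/* bump() runs exactly once per CHECK_DBG, never twice */
	if (CHECK_DBG(bump() == 0))
		return 1;
	return 0;
}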
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index f833da88150a..404fa2a456d5 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -155,13 +155,6 @@ static const char *guc_name(struct xe_guc *guc)
*
*/
-static void __release_xe_bo(struct drm_device *drm, void *arg)
-{
- struct xe_bo *bo = arg;
-
- xe_bo_unpin_map_no_vm(bo);
-}
-
static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
/*
@@ -184,14 +177,12 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));
- /* XXX: convert to managed bo */
- bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE |
- XE_BO_FLAG_NEEDS_UC |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo = xe_managed_bo_create_pin_map(xe, tile, bo_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
@@ -215,7 +206,7 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
XE_MEMIRQ_STATUS_OFFSET(0));
- return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+ return 0;
out:
memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
@@ -442,6 +433,9 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 1b97d90aadda..278bc96cf593 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1506,7 +1506,7 @@ err_bb:
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
+ * consistency. Note that the meaning of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index bfc3deebdaa2..07b27114be9a 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -19,7 +19,7 @@
struct xe_modparam xe_modparam = {
.probe_display = true,
- .guc_log_level = 5,
+ .guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
/* the rest are 0 by default */
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5cc0f6f9bc11..1fa46a04425e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -16,7 +16,6 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
-#include "regs/xe_lrc_layout.h"
#include "regs/xe_oa_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -28,7 +27,6 @@
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
-#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
@@ -90,6 +88,8 @@ struct xe_oa_open_param {
struct drm_xe_sync __user *syncs_user;
int num_syncs;
struct xe_sync_entry *syncs;
+ size_t oa_buffer_size;
+ int wait_num_reports;
};
struct xe_oa_config_bo {
@@ -234,11 +234,9 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
+ u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
- u32 tail, hw_tail;
unsigned long flags;
- bool pollin;
- u32 partial_report_size;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -282,12 +280,12 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
- pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
- stream->oa_buffer.head) >= report_size;
+ available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
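
The hunk above changes the wakeup policy from "at least one report" to
"at least wait_num_reports reports". A sketch of the circular-distance
math, assuming the same wrap-around semantics as xe_oa_circ_diff
(hypothetical free functions, not the driver's):

    /* Distance from head to tail in a circular buffer of circ_size. */
    static unsigned int circ_diff(unsigned int circ_size,
                                  unsigned int tail, unsigned int head)
    {
            return tail >= head ? tail - head
                                : tail + circ_size - head;
    }

    /* Wake pollers only once enough complete reports accumulated. */
    static int should_wake(unsigned int circ_size, unsigned int tail,
                           unsigned int head, unsigned int report_size,
                           unsigned int wait_num_reports)
    {
            return circ_diff(circ_size, tail, head) >=
                   wait_num_reports * report_size;
    }
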
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -295,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -397,11 +393,19 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
- struct xe_mmio *mmio = &stream->gt->mmio;
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
+ int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
+ struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
+ /*
+ * If oa buffer size is more than 16MB (exponent greater than 24), the
+ * oa buffer size field is multiplied by 8 in xe_oa_enable_metric_set.
+ */
+ oa_buf |= REG_FIELD_PREP(OABUFFER_SIZE_MASK,
+ size_exponent > 24 ? size_exponent - 20 : size_exponent - 17);
+
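
Worked through for a power-of-two buffer of 2^n bytes: the field holds
n - 17 for 128K..16M buffers and n - 20 above 16M, where the OA_DEBUG
size-select bit multiplies the decoded size by 8, so the effective size
is 2^(field + 17) or 2^(field + 20) bytes. A small sketch of the math
(illustrative only, not authoritative register documentation):

    /* field for a buffer of 2**n bytes, per the comment above:
     *   n = 17 (128K) -> 0,  n = 24 (16M) -> 7,
     *   n = 25 (32M)  -> 5 with the x8 size-select bit set. */
    static unsigned int oa_buf_size_field(unsigned int n)
    {
            return n > 24 ? n - 20 : n - 17;
    }
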
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
@@ -445,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -465,14 +475,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
@@ -857,15 +867,12 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_file_put(stream->xef);
}
-static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
- BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);
-
bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
+ size, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1043,6 +1050,13 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
+static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
+{
+ return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
+ stream->oa_buffer.bo->size > SZ_16M ?
+ OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+}
+
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
@@ -1075,6 +1089,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
_MASKED_BIT_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
+ oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
@@ -1216,6 +1231,28 @@ static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
return 0;
}
+static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!is_power_of_2(value) || value < SZ_128K || value > SZ_128M) {
+ drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value);
+ return -EINVAL;
+ }
+ param->oa_buffer_size = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!value) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value);
+ return -EINVAL;
+ }
+ param->wait_num_reports = value;
+ return 0;
+}
+
static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
@@ -1236,6 +1273,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
};
static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
@@ -1250,6 +1289,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
};
static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
@@ -1509,7 +1550,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1595,7 +1636,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+ if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1633,81 +1674,6 @@ static const struct file_operations xe_oa_fops = {
.mmap = xe_oa_mmap,
};
-static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
-{
- return hwe->class == XE_ENGINE_CLASS_RENDER ||
- hwe->class == XE_ENGINE_CLASS_COMPUTE;
-}
-
-static bool xe_oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
-{
- u32 idx = *offset;
- u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
- bool found = false;
-
- idx++;
- for (; idx < len; idx += 2) {
- if (state[idx] == reg) {
- found = true;
- break;
- }
- }
-
- *offset = idx;
- return found;
-}
-
-#define IS_MI_LRI_CMD(x) (REG_FIELD_GET(MI_OPCODE, (x)) == \
- REG_FIELD_GET(MI_OPCODE, MI_LOAD_REGISTER_IMM))
-
-static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg)
-{
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) +
- lrc->ring.size) / sizeof(u32);
- u32 offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
- u32 *state = (u32 *)lrc->bo->vmap.vaddr;
-
- if (drm_WARN_ON(&stream->oa->xe->drm, !state))
- return U32_MAX;
-
- for (; offset < len; ) {
- if (IS_MI_LRI_CMD(state[offset])) {
- /*
- * We expect reg-value pairs in MI_LRI command, so
- * MI_LRI_LEN() should be even
- */
- drm_WARN_ON(&stream->oa->xe->drm,
- MI_LRI_LEN(state[offset]) & 0x1);
-
- if (xe_oa_find_reg_in_lri(state, reg, &offset, len))
- break;
- } else {
- offset++;
- }
- }
-
- return offset < len ? offset : U32_MAX;
-}
-
-static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream)
-{
- struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base);
- u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class];
-
- /* Do this only once. Failure is stored as offset of U32_MAX */
- if (offset)
- goto exit;
-
- offset = xe_oa_context_image_offset(stream, reg.addr);
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset;
-
- drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n",
- stream->hwe->name, offset);
-exit:
- return offset && offset != U32_MAX ? 0 : -ENODEV;
-}
-
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
@@ -1723,9 +1689,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
- stream->periodic = param->period_exponent > 0;
+ stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
+ stream->wait_num_reports = param->wait_num_reports;
stream->xef = xe_file_get(param->xef);
stream->num_syncs = param->num_syncs;
@@ -1739,20 +1706,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
- XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
+ param->oa_buffer_size -
+ param->oa_buffer_size % stream->oa_buffer.format->size;
else
- stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;
-
- if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
- /* If we don't find the context offset, just return error */
- ret = xe_oa_set_ctx_ctrl_offset(stream);
- if (ret) {
- drm_err(&stream->oa->xe->drm,
- "xe_oa_set_ctx_ctrl_offset failed for %s\n",
- stream->hwe->name);
- goto exit;
- }
- }
+ stream->oa_buffer.circ_size = param->oa_buffer_size;
stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set);
if (!stream->oa_config) {
@@ -1784,7 +1741,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
goto err_fw_put;
}
- ret = xe_oa_alloc_oa_buffer(stream);
+ ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size);
if (ret)
goto err_fw_put;
@@ -1809,8 +1766,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
WRITE_ONCE(u->exclusive_stream, stream);
- hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
@@ -2013,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}
param.xef = xef;
+ param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -2067,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- if (param.period_exponent > 0) {
+ if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
@@ -2081,6 +2039,17 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
}
+ if (!param.oa_buffer_size)
+ param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
+
+ if (!param.wait_num_reports)
+ param.wait_num_reports = 1;
+ if (param.wait_num_reports > param.oa_buffer_size / f->size) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports);
+ ret = -EINVAL;
+ goto err_exec_q;
+ }
+
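
The bound above rejects waits that can never complete: the buffer holds
at most oa_buffer_size / report-size reports at once, so e.g. a 16 MiB
buffer with 256-byte reports caps wait_num_reports at 65536. A
standalone sketch of the check (hypothetical helper):

    #include <errno.h>
    #include <stddef.h>

    int validate_wait_num_reports(size_t buf_size, size_t report_size,
                                  unsigned int wait_num_reports)
    {
            /* Zero makes no sense; more than the buffer capacity can
             * never be satisfied before the buffer wraps. */
            if (!wait_num_reports ||
                wait_num_reports > buf_size / report_size)
                    return -EINVAL;
            return 0;
    }
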
ret = xe_oa_parse_syncs(oa, &param);
if (ret)
goto err_exec_q;
@@ -2198,6 +2167,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
@@ -2568,6 +2538,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index fea9d981e414..52e33c37d5ee 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,7 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
-#define XE_OA_BUFFER_SIZE SZ_16M
+#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
HDR_32_BIT = 0,
@@ -138,9 +138,6 @@ struct xe_oa {
/** @metrics_idr: List of dynamic configurations (struct xe_oa_config) */
struct idr metrics_idr;
- /** @ctx_oactxctrl_offset: offset of OACTXCONTROL register in context image */
- u32 ctx_oactxctrl_offset[XE_ENGINE_CLASS_MAX];
-
/** @oa_formats: tracks all OA formats across platforms */
const struct xe_oa_format *oa_formats;
@@ -218,6 +215,9 @@ struct xe_oa_stream {
/** @pollin: Whether there is data available to read */
bool pollin;
+ /** @wait_num_reports: Number of reports to wait for before signalling pollin */
+ int wait_num_reports;
+
/** @periodic: Whether periodic sampling is currently enabled */
bool periodic;
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 8ec1b84cbb9e..57cf01efc07f 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -56,7 +56,7 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
}
-static struct ctl_table observation_ctl_table[] = {
+static const struct ctl_table observation_ctl_table[] = {
{
.procname = "observation_paranoid",
.data = &xe_observation_paranoid,
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 6b7f77425c7f..39be74848e44 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -490,7 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
* least basic xe_gt and xe_guc initialization.
*
* Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarly tweak the gt type.
+ * media GuC, temporarily tweak the gt type.
*/
xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -781,7 +781,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
* error injectable functions is proper handling of the error code by the
* caller for recovery, which is always the case here. The second
* requirement is that no state is changed before the first error return.
- * It is not strictly fullfilled for all initialization functions using the
+ * It is not strictly fulfilled for all initialization functions using the
* ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
* error cases at probe time, the error code is simply propagated up by the
* caller. Therefore there is no consequence on those specific callers when
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index d95d9835de42..9333ce776a6e 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -217,7 +217,7 @@ out:
*
* It returns 0 on success, and -ERROR number on failure, -EINVAL if max
* frequency is higher then the minimal, and other errors directly translated
- * from the PCODE Error returs:
+ * from the PCODE Error returns:
* - -ENXIO: "Illegal Command"
* - -ETIMEDOUT: "Timed out"
* - -EINVAL: "Illegal Data"
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 40f7c844ed44..89fd2c043136 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -7,6 +7,7 @@
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
@@ -266,6 +267,15 @@ int xe_pm_init_early(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+ /* FIXME: D3Cold temporarily disabled by default on BMG */
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return 0;
+
+ return DEFAULT_VRAM_THRESHOLD;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -276,6 +286,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
*/
int xe_pm_init(struct xe_device *xe)
{
+ u32 vram_threshold;
int err;
/* For now suspend/resume is only allowed with GuC */
@@ -289,7 +300,8 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
- err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ vram_threshold = vram_threshold_value(xe);
+ err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
return err;
}
@@ -390,7 +402,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
- * also checks and delets bo entry from user fault list.
+ * also checks and deletes bo entry from user fault list.
*/
mutex_lock(&xe->mem_access.vram_userfault.lock);
list_for_each_entry_safe(bo, on,
@@ -414,8 +426,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_irq_suspend(xe);
- if (xe->d3cold.allowed)
- xe_display_pm_suspend_late(xe);
+ xe_display_pm_runtime_suspend_late(xe);
+
out:
if (err)
xe_display_pm_runtime_resume(xe);
@@ -607,7 +619,8 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
struct device *dev = xe->drm.dev;
return dev->power.runtime_status == RPM_SUSPENDING ||
- dev->power.runtime_status == RPM_RESUMING;
+ dev->power.runtime_status == RPM_RESUMING ||
+ pm_suspend_target_state != PM_SUSPEND_ON;
#else
return false;
#endif
@@ -738,9 +751,6 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
xe->d3cold.allowed = false;
mutex_unlock(&xe->d3cold.lock);
-
- drm_dbg(&xe->drm,
- "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 797576690356..dc24baa84092 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -28,6 +28,8 @@ struct xe_pt_dir {
struct xe_pt pt;
/** @children: Array of page-table child nodes */
struct xe_ptw *children[XE_PDES];
+ /** @staging: Array of page-table staging nodes */
+ struct xe_ptw *staging[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
return container_of(pt, struct xe_pt_dir, pt);
}
-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+static struct xe_pt *
+xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->children[index], struct xe_pt, base);
+ return container_of(pt_dir->staging[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
}
pt->bo = bo;
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -136,6 +140,7 @@ err_kfree:
xe_pt_free(pt);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
/**
* xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
@@ -205,8 +210,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
for (i = 0; i < XE_PDES; i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ if (xe_pt_entry_staging(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
deferred);
}
}
@@ -275,7 +280,7 @@ struct xe_pt_stage_bind_walk {
/* Also input, but is updated during the walk*/
/** @curs: The DMA address cursor. */
struct xe_res_cursor *curs;
- /** @va_curs_start: The Virtual address coresponding to @curs->start */
+ /** @va_curs_start: The Virtual address corresponding to @curs->start */
u64 va_curs_start;
/* Output */
@@ -375,8 +380,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
/* Continue building a non-connected subtree. */
struct iosys_map *map = &parent->bo->vmap;
- if (unlikely(xe_child))
+ if (unlikely(xe_child)) {
parent->base.children[offset] = &xe_child->base;
+ parent->base.staging[offset] = &xe_child->base;
+ }
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -613,6 +620,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_bind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.vm = xe_vma_vm(vma),
.tile = tile,
@@ -872,7 +880,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
@@ -884,6 +892,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_vm_assert_held(vm);
}
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_pt_commit_prepare_locks_assert(vma);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+}
+
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
@@ -894,13 +912,17 @@ static void xe_pt_commit(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
if (!pt->level)
continue;
+ pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ int j_ = j + entries[i].ofs;
+ pt_dir->children[j_] = pt_dir->staging[j_];
xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
}
}
@@ -912,7 +934,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_pt *pt = entries[i].pt;
@@ -927,10 +949,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
}
@@ -942,7 +964,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
{
u32 i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
@@ -960,10 +982,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
struct xe_pt *oldpte = NULL;
- if (xe_pt_entry(pt_dir, j_))
- oldpte = xe_pt_entry(pt_dir, j_);
+ if (xe_pt_entry_staging(pt_dir, j_))
+ oldpte = xe_pt_entry_staging(pt_dir, j_);
- pt_dir->children[j_] = &newpte->base;
+ pt_dir->staging[j_] = &newpte->base;
entries[i].pt_entries[j].pt = oldpte;
}
}
@@ -1212,42 +1234,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
uvma = to_userptr_vma(vma);
- notifier_seq = uvma->userptr.notifier_seq;
+ if (xe_pt_userptr_inject_eagain(uvma))
+ xe_vma_userptr_force_invalidate(uvma);
- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
- return 0;
+ notifier_seq = uvma->userptr.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq) &&
- !xe_pt_userptr_inject_eagain(uvma))
+ notifier_seq))
return 0;
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm))
return -EAGAIN;
- } else {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
-
- if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
- }
- }
+ /*
+ * Just continue the operation since exec or rebind worker
+ * will take care of rebinding.
+ */
return 0;
}
@@ -1513,6 +1515,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_unbind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.tile = tile,
.modified_start = xe_vma_start(vma),
@@ -1554,7 +1557,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1567,7 +1570,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
continue;
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
- pt_dir->children[j] =
+ pt_dir->staging[j] =
entries[i].pt_entries[j - entry->ofs].pt ?
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
}
@@ -1580,7 +1583,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; ++i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1594,8 +1597,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
entry->pt_entries[j - entry->ofs].pt =
- xe_pt_entry(pt_dir, j);
- pt_dir->children[j] = NULL;
+ xe_pt_entry_staging(pt_dir, j);
+ pt_dir->staging[j] = NULL;
}
}
}
@@ -1850,6 +1853,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2130,6 +2134,7 @@ kill_vm_tile1:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
/**
* xe_pt_update_ops_fini() - Finish PT update operations
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index b8b3d2aea492..be602a763ff3 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->children ? parent->children : NULL;
+ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
+ (parent->children ?: NULL);
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index 5ecc4d2f0f65..5c02c244f7de 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -11,12 +11,14 @@
/**
* struct xe_ptw - base class for driver pagetable subclassing.
* @children: Pointer to an array of children if any.
+ * @staging: Pointer to an array of staging if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
* embed an array of xe_ptw pointers.
*/
struct xe_ptw {
struct xe_ptw **children;
+ struct xe_ptw **staging;
};
/**
@@ -41,6 +43,8 @@ struct xe_pt_walk {
* as shared pagetables.
*/
bool shared_pt_mode;
+ /** @staging: Walk staging PT structure */
+ bool staging;
};
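
The staging flag selects which pointer array a walk traverses. A
miniature sketch of the two-array scheme (hypothetical types, not the
driver's): updates are built in staging[], the live children[] changes
only at commit, so a walk of children[] never observes a half-built
subtree, and an abort only has to roll back staging[].

    #define NCHILD 8

    struct node {
            struct node *children[NCHILD]; /* what walkers/HW see */
            struct node *staging[NCHILD];  /* where updates are built */
    };

    static void prepare(struct node *dir, int i, struct node *child)
    {
            dir->staging[i] = child;             /* invisible to readers */
    }

    static void commit(struct node *dir, int i)
    {
            dir->children[i] = dir->staging[i];  /* publish the update */
    }

    static void abort_prepare(struct node *dir, int i, struct node *old)
    {
            dir->staging[i] = old;               /* roll back; free new */
    }
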
/**
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 170ae72d1a7b..c059639613f7 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -23,6 +23,7 @@
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
+#include "xe_oa.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
@@ -670,7 +671,9 @@ static int query_oa_units(struct xe_device *xe,
du->oa_unit_id = u->oa_unit_id;
du->oa_unit_type = u->type;
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
- du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS;
+ du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
+ DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index c13123008e90..9475e3f74958 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -24,7 +24,6 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"
static void reg_sr_fini(struct drm_device *drm, void *arg)
@@ -192,58 +191,6 @@ err_force_wake:
xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
}
-void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
-{
- struct xe_reg_sr *sr = &hwe->reg_whitelist;
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_reg_sr_entry *entry;
- struct drm_printer p;
- u32 mmio_base = hwe->mmio_base;
- unsigned long reg;
- unsigned int slot = 0;
- unsigned int fw_ref;
-
- if (xa_empty(&sr->xa))
- return;
-
- drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
- goto err_force_wake;
-
- p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
- xa_for_each(&sr->xa, reg, entry) {
- if (slot == RING_MAX_NONPRIV_SLOTS) {
- xe_gt_err(gt,
- "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
- hwe->name, RING_MAX_NONPRIV_SLOTS);
- break;
- }
-
- xe_reg_whitelist_print_entry(&p, 0, reg, entry);
- xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot),
- reg | entry->set_bits);
- slot++;
- }
-
- /* And clear the rest just in case of garbage */
- for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
- u32 addr = RING_NOPID(mmio_base).addr;
-
- xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
- }
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return;
-
-err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- drm_err(&xe->drm, "Failed to apply, err=-ETIMEDOUT\n");
-}
-
/**
* xe_reg_sr_dump - print all save/restore entries
* @sr: Save/restore entries
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 3996934974fa..edab5d4e3ba5 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -10,7 +10,9 @@
#include "regs/xe_oa_regs.h"
#include "regs/xe_regs.h"
#include "xe_gt_types.h"
+#include "xe_gt_printk.h"
#include "xe_platform_types.h"
+#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_step.h"
@@ -89,6 +91,40 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
{}
};
+static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
+{
+ struct xe_reg_sr *sr = &hwe->reg_whitelist;
+ struct xe_reg_sr_entry *entry;
+ struct drm_printer p;
+ unsigned long reg;
+ unsigned int slot;
+
+ xe_gt_dbg(hwe->gt, "Add %s whitelist to engine\n", sr->name);
+ p = xe_gt_dbg_printer(hwe->gt);
+
+ slot = 0;
+ xa_for_each(&sr->xa, reg, entry) {
+ struct xe_reg_sr_entry hwe_entry = {
+ .reg = RING_FORCE_TO_NONPRIV(hwe->mmio_base, slot),
+ .set_bits = entry->reg.addr | entry->set_bits,
+ .clr_bits = ~0u,
+ .read_mask = entry->read_mask,
+ };
+
+ if (slot == RING_MAX_NONPRIV_SLOTS) {
+ xe_gt_err(hwe->gt,
+ "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
+ hwe->name, RING_MAX_NONPRIV_SLOTS);
+ break;
+ }
+
+ xe_reg_whitelist_print_entry(&p, 0, reg, entry);
+ xe_reg_sr_add(&hwe->reg_sr, &hwe_entry, hwe->gt);
+
+ slot++;
+ }
+}
+
/**
* xe_reg_whitelist_process_engine - process table of registers to whitelist
* @hwe: engine instance to process whitelist for
@@ -102,6 +138,7 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ whitelist_apply_to_hwe(hwe);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index b13d4d62f0b1..7a1c78fdfc92 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -340,3 +340,8 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
return dss >= dss_per_gslice;
}
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return !IS_SRIOV_VF(gt_to_xe(gt));
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 827d932b6908..38b9f13bba5e 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -131,7 +131,7 @@ struct xe_reg_sr;
* @ver_end__: Last graphics IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -169,7 +169,7 @@ struct xe_reg_sr;
* @ver_end__: Last media IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -476,4 +476,15 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+/*
+ * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
+ *
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if device is not VF, false otherwise.
+ */
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index ef10782af656..04e2f539ccd9 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -14,6 +14,7 @@
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -114,6 +115,9 @@ int xe_sriov_init(struct xe_device *xe)
return err;
}
+ if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_init_early(xe);
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index 7d156ba82479..dd1df950b021 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -20,7 +20,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_sriov_pf_assert_vfid(xe, vfid) \
xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index c7b7ad4af5c8..ca94382a721e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,7 @@
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -56,4 +57,20 @@ struct xe_device_pf {
struct mutex master_lock;
};
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
new file mode 100644
index 000000000000..c1275e64aa9c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+
+/**
+ * DOC: VF restore procedure in PF KMD and VF KMD
+ *
+ * Restoring previously saved state of a VF is one of core features of
+ * SR-IOV. All major VM Management applications allow saving and restoring
+ * the VM state, and doing that to a VM which uses SRIOV VF as one of
+ * the accessible devices requires support from KMD on both PF and VF side.
+ * VMM initiates all required operations through VFIO module, which then
+ * translates them into PF KMD calls. This description will focus on these
+ * calls, leaving out the module which initiates these steps (VFIO).
+ *
+ * In order to start the restore procedure, GuC needs to keep the VF in
+ * a proper state. The PF driver can ensure GuC sets it to the VF_READY state
+ * by provisioning the VF, which in turn can be done after Function Level
+ * Reset of said VF (or after it was freshly created - in that case FLR
+ * is not needed). The FLR procedure ends with GuC sending message
+ * `GUC_PF_NOTIFY_VF_FLR_DONE`, and then provisioning data is sent to GuC.
+ * After the provisioning is completed, the VF needs to be paused, and
+ * at that point the actual restore can begin.
+ *
+ * During VF Restore, state of several resources is restored. These may
+ * include local memory content (system memory is restored by VMM itself),
+ * values of MMIO registers, stateless compression metadata and others.
+ * The final resource which also needs restoring is state of the VF
+ * submission maintained within GuC. For that, `GUC_PF_OPCODE_VF_RESTORE`
+ * message is used, with reference to the state blob to be consumed by
+ * GuC.
+ *
+ * Next, when VFIO is asked to set the VM into running state, the PF driver
+ * sends `GUC_PF_TRIGGER_VF_RESUME` to GuC. When sent after restore, this
+ * changes VF state within GuC to `VF_RESFIX_BLOCKED` rather than the
+ * usual `VF_RUNNING`. At this point GuC triggers an interrupt to inform
+ * the VF KMD within the VM that it was migrated.
+ *
+ * As soon as the Virtual GPU of the VM starts, the VF driver within it
+ * receives the MIGRATED interrupt and schedules the post-migration
+ * recovery worker.
+ * That worker queries GuC for new provisioning (using MMIO communication),
+ * and applies fixups to any non-virtualized resources used by the VF.
+ *
+ * When the VF driver is ready to continue operation on the newly connected
+ * hardware, it sends `VF2GUC_NOTIFY_RESFIX_DONE` which causes it to
+ * enter the long awaited `VF_RUNNING` state, and therefore start handling
+ * CTB messages and scheduling workloads from the VF::
+ *
+ * PF GuC VF
+ * [ ] | |
+ * [ ] PF2GUC_VF_CONTROL(pause) | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_READY_PAUSED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] success [ ] |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] PF loads resources from the | |
+ * [ ]------- saved image supplied | |
+ * [ ] | | |
+ * [ ] <----- | |
+ * [ ] | |
+ * [ ] GUC_PF_OPCODE_VF_RESTORE | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC loads contexts and CTB |
+ * [ ] [ ]------- state from image |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_PAUSED |
+ * [ ] [ ] | |
+ * [ ] success [ ] <----- |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] GUC_PF_TRIGGER_VF_RESUME | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_BLOCKED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GUC_INTR_SW_INT_0 |
+ * [ ] success [ ]---------------------------> [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * | | VF2GUC_QUERY_SINGLE_KLV [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] new VF provisioning [ ]
+ * | [ ]---------------------------> [ ]
+ * | | [ ]
+ * | | VF driver applies post [ ]
+ * | | migration fixups -------[ ]
+ * | | | [ ]
+ * | | -----> [ ]
+ * | | [ ]
+ * | | VF2GUC_NOTIFY_RESFIX_DONE [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] GuC sets new VF state to [ ]
+ * | [ ]------- VF_RUNNING [ ]
+ * | [ ] | [ ]
+ * | [ ] <----- [ ]
+ * | [ ] success [ ]
+ * | [ ]---------------------------> [ ]
+ * | | |
+ * | | |
+ */
+
+static void migration_worker_func(struct work_struct *w);
+
+/**
+ * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
+ * @xe: the &xe_device to initialize
+ */
+void xe_sriov_vf_init_early(struct xe_device *xe)
+{
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+}
+
+/**
+ * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+ * @xe: the &xe_device struct instance
+ *
+ * After migration, we need to re-query all VF configuration to make sure
+ * it matches the previous provisioning. Note that most of the VF
+ * provisioning is expected to stay the same, except the GGTT range, since
+ * GGTT is not virtualized per-VF.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int vf_post_migration_requery_guc(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err, ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_vf_query_config(gt);
+ ret = ret ?: err;
+ }
+
+ return ret;
+}
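
The loop intentionally visits every GT even after a failure, latching
only the first error via the GNU `?:` operator. A compilable
illustration of that accumulation pattern (values are made up):

    #include <stdio.h>

    int main(void)
    {
            int errs[] = { 0, -5, 0, -22 }; /* illustrative GT results */
            int ret = 0;

            for (unsigned int i = 0; i < 4; i++) {
                    int err = errs[i];

                    ret = ret ?: err;       /* keep the first failure */
            }
            printf("%d\n", ret);            /* prints -5 */
            return 0;
    }
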
+
+/*
+ * vf_post_migration_imminent - Check if post-restore recovery is coming.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if migration recovery worker will soon be running. Any worker currently
+ * executing does not affect the result.
+ */
+static bool vf_post_migration_imminent(struct xe_device *xe)
+{
+ return xe->sriov.vf.migration.gt_flags != 0 ||
+ work_pending(&xe->sriov.vf.migration.worker);
+}
+
+/*
+ * Notify all GuCs about resource fixups apply finished.
+ */
+static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (vf_post_migration_imminent(xe))
+ goto skip;
+ xe_gt_sriov_vf_notify_resfix_done(gt);
+ }
+ return;
+
+skip:
+ drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+}
+
+static void vf_post_migration_recovery(struct xe_device *xe)
+{
+ int err;
+
+ drm_dbg(&xe->drm, "migration recovery in progress\n");
+ xe_pm_runtime_get(xe);
+ err = vf_post_migration_requery_guc(xe);
+ if (vf_post_migration_imminent(xe))
+ goto defer;
+ if (unlikely(err))
+ goto fail;
+
+ /* FIXME: add the recovery steps */
+ vf_post_migration_notify_resfix_done(xe);
+ xe_pm_runtime_put(xe);
+ drm_notice(&xe->drm, "migration recovery ended\n");
+ return;
+defer:
+ xe_pm_runtime_put(xe);
+ drm_dbg(&xe->drm, "migration recovery deferred\n");
+ return;
+fail:
+ xe_pm_runtime_put(xe);
+ drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_device *xe = container_of(w, struct xe_device,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(xe);
+}
+
+static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
+ xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
+ * @xe: the &xe_device to start recovery on
+ *
+ * This function shall be called only by VF.
+ */
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
+{
+ bool started;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (!vf_ready_to_recovery_on_all_gts(xe))
+ return;
+
+ WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
+ /* Ensure other threads see that no flags are set now. */
+ smp_mb();
+
+ started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
+ drm_info(&xe->drm, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+}
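
Recovery is queued only after every GT has flagged readiness, and the
flags are cleared before queuing so a later MIGRATED interrupt can
re-arm the sequence. A userspace sketch of that gating, with C11
atomics standing in for the driver's test_bit/WRITE_ONCE/smp_mb
combination (names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_ulong gt_flags;  /* one readiness bit per GT */

    static bool all_ready(unsigned int num_gt)
    {
            unsigned long want = (1UL << num_gt) - 1;

            return (atomic_load(&gt_flags) & want) == want;
    }

    static void on_gt_migrated(unsigned int gt_id, unsigned int num_gt)
    {
            atomic_fetch_or(&gt_flags, 1UL << gt_id);
            if (!all_ready(num_gt))
                    return;              /* wait for remaining GTs */
            atomic_store(&gt_flags, 0);  /* re-arm before queuing */
            /* a queue_work() equivalent would run the recovery here */
    }
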
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
new file mode 100644
index 000000000000..7b8622cff2b7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_H_
+#define _XE_SRIOV_VF_H_
+
+struct xe_device;
+
+void xe_sriov_vf_init_early(struct xe_device *xe);
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 91130ad8999c..d5281de04d54 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -211,6 +211,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__string(dev, __dev_name_eq(job->q))
__field(u32, seqno)
__field(u32, lrc_seqno)
+ __field(u8, gt_id)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
@@ -223,6 +224,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__assign_str(dev);
__entry->seqno = xe_sched_job_seqno(job);
__entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
+ __entry->gt_id = job->q->gt->info.id;
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
@@ -232,9 +234,9 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
),
- TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__get_str(dev), __entry->fence, __entry->seqno,
- __entry->lrc_seqno, __entry->guc_id,
+ __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);
@@ -282,6 +284,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
__field(u32, opcode)
__field(u16, guc_id)
+ __field(u8, gt_id)
),
TP_fast_assign(
@@ -289,9 +292,11 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__entry->opcode = msg->opcode;
__entry->guc_id =
((struct xe_exec_queue *)msg->private_data)->guc->id;
+ __entry->gt_id =
+ ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
),
- TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
+ TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
__entry->opcode)
);
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 30a3cfbaaa09..ea50fee50c7d 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -48,6 +48,11 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
+DEFINE_EVENT(xe_bo, xe_bo_validate,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
@@ -55,8 +60,8 @@ TRACE_EVENT(xe_bo_move,
TP_STRUCT__entry(
__field(struct xe_bo *, bo)
__field(size_t, size)
- __field(u32, new_placement)
- __field(u32, old_placement)
+ __string(new_placement_name, xe_mem_type_to_name[new_placement])
+ __string(old_placement_name, xe_mem_type_to_name[old_placement])
__string(device_id, __dev_name_bo(bo))
__field(bool, move_lacks_source)
),
@@ -64,15 +69,15 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
__entry->size = bo->size;
- __entry->new_placement = new_placement;
- __entry->old_placement = old_placement;
+ __assign_str(new_placement_name);
+ __assign_str(old_placement_name);
__assign_str(device_id);
__entry->move_lacks_source = move_lacks_source;
),
TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
__entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
- xe_mem_type_to_name[__entry->old_placement],
- xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
+ __get_str(old_placement_name),
+ __get_str(new_placement_name), __get_str(device_id))
);
DECLARE_EVENT_CLASS(xe_vma,
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.c b/drivers/gpu/drm/xe/xe_trace_lrc.c
new file mode 100644
index 000000000000..ab9b7e2970bc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "xe_trace_lrc.h"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
new file mode 100644
index 000000000000..5c669a0b2180
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xe
+
+#if !defined(_XE_TRACE_LRC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _XE_TRACE_LRC_H_
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+#include "xe_gt_types.h"
+#include "xe_lrc.h"
+#include "xe_lrc_types.h"
+
+#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
+
+TRACE_EVENT(xe_lrc_update_timestamp,
+ TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_ARGS(lrc, old),
+ TP_STRUCT__entry(
+ __field(struct xe_lrc *, lrc)
+ __field(u32, old)
+ __field(u32, new)
+ __string(name, lrc->fence_ctx.name)
+ __string(device_id, __dev_name_lrc(lrc))
+ ),
+
+ TP_fast_assign(
+ __entry->lrc = lrc;
+ __entry->old = old;
+ __entry->new = lrc->ctx_timestamp;
+ __assign_str(name);
+ __assign_str(device_id);
+ ),
+ TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ __entry->lrc, __get_str(name),
+ __entry->old, __entry->new,
+ __get_str(device_id))
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
+#define TRACE_INCLUDE_FILE xe_trace_lrc
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index 423856cc18d4..d414421f8c13 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
+static u32 get_wopcm_size(struct xe_device *xe)
+{
+ u32 wopcm_size;
+ u64 val;
+
+ val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
+ val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+
+ switch (val) {
+ case 0x5 ... 0x6:
+ val--;
+ fallthrough;
+ case 0x0 ... 0x3:
+ wopcm_size = (1U << val) * SZ_1M;
+ break;
+ default:
+ WARN(1, "Missing case wopcm_size=%llx\n", val);
+ wopcm_size = 0;
+ }
+
+ return wopcm_size;
+}
+
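
The switch decodes the WOPCM_SIZE field into one of six sizes; a
compact restatement of the same mapping (values taken from the switch
above, everything else treated as unexpected):

    /* 0 -> 1M, 1 -> 2M, 2 -> 4M, 3 -> 8M, 5 -> 16M, 6 -> 32M;
     * 4 and anything above 6 are unknown encodings. */
    static unsigned int wopcm_bytes(unsigned long long field)
    {
            if (field >= 0x5 && field <= 0x6)
                    return (1U << (field - 1)) * (1024U * 1024U);
            if (field <= 0x3)
                    return (1U << field) * (1024U * 1024U);
            return 0;                      /* unexpected encoding */
    }
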
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size;
+ u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
+ /* Carve out the top of DSM as it contains the reserved WOPCM region */
+ wopcm_size = get_wopcm_size(xe);
+ if (drm_WARN_ON(&xe->drm, !wopcm_size))
+ return 0;
+
stolen_size = tile_size - mgr->stolen_base;
+ stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return ALIGN_DOWN(stolen_size, SZ_1M);
}
-static u32 get_wopcm_size(struct xe_device *xe)
-{
- u32 wopcm_size;
- u64 val;
-
- val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
- val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
-
- switch (val) {
- case 0x5 ... 0x6:
- val--;
- fallthrough;
- case 0x0 ... 0x3:
- wopcm_size = (1U << val) * SZ_1M;
- break;
- default:
- WARN(1, "Missing case wopcm_size=%llx\n", val);
- wopcm_size = 0;
- }
-
- return wopcm_size;
-}
-
static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
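
The decode in get_wopcm_size() (moved above its caller by this hunk, body unchanged) maps field values 0x0-0x3 directly to 1/2/4/8 MiB, while 0x5 and 0x6 are first decremented, yielding 16 and 32 MiB. A standalone mirror of the switch, illustrative only (case ranges are the same GCC/Clang extension the kernel code uses):

	#include <assert.h>

	static unsigned long long wopcm_bytes(unsigned long long field)
	{
		switch (field) {
		case 0x5 ... 0x6:
			field--;
			/* fall through to the power-of-two cases */
		case 0x0 ... 0x3:
			return (1ULL << field) << 20;	/* 2^field MiB */
		default:
			return 0;	/* unknown encoding, caller WARNs */
		}
	}

	int main(void)
	{
		assert(wopcm_bytes(0x0) == 1ULL << 20);		/* 1 MiB */
		assert(wopcm_bytes(0x3) == 8ULL << 20);		/* 8 MiB */
		assert(wopcm_bytes(0x5) == 16ULL << 20);	/* 16 MiB */
		assert(wopcm_bytes(0x6) == 32ULL << 20);	/* 32 MiB */
		return 0;
	}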
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 423b261ea743..f4a16e5fa770 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -52,7 +53,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
struct xe_ttm_vram_mgr_resource *vres;
struct drm_buddy *mm = &mgr->mm;
- u64 size, remaining_size, min_page_size;
+ u64 size, min_page_size;
unsigned long lpfn;
int err;
@@ -98,17 +99,6 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
- err = -EINVAL;
- goto error_fini;
- }
-
- if (WARN_ON((size > SZ_2G &&
- (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
- err = -EINVAL;
- goto error_fini;
- }
-
if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
err = -EINVAL;
goto error_fini;
@@ -116,12 +106,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
mutex_lock(&mgr->lock);
if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
- mutex_unlock(&mgr->lock);
err = -ENOSPC;
- goto error_fini;
+ goto error_unlock;
}
- if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
+ if (place->fpfn + (size >> PAGE_SHIFT) != lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_page_size = size;
@@ -129,25 +118,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
}
- remaining_size = size;
- do {
- /*
- * Limit maximum size to 2GiB due to SG table limitations.
- * FIXME: Should maybe be handled as part of sg construction.
- */
- u64 alloc_size = min_t(u64, remaining_size, SZ_2G);
-
- err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
- (u64)lpfn << PAGE_SHIFT,
- alloc_size,
- min_page_size,
- &vres->blocks,
- vres->flags);
- if (err)
- goto error_free_blocks;
-
- remaining_size -= alloc_size;
- } while (remaining_size);
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT, size,
+ min_page_size, &vres->blocks, vres->flags);
+ if (err)
+ goto error_unlock;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
@@ -194,9 +169,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
*res = &vres->base;
return 0;
-
-error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks, 0);
+error_unlock:
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -339,6 +312,13 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ if (mem_type != XE_PL_STOLEN) {
+ const char *name = mem_type == XE_PL_VRAM0 ? "vram0" : "vram1";
+ man->cg = drmm_cgroup_register_region(&xe->drm, name, size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
+ }
+
man->func = &xe_ttm_vram_mgr_func;
mgr->mem_type = mem_type;
mutex_init(&mgr->lock);
@@ -393,7 +373,8 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
- xe_res_next(&cursor, cursor.size);
+ /* Limit maximum size to 2GiB due to SG table limitations. */
+ xe_res_next(&cursor, min_t(u64, cursor.size, SZ_2G));
}
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
@@ -413,7 +394,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
- size_t size = cursor.size;
+ size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
addr = dma_map_resource(dev, phys, size, dir,
@@ -426,7 +407,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- xe_res_next(&cursor, cursor.size);
+ xe_res_next(&cursor, size);
}
return 0;
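
The hunks above drop the 2 GiB splitting from the buddy allocation and instead cap each scatterlist entry at SZ_2G while the sg table is built. The cap matters because an sg segment length is a 32-bit quantity, and it must be applied identically in the counting pass and the filling pass or the two walks disagree on the entry count. A standalone illustration of that invariant (not driver code):

	#include <stdio.h>

	#define SZ_2G (2ULL << 30)

	/* Count sg entries for a contiguous span when each entry is
	 * capped at 2 GiB -- the same stepping the fill loop must use. */
	static unsigned int count_entries(unsigned long long len)
	{
		unsigned int n = 0;

		while (len) {
			unsigned long long step = len < SZ_2G ? len : SZ_2G;

			n++;
			len -= step;
		}
		return n;
	}

	int main(void)
	{
		/* a 5 GiB span needs three entries: 2G + 2G + 1G */
		printf("%u\n", count_entries(5ULL << 30));	/* prints 3 */
		return 0;
	}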
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 0d8caa0e7354..ad3b35a0e6eb 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -92,7 +92,7 @@ struct xe_uc_fw {
const enum xe_uc_fw_status status;
/**
* @__status: private firmware load status - only to be used
- * by firmware laoding code
+ * by firmware loading code
*/
enum xe_uc_fw_status __status;
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c99380271de6..5956631c0d40 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
@@ -580,51 +579,26 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- /* No need to stop gpu access if the userptr is not yet bound. */
- if (!userptr->initial_bind) {
- up_write(&vm->userptr.notifier_lock);
- return true;
- }
-
/*
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
- up_write(&vm->userptr.notifier_lock);
-
/*
* Preempt fences turn into schedule disables, pipeline these.
* Note that even in fault mode, we need to wait for binds and
@@ -642,11 +616,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
err = xe_vm_invalidate_vma(vma);
XE_WARN_ON(err);
}
+ xe_hmm_userptr_unmap(uvma);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->userptr.notifier_lock);
trace_xe_vma_userptr_invalidate_complete(vma);
return true;
@@ -656,6 +656,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
.invalidate = vma_userptr_invalidate,
};
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq))
+ uvma->userptr.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
@@ -667,20 +695,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
- list_move_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
- /* Pin and move to temporary list */
+ /* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once, but then had
+ * to retry before the re-bind happened due to some other
+ * condition in the caller. In the meantime the userptr got
+ * dinged by the notifier such that we need to revalidate
+ * here, but this time we hit the EFAULT. In such a case
+ * make sure we remove ourselves from the rebind list to
+ * avoid going down in flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -691,10 +732,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
- return err;
+ break;
} else {
- if (err < 0)
- return err;
+ if (err)
+ break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -702,7 +743,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
- return 0;
+ if (err) {
+ down_write(&vm->userptr.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->userptr.notifier_lock);
+ }
+ return err;
}
/**
@@ -733,13 +786,14 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
vops->pt_update_ops[i].ops =
kmalloc_array(vops->pt_update_ops[i].num_ops,
sizeof(*vops->pt_update_ops[i].ops),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!vops->pt_update_ops[i].ops)
return array_of_binds ? -ENOBUFS : -ENOMEM;
}
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
@@ -987,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
INIT_LIST_HEAD(&userptr->invalidate_link);
INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
+ mutex_init(&userptr->unmap_mutex);
err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
@@ -1024,10 +1079,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
/*
* Since userptr pages are not pinned, we can't remove
- * the notifer until we're sure the GPU is not accessing
+ * the notifier until we're sure the GPU is not accessing
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
+ mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma)) {
xe_vm_put(vm);
@@ -1066,6 +1122,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
@@ -1352,6 +1409,7 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
static void xe_vm_free_scratch(struct xe_vm *vm)
{
@@ -1751,9 +1809,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions))
- return -EINVAL;
-
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
@@ -1978,6 +2033,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
return ops;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
u16 pat_index, unsigned int flags)
@@ -2105,7 +2161,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
}
}
- /* Adjust for partial unbind after removin VMA from VM */
+ /* Adjust for partial unbind after removing VMA from VM */
if (!err) {
op->base.remap.unmap->va->va.addr = op->remap.start;
op->base.remap.unmap->va->va.range = op->remap.range;
@@ -2258,8 +2314,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
break;
}
case DRM_GPUVA_OP_UNMAP:
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
case DRM_GPUVA_OP_PREFETCH:
- /* FIXME: Need to skip some prefetch ops */
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err)
+ return err;
+ }
+
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
@@ -2357,13 +2422,15 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool validate)
{
struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
int err = 0;
if (bo) {
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
- err = xe_bo_validate(bo, xe_vma_vm(vma), true);
+ err = xe_bo_validate(bo, vm,
+ !xe_vm_in_preempt_fence_mode(vm));
}
return err;
@@ -2697,6 +2764,7 @@ unlock:
drm_exec_fini(&exec);
return err;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
@@ -2733,7 +2801,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*bind_ops = kvmalloc_array(args->num_binds,
sizeof(struct drm_xe_vm_bind_op),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
@@ -2973,14 +3042,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (args->num_binds) {
bos = kvcalloc(args->num_binds, sizeof(*bos),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
ops = kvcalloc(args->num_binds, sizeof(*ops),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3303,7 +3374,6 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
- struct iosys_map src;
int err;
if (IS_ERR(snap->snap[i].data))
@@ -3316,16 +3386,8 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
}
if (bo) {
- xe_bo_lock(bo, false);
- err = ttm_bo_vmap(&bo->ttm, &src);
- if (!err) {
- xe_map_memcpy_from(xe_bo_device(bo),
- snap->snap[i].data,
- &src, snap->snap[i].bo_ofs,
- snap->snap[i].len);
- ttm_bo_vunmap(&bo->ttm, &src);
- }
- xe_bo_unlock(bo);
+ err = xe_bo_read(bo, snap->snap[i].bo_ofs,
+ snap->snap[i].data, snap->snap[i].len);
} else {
void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
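
One subtlety in the new xe_vma_userptr_force_invalidate() above: mmu_interval_read_retry() reports staleness by comparing the stored sequence number with the notifier's current one, and odd values are reserved by the mmu_interval_notifier machinery to mean an invalidation is in flight. Decrementing by 2 therefore keeps the stored copy even while still guaranteeing a mismatch on the next check. This reading is inferred from the notifier contract rather than stated in the patch:

	/* annotated echo of the hunk, illustration only */
	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.notifier_seq))
		uvma->userptr.notifier_seq -= 2;	/* stays even, now stale */
	__vma_userptr_invalidate(vm, uvma);		/* then invalidate for real */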
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index c864dba35e1d..b882bfb31bd0 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -17,7 +17,6 @@ struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
-struct ttm_validate_buffer;
struct xe_exec_queue;
struct xe_file;
@@ -275,9 +274,17 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 4d33f310b653..078786958403 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -64,8 +64,8 @@
* update page level 2 PDE[1] to page level 3b phys address (GPU)
*
* bind BO2 0x1ff000-0x201000
- * update page level 3a PTE[511] to BO2 phys addres (GPU)
- * update page level 3b PTE[0] to BO2 phys addres + 0x1000 (GPU)
+ * update page level 3a PTE[511] to BO2 phys address (GPU)
+ * update page level 3b PTE[0] to BO2 phys address + 0x1000 (GPU)
*
* GPU bypass
* ~~~~~~~~~~
@@ -192,7 +192,7 @@
*
* If a VM is in fault mode (TODO: link to fault mode), new bind operations that
* create mappings are by default deferred to the page fault handler (first
- * use). This behavior can be overriden by setting the flag
+ * use). This behavior can be overridden by setting the flag
 * DRM_XE_VM_BIND_FLAG_IMMEDIATE which indicates to create the mapping
* immediately.
*
@@ -209,7 +209,7 @@
*
 * Since this is core kernel managed memory the kernel can move this memory
* whenever it wants. We register an invalidation MMU notifier to alert XE when
- * a user poiter is about to move. The invalidation notifier needs to block
+ * a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
 * idle to ensure no faults. This is done by waiting on all of VM's dma-resv slots.
*
@@ -252,7 +252,7 @@
* Rebind worker
* -------------
*
- * The rebind worker is very similar to an exec. It is resposible for rebinding
+ * The rebind worker is very similar to an exec. It is responsible for rebinding
* evicted BOs or userptrs, waiting on those operations, installing new preempt
* fences, and finally resuming executing of engines in the VM.
*
@@ -317,11 +317,11 @@
* are not allowed, only long running workloads and ULLS are enabled on a faulting
* VM.
*
- * Defered VM binds
+ * Deferred VM binds
* ----------------
*
* By default, on a faulting VM binds just allocate the VMA and the actual
- * updating of the page tables is defered to the page fault handler. This
+ * updating of the page tables is deferred to the page fault handler. This
* behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
* the VM bind which will then do the bind immediately.
*
@@ -500,18 +500,18 @@
* Slot waiting
* ------------
*
- * 1. The exection of all jobs from kernel ops shall wait on all slots
+ * 1. The execution of all jobs from kernel ops shall wait on all slots
* (DMA_RESV_USAGE_PREEMPT_FENCE) of either an external BO or VM (depends on if
* kernel op is operating on external or private BO)
*
- * 2. In non-compute mode, the exection of all jobs from rebinds in execs shall
+ * 2. In non-compute mode, the execution of all jobs from rebinds in execs shall
* wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO or VM
 * (depends on if the rebind is operating on an external or private BO)
*
- * 3. In non-compute mode, the exection of all jobs from execs shall wait on the
+ * 3. In non-compute mode, the execution of all jobs from execs shall wait on the
* last rebind job
*
- * 4. In compute mode, the exection of all jobs from rebinds in the rebind
+ * 4. In compute mode, the execution of all jobs from rebinds in the rebind
* worker shall wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO
* or VM (depends on if rebind is operating on external or private BO)
*
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7f9a303e51d8..a4b4091cfd0d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -59,12 +59,16 @@ struct xe_userptr {
struct sg_table *sg;
/** @notifier_seq: notifier sequence number */
unsigned long notifier_seq;
+ /** @unmap_mutex: Mutex protecting dma-unmapping */
+ struct mutex unmap_mutex;
/**
* @initial_bind: user pointer has been bound at least once.
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
*/
bool initial_bind;
+ /** @mapped: Whether the @sg sg-table is dma-mapped. Protected by @unmap_mutex. */
+ bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
u32 divisor;
#endif
@@ -227,8 +231,8 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires the @userptr.notifer_lock in
- * write mode.
+ * items to the list requires either the @userptr.notifier_lock in
+ * write mode, OR @lock in write mode.
*/
struct list_head invalidated;
} userptr;
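
A hedged sketch of how the two new fields pair up on the unmap side; the real helper is xe_hmm_userptr_unmap(), called from the invalidation path earlier in this series, and the body below illustrates the intended locking pattern rather than the patch's code:

	#include <linux/dma-mapping.h>

	static void userptr_unmap_sketch(struct device *dev,
					 struct xe_userptr *userptr)
	{
		mutex_lock(&userptr->unmap_mutex);
		if (userptr->mapped)
			dma_unmap_sgtable(dev, userptr->sg,
					  DMA_BIDIRECTIONAL, 0);
		userptr->mapped = false;
		mutex_unlock(&userptr->unmap_mutex);
	}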
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
new file mode 100644
index 000000000000..b378848d3b7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_pm.h"
+#include "xe_vsec.h"
+
+#include "regs/xe_pmt.h"
+
+/* PMT GUID value for BMG devices. NOTE: this is NOT a PCI id */
+#define BMG_DEVICE_ID 0xE2F8
+
+static struct intel_vsec_header bmg_telemetry = {
+ .length = 0x10,
+ .id = VSEC_ID_TELEMETRY,
+ .num_entries = 2,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET,
+};
+
+static struct intel_vsec_header bmg_punit_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x60,
+};
+
+static struct intel_vsec_header bmg_oobmsm_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x78,
+};
+
+static struct intel_vsec_header *bmg_capabilities[] = {
+ &bmg_telemetry,
+ &bmg_punit_crashlog,
+ &bmg_oobmsm_crashlog,
+ NULL
+};
+
+enum xe_vsec {
+ XE_VSEC_UNKNOWN = 0,
+ XE_VSEC_BMG,
+};
+
+static struct intel_vsec_platform_info xe_vsec_info[] = {
+ [XE_VSEC_BMG] = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
+ .headers = bmg_capabilities,
+ },
+ { }
+};
+
+/*
+ * The GUID will have the following bits to decode:
+ * [0:3] - {Telemetry space iteration number (0,1,..)}
+ * [4:7] - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
+ * [8:11] - SOC_SKU
+ * [12:27] – Device ID – changes for each down bin SKU’s
+ * [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
+ * [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
+ */
+#define GUID_TELEM_ITERATION GENMASK(3, 0)
+#define GUID_SEGMENT GENMASK(7, 4)
+#define GUID_SOC_SKU GENMASK(11, 8)
+#define GUID_DEVICE_ID GENMASK(27, 12)
+#define GUID_CAP_TYPE GENMASK(29, 28)
+#define GUID_RECORD_ID GENMASK(31, 30)
+
+#define PUNIT_TELEMETRY_OFFSET 0x0200
+#define PUNIT_WATCHER_OFFSET 0x14A0
+#define OOBMSM_0_WATCHER_OFFSET 0x18D8
+#define OOBMSM_1_TELEMETRY_OFFSET 0x1000
+
+enum record_id {
+ PUNIT,
+ OOBMSM_0,
+ OOBMSM_1,
+};
+
+enum capability {
+ CRASHLOG,
+ TELEMETRY,
+ WATCHER,
+};
+
+static int xe_guid_decode(u32 guid, int *index, u32 *offset)
+{
+ u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
+ u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);
+ u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
+
+ if (device_id != BMG_DEVICE_ID)
+ return -ENODEV;
+
+ if (cap_type > WATCHER)
+ return -EINVAL;
+
+ *offset = 0;
+
+ if (cap_type == CRASHLOG) {
+ *index = record_id == PUNIT ? 2 : 4;
+ return 0;
+ }
+
+ switch (record_id) {
+ case PUNIT:
+ *index = 0;
+ if (cap_type == TELEMETRY)
+ *offset = PUNIT_TELEMETRY_OFFSET;
+ else
+ *offset = PUNIT_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_0:
+ *index = 1;
+ if (cap_type == WATCHER)
+ *offset = OOBMSM_0_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_1:
+ *index = 1;
+ if (cap_type == TELEMETRY)
+ *offset = OOBMSM_1_TELEMETRY_OFFSET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
+ u32 mem_region;
+ u32 offset;
+ int ret;
+
+ ret = xe_guid_decode(guid, &mem_region, &offset);
+ if (ret)
+ return ret;
+
+ telem_addr += offset + user_offset;
+
+ guard(mutex)(&xe->pmt.lock);
+
+ /* bail out if the device is not at an appropriate power level */
+ if (!xe_pm_runtime_get_if_active(xe))
+ return -ENODATA;
+
+ /* set SoC re-mapper index register based on GUID memory region */
+ xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
+ REG_FIELD_PREP(SG_REMAP_BITS, mem_region));
+
+ memcpy_fromio(data, telem_addr, count);
+ xe_pm_runtime_put(xe);
+
+ return count;
+}
+
+static struct pmt_callbacks xe_pmt_cb = {
+ .read_telem = xe_pmt_telem_read,
+};
+
+static const int vsec_platforms[] = {
+ [XE_BATTLEMAGE] = XE_VSEC_BMG,
+};
+
+static enum xe_vsec get_platform_info(struct xe_device *xe)
+{
+ if (xe->info.platform > XE_BATTLEMAGE)
+ return XE_VSEC_UNKNOWN;
+
+ return vsec_platforms[xe->info.platform];
+}
+
+/**
+ * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
+ * interface
+ * @xe: valid xe instance
+ */
+void xe_vsec_init(struct xe_device *xe)
+{
+ struct intel_vsec_platform_info *info;
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum xe_vsec platform;
+
+ platform = get_platform_info(xe);
+ if (platform == XE_VSEC_UNKNOWN)
+ return;
+
+ info = &xe_vsec_info[platform];
+ if (!info->headers)
+ return;
+
+ switch (platform) {
+ case XE_VSEC_BMG:
+ info->priv_data = &xe_pmt_cb;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Register a VSEC. Cleanup is handled using device managed
+ * resources.
+ */
+ intel_vsec_register(pdev, info);
+}
+MODULE_IMPORT_NS("INTEL_VSEC");
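
A worked decode using the masks and enums defined above (FIELD_PREP() is the <linux/bitfield.h> counterpart of FIELD_GET(); the snippet assumes it runs inside some function):

	/* compose a BMG PUNIT telemetry GUID, illustration only */
	u32 guid = FIELD_PREP(GUID_DEVICE_ID, BMG_DEVICE_ID) |
		   FIELD_PREP(GUID_CAP_TYPE, TELEMETRY) |
		   FIELD_PREP(GUID_RECORD_ID, PUNIT);
	int index;
	u32 offset;

	/* device_id matches, so this returns 0 with index == 0 and
	 * offset == PUNIT_TELEMETRY_OFFSET (0x0200) */
	int ret = xe_guid_decode(guid, &index, &offset);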
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
new file mode 100644
index 000000000000..5777c53faec2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef _XE_VSEC_H_
+#define _XE_VSEC_H_
+
+struct xe_device;
+
+void xe_vsec_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 02cf647f86d8..570fe0376402 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -607,6 +607,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
},
+ { XE_RTP_NAME("16024792527"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
+ SMP_FORCE_128B_OVERFETCH))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index bcd04464b85e..40438c3d9b72 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,3 +1,4 @@
+1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -33,7 +34,7 @@
GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
- MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
14019789679 GRAPHICS_VERSION(1255)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index aab79c5e34c2..1bda7ef606cc 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -478,7 +478,6 @@ static const struct drm_driver xen_drm_driver = {
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
.desc = "Xen PV DRM Display Unit",
- .date = "20180221",
.major = 1,
.minor = 0,
@@ -525,11 +524,6 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
if (ret)
goto fail_register;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- xen_drm_driver.name, xen_drm_driver.major,
- xen_drm_driver.minor, xen_drm_driver.patchlevel,
- xen_drm_driver.date, drm_dev->primary->index);
-
return 0;
fail_register:
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 4197f44e202f..dbecca9bdd54 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -17,3 +17,12 @@ config DRM_ZYNQMP_DPSUB
This is a DRM/KMS driver for ZynqMP DisplayPort controller. Choose
this option if you have a Xilinx ZynqMP SoC with DisplayPort
subsystem.
+
+config DRM_ZYNQMP_DPSUB_AUDIO
+ bool "ZynqMP DisplayPort Audio Support"
+ depends on DRM_ZYNQMP_DPSUB
+ depends on SND && SND_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Choose this option to enable DisplayPort audio support in the ZynqMP
+ DisplayPort driver.
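
For completeness, a .config fragment satisfying the new option's dependencies (illustrative; SND_SOC_GENERIC_DMAENGINE_PCM is pulled in automatically via select):

	CONFIG_SND=y
	CONFIG_SND_SOC=y
	CONFIG_DRM_ZYNQMP_DPSUB=y
	CONFIG_DRM_ZYNQMP_DPSUB_AUDIO=y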
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
index ea1422a39502..ab6e2ffd7e8d 100644
--- a/drivers/gpu/drm/xlnx/Makefile
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -1,2 +1,3 @@
zynqmp-dpsub-y := zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o zynqmp_kms.o
+zynqmp-dpsub-$(CONFIG_DRM_ZYNQMP_DPSUB_AUDIO) += zynqmp_dp_audio.o
obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index e4e0e299e8a7..80d1e499a18d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -143,7 +143,6 @@ struct zynqmp_disp_layer {
* @dpsub: Display subsystem
* @blend: Register I/O base address for the blender
* @avbuf: Register I/O base address for the audio/video buffer manager
- * @audio: Registers I/O base address for the audio mixer
* @layers: Layers (planes)
*/
struct zynqmp_disp {
@@ -152,7 +151,6 @@ struct zynqmp_disp {
void __iomem *blend;
void __iomem *avbuf;
- void __iomem *audio;
struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS];
};
@@ -866,42 +864,6 @@ static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp *disp,
}
/* -----------------------------------------------------------------------------
- * Audio Mixer
- */
-
-static void zynqmp_disp_audio_write(struct zynqmp_disp *disp, int reg, u32 val)
-{
- writel(val, disp->audio + reg);
-}
-
-/**
- * zynqmp_disp_audio_enable - Enable the audio mixer
- * @disp: Display controller
- *
- * Enable the audio mixer by de-asserting the soft reset. The audio state is set to
- * default values by the reset, set the default mixer volume explicitly.
- */
-static void zynqmp_disp_audio_enable(struct zynqmp_disp *disp)
-{
- /* Clear the audio soft reset register as it's an non-reset flop. */
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_MIXER_VOLUME,
- ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
-}
-
-/**
- * zynqmp_disp_audio_disable - Disable the audio mixer
- * @disp: Display controller
- *
- * Disable the audio mixer by asserting its soft reset.
- */
-static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
-{
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET,
- ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
-}
-
-/* -----------------------------------------------------------------------------
* ZynqMP Display Layer & DRM Plane
*/
@@ -1341,8 +1303,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
disp->dpsub->vid_clk_from_ps);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
-
- zynqmp_disp_audio_enable(disp);
}
/**
@@ -1351,8 +1311,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- zynqmp_disp_audio_disable(disp);
-
zynqmp_disp_avbuf_disable_audio(disp);
zynqmp_disp_avbuf_disable_channels(disp);
zynqmp_disp_avbuf_disable(disp);
@@ -1421,12 +1379,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
goto error;
}
- disp->audio = devm_platform_ioremap_resource_byname(pdev, "aud");
- if (IS_ERR(disp->audio)) {
- ret = PTR_ERR(disp->audio);
- goto error;
- }
-
ret = zynqmp_disp_create_layers(disp);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
index fa3935384834..9a4ff094e276 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
@@ -177,12 +177,7 @@
#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
-#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
-#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
-#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
-#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
-#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
-#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_STATUS(x) (0x8 + ((x) * 4))
#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
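
The six discrete channel-status registers collapse into one indexed macro; the loop it enables (matching the writer added in zynqmp_dp_audio.c later in this patch) looks like:

	/* six 32-bit words cover the 24-byte IEC958 channel status */
	for (unsigned int i = 0; i < 6; ++i)
		writel(status_word[i], base + ZYNQMP_DISP_AUD_CH_STATUS(i));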
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 25c5dc61ee88..979f6d3239ba 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1342,7 +1342,6 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
{
u8 lane_cnt = dp->mode.lane_cnt;
u32 reg, wpl;
- unsigned int rate;
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HTOTAL, mode->htotal);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VTOTAL, mode->vtotal);
@@ -1367,18 +1366,8 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_N_VID, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_M_VID, mode->clock);
- rate = zynqmp_dpsub_get_audio_clk_rate(dp->dpsub);
- if (rate) {
- dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, reg);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
- }
}
- /* Only 2 channel audio is supported now */
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
-
zynqmp_dp_write(dp, ZYNQMP_DP_USER_PIX_WIDTH, 1);
/* Translate to the native 16 bit datapath based on IP core spec */
@@ -1388,6 +1377,44 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
}
/* -----------------------------------------------------------------------------
+ * Audio
+ */
+
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, num_channels - 1);
+}
+
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+}
+
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+}
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 link_rate;
+
+ if (!(dp->config.misc0 & ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK))
+ return;
+
+ link_rate = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+
+ rate = clk_get_rate(dp->dpsub->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, link_rate);
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
+}
+
+/* -----------------------------------------------------------------------------
* DISP Configuration
*/
@@ -1537,7 +1564,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(dp->dev);
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
zynqmp_dp_disp_enable(dp, old_bridge_state);
/*
@@ -1577,8 +1604,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
/* Enable the encoder */
dp->enabled = true;
zynqmp_dp_update_misc(dp);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
if (dp->status == connector_status_connected) {
for (i = 0; i < 3; i++) {
@@ -1598,7 +1624,6 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET,
ZYNQMP_DP_SOFTWARE_RESET_ALL);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1);
- mutex_unlock(&dp->lock);
}
static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -1613,8 +1638,6 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
zynqmp_dp_disp_disable(dp, old_bridge_state);
mutex_unlock(&dp->lock);
@@ -2190,7 +2213,7 @@ static int zynqmp_dp_rate_get(void *data, u64 *val)
struct zynqmp_dp *dp = data;
mutex_lock(&dp->lock);
- *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000;
+ *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
mutex_unlock(&dp->lock);
return 0;
}
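
The N/M programming in the new zynqmp_dp_audio_write_n_m() follows the DisplayPort audio clock recovery relation Maud / Naud = 512 * fs / f_LS_CLK (inferred from the DP specification; the patch itself does not spell this out). Writing Naud as the link symbol rate in kHz and Maud as the 512 * fs audio clock in kHz keeps the ratio exact. A worked example for fs = 48 kHz on a 5.4 Gb/s (HBR2) link:

	aud_clk = 512 * 48000 Hz = 24576000 Hz  ->  M_AUD = 24576000 / 1000 = 24576
	link    = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) = 540000 kHz  ->  N_AUD
	check   : 24576 / 540000 == (512 * 48000) / 540000000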
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
index f077d7fbd0ad..a3257793e23a 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -22,4 +22,11 @@ void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub);
void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub);
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels);
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp);
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp);
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp);
+
#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
new file mode 100644
index 000000000000..fa5f0ace6084
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Subsystem Driver - Audio support
+ *
+ * Copyright (C) 2015 - 2024 Xilinx, Inc.
+ *
+ * Authors:
+ * - Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ * - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "zynqmp_disp_regs.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+#define ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK 512
+#define ZYNQMP_NUM_PCMS 2
+
+struct zynqmp_dpsub_audio {
+ void __iomem *base;
+
+ struct snd_soc_card card;
+
+ const char *dai_name;
+ const char *link_names[ZYNQMP_NUM_PCMS];
+ const char *pcm_names[ZYNQMP_NUM_PCMS];
+
+ struct snd_soc_dai_driver dai_driver;
+ struct snd_dmaengine_pcm_config pcm_configs[ZYNQMP_NUM_PCMS];
+
+ struct snd_soc_dai_link links[ZYNQMP_NUM_PCMS];
+
+ struct {
+ struct snd_soc_dai_link_component cpu;
+ struct snd_soc_dai_link_component codec;
+ struct snd_soc_dai_link_component platform;
+ } components[ZYNQMP_NUM_PCMS];
+
+ /*
+ * Protects:
+ * - enabled_streams
+ * - volumes
+ * - current_rate
+ */
+ struct mutex enable_lock;
+
+ u32 enabled_streams;
+ u32 current_rate;
+
+ u16 volumes[2];
+};
+
+static const struct snd_pcm_hardware zynqmp_dp_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static int zynqmp_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ 256);
+
+ return 0;
+}
+
+static const struct snd_soc_ops zynqmp_dp_ops = {
+ .startup = zynqmp_dp_startup,
+};
+
+static void zynqmp_dp_audio_write(struct zynqmp_dpsub_audio *audio, int reg,
+ u32 val)
+{
+ writel(val, audio->base + reg);
+}
+
+static int dp_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+ int ret;
+ u32 sample_rate;
+ struct snd_aes_iec958 iec = { 0 };
+ unsigned long rate;
+
+ sample_rate = params_rate(params);
+
+ if (sample_rate != 48000 && sample_rate != 44100)
+ return -EINVAL;
+
+ guard(mutex)(&audio->enable_lock);
+
+ if (audio->enabled_streams && audio->current_rate != sample_rate) {
+ dev_err(dpsub->dev,
+ "Can't change rate while playback enabled\n");
+ return -EINVAL;
+ }
+
+ if (audio->enabled_streams > 0) {
+ /* Nothing to do */
+ audio->enabled_streams++;
+ return 0;
+ }
+
+ audio->current_rate = sample_rate;
+
+ /* Note: clock rate can only be changed if the clock is disabled */
+ ret = clk_set_rate(dpsub->aud_clk,
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK);
+ if (ret) {
+ dev_err(dpsub->dev, "can't set aud_clk to %u err:%d\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK, ret);
+ return ret;
+ }
+
+ clk_prepare_enable(dpsub->aud_clk);
+
+ rate = clk_get_rate(dpsub->aud_clk);
+
+ /* Tolerate a small clock-rate offset of up to +-10 Hz */
+ if (abs(sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate) > 10) {
+ dev_err(dpsub->dev, "aud_clk offset is higher: %ld\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate);
+ clk_disable_unprepare(dpsub->aud_clk);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(dpsub->dev);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] | (audio->volumes[1] << 16));
+
+ /* Clear the audio soft reset register as it's a non-reset flop. */
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+
+ /* Only 2 channel audio is supported now */
+ zynqmp_dp_audio_set_channels(dpsub->dp, 2);
+
+ zynqmp_dp_audio_write_n_m(dpsub->dp);
+
+ /* Channel status */
+
+ if (sample_rate == 48000)
+ iec.status[3] = IEC958_AES3_CON_FS_48000;
+ else
+ iec.status[3] = IEC958_AES3_CON_FS_44100;
+
+ for (unsigned int i = 0; i < AES_IEC958_STATUS_SIZE / 4; ++i) {
+ u32 v;
+
+ v = (iec.status[(i * 4) + 0] << 0) |
+ (iec.status[(i * 4) + 1] << 8) |
+ (iec.status[(i * 4) + 2] << 16) |
+ (iec.status[(i * 4) + 3] << 24);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_CH_STATUS(i), v);
+ }
+
+ zynqmp_dp_audio_enable(dpsub->dp);
+
+ audio->enabled_streams++;
+
+ return 0;
+}
+
+static int dp_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ /* Nothing to do */
+ if (audio->enabled_streams > 1) {
+ audio->enabled_streams--;
+ return 0;
+ }
+
+ pm_runtime_put(dpsub->dev);
+
+ zynqmp_dp_audio_disable(dpsub->dp);
+
+ /*
+ * Reset doesn't work. If we assert reset between audio stop and start,
+ * the audio won't start anymore. We are probably missing some
+ * audio-related register writes. A/B buf?
+ */
+ /*
zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+ */
+
+ clk_disable_unprepare(dpsub->aud_clk);
+
+ audio->current_rate = 0;
+ audio->enabled_streams--;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops zynqmp_dp_dai_ops = {
+ .hw_params = dp_dai_hw_params,
+ .hw_free = dp_dai_hw_free,
+};
+
+/*
+ * Min = 10 * log10(0x1 / 0x2000) = -39.13
+ * Max = 10 * log10(0xffff / 0x2000) = 9.03
+ */
+static const DECLARE_TLV_DB_RANGE(zynqmp_dp_tlv,
+ 0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, -3913, 1),
+ 0x1, 0x2000, TLV_DB_LINEAR_ITEM(-3913, 0),
+ 0x2000, 0xffff, TLV_DB_LINEAR_ITEM(0, 903),
+);
+
+static const struct snd_kcontrol_new zynqmp_dp_snd_controls[] = {
+ SOC_SINGLE_TLV("Input0 Playback Volume", 0,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+ SOC_SINGLE_TLV("Input1 Playback Volume", 1,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+};
+
+/*
+ * Note: these read & write functions only support two "registers", 0 and 1,
+ * for volume 0 and 1. In other words, these are not real register read/write
+ * functions.
+ *
+ * This is done to support caching the volume value for the case where the
+ * hardware is not enabled, and also to support locking as volumes 0 and 1
+ * are in the same register.
+ */
+static unsigned int zynqmp_dp_dai_read(struct snd_soc_component *component,
+ unsigned int reg)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ return audio->volumes[reg];
+}
+
+static int zynqmp_dp_dai_write(struct snd_soc_component *component,
+ unsigned int reg, unsigned int val)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ audio->volumes[reg] = val;
+
+ if (audio->enabled_streams)
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] |
+ (audio->volumes[1] << 16));
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver zynqmp_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .controls = zynqmp_dp_snd_controls,
+ .num_controls = ARRAY_SIZE(zynqmp_dp_snd_controls),
+ .read = zynqmp_dp_dai_read,
+ .write = zynqmp_dp_dai_write,
+};
+
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
+{
+ struct platform_device *pdev = to_platform_device(dpsub->dev);
+ struct device *dev = dpsub->dev;
+ struct zynqmp_dpsub_audio *audio;
+ struct snd_soc_card *card;
+ void *dev_data;
+ int ret;
+
+ if (!dpsub->aud_clk)
+ return 0;
+
+ audio = devm_kzalloc(dev, sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ dpsub->audio = audio;
+
+ mutex_init(&audio->enable_lock);
+
+ /* 0x2000 is unity gain (0 dB), i.e. no volume change */
+ audio->volumes[0] = 0x2000;
+ audio->volumes[1] = 0x2000;
+
+ audio->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dai", dev_name(dev));
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dp-%u", dev_name(dev), i);
+ audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-pcm-%u", dev_name(dev), i);
+ }
+
+ audio->base = devm_platform_ioremap_resource_byname(pdev, "aud");
+ if (IS_ERR(audio->base))
+ return PTR_ERR(audio->base);
+
+ /* Create CPU DAI */
+
+ audio->dai_driver = (struct snd_soc_dai_driver) {
+ .name = audio->dai_name,
+ .ops = &zynqmp_dp_dai_ops,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ };
+
+ ret = devm_snd_soc_register_component(dev, &zynqmp_dp_component_driver,
+ &audio->dai_driver, 1);
+ if (ret) {
+ dev_err(dev, "Failed to register CPU DAI\n");
+ return ret;
+ }
+
+ /* Create PCMs */
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_dmaengine_pcm_config *pcm_config =
+ &audio->pcm_configs[i];
+
+ *pcm_config = (struct snd_dmaengine_pcm_config){
+ .name = audio->pcm_names[i],
+ .pcm_hardware = &zynqmp_dp_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+ .chan_names[SNDRV_PCM_STREAM_PLAYBACK] =
+ i == 0 ? "aud0" : "aud1",
+ };
+
+ ret = devm_snd_dmaengine_pcm_register(dev, pcm_config, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register PCM %u\n", i);
+ return ret;
+ }
+ }
+
+ /* Create card */
+
+ card = &audio->card;
+ card->name = "DisplayPort";
+ card->long_name = "DisplayPort Monitor";
+ card->driver_name = "zynqmp_dpsub";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->num_links = ZYNQMP_NUM_PCMS;
+ card->dai_link = audio->links;
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_soc_dai_link *link = &card->dai_link[i];
+
+ link->ops = &zynqmp_dp_ops;
+
+ link->name = audio->link_names[i];
+ link->stream_name = audio->link_names[i];
+
+ link->cpus = &audio->components[i].cpu;
+ link->num_cpus = 1;
+ link->cpus[0].dai_name = audio->dai_name;
+
+ link->codecs = &audio->components[i].codec;
+ link->num_codecs = 1;
+ link->codecs[0].name = "snd-soc-dummy";
+ link->codecs[0].dai_name = "snd-soc-dummy-dai";
+
+ link->platforms = &audio->components[i].platform;
+ link->num_platforms = 1;
+ link->platforms[0].name = audio->pcm_names[i];
+ }
+
+ /*
+ * HACK: devm_snd_soc_register_card() overwrites current drvdata
+ * so we need to hack it back.
+ */
+ dev_data = dev_get_drvdata(dev);
+ ret = devm_snd_soc_register_card(dev, card);
+ dev_set_drvdata(dev, dev_data);
+ if (ret) {
+ /*
+ * As older dtbs may not have the audio channel dmas defined,
+ * instead of returning an error here we'll continue and just
+ * mark the audio as disabled.
+ */
+ dev_err(dev, "Failed to register sound card, disabling audio support\n");
+
+ devm_kfree(dev, audio);
+ dpsub->audio = NULL;
+
+ return 0;
+ }
+
+ return 0;
+}
+
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub)
+{
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ if (!audio)
+ return;
+
+ if (!dpsub->aud_clk)
+ return;
+
+ mutex_destroy(&audio->enable_lock);
+}
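
The mixer volume write packs both channel gains into one 32-bit register with 0x2000 as unity, and per the TLV comment above the scale is 10 * log10(val / 0x2000) dB. A worked packing, assuming roughly -6 dB on input 0 and unity on input 1:

	u16 vol0 = 0x080a;	/* 10 * log10(0x080a / 0x2000) ~= -6.0 dB */
	u16 vol1 = 0x2000;	/* unity, 0 dB */
	u32 packed = vol0 | (vol1 << 16);
	/* written to ZYNQMP_DISP_AUD_MIXER_VOLUME as in dp_dai_hw_params() */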
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 07c4d184e7a1..f953ca48a930 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -57,36 +57,6 @@ static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
};
/* -----------------------------------------------------------------------------
- * DPSUB Configuration
- */
-
-/**
- * zynqmp_dpsub_audio_enabled - If the audio is enabled
- * @dpsub: DisplayPort subsystem
- *
- * Return if the audio is enabled depending on the audio clock.
- *
- * Return: true if audio is enabled, or false.
- */
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub)
-{
- return !!dpsub->aud_clk;
-}
-
-/**
- * zynqmp_dpsub_get_audio_clk_rate - Get the current audio clock rate
- * @dpsub: DisplayPort subsystem
- *
- * Return: the current audio clock rate.
- */
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub)
-{
- if (zynqmp_dpsub_audio_enabled(dpsub))
- return 0;
- return clk_get_rate(dpsub->aud_clk);
-}
-
-/* -----------------------------------------------------------------------------
* Probe & Remove
*/
@@ -264,10 +234,17 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
goto err_disp;
}
+ ret = zynqmp_audio_init(dpsub);
+ if (ret)
+ goto err_drm_cleanup;
+
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
return 0;
+err_drm_cleanup:
+ if (dpsub->drm)
+ zynqmp_dpsub_drm_cleanup(dpsub);
err_disp:
drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
@@ -287,6 +264,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ zynqmp_audio_uninit(dpsub);
+
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
index b18554467e9c..d771b8b199e0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -12,6 +12,8 @@
#ifndef _ZYNQMP_DPSUB_H_
#define _ZYNQMP_DPSUB_H_
+#include <linux/types.h>
+
struct clk;
struct device;
struct drm_bridge;
@@ -39,6 +41,8 @@ enum zynqmp_dpsub_format {
ZYNQMP_DPSUB_FORMAT_YONLY,
};
+struct zynqmp_dpsub_audio;
+
/**
* struct zynqmp_dpsub - ZynqMP DisplayPort Subsystem
* @dev: The physical device
@@ -56,6 +60,7 @@ enum zynqmp_dpsub_format {
* @layers: Video and graphics layers
* @dp: The DisplayPort controller
* @dma_align: DMA alignment constraint (must be a power of 2)
+ * @audio: DP audio data
*/
struct zynqmp_dpsub {
struct device *dev;
@@ -77,10 +82,17 @@ struct zynqmp_dpsub {
struct zynqmp_dp *dp;
unsigned int dma_align;
+
+ struct zynqmp_dpsub_audio *audio;
};
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub);
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub);
+#ifdef CONFIG_DRM_ZYNQMP_DPSUB_AUDIO
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub);
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub);
+#else
+static inline int zynqmp_audio_init(struct zynqmp_dpsub *dpsub) { return 0; }
+static inline void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub) { }
+#endif
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index fc81983d9e5e..b47463473472 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -9,12 +9,12 @@
* - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -409,7 +409,6 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = {
.name = "zynqmp-dpsub",
.desc = "Xilinx DisplayPort Subsystem Driver",
- .date = "20130509",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 7b1d091f3c09..1f93e5e276c0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}
+/*
+ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
+ * valid paging domain.
+ */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
+ if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
+ domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;
@@ -619,6 +625,8 @@ static int host1x_probe(struct platform_device *pdev)
goto free_contexts;
}
+ mutex_init(&host->intr_mutex);
+
pm_runtime_enable(&pdev->dev);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index b3285dd10180..f77a678949e9 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -104,8 +104,6 @@ int host1x_intr_init(struct host1x *host)
unsigned int id;
int i, err;
- mutex_init(&host->intr_mutex);
-
for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
struct host1x_syncpt *syncpt = &host->syncpt[id];
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4d2a89d65b65..dfc245867a46 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -570,6 +570,8 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
+ depends on ACPI
+ select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
@@ -787,7 +789,7 @@ config HID_NINTENDO
Adds support for the Nintendo Switch Joy-Cons, NSO, Pro Controller.
All controllers support bluetooth, and the Pro Controller also supports
its USB mode. This also includes support for the Nintendo Switch Online
- Controllers which include the Genesis, SNES, and N64 controllers.
+ Controllers which include the NES, Genesis, SNES, and N64 controllers.
To compile this driver as a module, choose M here: the
module will be called hid-nintendo.
@@ -1167,7 +1169,8 @@ config HID_TOPRE
tristate "Topre REALFORCE keyboards"
depends on HID
help
- Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
+ Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key and
+ Topre REALFORCE R3S 87 key keyboards.
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
@@ -1374,10 +1377,6 @@ endmenu
source "drivers/hid/bpf/Kconfig"
-endif # HID
-
-source "drivers/hid/usbhid/Kconfig"
-
source "drivers/hid/i2c-hid/Kconfig"
source "drivers/hid/intel-ish-hid/Kconfig"
@@ -1386,4 +1385,12 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
+source "drivers/hid/intel-thc-hid/Kconfig"
+
+endif # HID
+
+# USB support may be used with HID disabled
+
+source "drivers/hid/usbhid/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 24de45f3677d..482b096eea28 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -171,3 +171,5 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc-hid/
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
index 329de5e12c1a..3291786a5ee6 100644
--- a/drivers/hid/amd-sfh-hid/Kconfig
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -5,7 +5,6 @@ menu "AMD SFH HID Support"
config AMD_SFH_HID
tristate "AMD Sensor Fusion Hub"
- depends on HID
depends on X86
help
If you say yes to this option, support will be included for the
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index e5620d7db569..799b8686a88a 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -43,6 +43,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
bool is_als_present;
+ bool is_sra_present;
};
struct amd_mp2_dev {
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 0c28ca349bcd..48cfd0c58241 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -122,7 +122,7 @@ int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
{
int rc;
- pci_intx(privdata->pdev, true);
+ pcim_intx(privdata->pdev, true);
rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
@@ -248,7 +248,7 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index db36d87d5634..e9929c4aa72e 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -30,6 +30,7 @@ static int amd_sfh_get_sensor_num(struct amd_mp2_dev *mp2, u8 *sensor_id)
case ACCEL_IDX:
case GYRO_IDX:
case MAG_IDX:
+ case SRA_IDX:
case ALS_IDX:
case HPD_IDX:
if (BIT(i) & slist->sl.sensors)
@@ -58,6 +59,8 @@ static const char *get_sensor_name(int idx)
return "gyroscope";
case MAG_IDX:
return "magnetometer";
+ case SRA_IDX:
+ return "SRA";
case ALS_IDX:
return "ALS";
case HPD_IDX:
@@ -130,6 +133,23 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
+
+ if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
+ break;
+
+ if (cl_data->sensor_idx[i] == SRA_IDX) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
+
+ cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ privdata->dev_en.is_sra_present = true;
+ continue;
+ }
+
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
@@ -181,6 +201,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
@@ -289,7 +311,7 @@ static void amd_mp2_pci_remove(void *privdata)
sfh_deinit_emp2();
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 4676f060da26..ffb98b4c36cb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -87,6 +87,41 @@ void sfh_interface_init(struct amd_mp2_dev *mp2)
emp2 = mp2;
}
+static int amd_sfh_mode_info(u32 *platform_type, u32 *laptop_placement)
+{
+ struct sfh_op_mode mode;
+
+ if (!platform_type || !laptop_placement)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_sra_present)
+ return -ENODEV;
+
+ mode.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 3));
+
+ *platform_type = mode.op_mode.devicemode;
+
+ if (mode.op_mode.ontablestate == 1) {
+ *laptop_placement = ON_TABLE;
+ } else if (mode.op_mode.ontablestate == 2) {
+ *laptop_placement = ON_LAP_MOTION;
+ } else if (mode.op_mode.inbagstate == 1) {
+ *laptop_placement = IN_BAG;
+ } else if (mode.op_mode.outbagstate == 1) {
+ *laptop_placement = OUT_OF_BAG;
+ } else if (mode.op_mode.ontablestate == 0 || mode.op_mode.inbagstate == 0 ||
+ mode.op_mode.outbagstate == 0) {
+ *laptop_placement = LP_UNKNOWN;
+ pr_warn_once("Unknown laptop placement\n");
+ } else if (mode.op_mode.ontablestate == 3 || mode.op_mode.inbagstate == 3 ||
+ mode.op_mode.outbagstate == 3) {
+ *laptop_placement = LP_UNDEFINED;
+ pr_warn_once("Undefined laptop placement\n");
+ }
+
+ return 0;
+}
+
static int amd_sfh_hpd_info(u8 *user_present)
{
struct hpd_status hpdstatus;
@@ -131,6 +166,9 @@ int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op)
return amd_sfh_hpd_info(&sfh_info->user_present);
case MT_ALS:
return amd_sfh_als_info(&sfh_info->ambient_light);
+ case MT_SRA:
+ return amd_sfh_mode_info(&sfh_info->platform_type,
+ &sfh_info->laptop_placement);
}
}
return -EINVAL;
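
The new MT_SRA case exposes the mode/placement data through amd_get_sfh_info(), the same cross-driver entry point already used for HPD and ALS. A minimal consumer sketch, assuming a caller that only logs the result (the pr_info() line is illustrative, not from the patch):

	struct amd_sfh_info info = {};

	/* Returns 0 on success, -ENODEV if no SRA sensor was enumerated. */
	if (!amd_get_sfh_info(&info, MT_SRA))
		pr_info("platform_type=%u laptop_placement=%u\n",
			info.platform_type, info.laptop_placement);
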
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 2c211d28764d..665c99ad779f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -22,8 +22,9 @@ enum sensor_index {
ACCEL_IDX,
GYRO_IDX,
MAG_IDX,
- ALS_IDX = 4,
- HPD_IDX = 5,
+ SRA_IDX,
+ ALS_IDX,
+ HPD_IDX,
MAX_IDX = 15,
};
@@ -164,6 +165,25 @@ struct hpd_status {
};
};
+struct sfh_op_mode {
+ union {
+ u32 val;
+ struct {
+ u32 mode : 3;
+ u32 lidstatus : 1;
+ u32 angle : 10;
+ u32 inbagstatedbg : 2;
+ u32 ontablestate : 2;
+ u32 inbagstate : 2;
+ u32 outbagstate : 2;
+ u32 inbagmlcstate : 1;
+ u32 powerstate : 2;
+ u32 data : 3;
+ u32 devicemode : 4;
+ } op_mode;
+ };
+};
+
void sfh_interface_init(struct amd_mp2_dev *mp2);
void sfh_deinit_emp2(void);
void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
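
struct sfh_op_mode above overlays named bitfields on the raw 32-bit C2P register that amd_sfh_mode_info() reads with readl(). A standalone sketch of the decode, with the field widths copied from the struct; the register value is hypothetical, and the bit packing assumes the little-endian, low-bit-first layout the driver targets:

#include <stdint.h>
#include <stdio.h>

struct sfh_op_mode {
	union {
		uint32_t val;
		struct {
			uint32_t mode : 3;
			uint32_t lidstatus : 1;
			uint32_t angle : 10;
			uint32_t inbagstatedbg : 2;
			uint32_t ontablestate : 2;
			uint32_t inbagstate : 2;
			uint32_t outbagstate : 2;
			uint32_t inbagmlcstate : 1;
			uint32_t powerstate : 2;
			uint32_t data : 3;
			uint32_t devicemode : 4;
		} op_mode;
	};
};

int main(void)
{
	/* Hypothetical register value: bit 16 set => ontablestate == 1,
	 * which amd_sfh_mode_info() maps to ON_TABLE. */
	struct sfh_op_mode m = { .val = 0x00010000u };

	printf("ontable=%u inbag=%u outbag=%u devicemode=%u\n",
	       m.op_mode.ontablestate, m.op_mode.inbagstate,
	       m.op_mode.outbagstate, m.op_mode.devicemode);
	return 0;
}
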
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 7e1ae2a2bcc2..d900dd05c335 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
return false;
}
+static bool apple_is_omoton_kb066(struct hid_device *hdev)
+{
+ return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
+ strcmp(hdev->name, "Bluetooth Keyboard") == 0;
+}
+
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@@ -474,6 +480,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015)
table = magic_keyboard_2015_fn_keys;
else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
+ hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
table = apple2021_fn_keys;
@@ -724,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
{
struct apple_sc *asc = hid_get_drvdata(hdev);
- if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks &= ~APPLE_HAS_FN;
}
@@ -1150,6 +1157,10 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 8deded185725..c45e5aa569d2 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
unsigned long flags;
- if (len != 5)
+ if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
goto out;
if (!memcmp(data, keydown, sizeof(keydown))) {
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 506c6f377e7d..46e3e42f9eb5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -432,6 +432,26 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
+static int asus_kbd_disable_oobe(struct hid_device *hdev)
+{
+ const u8 init[][6] = {
+ { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 },
+ { FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF }
+ };
+ int ret;
+
+ for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
+ ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i]));
+ if (ret < 0)
+ return ret;
+ }
+
+ hid_info(hdev, "Disabled OOBE for keyboard\n");
+ return 0;
+}
+
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
@@ -534,6 +554,12 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
if (ret < 0)
return ret;
+
+ if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) {
+ ret = asus_kbd_disable_oobe(hdev);
+ if (ret < 0)
+ return ret;
+ }
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 33a191973324..4497b50799db 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1163,6 +1163,8 @@ static void hid_apply_multiplier(struct hid_device *hid,
while (multiplier_collection->parent_idx != -1 &&
multiplier_collection->type != HID_COLLECTION_LOGICAL)
multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
+ if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ multiplier_collection = NULL;
effective_multiplier = hid_calculate_multiplier(hid, multiplier);
@@ -2174,9 +2176,9 @@ static bool hid_hiddev(struct hid_device *hdev)
static ssize_t
-read_report_descriptor(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+report_descriptor_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct hid_device *hdev = to_hid_device(dev);
@@ -2193,24 +2195,17 @@ read_report_descriptor(struct file *filp, struct kobject *kobj,
}
static ssize_t
-show_country(struct device *dev, struct device_attribute *attr,
- char *buf)
+country_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
return sprintf(buf, "%02x\n", hdev->country & 0xff);
}
-static struct bin_attribute dev_bin_attr_report_desc = {
- .attr = { .name = "report_descriptor", .mode = 0444 },
- .read = read_report_descriptor,
- .size = HID_MAX_DESCRIPTOR_SIZE,
-};
+static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
-static const struct device_attribute dev_attr_country = {
- .attr = { .name = "country", .mode = 0444 },
- .show = show_country,
-};
+static const DEVICE_ATTR_RO(country);
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
@@ -2800,13 +2795,13 @@ static struct attribute *hid_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
-static struct bin_attribute *hid_dev_bin_attrs[] = {
- &dev_bin_attr_report_desc,
+static const struct bin_attribute *hid_dev_bin_attrs[] = {
+ &bin_attr_report_descriptor,
NULL
};
static const struct attribute_group hid_dev_group = {
.attrs = hid_dev_attrs,
- .bin_attrs = hid_dev_bin_attrs,
+ .bin_attrs_new = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);
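
The callback renames above are not cosmetic: DEVICE_ATTR_RO(country) and BIN_ATTR_RO(report_descriptor, ...) derive both the sysfs file name and the callback names from their first argument, so they require companion functions named country_show() and report_descriptor_read(). A simplified sketch of the pattern (the real macros live in include/linux/sysfs.h and include/linux/device.h and carry more plumbing; .read_new is used here to match the const-callback migration in the rest of this patch):

/* Simplified: expects a companion _name##_read() and emits bin_attr_##_name. */
#define EXAMPLE_BIN_ATTR_RO(_name, _size)				\
	struct bin_attribute bin_attr_##_name = {			\
		.attr = { .name = #_name, .mode = 0444 },		\
		.read_new = _name##_read,				\
		.size = _size,						\
	}
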
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index 6ece56b850fc..afbd67aa9719 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -71,11 +71,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -120,6 +118,12 @@ enum {
CORSAIR_VOID_BATTERY_CHARGING = 5,
};
+enum {
+ CORSAIR_VOID_ADD_BATTERY = 0,
+ CORSAIR_VOID_REMOVE_BATTERY = 1,
+ CORSAIR_VOID_UPDATE_BATTERY = 2,
+};
+
static enum power_supply_property corsair_void_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -155,12 +159,12 @@ struct corsair_void_drvdata {
struct power_supply *battery;
struct power_supply_desc battery_desc;
- struct mutex battery_mutex;
struct delayed_work delayed_status_work;
struct delayed_work delayed_firmware_work;
- struct work_struct battery_remove_work;
- struct work_struct battery_add_work;
+
+ unsigned long battery_work_flags;
+ struct work_struct battery_work;
};
/*
@@ -260,11 +264,9 @@ success:
/* Inform power supply if battery values changed */
if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_changed(drvdata->battery);
- }
- }
+ set_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
}
}
@@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
}
-static void corsair_void_battery_remove_work_handler(struct work_struct *work)
-{
- struct corsair_void_drvdata *drvdata;
-
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_remove_work);
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_unregister(drvdata->battery);
- drvdata->battery = NULL;
- }
- }
-}
-
-static void corsair_void_battery_add_work_handler(struct work_struct *work)
+static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
{
- struct corsair_void_drvdata *drvdata;
- struct power_supply_config psy_cfg;
+ struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_add_work);
- guard(mutex)(&drvdata->battery_mutex);
if (drvdata->battery)
return;
@@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
drvdata->battery = new_supply;
}
+static void corsair_void_battery_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata = container_of(work,
+ struct corsair_void_drvdata, battery_work);
+
+ bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
+ &drvdata->battery_work_flags);
+ bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
+ &drvdata->battery_work_flags);
+ bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+
+ if (add_battery && !remove_battery) {
+ corsair_void_add_battery(drvdata);
+ } else if (remove_battery && !add_battery && drvdata->battery) {
+ power_supply_unregister(drvdata->battery);
+ drvdata->battery = NULL;
+ }
+
+ if (update_battery && drvdata->battery)
+ power_supply_changed(drvdata->battery);
+}
+
static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_add_work);
+ set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
schedule_delayed_work(&drvdata->delayed_firmware_work,
msecs_to_jiffies(100));
}
static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_remove_work);
+ set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
corsair_void_set_unknown_wireless_data(drvdata);
corsair_void_set_unknown_batt(drvdata);
@@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
drvdata->battery_desc.get_property = corsair_void_battery_get_property;
drvdata->battery = NULL;
- INIT_WORK(&drvdata->battery_remove_work,
- corsair_void_battery_remove_work_handler);
- INIT_WORK(&drvdata->battery_add_work,
- corsair_void_battery_add_work_handler);
- ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
- if (ret)
- return ret;
+ INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
if (ret)
@@ -721,11 +725,11 @@ static void corsair_void_remove(struct hid_device *hid_dev)
struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
hid_hw_stop(hid_dev);
- cancel_work_sync(&drvdata->battery_remove_work);
- cancel_work_sync(&drvdata->battery_add_work);
+ cancel_work_sync(&drvdata->battery_work);
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
+ cancel_delayed_work_sync(&drvdata->delayed_status_work);
cancel_delayed_work_sync(&drvdata->delayed_firmware_work);
sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
}
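
The rework above replaces two dedicated work items plus battery_mutex with a single work item driven by atomic request bits: event handlers set a bit and schedule, and the worker drains the bits with test_and_clear_bit(), so add, remove, and update requests serialize through one context without any lock. A condensed sketch of the request side (not literal driver code):

static void example_request(struct corsair_void_drvdata *drvdata,
			    unsigned int flag)
{
	/* Atomic, so this is safe from the raw-event path; repeated
	 * requests coalesce into a single worker run. */
	set_bit(flag, &drvdata->battery_work_flags);
	schedule_work(&drvdata->battery_work);
}
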
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 541d682af15a..8433306148d5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO_RECORD_START] = "MacroRecordStart",
[KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
[KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
- [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger",
+ [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
[KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo",
[KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
[KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 0f292b5d3e26..eb6fd2dc75d0 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
mutex_unlock(&cbas_ec_reglock);
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
{ "GOOG000B", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+#endif
#ifdef CONFIG_OF
static const struct of_device_id cbas_ec_of_match[] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1f47fda809b9..7e400624908e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -184,6 +184,7 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
@@ -506,7 +507,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define I2C_VENDOR_ID_GOODIX 0x27c6
-#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0
#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -1089,11 +1089,14 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define I2C_VENDOR_ID_QTEC 0x6243
+
#define USB_VENDOR_ID_QUANTA 0x0408
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
@@ -1300,6 +1303,7 @@
#define USB_VENDOR_ID_TOPRE 0x0853
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87 0x0313
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fda9dce3da99..9d80635a91eb 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -810,10 +810,23 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
- if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
- switch (usage->hid & 0xf) {
- case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
- default: goto ignore;
+ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */
+ switch (usage->hid) {
+ case HID_GD_UP: usage->hat_dir = 1; break;
+ case HID_GD_DOWN: usage->hat_dir = 5; break;
+ case HID_GD_RIGHT: usage->hat_dir = 3; break;
+ case HID_GD_LEFT: usage->hat_dir = 7; break;
+ case HID_GD_DO_NOT_DISTURB:
+ map_key_clear(KEY_DO_NOT_DISTURB); break;
+ default: goto unknown;
+ }
+
+ if (usage->hid <= HID_GD_LEFT) {
+ if (field->dpad) {
+ map_abs(field->dpad);
+ goto ignore;
+ }
+ map_abs(ABS_HAT0X);
}
break;
}
@@ -844,22 +857,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->application == HID_GD_SYSTEM_CONTROL)
goto ignore;
- if ((usage->hid & 0xf0) == 0x90) { /* D-pad */
- switch (usage->hid) {
- case HID_GD_UP: usage->hat_dir = 1; break;
- case HID_GD_DOWN: usage->hat_dir = 5; break;
- case HID_GD_RIGHT: usage->hat_dir = 3; break;
- case HID_GD_LEFT: usage->hat_dir = 7; break;
- default: goto unknown;
- }
- if (field->dpad) {
- map_abs(field->dpad);
- goto ignore;
- }
- map_abs(ABS_HAT0X);
- break;
- }
-
switch (usage->hid) {
/* These usage IDs map directly to the usage codes. */
case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index f66194fde891..a7d9ca02779e 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,11 +32,20 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
+#include <linux/platform_profile.h>
+
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
#define LENOVO_KEY_MICMUTE KEY_F20
+/* HID raw events for ThinkPad X12 Tabs */
+#define TP_X12_RAW_HOTKEY_FN_F4 0x00020003
+#define TP_X12_RAW_HOTKEY_FN_F8 0x38001003
+#define TP_X12_RAW_HOTKEY_FN_F10 0x00000803
+#define TP_X12_RAW_HOTKEY_FN_F12 0x00000403
+#define TP_X12_RAW_HOTKEY_FN_SPACE 0x18001003
+
struct lenovo_drvdata {
u8 led_report[3]; /* Must be first for proper alignment */
int led_state;
@@ -71,6 +80,14 @@ struct lenovo_drvdata {
#define TP10UBKBD_LED_OFF 1
#define TP10UBKBD_LED_ON 2
+/* Helper to report raw events as key events */
+static inline void report_key_event(struct input_dev *input, int keycode)
+{
+ input_report_key(input, keycode, 1);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+}
+
static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
enum led_brightness value)
{
@@ -472,6 +489,8 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
@@ -582,6 +601,8 @@ static ssize_t attr_fn_lock_store(struct device *dev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_features_set_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -680,6 +701,59 @@ static const struct attribute_group lenovo_attr_group_cptkbd = {
.attrs = lenovo_attributes_cptkbd,
};
+/* Handle the Lenovo ThinkPad X12 Tab's HID raw inputs for Fn keys */
+static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
+{
+ struct hid_input *hidinput;
+ struct input_dev *input = NULL;
+
+ /* Iterate through all associated input devices */
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ input = hidinput->input;
+ if (!input)
+ continue;
+
+ switch (raw_data) {
+		/* Fn-F20 is used here for mic mute */
+ case TP_X12_RAW_HOTKEY_FN_F4:
+ report_key_event(input, LENOVO_KEY_MICMUTE);
+ return 1;
+		/* Power mode or airplane mode is toggled based on the device */
+ case TP_X12_RAW_HOTKEY_FN_F8:
+			/*
+			 * The TP X12 Tab maps Fn-F8 to airplane mode,
+			 * whereas the TP X12 Tab2 uses Fn-F8 to cycle
+			 * power modes.
+			 */
+ if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
+ report_key_event(input, KEY_RFKILL);
+ return 1;
+ } else {
+ platform_profile_cycle();
+ return 1;
+ }
+ case TP_X12_RAW_HOTKEY_FN_F10:
+		/* The X12 Tab has Pickup Phone; the Tab2 uses the snipping tool */
+ (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) ?
+ report_key_event(input, KEY_PICKUP_PHONE) :
+ report_key_event(input, KEY_SELECTIVE_SCREENSHOT);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_F12:
+			/* Bookmarks/star key */
+ report_key_event(input, KEY_BOOKMARKS);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_SPACE:
+			/* Keyboard LED backlight toggle */
+ report_key_event(input, KEY_KBDILLUMTOGGLE);
+ return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
static int lenovo_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
@@ -697,6 +771,15 @@ static int lenovo_raw_event(struct hid_device *hdev,
data[2] = 0x01;
}
+ /*
+	 * The Lenovo TP X12 Tab keyboards report Fn+XX hotkeys as HID raw
+	 * data with report ID 0x03, e.g. the raw data received for mic mute
+	 * is 0x00020003.
+ */
+ if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB
+ || hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2)
+ && size >= 3 && report->id == 0x03))
+ return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data));
+
return 0;
}
@@ -776,6 +859,8 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
return lenovo_event_cptkbd(hdev, field, usage, value);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1057,6 +1142,8 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
case USB_DEVICE_ID_LENOVO_TPKBD:
lenovo_led_set_tpkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1243,8 +1330,15 @@ static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
* We cannot read the state, only set it, so we force it to on here
* (which should be a no-op) to make sure that our state matches the
* keyboard's FN-lock state. This is the same as what Windows does.
+ *
+	 * On the X12 Tab and Tab2 the default Windows behaviour is Fn-lock
+	 * off, so check the product ID to match that behaviour on the
+	 * ThinkPad X12 Tabs.
*/
- data->fn_lock = true;
+
+ data->fn_lock = !(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB ||
+ hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2);
+
lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
@@ -1288,6 +1382,8 @@ static int lenovo_probe(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
ret = lenovo_probe_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1375,6 +1471,8 @@ static void lenovo_remove(struct hid_device *hdev)
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_remove_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
@@ -1429,6 +1527,10 @@ static const struct hid_device_id lenovo_devices[] = {
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
{ }
};
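
The raw path above fires only for reports with ID 0x03 that are at least 3 bytes long, and interprets the leading four bytes as a little-endian hotkey code (byte 0 is the report ID itself; the 32-bit load relies on the usual padded HID buffer). A standalone sketch of the decode, using the mic-mute value from the comment; the buffer contents are a hypothetical wire capture:

#include <stdint.h>
#include <string.h>

#define TP_X12_RAW_HOTKEY_FN_F4	0x00020003	/* mic mute */

/* Equivalent of le32_to_cpu(*(u32 *)data) on a little-endian host. */
static uint32_t decode_hotkey(const uint8_t *data)
{
	uint32_t raw;

	memcpy(&raw, data, sizeof(raw));
	return raw;
}

/* A report of { 0x03, 0x00, 0x02, 0x00 } decodes to 0x00020003. */
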
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index ec110dea8772..a76f17158539 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -52,6 +52,7 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD2_2021_BT_VERSION 0x110
+#define TRACKPAD_2024_BT_VERSION 0x314
#define TRACKPAD_REPORT_ID 0x28
#define TRACKPAD2_USB_REPORT_ID 0x02
@@ -567,9 +568,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
*/
if (hdev->vendor == BT_VENDOR_ID_APPLE) {
if (input->id.version == TRACKPAD2_2021_BT_VERSION)
+ input->name = "Apple Inc. Magic Trackpad 2021";
+ else if (input->id.version == TRACKPAD_2024_BT_VERSION) {
+ input->name = "Apple Inc. Magic Trackpad USB-C";
+ } else {
input->name = "Apple Inc. Magic Trackpad";
- else
- input->name = "Apple Inc. Magic Trackpad 2";
+ }
} else { /* USB_VENDOR_ID_APPLE */
input->name = hdev->name;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 785743036647..e50887a6d22c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1460,8 +1460,7 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
+ hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
@@ -1680,9 +1679,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
- if (suffix)
+ if (suffix) {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
@@ -2086,9 +2088,6 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
I2C_DEVICE_ID_GOODIX_01E9) },
- { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
- HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
- I2C_DEVICE_ID_GOODIX_01E0) },
/* GoodTouch panels */
{ .driver_data = MT_CLS_NSMU,
@@ -2318,6 +2317,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
HID_ANY_ID) },
+ /* Hantick */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 55153a2f7988..839d5bcd72b1 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -456,24 +456,20 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
{ /* sentinel */ },
};
-/*
- * "A", "B", and "C" are mapped positionally, rather than by label (e.g., "A"
- * gets assigned to BTN_EAST instead of BTN_A).
- */
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_SOUTH, JC_BTN_A, },
- { BTN_EAST, JC_BTN_B, },
- { BTN_WEST, JC_BTN_R, },
- { BTN_SELECT, JC_BTN_ZR, },
+ { BTN_WEST, JC_BTN_A, }, /* A */
+ { BTN_SOUTH, JC_BTN_B, }, /* B */
+ { BTN_EAST, JC_BTN_R, }, /* C */
+ { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */
+ { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */
+ { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */
+ { BTN_SELECT, JC_BTN_ZR, }, /* Mode */
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ },
};
-/*
- * N64's C buttons get assigned to d-pad directions and registered as buttons.
- */
static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_A, JC_BTN_A, },
{ BTN_B, JC_BTN_B, },
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e0bbf0c6345d..5d7a418ccdbe 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -891,6 +891,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ }
};
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index d55aaabab1ed..3048297569c5 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -224,24 +224,24 @@ static ssize_t arvo_sysfs_read(struct file *fp,
}
static ssize_t arvo_sysfs_write_button(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_write(fp, kobj, buf, off, count,
sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
-static BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
- sizeof(struct arvo_button));
+static const BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
+ sizeof(struct arvo_button));
static ssize_t arvo_sysfs_read_info(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_read(fp, kobj, buf, off, count,
sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
-static BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
- sizeof(struct arvo_info));
+static const BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
+ sizeof(struct arvo_info));
static struct attribute *arvo_attrs[] = {
&dev_attr_mode_key.attr,
@@ -250,7 +250,7 @@ static struct attribute *arvo_attrs[] = {
NULL,
};
-static struct bin_attribute *arvo_bin_attributes[] = {
+static const struct bin_attribute *const arvo_bin_attributes[] = {
&bin_attr_button,
&bin_attr_info,
NULL,
@@ -258,7 +258,7 @@ static struct bin_attribute *arvo_bin_attributes[] = {
static const struct attribute_group arvo_group = {
.attrs = arvo_attrs,
- .bin_attrs = arvo_bin_attributes,
+ .bin_attrs_new = arvo_bin_attributes,
};
static const struct attribute_group *arvo_groups[] = {
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 839ddfd931f0..0f9a2db04df9 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -46,8 +46,8 @@ ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -55,8 +55,8 @@ static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -68,27 +68,27 @@ ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = SIZE, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#endif
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0cd6208fb371..65a84bfcc2f8 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -156,7 +156,7 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
#define ISKU_SYSFS_W(thingy, THINGY) \
static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_write(fp, kobj, buf, off, count, \
@@ -165,7 +165,7 @@ static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj
#define ISKU_SYSFS_R(thingy, THINGY) \
static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_read(fp, kobj, buf, off, count, \
@@ -178,27 +178,27 @@ ISKU_SYSFS_W(thingy, THINGY)
#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
ISKU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
- .write = isku_sysfs_write_ ## thingy \
+ .read_new = isku_sysfs_read_ ## thingy, \
+ .write_new = isku_sysfs_write_ ## thingy \
}
#define ISKU_BIN_ATTR_R(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
+ .read_new = isku_sysfs_read_ ## thingy, \
}
#define ISKU_BIN_ATTR_W(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .write = isku_sysfs_write_ ## thingy \
+ .write_new = isku_sysfs_write_ ## thingy \
}
ISKU_BIN_ATTR_RW(macro, MACRO);
@@ -217,7 +217,7 @@ ISKU_BIN_ATTR_W(control, CONTROL);
ISKU_BIN_ATTR_W(reset, RESET);
ISKU_BIN_ATTR_R(info, INFO);
-static struct bin_attribute *isku_bin_attributes[] = {
+static const struct bin_attribute *const isku_bin_attributes[] = {
&bin_attr_macro,
&bin_attr_keys_function,
&bin_attr_keys_easyzone,
@@ -238,7 +238,7 @@ static struct bin_attribute *isku_bin_attributes[] = {
static const struct attribute_group isku_group = {
.attrs = isku_attrs,
- .bin_attrs = isku_bin_attributes,
+ .bin_attrs_new = isku_bin_attributes,
};
static const struct attribute_group *isku_groups[] = {
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 3f8f459edcf3..b3c0242e5a37 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -261,7 +261,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
}
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -285,7 +285,7 @@ static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
* case of error the old data is still valid
*/
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -327,11 +327,11 @@ unlock:
return sizeof(struct kone_settings);
}
-static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
- kone_sysfs_write_settings, sizeof(struct kone_settings));
+static const BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
+ kone_sysfs_write_settings, sizeof(struct kone_settings));
static ssize_t kone_sysfs_read_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -351,7 +351,7 @@ static ssize_t kone_sysfs_read_profilex(struct file *fp,
/* Writes data only if different to stored data */
static ssize_t kone_sysfs_write_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -382,11 +382,11 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
return sizeof(struct kone_profile);
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number = { \
+static const struct bin_attribute bin_attr_profile##number = { \
.attr = { .name = "profile" #number, .mode = 0660 }, \
.size = sizeof(struct kone_profile), \
- .read = kone_sysfs_read_profilex, \
- .write = kone_sysfs_write_profilex, \
+ .read_new = kone_sysfs_read_profilex, \
+ .write_new = kone_sysfs_write_profilex, \
.private = &profile_numbers[number-1], \
}
PROFILE_ATTR(1);
@@ -634,7 +634,7 @@ static struct attribute *kone_attrs[] = {
NULL,
};
-static struct bin_attribute *kone_bin_attributes[] = {
+static const struct bin_attribute *const kone_bin_attributes[] = {
&bin_attr_settings,
&bin_attr_profile1,
&bin_attr_profile2,
@@ -646,7 +646,7 @@ static struct bin_attribute *kone_bin_attributes[] = {
static const struct attribute_group kone_group = {
.attrs = kone_attrs,
- .bin_attrs = kone_bin_attributes,
+ .bin_attrs_new = kone_bin_attributes,
};
static const struct attribute_group *kone_groups[] = {
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 8ccb3b14a1a9..5d8a5ce88b4c 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -128,8 +128,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KONEPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_write(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -137,8 +137,8 @@ static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
#define KONEPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t koneplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_read(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -150,27 +150,27 @@ KONEPLUS_SYSFS_R(thingy, THINGY)
#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KONEPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
KONEPLUS_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
}
#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KONEPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK);
@@ -183,8 +183,8 @@ KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -201,8 +201,8 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -219,16 +219,16 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
- .read = koneplus_sysfs_read_profilex_settings, \
+ .read_new = koneplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
- .read = koneplus_sysfs_read_profilex_buttons, \
+ .read_new = koneplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -321,7 +321,7 @@ static struct attribute *koneplus_attrs[] = {
NULL,
};
-static struct bin_attribute *koneplus_bin_attributes[] = {
+static const struct bin_attribute *const koneplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_talk,
&bin_attr_macro,
@@ -346,7 +346,7 @@ static struct bin_attribute *koneplus_bin_attributes[] = {
static const struct attribute_group koneplus_group = {
.attrs = koneplus_attrs,
- .bin_attrs = koneplus_bin_attributes,
+ .bin_attrs_new = koneplus_bin_attributes,
};
static const struct attribute_group *koneplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index beca8aef8bbb..7fb705789d4e 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
-static struct bin_attribute *konepure_bin_attrs[] = {
+static const struct bin_attribute *const konepure_bin_attrs[] = {
&bin_attr_actual_profile,
&bin_attr_control,
&bin_attr_info,
@@ -62,7 +62,7 @@ static struct bin_attribute *konepure_bin_attrs[] = {
};
static const struct attribute_group konepure_group = {
- .bin_attrs = konepure_bin_attrs,
+ .bin_attrs_new = konepure_bin_attrs,
};
static const struct attribute_group *konepure_groups[] = {
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 748d4d7cb2fc..e31e4a2e62d5 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -171,8 +171,8 @@ static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KOVAPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_write(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -180,8 +180,8 @@ static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
#define KOVAPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_read(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -193,19 +193,19 @@ KOVAPLUS_SYSFS_R(thingy, THINGY)
#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KOVAPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .read = kovaplus_sysfs_read_ ## thingy, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .read_new = kovaplus_sysfs_read_ ## thingy, \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KOVAPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO);
@@ -213,8 +213,8 @@ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -231,8 +231,8 @@ static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -249,16 +249,16 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
- .read = kovaplus_sysfs_read_profilex_settings, \
+ .read_new = kovaplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
- .read = kovaplus_sysfs_read_profilex_buttons, \
+ .read_new = kovaplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -379,7 +379,7 @@ static struct attribute *kovaplus_attrs[] = {
NULL,
};
-static struct bin_attribute *kovaplus_bin_attributes[] = {
+static const struct bin_attribute *const kovaplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -399,7 +399,7 @@ static struct bin_attribute *kovaplus_bin_attributes[] = {
static const struct attribute_group kovaplus_group = {
.attrs = kovaplus_attrs,
- .bin_attrs = kovaplus_bin_attributes,
+ .bin_attrs_new = kovaplus_bin_attributes,
};
static const struct attribute_group *kovaplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index d5ddf0d68346..023ec64b4b0e 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -66,7 +66,7 @@ static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
#define LUA_SYSFS_W(thingy, THINGY) \
static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_write(fp, kobj, buf, off, count, \
@@ -75,7 +75,7 @@ static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
#define LUA_SYSFS_R(thingy, THINGY) \
static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_read(fp, kobj, buf, off, count, \
@@ -85,11 +85,11 @@ static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
LUA_SYSFS_W(thingy, THINGY) \
LUA_SYSFS_R(thingy, THINGY) \
-static struct bin_attribute lua_ ## thingy ## _attr = { \
+static const struct bin_attribute lua_ ## thingy ## _attr = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = LUA_SIZE_ ## THINGY, \
- .read = lua_sysfs_read_ ## thingy, \
- .write = lua_sysfs_write_ ## thingy \
+ .read_new = lua_sysfs_read_ ## thingy, \
+ .write_new = lua_sysfs_write_ ## thingy \
};
LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index eeb3d38cd805..2b53fbfbb897 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -129,8 +129,8 @@ static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
#define PYRA_SYSFS_W(thingy, THINGY) \
static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_write(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -138,8 +138,8 @@ static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
#define PYRA_SYSFS_R(thingy, THINGY) \
static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_read(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -151,27 +151,27 @@ PYRA_SYSFS_R(thingy, THINGY)
#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
PYRA_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
- .write = pyra_sysfs_write_ ## thingy \
+ .read_new = pyra_sysfs_read_ ## thingy, \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
PYRA_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
- .size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .read_new = pyra_sysfs_read_ ## thingy, \
}
#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
PYRA_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .write = pyra_sysfs_write_ ## thingy \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
PYRA_BIN_ATTRIBUTE_W(control, CONTROL);
@@ -180,8 +180,8 @@ PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -198,8 +198,8 @@ static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -216,16 +216,16 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_SETTINGS, \
- .read = pyra_sysfs_read_profilex_settings, \
+ .read_new = pyra_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_BUTTONS, \
- .read = pyra_sysfs_read_profilex_buttons, \
+ .read_new = pyra_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -235,8 +235,8 @@ PROFILE_ATTR(4);
PROFILE_ATTR(5);
static ssize_t pyra_sysfs_write_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
@@ -273,7 +273,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
}
PYRA_SYSFS_R(settings, SETTINGS);
-static struct bin_attribute bin_attr_settings =
+static const struct bin_attribute bin_attr_settings =
__BIN_ATTR(settings, (S_IWUSR | S_IRUGO),
pyra_sysfs_read_settings, pyra_sysfs_write_settings,
PYRA_SIZE_SETTINGS);
@@ -334,7 +334,7 @@ static struct attribute *pyra_attrs[] = {
NULL,
};
-static struct bin_attribute *pyra_bin_attributes[] = {
+static const struct bin_attribute *const pyra_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -355,7 +355,7 @@ static struct bin_attribute *pyra_bin_attributes[] = {
static const struct attribute_group pyra_group = {
.attrs = pyra_attrs,
- .bin_attrs = pyra_bin_attributes,
+ .bin_attrs_new = pyra_bin_attributes,
};
static const struct attribute_group *pyra_groups[] = {
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 57714a4525e2..902dac1e714e 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
-static struct bin_attribute *ryos_bin_attrs[] = {
+static const struct bin_attribute *const ryos_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_keys_primary,
@@ -70,7 +70,7 @@ static struct bin_attribute *ryos_bin_attrs[] = {
};
static const struct attribute_group ryos_group = {
- .bin_attrs = ryos_bin_attrs,
+ .bin_attrs_new = ryos_bin_attrs,
};
static const struct attribute_group *ryos_groups[] = {
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 2baa47a0efc5..7399b8ffb5c7 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -30,7 +30,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
-static struct bin_attribute *savu_bin_attrs[] = {
+static const struct bin_attribute *const savu_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_general,
@@ -42,7 +42,7 @@ static struct bin_attribute *savu_bin_attrs[] = {
};
static const struct attribute_group savu_group = {
- .bin_attrs = savu_bin_attrs,
+ .bin_attrs_new = savu_bin_attrs,
};
static const struct attribute_group *savu_groups[] = {
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 7bd86eef6ec7..4c94c03cb573 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -730,23 +730,30 @@ err_stop_hw:
return ret;
}
+static int sensor_hub_finalize_pending_fn(struct device *dev, void *data)
+{
+ struct hid_sensor_hub_device *hsdev = dev->platform_data;
+
+ if (hsdev->pending.status)
+ complete(&hsdev->pending.ready);
+
+ return 0;
+}
+
static void sensor_hub_remove(struct hid_device *hdev)
{
struct sensor_hub_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int i;
hid_dbg(hdev, " hardware removed\n");
hid_hw_close(hdev);
hid_hw_stop(hdev);
+
spin_lock_irqsave(&data->lock, flags);
- for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
- struct hid_sensor_hub_device *hsdev =
- data->hid_sensor_hub_client_devs[i].platform_data;
- if (hsdev->pending.status)
- complete(&hsdev->pending.ready);
- }
+ device_for_each_child(&hdev->dev, NULL,
+ sensor_hub_finalize_pending_fn);
spin_unlock_irqrestore(&data->lock, flags);
+
mfd_remove_devices(&hdev->dev);
mutex_destroy(&data->mutex);
}
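The hunk above replaces the open-coded array walk with device_for_each_child(), which visits each child device of a parent and stops early if the callback returns non-zero. A minimal sketch of the pattern, with illustrative demo_* names:

static int demo_visit_child(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return stops the iteration early */
}

static int demo_count_children(struct device *parent)
{
	int count = 0;

	device_for_each_child(parent, &count, demo_visit_child);
	return count;
}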
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 6439913372a8..10460b7bde1a 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -313,6 +313,7 @@ struct steam_device {
u16 rumble_left;
u16 rumble_right;
unsigned int sensor_timestamp_us;
+ struct work_struct unregister_work;
};
static int steam_recv_report(struct steam_device *steam,
@@ -1050,10 +1051,10 @@ static void steam_mode_switch_cb(struct work_struct *work)
struct steam_device, mode_switch);
unsigned long flags;
bool client_opened;
- steam->gamepad_mode = !steam->gamepad_mode;
if (!lizard_mode)
return;
+ steam->gamepad_mode = !steam->gamepad_mode;
if (steam->gamepad_mode)
steam_set_lizard_mode(steam, false);
else {
@@ -1072,6 +1073,31 @@ static void steam_mode_switch_cb(struct work_struct *work)
}
}
+static void steam_work_unregister_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ unregister_work);
+ unsigned long flags;
+ bool connected;
+ bool opened;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ opened = steam->client_opened;
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ if (connected) {
+ if (opened) {
+ steam_sensors_unregister(steam);
+ steam_input_unregister(steam);
+ } else {
+ steam_set_lizard_mode(steam, lizard_mode);
+ steam_input_register(steam);
+ steam_sensors_register(steam);
+ }
+ }
+}
+
static bool steam_is_valve_interface(struct hid_device *hdev)
{
struct hid_report_enum *rep_enum;
@@ -1117,8 +1143,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
steam->client_opened++;
spin_unlock_irqrestore(&steam->lock, flags);
- steam_sensors_unregister(steam);
- steam_input_unregister(steam);
+ schedule_work(&steam->unregister_work);
return 0;
}
@@ -1135,11 +1160,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
- if (connected) {
- steam_set_lizard_mode(steam, lizard_mode);
- steam_input_register(steam);
- steam_sensors_register(steam);
- }
+ schedule_work(&steam->unregister_work);
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -1231,6 +1252,7 @@ static int steam_probe(struct hid_device *hdev,
INIT_LIST_HEAD(&steam->list);
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
steam->sensor_timestamp_us = 0;
+ INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
/*
* With the real steam controller interface, do not connect hidraw.
@@ -1291,6 +1313,7 @@ err_cancel_work:
cancel_work_sync(&steam->work_connect);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
+ cancel_work_sync(&steam->unregister_work);
return ret;
}
@@ -1304,9 +1327,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
+ hid_destroy_device(steam->client_hdev);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
- hid_destroy_device(steam->client_hdev);
+ cancel_work_sync(&steam->rumble_work);
+ cancel_work_sync(&steam->unregister_work);
steam->client_hdev = NULL;
steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -1592,13 +1617,13 @@ static void steam_do_deck_input_event(struct steam_device *steam,
if (!(b9 & BIT(6)) && steam->did_mode_switch) {
steam->did_mode_switch = false;
- cancel_delayed_work_sync(&steam->mode_switch);
+ cancel_delayed_work(&steam->mode_switch);
} else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
steam->did_mode_switch = true;
schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
}
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
lpad_touched = b10 & BIT(3);
@@ -1668,7 +1693,7 @@ static void steam_do_deck_sensors_event(struct steam_device *steam,
*/
steam->sensor_timestamp_us += 4000;
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
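The hid-steam hunks above move input/sensor (un)registration out of the low-level client open/close callbacks into a dedicated work item, so the heavy teardown runs in process context rather than in the caller's context. A minimal sketch of that deferral pattern; all demo_* names are illustrative:

struct demo_device {
	spinlock_t lock;
	bool client_opened;
	struct work_struct unregister_work;
};

static void demo_unregister_cb(struct work_struct *work)
{
	struct demo_device *d = container_of(work, struct demo_device,
					     unregister_work);
	unsigned long flags;
	bool opened;

	spin_lock_irqsave(&d->lock, flags);
	opened = d->client_opened;
	spin_unlock_irqrestore(&d->lock, flags);

	/* process context: safe to sleep, register or unregister here */
	pr_debug("demo: deferred (un)register, opened=%d\n", opened);
}

static void demo_client_open(struct demo_device *d)
{
	schedule_work(&d->unregister_work);	/* defer the heavy part */
}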
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index f9ff5be94309..d4bd7848b8c6 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -19,6 +19,7 @@
#define STEELSERIES_SRWS1 BIT(0)
#define STEELSERIES_ARCTIS_1 BIT(1)
+#define STEELSERIES_ARCTIS_9 BIT(2)
struct steelseries_device {
struct hid_device *hdev;
@@ -32,6 +33,7 @@ struct steelseries_device {
struct power_supply *battery;
uint8_t battery_capacity;
bool headset_connected;
+ bool battery_charging;
};
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
@@ -368,32 +370,35 @@ static void steelseries_srws1_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
kfree(drv_data);
- return;
}
#endif
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
#define ARCTIS_1_BATTERY_RESPONSE_LEN 8
+#define ARCTIS_9_BATTERY_RESPONSE_LEN 64
static const char arctis_1_battery_request[] = { 0x06, 0x12 };
+static const char arctis_9_battery_request[] = { 0x00, 0x20 };
-static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev)
+static int steelseries_headset_request_battery(struct hid_device *hdev,
+ const char *request, size_t len)
{
u8 *write_buf;
int ret;
/* Request battery information */
- write_buf = kmemdup(arctis_1_battery_request, sizeof(arctis_1_battery_request), GFP_KERNEL);
+ write_buf = kmemdup(request, len, GFP_KERNEL);
if (!write_buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0],
- write_buf, sizeof(arctis_1_battery_request),
+ hid_dbg(hdev, "Sending battery request report");
+ ret = hid_hw_raw_request(hdev, request[0], write_buf, len,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
- if (ret < (int)sizeof(arctis_1_battery_request)) {
+ if (ret < (int)len) {
hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
ret = -ENODATA;
}
+
kfree(write_buf);
return ret;
}
@@ -404,7 +409,11 @@ static void steelseries_headset_fetch_battery(struct hid_device *hdev)
int ret = 0;
if (sd->quirks & STEELSERIES_ARCTIS_1)
- ret = steelseries_headset_arctis_1_fetch_battery(hdev);
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_1_battery_request, sizeof(arctis_1_battery_request));
+ else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_9_battery_request, sizeof(arctis_9_battery_request));
if (ret < 0)
hid_dbg(hdev,
@@ -429,6 +438,9 @@ static void steelseries_headset_battery_timer_tick(struct work_struct *work)
steelseries_headset_fetch_battery(hdev);
}
+#define STEELSERIES_PREFIX "SteelSeries "
+#define STEELSERIES_PREFIX_LEN strlen(STEELSERIES_PREFIX)
+
static int steelseries_headset_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -437,13 +449,24 @@ static int steelseries_headset_battery_get_property(struct power_supply *psy,
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = sd->hdev->name;
+ while (!strncmp(val->strval, STEELSERIES_PREFIX, STEELSERIES_PREFIX_LEN))
+ val->strval += STEELSERIES_PREFIX_LEN;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "SteelSeries";
+ break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = sd->headset_connected ?
- POWER_SUPPLY_STATUS_DISCHARGING :
- POWER_SUPPLY_STATUS_UNKNOWN;
+ if (sd->headset_connected) {
+ val->intval = sd->battery_charging ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
@@ -477,6 +500,8 @@ steelseries_headset_set_wireless_status(struct hid_device *hdev,
}
static enum power_supply_property steelseries_headset_battery_props[] = {
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SCOPE,
@@ -505,6 +530,7 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
/* avoid the warning of 0% battery while waiting for the first info */
steelseries_headset_set_wireless_status(sd->hdev, false);
sd->battery_capacity = 100;
+ sd->battery_charging = false;
sd->battery = devm_power_supply_register(&sd->hdev->dev,
&sd->battery_desc, &battery_cfg);
@@ -520,9 +546,22 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
INIT_DELAYED_WORK(&sd->battery_work, steelseries_headset_battery_timer_tick);
steelseries_headset_fetch_battery(sd->hdev);
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ /* The first fetch_battery request can remain unanswered in some cases */
+ schedule_delayed_work(&sd->battery_work,
+ msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS));
+ }
+
return 0;
}
+static bool steelseries_is_vendor_usage_page(struct hid_device *hdev, uint8_t usage_page)
+{
+ return hdev->rdesc[0] == 0x06 &&
+ hdev->rdesc[1] == usage_page &&
+ hdev->rdesc[2] == 0xff;
+}
+
static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct steelseries_device *sd;
@@ -548,12 +587,20 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
if (ret)
return ret;
+ if (sd->quirks & STEELSERIES_ARCTIS_9 &&
+ !steelseries_is_vendor_usage_page(hdev, 0xc0))
+ return -ENODEV;
+
spin_lock_init(&sd->lock);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
return ret;
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
if (steelseries_headset_battery_register(sd) < 0)
hid_err(sd->hdev,
"Failed to register battery for headset\n");
@@ -580,6 +627,7 @@ static void steelseries_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&sd->battery_work);
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
}
@@ -599,6 +647,15 @@ static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev,
return rdesc;
}
+static uint8_t steelseries_headset_map_capacity(uint8_t capacity, uint8_t min_in, uint8_t max_in)
+{
+ if (capacity >= max_in)
+ return 100;
+ if (capacity <= min_in)
+ return 0;
+ return (capacity - min_in) * 100 / (max_in - min_in);
+}
+
static int steelseries_headset_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *read_buf,
int size)
@@ -606,6 +663,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
struct steelseries_device *sd = hid_get_drvdata(hdev);
int capacity = sd->battery_capacity;
bool connected = sd->headset_connected;
+ bool charging = sd->battery_charging;
unsigned long flags;
/* Not a headset */
@@ -630,6 +688,34 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ hid_dbg(sd->hdev,
+ "Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
+ if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
+ if (!delayed_work_pending(&sd->battery_work))
+ goto request_battery;
+ return 0;
+ }
+
+ if (read_buf[0] == 0xaa && read_buf[1] == 0x01) {
+ connected = true;
+ charging = read_buf[4] == 0x01;
+
+ /*
+ * Found no official documentation about min and max.
+ * Values defined by testing.
+ */
+ capacity = steelseries_headset_map_capacity(read_buf[3], 0x68, 0x9d);
+ } else {
+ /*
+ * The device is off and sends the last known status (read_buf[1] == 0x03),
+ * or there is no known status of the device (read_buf[0] == 0x55).
+ */
+ connected = false;
+ charging = false;
+ }
+ }
+
if (connected != sd->headset_connected) {
hid_dbg(sd->hdev,
"Connected status changed from %sconnected to %sconnected\n",
@@ -647,6 +733,15 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
power_supply_changed(sd->battery);
}
+ if (charging != sd->battery_charging) {
+ hid_dbg(sd->hdev,
+ "Battery charging status changed from %scharging to %scharging\n",
+ sd->battery_charging ? "" : "not ",
+ charging ? "" : "not ");
+ sd->battery_charging = charging;
+ power_supply_changed(sd->battery);
+ }
+
request_battery:
spin_lock_irqsave(&sd->lock, flags);
if (!sd->removed)
@@ -665,6 +760,10 @@ static const struct hid_device_id steelseries_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
.driver_data = STEELSERIES_ARCTIS_1 },
+ { /* SteelSeries Arctis 9 Wireless for XBox */
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ .driver_data = STEELSERIES_ARCTIS_9 },
+
{ }
};
MODULE_DEVICE_TABLE(hid, steelseries_devices);
@@ -683,3 +782,4 @@ MODULE_DESCRIPTION("HID driver for Steelseries devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_AUTHOR("Simon Wood <simon@mungewell.org>");
+MODULE_AUTHOR("Christian Mayer <git@mayer-bgk.de>");
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index cf1679b0d4fb..3b81468a1df2 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -170,6 +170,14 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
ep = &usbif->cur_altsetting->endpoint[1];
b_ep = ep->desc.bEndpointAddress;
+ /* Are the expected endpoints present? */
+ u8 ep_addr[2] = {b_ep, 0};
+
+ if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ hid_err(hdev, "Unexpected non-int endpoint\n");
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
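The hunk above validates the endpoint before it is used: usb_check_int_endpoints() takes a zero-terminated array of endpoint addresses and returns true only when every listed endpoint exists on the interface and is an interrupt endpoint. A minimal sketch of a single-endpoint check (the demo_* name is illustrative):

static bool demo_ep_is_int(struct usb_interface *usbif, u8 ep_addr)
{
	const u8 ep_addrs[] = { ep_addr, 0 };	/* zero-terminated list */

	return usb_check_int_endpoints(usbif, ep_addrs);
}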
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index 848361f6225d..ccedf8721722 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -29,6 +29,11 @@ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev,
"fixing up Topre REALFORCE keyboard report descriptor\n");
rdesc[72] = 0x02;
+ } else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 &&
+ rdesc[30] == 0x81 && rdesc[31] == 0x00) {
+ hid_info(hdev,
+ "fixing up Topre REALFORCE keyboard report descriptor\n");
+ rdesc[31] = 0x02;
}
return rdesc;
}
@@ -38,6 +43,8 @@ static const struct hid_device_id topre_id_table[] = {
USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) },
{ }
};
MODULE_DEVICE_TABLE(hid, topre_id_table);
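Both Topre fixups patch the flag byte of an Input main item from 0x00 (Data,Array) to 0x02 (Data,Var), so the keyboard reports each key in its own field. A standalone sketch of the same idea; the offsets here are illustrative, while the real ones are device-specific as in the hunk above:

static const __u8 *demo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				     unsigned int *rsize)
{
	/* 0x81 is the Input main item tag; the following byte holds the
	 * flags: 0x00 = Data,Array,Abs and 0x02 = Data,Var,Abs. */
	if (*rsize >= 32 && rdesc[30] == 0x81 && rdesc[31] == 0x00)
		rdesc[31] = 0x02;

	return rdesc;
}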
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index ef26c7defcf6..a6044996abf2 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -842,7 +842,7 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
__u8 *params_ptr = NULL;
size_t params_len = 0;
/* Parameters string descriptor of a model with touch ring (HS610) */
- const __u8 touch_ring_model_params_buf[] = {
+ static const __u8 touch_ring_model_params_buf[] = {
0x13, 0x03, 0x70, 0xC6, 0x00, 0x06, 0x7C, 0x00,
0xFF, 0x1F, 0xD8, 0x13, 0x03, 0x0D, 0x10, 0x01,
0x04, 0x3C, 0x3E
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index 831b760c66ea..d4afbbd27807 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -106,6 +106,8 @@ static int winwing_init_led(struct hid_device *hdev,
"%s::%s",
dev_name(&input->dev),
info->led_name);
+ if (!led->cdev.name)
+ return -ENOMEM;
ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
if (ret)
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index ef7c595c9403..e8d51f410cc1 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -2,7 +2,7 @@
menuconfig I2C_HID
tristate "I2C HID support"
default y
- depends on I2C && INPUT && HID
+ depends on I2C
if I2C_HID
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4e87380d3edd..d3912e3f2f13 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -51,6 +51,7 @@
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
#define I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME BIT(6)
+#define I2C_HID_QUIRK_RE_POWER_ON BIT(7)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -136,6 +137,11 @@ static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
+ * Without additional power on command, at least some QTEC devices send garbage
+ */
+ { I2C_VENDOR_ID_QTEC, HID_ANY_ID,
+ I2C_HID_QUIRK_RE_POWER_ON },
+ /*
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
@@ -284,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
dev_err(&ihid->client->dev,
- "failed to set a report to device: %d\n", error);
+ "failed to get a report from device: %d\n", error);
return error;
}
@@ -1073,7 +1079,11 @@ static int i2c_hid_core_register_hid(struct i2c_hid *ihid)
return ret;
}
- return 0;
+ /* At least some QTEC devices need this after initialization */
+ if (ihid->quirks & I2C_HID_QUIRK_RE_POWER_ON)
+ ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+
+ return ret;
}
static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
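A minimal sketch of the vendor/product quirk-table pattern the i2c-hid hunks above extend, where HID_ANY_ID acts as a wildcard; the demo_* names are illustrative:

struct demo_quirk_entry {
	u16 vendor;
	u16 product;	/* HID_ANY_ID matches any product id */
	u32 quirks;
};

static u32 demo_lookup_quirks(const struct demo_quirk_entry *tbl,
			      size_t n, u16 vendor, u16 product)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].vendor != vendor)
			continue;
		if (tbl[i].product == HID_ANY_ID || tbl[i].product == product)
			return tbl[i].quirks;
	}

	return 0;
}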
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 253dc10d35ef..568c8688784e 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -6,7 +6,6 @@ config INTEL_ISH_HID
tristate "Intel Integrated Sensor Hub"
default n
depends on X86
- depends on HID
help
The Integrated Sensor Hub (ISH) enables the ability to offload
sensor polling and algorithm processing to a dedicated low power
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index cdd80c653918..07e90d51f073 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -36,6 +36,8 @@
#define PCI_DEVICE_ID_INTEL_ISH_ARL_H 0x7745
#define PCI_DEVICE_ID_INTEL_ISH_ARL_S 0x7F78
#define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_H 0xE345
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_P 0xE445
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 3cd53fc80634..4c861119e97a 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
return -EPIPE;
+
+ /* Send clock sync at once after reset */
+ ishtp_dev->prev_sync = 0;
+
/*
* Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
* RESET_NOTIFY_ACK - FW will be checking for it
@@ -577,15 +581,14 @@ static void fw_reset_work_fn(struct work_struct *work)
*/
static void _ish_sync_fw_clock(struct ishtp_device *dev)
{
- static unsigned long prev_sync;
- uint64_t usec;
+ struct ipc_time_update_msg time = {};
- if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
+ if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
return;
- prev_sync = jiffies;
- usec = ktime_to_us(ktime_get_boottime());
- ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+ dev->prev_sync = jiffies;
+ /* The fields of 'time' are filled in while sending the message */
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time));
}
/**
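The ipc.c hunk above moves the throttle state from a function-static variable into the device structure so it can be zeroed after a firmware reset, forcing an immediate sync. A minimal sketch of the jiffies-based throttle; the demo_* names are illustrative:

struct demo_dev {
	unsigned long prev_sync;	/* jiffies of the last sync, 0 forces one */
};

static void demo_maybe_sync(struct demo_dev *dev)
{
	/* run at most once per 20 seconds, unless prev_sync was zeroed */
	if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
		return;

	dev->prev_sync = jiffies;
	/* ... send the MNG_SYNC_FW_CLOCK message here ... */
}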
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 9e2401291a2f..ff0fc8010072 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -26,9 +26,11 @@
enum ishtp_driver_data_index {
ISHTP_DRIVER_DATA_NONE,
ISHTP_DRIVER_DATA_LNL_M,
+ ISHTP_DRIVER_DATA_PTL,
};
#define ISH_FW_GEN_LNL_M "lnlm"
+#define ISH_FW_GEN_PTL "ptl"
#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
@@ -37,6 +39,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = {
[ISHTP_DRIVER_DATA_LNL_M] = {
.fw_generation = ISH_FW_GEN_LNL_M,
},
+ [ISHTP_DRIVER_DATA_PTL] = {
+ .fw_generation = ISH_FW_GEN_PTL,
+ },
};
static const struct pci_device_id ish_pci_tbl[] = {
@@ -63,6 +68,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL},
{}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index cb04cd1d980b..6550ad5bfbb5 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 00c6f0ebf356..be2c62fc8251 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -261,12 +261,14 @@ err_hid_data:
*/
void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{
+ void *data;
int i;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) {
- kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ kfree(data);
client_data->hid_sensor_hubs[i] = NULL;
}
}
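The ishtp-hid hunk above reorders the teardown so the driver_data pointer is saved before hid_destroy_device() and freed afterwards, closing a use-after-free window. The pattern in isolation (names illustrative):

static void demo_destroy_one(struct hid_device *hdev)
{
	void *data = hdev->driver_data;	/* save before destruction */

	hid_destroy_device(hdev);	/* hdev must not be touched after this */
	kfree(data);			/* free through the saved pointer */
}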
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index 513d7a4a1b8a..97f4026b1627 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -252,27 +252,6 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
/**
- * ishtp_cl_tx_empty() -test whether client device tx buffer is empty
- * @cl: Pointer to client device instance
- *
- * Look client device tx buffer list, and check whether this list is empty
- *
- * Return: true if client tx buffer list is empty else false
- */
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
-{
- int tx_list_empty;
- unsigned long tx_flags;
-
- spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
- tx_list_empty = list_empty(&cl->tx_list.list);
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
-
- return !!tx_list_empty;
-}
-EXPORT_SYMBOL(ishtp_cl_tx_empty);
-
-/**
* ishtp_cl_rx_get_rb() -Get a rb from client device rx buffer list
* @cl: Pointer to client device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index e61b01e9902e..21a2c0773cc2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -14,25 +14,6 @@
#include "hbm.h"
#include "client.h"
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
-{
- unsigned long tx_free_flags;
- int size;
-
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
-
- return size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
-
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
-{
- return cl->tx_ring_free_size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
-
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index d9d398fadcf7..0efd49dd2530 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -120,8 +120,6 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index 07fdd52e4c5e..26bf9045a8de 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -15,36 +15,6 @@
#include "loader.h"
/**
- * ishtp_dev_state_str() -Convert to string format
- * @state: state to convert
- *
- * Convert state to string for prints
- *
- * Return: character pointer to converted string
- */
-const char *ishtp_dev_state_str(int state)
-{
- switch (state) {
- case ISHTP_DEV_INITIALIZING:
- return "INITIALIZING";
- case ISHTP_DEV_INIT_CLIENTS:
- return "INIT_CLIENTS";
- case ISHTP_DEV_ENABLED:
- return "ENABLED";
- case ISHTP_DEV_RESETTING:
- return "RESETTING";
- case ISHTP_DEV_DISABLED:
- return "DISABLED";
- case ISHTP_DEV_POWER_DOWN:
- return "POWER_DOWN";
- case ISHTP_DEV_POWER_UP:
- return "POWER_UP";
- default:
- return "unknown";
- }
-}
-
-/**
* ishtp_device_init() - ishtp device init
* @dev: ISHTP device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index effbb442c727..ec9f6e87aaf2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -57,7 +57,6 @@ enum ishtp_dev_state {
ISHTP_DEV_POWER_DOWN,
ISHTP_DEV_POWER_UP
};
-const char *ishtp_dev_state_str(int state);
struct ishtp_cl;
@@ -254,6 +253,8 @@ struct ishtp_device {
unsigned int ipc_tx_cnt;
unsigned long long ipc_tx_bytes_cnt;
+ /* Time of the last clock sync */
+ unsigned long prev_sync;
const struct ishtp_hw_ops *ops;
size_t mtu;
uint32_t ishtp_msg_hdr;
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
new file mode 100644
index 000000000000..0351d1137607
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2024, Intel Corporation.
+
+menu "Intel THC HID Support"
+ depends on X86_64 && PCI
+
+config INTEL_THC_HID
+ tristate "Intel Touch Host Controller"
+ depends on ACPI
+ help
+ THC (Touch Host Controller) is the name of the IP block in the PCH
+ that interfaces with touch devices (e.g. touchscreen, touchpad).
+ It comprises three key functional blocks: a natively half-duplex
+ Quad I/O capable SPI master; a low-latency I2C interface to support
+ HIDI2C compliant devices; and a hardware sequencer with read/write
+ DMA capability to system memory.
+
+ Say Y/M here if you want to support Intel THC. If unsure, say N.
+
+config INTEL_QUICKSPI
+ tristate "Intel QuickSPI driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+ Intel QuickSPI, based on the Touch Host Controller (THC), implements
+ the HIDSPI (HID over SPI) protocol. It configures the THC to work in
+ SPI mode and controls the THC hardware sequencer to accelerate the
+ HIDSPI transaction flow.
+
+ Say Y/M here if you want to support Intel QuickSPI. If unsure, say N.
+
+config INTEL_QUICKI2C
+ tristate "Intel QuickI2C driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+ Intel QuickI2C uses the Touch Host Controller (THC) hardware to
+ implement the HIDI2C (HID over I2C) protocol. It configures the THC
+ to work in I2C mode and controls the THC hardware sequencer to
+ accelerate the HIDI2C transaction flow.
+
+ Say Y/M here if you want to support Intel QuickI2C. If unsure, say N.
+
+endmenu
diff --git a/drivers/hid/intel-thc-hid/Makefile b/drivers/hid/intel-thc-hid/Makefile
new file mode 100644
index 000000000000..6f762d87af07
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Touch Host Controller (THC) drivers
+# Copyright (c) 2024, Intel Corporation.
+#
+#
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc.o
+intel-thc-objs += intel-thc/intel-thc-dev.o
+intel-thc-objs += intel-thc/intel-thc-dma.o
+
+obj-$(CONFIG_INTEL_QUICKSPI) += intel-quickspi.o
+intel-quickspi-objs += intel-quickspi/pci-quickspi.o
+intel-quickspi-objs += intel-quickspi/quickspi-hid.o
+intel-quickspi-objs += intel-quickspi/quickspi-protocol.o
+
+obj-$(CONFIG_INTEL_QUICKI2C) += intel-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/pci-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-hid.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-protocol.o
+
+ccflags-y += -I $(src)/intel-thc
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
new file mode 100644
index 000000000000..2de93f4a25ca
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -0,0 +1,969 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/* THC QuickI2C ACPI method to get device properties */
+/* HIDI2C device method */
+static guid_t i2c_hid_guid =
+ GUID_INIT(0x3cdff6f7, 0x4267, 0x4555, 0xad, 0x05, 0xb3, 0x0a, 0x3d, 0x89, 0x38, 0xde);
+
+/* platform method */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * quicki2c_acpi_get_dsm_property - Query device ACPI DSM parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @guid: the ACPI method's guid
+ * @rev: the ACPI method's revision
+ * @func: the ACPI method's function number
+ * @type: the ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSM parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsm_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %d, func: %d, type: %d\n",
+ (int)rev, (int)func, (int)type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quicki2c_acpi_get_dsd_property - Query device ACPI DSD parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @dsd_method_name: the ACPI method's property name
+ * @type: the ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSD parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string dsd_method_name,
+ acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object obj = { .type = type };
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ union acpi_object *ret_obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle,
+ "Can't evaluate %s method: %d\n", dsd_method_name, status);
+ return -ENODEV;
+ }
+
+ ret_obj = buffer.pointer;
+
+ memcpy(prop_buf, ret_obj->buffer.pointer, ret_obj->buffer.length);
+
+ return 0;
+}
+
+/**
+ * quicki2c_get_acpi_resources - Query all quicki2c devices' ACPI parameters
+ *
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function gets all of the quicki2c device's ACPI resources.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qcdev->dev);
+ struct quicki2c_subip_acpi_parameter i2c_param;
+ struct quicki2c_subip_acpi_config i2c_config;
+ u32 hid_desc_addr;
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qcdev->dev, "Invalid acpi device pointer\n");
+ return ret;
+ }
+
+ qcdev->acpi_dev = adev;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &i2c_hid_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR,
+ ACPI_TYPE_INTEGER,
+ &hid_desc_addr);
+ if (ret)
+ return ret;
+
+ qcdev->hid_desc_addr = (u16)hid_desc_addr;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ICRS,
+ ACPI_TYPE_BUFFER, &i2c_param);
+ if (ret)
+ return ret;
+
+ if (i2c_param.addressing_mode != HIDI2C_ADDRESSING_MODE_7BIT)
+ return -EOPNOTSUPP;
+
+ qcdev->i2c_slave_addr = i2c_param.device_address;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ISUB,
+ ACPI_TYPE_BUFFER, &i2c_config);
+ if (ret)
+ return ret;
+
+ if (i2c_param.connection_speed > 0 &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_STANDARD;
+ qcdev->i2c_clock_hcnt = i2c_config.SMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.SMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FPHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FPLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_HIGH_SPEED;
+ qcdev->i2c_clock_hcnt = i2c_config.HMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.HMLX;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
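+/*
+ * Illustrative sketch (not part of the driver): the connection-speed
+ * ladder above maps the ACPI-reported bus speed into one of four
+ * buckets. A table-driven equivalent using the same threshold macros;
+ * the demo_* names are assumptions:
+ *
+ *	static const struct { u32 max_hz; int mode; } demo_buckets[] = {
+ *		{ QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED,   THC_I2C_STANDARD },
+ *		{ QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED,       THC_I2C_FAST_AND_PLUS },
+ *		{ QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED,   THC_I2C_FAST_AND_PLUS },
+ *		{ QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED, THC_I2C_HIGH_SPEED },
+ *	};
+ *
+ * Walking the table and picking the first bucket whose max_hz is at or
+ * above the reported speed selects the same mode as the if/else chain,
+ * with the hcnt/lcnt pair then taken from the matching ACPI config fields.
+ */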
+
+/**
+ * quicki2c_irq_quick_handler - The ISR of the quicki2c driver
+ *
+ * @irq: the IRQ number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further processing is needed, otherwise
+ * IRQ_HANDLED.
+ */
+static irqreturn_t quicki2c_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Disable the THC interrupt before the current interrupt is handled */
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover the THC and the device
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure the THC to recover
+ * the transfer between the device and the THC.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int try_recover(struct quicki2c_device *qcdev)
+{
+ int ret;
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Reconfig DMA failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int handle_input_report(struct quicki2c_device *qcdev)
+{
+ struct hidi2c_report_packet *pkt = (struct hidi2c_report_packet *)qcdev->input_buf;
+ int rx_dma_finished = 0;
+ size_t report_len;
+ int ret;
+
+ while (!rx_dma_finished) {
+ ret = thc_rxdma_read(qcdev->thc_hw, THC_RXDMA2,
+ (u8 *)pkt, &report_len,
+ &rx_dma_finished);
+ if (ret)
+ return ret;
+
+ if (!pkt->len) {
+ if (qcdev->state == QUICKI2C_RESETING) {
+ qcdev->reset_ack = true;
+ wake_up(&qcdev->reset_ack_wq);
+
+ qcdev->state = QUICKI2C_RESETED;
+ } else {
+ dev_warn(qcdev->dev, "unexpected DIR happen\n");
+ }
+
+ continue;
+ }
+
+ /* discard samples until driver probe completes */
+ if (qcdev->state != QUICKI2C_ENABLED)
+ continue;
+
+ quicki2c_hid_send_report(qcdev, pkt->data,
+ HIDI2C_DATA_LEN(le16_to_cpu(pkt->len)));
+ }
+
+ return 0;
+}
+
+/**
+ * quicki2c_irq_thread_handler - IRQ thread handler of quicki2c driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quicki2c device structure
+ *
+ * Return: IRQ_HANDLED to finish this handler.
+ */
+static irqreturn_t quicki2c_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qcdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT) ||
+ int_mask & BIT(THC_UNKNOWN_INT)) {
+ err_recover = 1;
+ goto exit;
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
+ err_recover = handle_input_report(qcdev);
+ if (err_recover)
+ goto exit;
+ }
+
+exit:
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qcdev))
+ qcdev->state = QUICKI2C_DISABLED;
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * quicki2c_dev_init - Initialize quicki2c device
+ *
+ * @pdev: pointer to the THC PCI device
+ * @mem_addr: pointer to the mapped MMIO memory address
+ *
+ * Allocate the quicki2c device structure and initialize the THC device,
+ * then configure the THC to HIDI2C mode.
+ *
+ * On success, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quicki2c device structure on success,
+ * or an ERR_PTR on failure.
+ */
+static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __iomem *mem_addr)
+{
+ struct device *dev = &pdev->dev;
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = devm_kzalloc(dev, sizeof(struct quicki2c_device), GFP_KERNEL);
+ if (!qcdev)
+ return ERR_PTR(-ENOMEM);
+
+ qcdev->pdev = pdev;
+ qcdev->dev = dev;
+ qcdev->mem_addr = mem_addr;
+ qcdev->state = QUICKI2C_DISABLED;
+
+ init_waitqueue_head(&qcdev->reset_ack_wq);
+
+ /* thc hw init */
+ qcdev->thc_hw = thc_dev_init(qcdev->dev, qcdev->mem_addr);
+ if (IS_ERR(qcdev->thc_hw)) {
+ ret = PTR_ERR(qcdev->thc_hw);
+ dev_err_once(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quicki2c_get_acpi_resources(qcdev);
+ if (ret) {
+ dev_err_once(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret) {
+ dev_err_once(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ERR_PTR(ret);
+
+ thc_int_trigger_type_select(qcdev->thc_hw, false);
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ qcdev->state = QUICKI2C_INITED;
+
+ return qcdev;
+}
+
+/**
+ * quicki2c_dev_deinit - De-initialize quicki2c device
+ *
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Disable the THC interrupt and deinitialize the THC.
+ */
+static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
+{
+ thc_interrupt_enable(qcdev->thc_hw, false);
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ qcdev->state = QUICKI2C_DISABLED;
+}
+
+/**
+ * quicki2c_dma_init - Configure THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and
+ * max output length) to allocate THC DMA buffers and configure the THC
+ * DMA engines.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int quicki2c_dma_init(struct quicki2c_device *qcdev)
+{
+ size_t swdma_max_len;
+ int ret;
+
+ swdma_max_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ ret = thc_dma_set_max_packet_sizes(qcdev->thc_hw, 0,
+ le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.max_output_len),
+ swdma_max_len);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quicki2c_dma_deinit - Release THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ *
+ */
+static void quicki2c_dma_deinit(struct quicki2c_device *qcdev)
+{
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+}
+
+/**
+ * quicki2c_alloc_report_buf - Alloc report buffers
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Allocate the report descriptor buffer; it will be used to store the
+ * TIC's HID report descriptor.
+ *
+ * Allocate the input report buffer; it will be used to receive HID input
+ * report data from the TIC.
+ *
+ * Allocate the output report buffer; it will be used to store HID output
+ * reports, such as set feature.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int quicki2c_alloc_report_buf(struct quicki2c_device *qcdev)
+{
+ size_t max_report_len;
+
+ qcdev->report_descriptor = devm_kzalloc(qcdev->dev,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len),
+ GFP_KERNEL);
+ if (!qcdev->report_descriptor)
+ return -ENOMEM;
+
+ /*
+ * Some HIDI2C devices don't declare the input/output max lengths
+ * correctly; use a default 4K buffer to avoid DMA buffer overruns.
+ */
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len), SZ_4K);
+
+ qcdev->input_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->input_buf)
+ return -ENOMEM;
+
+ if (!le16_to_cpu(qcdev->dev_desc.max_output_len))
+ qcdev->dev_desc.max_output_len = cpu_to_le16(SZ_4K);
+
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_output_len),
+ max_report_len);
+
+ qcdev->report_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->report_buf)
+ return -ENOMEM;
+
+ qcdev->report_len = max_report_len;
+
+ return 0;
+}
+
+/*
+ * quicki2c_probe: Quicki2c driver probe function
+ *
+ * @pdev: pointer to the PCI device
+ * @id: pointer to the pci_device_id structure
+ *
+ * This function initializes the THC and the HIDI2C device; the flow is:
+ * - do THC pci device initialization
+ * - query HIDI2C ACPI parameters
+ * - configure THC to HIDI2C mode
+ * - go through HIDI2C enumeration flow
+ * |- read device descriptor
+ * |- reset HIDI2C device
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int quicki2c_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quicki2c_device *qcdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err_once(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err_once(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qcdev = quicki2c_dev_init(pdev, mem_addr);
+ if (IS_ERR(qcdev)) {
+ dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
+ ret = PTR_ERR(qcdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qcdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quicki2c_irq_quick_handler,
+ quicki2c_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qcdev);
+ if (ret) {
+ dev_err_once(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_device_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get device descriptor failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_alloc_report_buf(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Alloc report buffers failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_dma_init(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Setup THC DMA failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ goto dev_deinit;
+
+ ret = quicki2c_set_power(qcdev, HIDI2C_ON);
+ if (ret) {
+ dev_err(&pdev->dev, "Set Power On command failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_reset(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Reset HIDI2C device failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_report_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quicki2c_hid_probe(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qcdev->state = QUICKI2C_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qcdev->dev);
+ pm_runtime_set_autosuspend_delay(qcdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_noidle(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickI2C probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quicki2c_dma_deinit(qcdev);
+dev_deinit:
+ quicki2c_dev_deinit(qcdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quicki2c_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quicki2c_remove(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+ quicki2c_hid_remove(qcdev);
+ quicki2c_dma_deinit(qcdev);
+
+ pm_runtime_get_noresume(qcdev->dev);
+
+ quicki2c_dev_deinit(qcdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quicki2c_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier; it's a simplified
+ * version of remove, so we go down faster.
+ */
+static void quicki2c_shutdown(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+ /* Must stop DMA before reboot to avoid DMA entering into unknown state */
+ quicki2c_dma_deinit(qcdev);
+
+ quicki2c_dev_deinit(qcdev);
+}
+
+static int quicki2c_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ /*
+ * The THC I2C sub-IP has no register auto save/restore support,
+ * so the driver has to do it explicitly for every D3 entry.
+ */
+ ret = thc_i2c_subip_regs_save(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_regs_restore(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ quicki2c_dma_deinit(qcdev);
+
+ return 0;
+}
+
+static int quicki2c_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ /* Reconfig THC HW when back from hibernate */
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qcdev->thc_hw,
+ qcdev->active_ltr_val,
+ qcdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static int quicki2c_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
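+ /* Runtime suspend only switches to the low power LTR value; device context stays live */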
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quicki2c_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quicki2c_pm_ops = {
+ .suspend = quicki2c_suspend,
+ .resume = quicki2c_resume,
+ .freeze = quicki2c_freeze,
+ .thaw = quicki2c_thaw,
+ .poweroff = quicki2c_poweroff,
+ .restore = quicki2c_restore,
+ .runtime_suspend = quicki2c_runtime_suspend,
+ .runtime_resume = quicki2c_runtime_resume,
+ .runtime_idle = NULL,
+};
+
+static const struct pci_device_id quicki2c_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
+
+static struct pci_driver quicki2c_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quicki2c_pci_tbl,
+ .probe = quicki2c_probe,
+ .remove = quicki2c_remove,
+ .shutdown = quicki2c_shutdown,
+ .driver.pm = &quicki2c_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quicki2c_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickI2C Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
new file mode 100644
index 000000000000..6ddb584bd611
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_DEV_H_
+#define _QUICKI2C_DEV_H_
+
+#include <linux/hid-over-i2c.h>
+#include <linux/workqueue.h>
+
+#define THC_LNL_DEVICE_ID_I2C_PORT1 0xA848
+#define THC_LNL_DEVICE_ID_I2C_PORT2 0xA84A
+#define THC_PTL_H_DEVICE_ID_I2C_PORT1 0xE348
+#define THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
+#define THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
+#define THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+
+/* Packet size value, the unit is 16 bytes */
+#define MAX_PACKET_SIZE_VALUE_LNL 256
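+/* 256 * 16 bytes = 4KB maximum packet size on LNL */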
+
+/* HIDI2C special ACPI parameters DSD name */
+#define QUICKI2C_ACPI_METHOD_NAME_ICRS "ICRS"
+#define QUICKI2C_ACPI_METHOD_NAME_ISUB "ISUB"
+
+/* HIDI2C special ACPI parameters DSM methods */
+#define QUICKI2C_ACPI_REVISION_NUM 1
+#define QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR 1
+#define QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL 1
+#define QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL 2
+
+#define QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED 100000
+#define QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED 400000
+#define QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED 1000000
+#define QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED 3400000
+
+#define QUICKI2C_DEFAULT_ACTIVE_LTR_VALUE 5
+#define QUICKI2C_DEFAULT_LP_LTR_VALUE 500
+#define QUICKI2C_RPM_TIMEOUT_MS 500
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between THC active LTR
+ * and low power LTR to save CPU power.
+ * The default is 5000ms: if no touch event occurs within this time, THC
+ * switches to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
+
+enum quicki2c_dev_state {
+ QUICKI2C_NONE,
+ QUICKI2C_RESETING,
+ QUICKI2C_RESETED,
+ QUICKI2C_INITED,
+ QUICKI2C_ENABLED,
+ QUICKI2C_DISABLED,
+};
+
+enum {
+ HIDI2C_ADDRESSING_MODE_7BIT,
+ HIDI2C_ADDRESSING_MODE_10BIT,
+};
+
+/**
+ * struct quicki2c_subip_acpi_parameter - QuickI2C ACPI DSD parameters
+ * @device_address: I2C device slave address
+ * @connection_speed: I2C device expected connection speed
+ * @addressing_mode: I2C device slave address mode, 7bit or 10bit
+ *
+ * These properties are retrieved via the QUICKI2C_ACPI_METHOD_NAME_ICRS
+ * method and used as bus parameters.
+ */
+struct quicki2c_subip_acpi_parameter {
+ u16 device_address;
+ u64 connection_speed;
+ u8 addressing_mode;
+} __packed;
+
+/**
+ * struct quicki2c_subip_acpi_config - QuickI2C ACPI DSD parameters
+ * @SMHX: Standard Mode (100 kbit/s) Serial Clock Line HIGH Period
+ * @SMLX: Standard Mode (100 kbit/s) Serial Clock Line LOW Period
+ * @SMTD: Standard Mode (100 kbit/s) Serial Data Line Transmit Hold Period
+ * @SMRD: Standard Mode (100 kbit/s) Serial Data Receive Hold Period
+ * @FMHX: Fast Mode (400 kbit/s) Serial Clock Line HIGH Period
+ * @FMLX: Fast Mode (400 kbit/s) Serial Clock Line LOW Period
+ * @FMTD: Fast Mode (400 kbit/s) Serial Data Line Transmit Hold Period
+ * @FMRD: Fast Mode (400 kbit/s) Serial Data Line Receive Hold Period
+ * @FMSL: Maximum length (in ic_clk_cycles) of suppressed spikes
+ * in Standard Mode, Fast Mode and Fast Mode Plus
+ * @FPHX: Fast Mode Plus (1Mbit/sec) Serial Clock Line HIGH Period
+ * @FPLX: Fast Mode Plus (1Mbit/sec) Serial Clock Line LOW Period
+ * @FPTD: Fast Mode Plus (1Mbit/sec) Serial Data Line Transmit HOLD Period
+ * @FPRD: Fast Mode Plus (1Mbit/sec) Serial Data Line Receive HOLD Period
+ * @HMHX: High Speed Mode Plus (3.4Mbits/sec) Serial Clock Line HIGH Period
+ * @HMLX: High Speed Mode Plus (3.4Mbits/sec) Serial Clock Line LOW Period
+ * @HMTD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Transmit HOLD Period
+ * @HMRD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Receive HOLD Period
+ * @HMSL: Maximum length (in ic_clk_cycles) of suppressed spikes in High Speed Mode
+ *
+ * These properties are retrieved via the QUICKI2C_ACPI_METHOD_NAME_ISUB
+ * method and used for I2C timing configuration.
+ */
+struct quicki2c_subip_acpi_config {
+ u64 SMHX;
+ u64 SMLX;
+ u64 SMTD;
+ u64 SMRD;
+
+ u64 FMHX;
+ u64 FMLX;
+ u64 FMTD;
+ u64 FMRD;
+ u64 FMSL;
+
+ u64 FPHX;
+ u64 FPLX;
+ u64 FPTD;
+ u64 FPRD;
+
+ u64 HMHX;
+ u64 HMLX;
+ u64 HMTD;
+ u64 HMRD;
+ u64 HMSL;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quicki2c_device - THC QuickI2C device struct
+ * @dev: pointer to kernel device
+ * @pdev: pointer to PCI device
+ * @thc_hw: pointer to THC device
+ * @hid_dev: pointer to HID device
+ * @acpi_dev: pointer to ACPI device
+ * @state: THC I2C device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for HIDI2C protocol
+ * @i2c_slave_addr: HIDI2C device slave address
+ * @hid_desc_addr: register address used to retrieve the HID device descriptor
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @i2c_speed_mode: 0 - standard mode, 1 - fast mode, 2 - fast mode plus
+ * @i2c_clock_hcnt: I2C CLK high period time (unit in cycle count)
+ * @i2c_clock_lcnt: I2C CLK low period time (unit in cycle count)
+ * @report_descriptor: store a copy of device report descriptor
+ * @input_buf: store a copy of latest input report data
+ * @report_buf: store a copy of latest input/output report packet from set/get feature
+ * @report_len: the length of input/output report packet
+ * @reset_ack_wq: wait queue for the reset response from the device
+ * @reset_ack: indicate whether the reset response was received
+ */
+struct quicki2c_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ enum quicki2c_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidi2c_dev_descriptor dev_desc;
+ u8 i2c_slave_addr;
+ u16 hid_desc_addr;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u32 i2c_speed_mode;
+ u32 i2c_clock_hcnt;
+ u32 i2c_clock_lcnt;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+};
+
+#endif /* _QUICKI2C_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
new file mode 100644
index 000000000000..5c3ec95bb3fd
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/**
+ * quicki2c_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function is called by HID core during hid_add_device().
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+static int quicki2c_hid_parse(struct hid_device *hid)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+
+ if (qcdev->report_descriptor)
+ return hid_parse_report(hid, qcdev->report_descriptor,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ dev_err_once(qcdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quicki2c_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_close(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quicki2c_get_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quicki2c_set_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ default:
+ dev_err(qcdev->dev, "Unsupported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return ret;
+}
+
+static int quicki2c_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quicki2c_hid_ll_driver = {
+ .parse = quicki2c_hid_parse,
+ .start = quicki2c_hid_start,
+ .stop = quicki2c_hid_stop,
+ .open = quicki2c_hid_open,
+ .close = quicki2c_hid_close,
+ .power = quicki2c_hid_power,
+ .raw_request = quicki2c_hid_raw_request,
+};
+
+/**
+ * quicki2c_hid_probe() - Register HID low level driver
+ *
+ * @qcdev: pointer to quicki2c device
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quicki2c_hid_probe(struct quicki2c_device *qcdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quicki2c_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qcdev->dev;
+ hid->driver_data = qcdev;
+ hid->version = le16_to_cpu(qcdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qcdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qcdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qcdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quicki2c_hid_remove() - Destroy HID device
+ *
+ * @qcdev: pointer to quicki2c device
+ */
+void quicki2c_hid_remove(struct quicki2c_device *qcdev)
+{
+ hid_destroy_device(qcdev->hid_dev);
+}
+
+/**
+ * quicki2c_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qcdev: pointer to quicki2c device
+ * @data: pointer to input report data buffer
+ * @data_len: the length of input report data
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qcdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qcdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
new file mode 100644
index 000000000000..e80df5f339fe
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_HID_H_
+#define _QUICKI2C_HID_H_
+
+struct quicki2c_device;
+
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_size);
+int quicki2c_hid_probe(struct quicki2c_device *qcdev);
+void quicki2c_hid_remove(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
new file mode 100644
index 000000000000..f493df0d5dc4
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/hid.h>
+#include <linux/hid-over-i2c.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+static int quicki2c_init_write_buf(struct quicki2c_device *qcdev, u32 cmd, int cmd_len,
+ bool append_data_reg, u8 *data, int data_len,
+ u8 *write_buf, int write_buf_len)
+{
+ int buf_len, offset = 0;
+
+ buf_len = HIDI2C_REG_LEN + cmd_len;
+
+ if (append_data_reg)
+ buf_len += HIDI2C_REG_LEN;
+
+ if (data && data_len)
+ buf_len += data_len + HIDI2C_LENGTH_LEN;
+
+ if (buf_len > write_buf_len)
+ return -EINVAL;
+
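+ /*
+ * HIDI2C write layout: command register address, command bytes,
+ * optional data register address, then a 16-bit length field
+ * followed by the payload itself.
+ */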
+ memcpy(write_buf, &qcdev->dev_desc.cmd_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ memcpy(write_buf + offset, &cmd, cmd_len);
+ offset += cmd_len;
+
+ if (append_data_reg) {
+ memcpy(write_buf + offset, &qcdev->dev_desc.data_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ }
+
+ if (data && data_len) {
+ __le16 len = cpu_to_le16(data_len + HIDI2C_LENGTH_LEN);
+
+ memcpy(write_buf + offset, &len, HIDI2C_LENGTH_LEN);
+ offset += HIDI2C_LENGTH_LEN;
+ memcpy(write_buf + offset, data, data_len);
+ }
+
+ return buf_len;
+}
+
+static int quicki2c_encode_cmd(struct quicki2c_device *qcdev, u32 *cmd_buf,
+ u8 opcode, u8 report_type, u8 report_id)
+{
+ int cmd_len;
+
+ *cmd_buf = FIELD_PREP(HIDI2C_CMD_OPCODE, opcode) |
+ FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, report_type);
+
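+ /* Report IDs that don't fit the 4-bit field use the extended 3-byte command form */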
+ if (report_id < HIDI2C_CMD_MAX_RI) {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, report_id);
+ cmd_len = HIDI2C_CMD_LEN;
+ } else {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, HIDI2C_CMD_MAX_RI) |
+ FIELD_PREP(HIDI2C_CMD_3RD_BYTE, report_id);
+ cmd_len = HIDI2C_CMD_LEN_OPT;
+ }
+
+ return cmd_len;
+}
+
+static int write_cmd_to_txdma(struct quicki2c_device *qcdev, int opcode,
+ int report_type, int report_id, u8 *buf, int buf_len)
+{
+ size_t write_buf_len;
+ int cmd_len, ret;
+ u32 cmd;
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, opcode, report_type, report_id);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, buf ? true : false, buf,
+ buf_len, qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ return thc_dma_write(qcdev->thc_hw, qcdev->report_buf, write_buf_len);
+}
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state)
+{
+ return write_cmd_to_txdma(qcdev, HIDI2C_SET_POWER, HIDI2C_RESERVED, power_state, NULL, 0);
+}
+
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev)
+{
+ u32 read_len = 0;
+ int ret;
+
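+ /* Use PIO here: DMA isn't configured until the device descriptor is known */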
+ ret = thc_tic_pio_write_and_read(qcdev->thc_hw, qcdev->hid_desc_addr,
+ HIDI2C_REG_LEN, NULL, HIDI2C_DEV_DESC_LEN,
+ &read_len, (u32 *)&qcdev->dev_desc);
+ if (ret || HIDI2C_DEV_DESC_LEN != read_len) {
+ dev_err_once(qcdev->dev, "Get device descriptor failed, ret %d, read len %u\n",
+ ret, read_len);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(qcdev->dev_desc.bcd_ver) != HIDI2C_HID_DESC_BCDVERSION)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev)
+{
+ u16 desc_reg = le16_to_cpu(qcdev->dev_desc.report_desc_reg);
+ size_t read_len = le16_to_cpu(qcdev->dev_desc.report_desc_len);
+ u32 prd_len = read_len;
+
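+ /* One SWDMA transaction: write the descriptor register address, read the whole descriptor back */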
+ return thc_swdma_read(qcdev->thc_hw, (u8 *)&desc_reg, HIDI2C_REG_LEN,
+ &prd_len, qcdev->report_descriptor, &read_len);
+}
+
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ struct hidi2c_report_packet *rpt;
+ size_t write_buf_len, read_len = 0;
+ int cmd_len, rep_type;
+ u32 cmd;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = HIDI2C_INPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for GET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, HIDI2C_GET_REPORT, rep_type, reportnum);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, true, NULL, 0,
+ qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ rpt = (struct hidi2c_report_packet *)qcdev->input_buf;
+
+ ret = thc_swdma_read(qcdev->thc_hw, qcdev->report_buf, write_buf_len,
+ NULL, rpt, &read_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Get report failed, ret %d, read len (%zu vs %d)\n",
+ ret, read_len, buf_len);
+ return ret;
+ }
+
+ if (HIDI2C_DATA_LEN(le16_to_cpu(rpt->len)) != buf_len || rpt->data[0] != reportnum) {
+ dev_err_once(qcdev->dev, "Invalid packet, len (%d vs %d) report id (%d vs %d)\n",
+ le16_to_cpu(rpt->len), buf_len, rpt->data[0], reportnum);
+ return -EINVAL;
+ }
+
+ memcpy(buf, rpt->data, buf_len);
+
+ return buf_len;
+}
+
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = HIDI2C_OUTPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for SET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_SET_REPORT, rep_type, reportnum, buf, buf_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Set Report failed, ret %d\n", ret);
+ return ret;
+ }
+
+ return buf_len;
+}
+
+#define HIDI2C_RESET_TIMEOUT 5
+
+int quicki2c_reset(struct quicki2c_device *qcdev)
+{
+ int ret;
+
+ qcdev->reset_ack = false;
+ qcdev->state = QUICKI2C_RESETING;
+
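+ /* The reset response arrives via the IRQ thread, which sets reset_ack and wakes us up */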
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_RESET, HIDI2C_RESERVED, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Send reset command failed, ret %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qcdev->reset_ack_wq, qcdev->reset_ack,
+ HIDI2C_RESET_TIMEOUT * HZ);
+ if (ret <= 0 || !qcdev->reset_ack) {
+ dev_err_once(qcdev->dev,
+ "Wait reset response timed out ret:%d timeout:%ds\n",
+ ret, HIDI2C_RESET_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
new file mode 100644
index 000000000000..bf4908cce59c
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_PROTOCOL_H_
+#define _QUICKI2C_PROTOCOL_H_
+
+#include <linux/hid-over-i2c.h>
+
+struct quicki2c_device;
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state);
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_reset(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
new file mode 100644
index 000000000000..6b2c7620be2b
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -0,0 +1,989 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+static struct quickspi_driver_data mtl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL,
+};
+
+static struct quickspi_driver_data lnl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
+
+static struct quickspi_driver_data ptl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
+
+/* THC QuickSPI ACPI method to get device properties */
+/* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */
+static guid_t hidspi_guid =
+ GUID_INIT(0x6e2ac436, 0x0fcf, 0x41af, 0xa2, 0x65, 0xb3, 0x2a,
+ 0x22, 0x0d, 0xcf, 0xab);
+
+/* QuickSpi Method: {300D35b7-ac20-413e-8e9c-92e4dafd0afe} */
+static guid_t thc_quickspi_guid =
+ GUID_INIT(0x300d35b7, 0xac20, 0x413e, 0x8e, 0x9c, 0x92, 0xe4,
+ 0xda, 0xfd, 0x0a, 0xfe);
+
+/* Platform Method: {84005682-5b71-41a4-8d66-8130f787a138} */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30,
+ 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * thc_acpi_get_property - Query device ACPI parameter
+ *
+ * @adev: pointer to ACPI device
+ * @guid: ACPI method's guid
+ * @rev: ACPI method's revision
+ * @func: ACPI method's function number
+ * @type: ACPI parameter's data type
+ * @prop_buf: pointer to return buffer
+ *
+ * This is a helper function for a device to query its ACPI parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int thc_acpi_get_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %llu, func: %llu, type: %u\n",
+ rev, func, type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quickspi_get_acpi_resources - Query all QuickSPI device ACPI parameters
+ *
+ * @qsdev: pointer to quickspi device
+ *
+ * This function retrieves all of the QuickSPI device's ACPI resources.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_get_acpi_resources(struct quickspi_device *qsdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qsdev->dev);
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qsdev->dev, "no valid ACPI companion\n");
+ return ret;
+ }
+
+ qsdev->acpi_dev = adev;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_hdr_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_bdy_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->output_report_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_read_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_write_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_IO_MODE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_read_io_mode);
+ if (ret)
+ return ret;
+
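+ /* Bit 13 tells whether writes use the multi-IO mode encoded in bits 15:14 */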
+ if (qsdev->spi_read_io_mode & SPI_WRITE_IO_MODE)
+ qsdev->spi_write_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+ else
+ qsdev->spi_write_io_mode = THC_SINGLE_IO;
+
+ qsdev->spi_read_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_freq_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->limit_packet_size);
+ if (ret)
+ return ret;
+
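+ /* Fall back to the minimum packet size if the platform limits it or no per-SoC data exists */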
+ if (qsdev->limit_packet_size || !qsdev->driver_data)
+ qsdev->spi_packet_size = DEFAULT_MIN_PACKET_SIZE_VALUE;
+ else
+ qsdev->spi_packet_size = qsdev->driver_data->max_packet_size_value;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT,
+ ACPI_TYPE_INTEGER,
+ &qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ qsdev->performance_limit = FIELD_GET(PERFORMANCE_LIMITATION, qsdev->performance_limit);
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LP_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * quickspi_irq_quick_handler - The ISR of the quickspi driver
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further process needed.
+ */
+static irqreturn_t quickspi_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Disable THC interrupt before current interrupt be handled */
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover the THC and the touch device
+ * @qsdev: pointer to quickspi device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure THC to recover
+ * data transfer between the device and THC.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int try_recover(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(qsdev->dev, "Reset touch device failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Re-configure THC DMA failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * quickspi_irq_thread_handler - IRQ thread handler of quickspi driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quickspi device structure
+ *
+ * Return: IRQ_HANDLED to finish this handler.
+ */
+static irqreturn_t quickspi_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+ size_t input_len;
+ int read_finished = 0;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qsdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT)) {
+ err_recover = 1;
+ goto end;
+ }
+
+ if (int_mask & BIT(THC_NONDMA_INT)) {
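+ /* A non-DMA interrupt is either the reset ack or an event another waiter expects */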
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ } else {
+ qsdev->nondma_int_received = true;
+ wake_up_interruptible(&qsdev->nondma_int_received_wq);
+ }
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
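+ /* Drain every completed RxDMA buffer before re-enabling the interrupt */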
+ while (!read_finished) {
+ ret = thc_rxdma_read(qsdev->thc_hw, THC_RXDMA2, qsdev->input_buf,
+ &input_len, &read_finished);
+ if (ret) {
+ err_recover = 1;
+ goto end;
+ }
+
+ quickspi_handle_input_data(qsdev, input_len);
+ }
+ }
+
+end:
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qsdev))
+ qsdev->state = QUICKSPI_DISABLED;
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * quickspi_dev_init - Initialize quickspi device
+ *
+ * @pdev: pointer to the thc pci device
+ * @mem_addr: pointer to the MMIO memory address
+ * @id: pointer to the pci_device_id structure
+ *
+ * Allocate the quickspi device structure and initialize the THC device,
+ * then configure THC to HIDSPI mode.
+ *
+ * On success, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quickspi device structure on success,
+ * or an error pointer on failure.
+ */
+static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __iomem *mem_addr,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = devm_kzalloc(dev, sizeof(struct quickspi_device), GFP_KERNEL);
+ if (!qsdev)
+ return ERR_PTR(-ENOMEM);
+
+ qsdev->pdev = pdev;
+ qsdev->dev = dev;
+ qsdev->mem_addr = mem_addr;
+ qsdev->state = QUICKSPI_DISABLED;
+ qsdev->driver_data = (struct quickspi_driver_data *)id->driver_data;
+
+ init_waitqueue_head(&qsdev->reset_ack_wq);
+ init_waitqueue_head(&qsdev->nondma_int_received_wq);
+ init_waitqueue_head(&qsdev->report_desc_got_wq);
+ init_waitqueue_head(&qsdev->get_report_cmpl_wq);
+ init_waitqueue_head(&qsdev->set_report_cmpl_wq);
+
+ /* thc hw init */
+ qsdev->thc_hw = thc_dev_init(qsdev->dev, qsdev->mem_addr);
+ if (IS_ERR(qsdev->thc_hw)) {
+ ret = PTR_ERR(qsdev->thc_hw);
+ dev_err(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret) {
+ dev_err(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quickspi_get_acpi_resources(qsdev);
+ if (ret) {
+ dev_err(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for input/output address */
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ /* THC config for spi read operation */
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret) {
+ dev_err(dev, "thc_spi_read_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for spi write operation */
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret) {
+ dev_err(dev, "thc_spi_write_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ qsdev->state = QUICKSPI_INITED;
+
+ return qsdev;
+}
+
+/**
+ * quickspi_dev_deinit - De-initialize quickspi device
+ *
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Disable THC interrupt and deinitialize THC.
+ */
+static void quickspi_dev_deinit(struct quickspi_device *qsdev)
+{
+ thc_interrupt_enable(qsdev->thc_hw, false);
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ qsdev->state = QUICKSPI_DISABLED;
+}
+
+/**
+ * quickspi_dma_init - Configure THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and max
+ * output length) to allocate THC DMA buffers and configure THC DMA engines.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_dma_init(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = thc_dma_set_max_packet_sizes(qsdev->thc_hw, 0,
+ le16_to_cpu(qsdev->dev_desc.max_input_len),
+ le16_to_cpu(qsdev->dev_desc.max_output_len),
+ 0);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quickspi_dma_deinit - Release THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ *
+ */
+static void quickspi_dma_deinit(struct quickspi_device *qsdev)
+{
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+}
+
+/**
+ * quickspi_alloc_report_buf - Alloc report buffers
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Allocate the report descriptor buffer, used to store the TIC HID
+ * report descriptor.
+ *
+ * Allocate the input report buffer, used to receive HID input report
+ * data from the TIC.
+ *
+ * Allocate the output report buffer, used to store HID output reports,
+ * such as set feature.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_alloc_report_buf(struct quickspi_device *qsdev)
+{
+ size_t max_report_len;
+ size_t max_input_len;
+
+ qsdev->report_descriptor = devm_kzalloc(qsdev->dev,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ GFP_KERNEL);
+ if (!qsdev->report_descriptor)
+ return -ENOMEM;
+
+ max_input_len = max(le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->input_buf = devm_kzalloc(qsdev->dev, max_input_len, GFP_KERNEL);
+ if (!qsdev->input_buf)
+ return -ENOMEM;
+
+ max_report_len = max(le16_to_cpu(qsdev->dev_desc.max_output_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->report_buf = devm_kzalloc(qsdev->dev, max_report_len, GFP_KERNEL);
+ if (!qsdev->report_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * quickspi_probe - QuickSPI driver probe function
+ *
+ * @pdev: pointer to PCI device
+ * @id: pointer to pci_device_id structure
+ *
+ * This function initializes THC and HIDSPI device, the flow is:
+ * - do THC pci device initialization
+ * - query HIDSPI ACPI parameters
+ * - configure THC to HIDSPI mode
+ * - go through HIDSPI enumeration flow
+ * |- reset HIDSPI device
+ * |- read device descriptor
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quickspi_device *qsdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qsdev = quickspi_dev_init(pdev, mem_addr, id);
+ if (IS_ERR(qsdev)) {
+ dev_err(&pdev->dev, "QuickSPI device init failed\n");
+ ret = PTR_ERR(qsdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qsdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quickspi_irq_quick_handler,
+ quickspi_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qsdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Reset Touch Device failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_alloc_report_buf(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Alloc report buffers failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_dma_init(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Setup THC DMA failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_get_report_descriptor(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quickspi_hid_probe(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qsdev->state = QUICKSPI_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qsdev->dev);
+ pm_runtime_set_autosuspend_delay(qsdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_noidle(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickSPI probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quickspi_dma_deinit(qsdev);
+dev_deinit:
+ quickspi_dev_deinit(qsdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quickspi_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quickspi_remove(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ quickspi_hid_remove(qsdev);
+ quickspi_dma_deinit(qsdev);
+
+ pm_runtime_get_noresume(qsdev->dev);
+
+ quickspi_dev_deinit(qsdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quickspi_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier; it's a simplified
+ * version of remove, so we go down faster.
+ */
+static void quickspi_shutdown(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ /* Must stop DMA before reboot to avoid DMA entering into unknown state */
+ quickspi_dma_deinit(qsdev);
+
+ quickspi_dev_deinit(qsdev);
+}
+
+static int quickspi_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_SLEEP);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_ON);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ quickspi_dma_deinit(qsdev);
+
+ return 0;
+}
+
+static int quickspi_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ /* Reconfig THC HW when back from hibernate */
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret)
+ return ret;
+
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ /* The TIC may have lost power, so it needs to go through the reset flow */
+ ret = reset_tic(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ qsdev->state = QUICKSPI_ENABLED;
+
+ return 0;
+}
+
+static int quickspi_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quickspi_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quickspi_pm_ops = {
+ .suspend = quickspi_suspend,
+ .resume = quickspi_resume,
+ .freeze = quickspi_freeze,
+ .thaw = quickspi_thaw,
+ .poweroff = quickspi_poweroff,
+ .restore = quickspi_restore,
+ .runtime_suspend = quickspi_runtime_suspend,
+ .runtime_resume = quickspi_runtime_resume,
+ .runtime_idle = NULL,
+};
+
+static const struct pci_device_id quickspi_pci_tbl[] = {
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT1, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT2, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT1, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT2, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
+
+static struct pci_driver quickspi_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quickspi_pci_tbl,
+ .probe = quickspi_probe,
+ .remove = quickspi_remove,
+ .shutdown = quickspi_shutdown,
+ .driver.pm = &quickspi_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quickspi_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickSPI Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
new file mode 100644
index 000000000000..75179bb26767
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_DEV_H_
+#define _QUICKSPI_DEV_H_
+
+#include <linux/bits.h>
+#include <linux/hid-over-spi.h>
+#include <linux/sizes.h>
+#include <linux/wait.h>
+
+#include "quickspi-protocol.h"
+
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT1 0x7E49
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT2 0x7E4B
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT1 0xA849
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT2 0xA84B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT1 0xE349
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+
+/* HIDSPI special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_REVISION_NUM 2
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR 1
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR 2
+#define ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR 3
+#define ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE 4
+#define ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE 5
+#define ACPI_QUICKSPI_FUNC_NUM_IO_MODE 6
+
+/* QuickSPI device special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED 1
+#define ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE 2
+#define ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT 3
+
+/* Platform special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR 1
+#define ACPI_QUICKSPI_FUNC_NUM_LP_LTR 2
+
+#define SPI_WRITE_IO_MODE BIT(13)
+#define SPI_IO_MODE_OPCODE GENMASK(15, 14)
+#define PERFORMANCE_LIMITATION GENMASK(15, 0)
+
+/* Packet size value, the unit is 16 bytes */
+#define DEFAULT_MIN_PACKET_SIZE_VALUE 4
+#define MAX_PACKET_SIZE_VALUE_MTL 128
+#define MAX_PACKET_SIZE_VALUE_LNL 256
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between THC active LTR
+ * and low power LTR to save CPU power.
+ * The default is 5000ms: if no touch event occurs within this time, THC
+ * switches to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
+
+enum quickspi_dev_state {
+ QUICKSPI_NONE,
+ QUICKSPI_RESETING,
+ QUICKSPI_RESETED,
+ QUICKSPI_INITED,
+ QUICKSPI_ENABLED,
+ QUICKSPI_DISABLED,
+};
+
+/**
+ * struct quickspi_driver_data - Driver specific data for quickspi device
+ * @max_packet_size_value: identify max packet size, unit is 16 bytes
+ */
+struct quickspi_driver_data {
+ u32 max_packet_size_value;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quickspi_device - THC QuickSPI device struct
+ * @dev: pointer to kernel device
+ * @pdev: pointer to PCI device
+ * @thc_hw: pointer to THC device
+ * @hid_dev: pointer to HID device
+ * @acpi_dev: pointer to ACPI device
+ * @driver_data: pointer to quickspi specific driver data
+ * @state: THC SPI device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for HIDSPI protocol
+ * @input_report_hdr_addr: device input report header address
+ * @input_report_bdy_addr: device input report body address
+ * @output_report_addr: device output report address
+ * @spi_freq_val: device supported max SPI frequency, in Hz
+ * @spi_read_io_mode: device supported SPI read io mode
+ * @spi_write_io_mode: device supported SPI write io mode
+ * @spi_read_opcode: device read opcode
+ * @spi_write_opcode: device write opcode
+ * @limit_packet_size: 1 - limit read/write packets to 64 bytes
+ * 0 - device has no packet size limitation for read/write
+ * @spi_packet_size: packet size value used for SPI read/write, unit is 16 bytes
+ * @performance_limit: delay time, in ms.
+ * If the device has a performance limitation, a delay must be
+ * inserted before a write operation that follows a read operation.
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @report_descriptor: store a copy of device report descriptor
+ * @input_buf: store a copy of latest input report data
+ * @report_buf: store a copy of latest input/output report packet from set/get feature
+ * @report_len: the length of input/output report packet
+ * @reset_ack_wq: wait queue for the reset response from the device
+ * @reset_ack: indicate whether the reset response was received
+ * @nondma_int_received_wq: wait queue for the THC non-DMA interrupt
+ * @nondma_int_received: indicate whether a THC non-DMA interrupt was received
+ * @report_desc_got_wq: wait queue for the device report descriptor
+ * @report_desc_got: indicate whether the device report descriptor was received
+ * @get_report_cmpl_wq: wait queue for the get report response from the device
+ * @get_report_cmpl: indicate whether the get report response was received
+ * @set_report_cmpl_wq: wait queue for set report completion
+ * @set_report_cmpl: indicate whether the set report was sent completely
+ */
+struct quickspi_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ struct quickspi_driver_data *driver_data;
+ enum quickspi_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidspi_dev_descriptor dev_desc;
+ u32 input_report_hdr_addr;
+ u32 input_report_bdy_addr;
+ u32 output_report_addr;
+ u32 spi_freq_val;
+ u32 spi_read_io_mode;
+ u32 spi_write_io_mode;
+ u32 spi_read_opcode;
+ u32 spi_write_opcode;
+ u32 limit_packet_size;
+ u32 spi_packet_size;
+ u32 performance_limit;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+
+ wait_queue_head_t nondma_int_received_wq;
+ bool nondma_int_received;
+
+ wait_queue_head_t report_desc_got_wq;
+ bool report_desc_got;
+
+ wait_queue_head_t get_report_cmpl_wq;
+ bool get_report_cmpl;
+
+ wait_queue_head_t set_report_cmpl_wq;
+ bool set_report_cmpl;
+};
+
+#endif /* _QUICKSPI_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
new file mode 100644
index 000000000000..ad52e402c28a
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+
+/**
+ * quickspi_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function is called by HID core during hid_add_device().
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+static int quickspi_hid_parse(struct hid_device *hid)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+
+ if (qsdev->report_descriptor)
+ return hid_parse_report(hid, qsdev->report_descriptor,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len));
+
+ dev_err(qsdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quickspi_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_close(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quickspi_get_report(qsdev, rtype, reportnum, buf);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quickspi_set_report(qsdev, rtype, reportnum, buf, len);
+ break;
+ default:
+ dev_err_once(qsdev->dev, "Not supported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return ret;
+}
+
+static int quickspi_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quickspi_hid_ll_driver = {
+ .parse = quickspi_hid_parse,
+ .start = quickspi_hid_start,
+ .stop = quickspi_hid_stop,
+ .open = quickspi_hid_open,
+ .close = quickspi_hid_close,
+ .power = quickspi_hid_power,
+ .raw_request = quickspi_hid_raw_request,
+};
+
+/**
+ * quickspi_hid_probe() - Register HID low level driver
+ *
+ * @qsdev: pointer to the quickspi device
+ *
+ * This function is used to allocate and add HID device.
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_probe(struct quickspi_device *qsdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quickspi_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qsdev->dev;
+ hid->driver_data = qsdev;
+ hid->version = le16_to_cpu(qsdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qsdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qsdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qsdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quickspi_hid_remove() - Destroy HID device
+ *
+ * @qsdev: pointer to the quickspi device
+ */
+void quickspi_hid_remove(struct quickspi_device *qsdev)
+{
+ hid_destroy_device(qsdev->hid_dev);
+}
+
+/**
+ * quickspi_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qsdev: pointer to the quickspi device
+ * @data: pointer to the input report data buffer
+ * @data_len: the length of the input report data
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qsdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qsdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
new file mode 100644
index 000000000000..f640fa876a40
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_HID_H_
+#define _QUICKSPI_HID_H_
+
+struct quickspi_device;
+
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_size);
+int quickspi_hid_probe(struct quickspi_device *qsdev);
+void quickspi_hid_remove(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
new file mode 100644
index 000000000000..918050af73e5
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+/*
+ * THC uses hardware to accelerate the HID over SPI protocol. The
+ * THC_M_PRT_DEV_INT_CAUSE register stores the message header and body
+ * header; the definition below lets the driver retrieve the needed
+ * data fields from THC_M_PRT_DEV_INT_CAUSE more easily.
+ */
+#define HIDSPI_IN_REP_BDY_HDR_REP_TYPE GENMASK(7, 0)
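A minimal sketch of how these fields are consumed (hypothetical snippet: body_word0 stands for the first u32 of the body read back over PIO, and HIDSPI_INPUT_HEADER_REPORT_LEN comes from hid-over-spi.h):

	u32 cause = thc_int_cause_read(qsdev->thc_hw);
	u32 len_words = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, cause);
	u8 rep_type = FIELD_GET(HIDSPI_IN_REP_BDY_HDR_REP_TYPE, body_word0);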
+
+static int write_cmd_to_txdma(struct quickspi_device *qsdev,
+ int report_type, int report_id,
+ u8 *report_buf, const int report_buf_len)
+{
+ struct output_report *write_buf;
+ int write_buf_len;
+ int ret;
+
+ write_buf = (struct output_report *)qsdev->report_buf;
+
+ write_buf->output_hdr.report_type = report_type;
+ write_buf->output_hdr.content_len = cpu_to_le16(report_buf_len);
+ write_buf->output_hdr.content_id = report_id;
+
+ if (report_buf && report_buf_len > 0)
+ memcpy(write_buf->content, report_buf, report_buf_len);
+
+ write_buf_len = HIDSPI_OUTPUT_REPORT_SIZE(report_buf_len);
+
+ ret = thc_dma_write(qsdev->thc_hw, write_buf, write_buf_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "DMA write failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
+{
+ u8 read_buf[HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE];
+ struct output_report output_rep;
+ u32 input_len, read_len = 0;
+ u32 int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
+ output_rep.output_hdr.report_type = DEVICE_DESCRIPTOR;
+ output_rep.output_hdr.content_len = 0;
+ output_rep.output_hdr.content_id = 0;
+
+ qsdev->nondma_int_received = false;
+
+ ret = thc_tic_pio_write(qsdev->thc_hw, qsdev->output_report_addr,
+ HIDSPI_OUTPUT_REPORT_SIZE(0), (u32 *)&output_rep);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write DEVICE_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->nondma_int_received_wq,
+ qsdev->nondma_int_received,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->nondma_int_received) {
+ dev_err_once(qsdev->dev, "Wait DEVICE_DESCRIPTOR timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->nondma_int_received = false;
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
+ input_len = input_len * sizeof(u32);
+ if (input_len != HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE) {
+ dev_err_once(qsdev->dev, "Receive wrong DEVICE_DESCRIPTOR length, len = %u\n",
+ input_len);
+ return -EINVAL;
+ }
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ input_len, &read_len, (u32 *)read_buf);
+ if (ret || read_len != input_len) {
+ dev_err_once(qsdev->dev, "Read DEVICE_DESCRIPTOR failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "DEVICE_DESCRIPTOR expected len = %u, actual read = %u\n",
+ input_len, read_len);
+ return ret;
+ }
+
+ input_rep_type = ((struct input_report_body_header *)read_buf)->input_report_type;
+
+ if (input_rep_type == DEVICE_DESCRIPTOR_RESPONSE) {
+ memcpy(&qsdev->dev_desc,
+ read_buf + HIDSPI_INPUT_BODY_HEADER_SIZE,
+ HIDSPI_DEVICE_DESCRIPTOR_SIZE);
+
+ return 0;
+ }
+
+ dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type);
+ return -EINVAL;
+}
+
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, REPORT_DESCRIPTOR, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write REPORT_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->report_desc_got_wq,
+ qsdev->report_desc_got,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->report_desc_got) {
+ dev_err_once(qsdev->dev, "Wait Report Descriptor timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->report_desc_got = false;
+
+ return 0;
+}
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state)
+{
+ u8 cmd_content = power_state;
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, COMMAND_CONTENT,
+ HIDSPI_SET_POWER_CMD_ID,
+ &cmd_content,
+ sizeof(cmd_content));
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_POWER command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len)
+{
+ struct input_report_body_header *body_hdr;
+ struct input_report_body *input_body;
+ u8 *input_report;
+ u32 input_len;
+ int ret = 0;
+
+ input_body = (struct input_report_body *)qsdev->input_buf;
+ body_hdr = &input_body->body_hdr;
+ input_len = le16_to_cpu(body_hdr->content_len);
+
+ if (HIDSPI_INPUT_BODY_SIZE(input_len) > buf_len) {
+ dev_err_once(qsdev->dev, "Wrong input report length: %u",
+ input_len);
+ return;
+ }
+
+ switch (body_hdr->input_report_type) {
+ case REPORT_DESCRIPTOR_RESPONSE:
+ if (input_len != le16_to_cpu(qsdev->dev_desc.rep_desc_len)) {
+ dev_err_once(qsdev->dev, "Unexpected report descriptor length: %u\n",
+ input_len);
+ return;
+ }
+
+ memcpy(qsdev->report_descriptor, input_body->content, input_len);
+
+ qsdev->report_desc_got = true;
+ wake_up_interruptible(&qsdev->report_desc_got_wq);
+
+ break;
+
+ case COMMAND_RESPONSE:
+ if (body_hdr->content_id == HIDSPI_SET_POWER_CMD_ID) {
+ dev_dbg(qsdev->dev, "Receive set power on response\n");
+ } else {
+ dev_err_once(qsdev->dev, "Unknown command response type: %u\n",
+ body_hdr->content_id);
+ }
+
+ break;
+
+ case RESET_RESPONSE:
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ dev_dbg(qsdev->dev, "Receive HIR reset response\n");
+ } else {
+ dev_info(qsdev->dev, "Receive DIR\n");
+ }
+ break;
+
+ case GET_FEATURE_RESPONSE:
+ case GET_INPUT_REPORT_RESPONSE:
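+ /*
+  * Prepend the report ID: content_id immediately precedes content in
+  * the input report body, so stepping back over it yields a buffer
+  * starting with the report ID followed by the report data.
+  */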
+ qsdev->report_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ memcpy(qsdev->report_buf, input_report, qsdev->report_len);
+
+ qsdev->get_report_cmpl = true;
+ wake_up_interruptible(&qsdev->get_report_cmpl_wq);
+
+ break;
+
+ case SET_FEATURE_RESPONSE:
+ case OUTPUT_REPORT_RESPONSE:
+ qsdev->set_report_cmpl = true;
+ wake_up_interruptible(&qsdev->set_report_cmpl_wq);
+
+ break;
+
+ case DATA:
+ if (qsdev->state != QUICKSPI_ENABLED)
+ return;
+
+ if (input_len > le16_to_cpu(qsdev->dev_desc.max_input_len)) {
+ dev_err_once(qsdev->dev, "Unexpected too large input report length: %u\n",
+ input_len);
+ return;
+ }
+
+ input_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ ret = quickspi_hid_send_report(qsdev, input_report, input_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "Failed to send HID input report: %d\n", ret);
+
+ break;
+
+ default:
+ dev_err_once(qsdev->dev, "Unsupported input report type: %u\n",
+ body_hdr->input_report_type);
+ break;
+ }
+}
+
+static int acpi_tic_reset(struct quickspi_device *qsdev)
+{
+ acpi_status status = 0;
+ acpi_handle handle;
+
+ if (!qsdev->acpi_dev)
+ return -ENODEV;
+
+ handle = acpi_device_handle(qsdev->acpi_dev);
+ status = acpi_execute_simple_method(handle, "_RST", 0);
+ if (ACPI_FAILURE(status)) {
+ dev_err_once(qsdev->dev,
+ "Failed to reset device through ACPI method, ret = %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int reset_tic(struct quickspi_device *qsdev)
+{
+ u32 actual_read_len, read_len = 0;
+ u32 input_report_len, reset_response, int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
+ qsdev->state = QUICKSPI_RESETING;
+
+ qsdev->reset_ack = false;
+
+ /* First interrupt uses level trigger to avoid missing interrupt */
+ thc_int_trigger_type_select(qsdev->thc_hw, false);
+
+ ret = acpi_tic_reset(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = wait_event_interruptible_timeout(qsdev->reset_ack_wq,
+ qsdev->reset_ack,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->reset_ack) {
+ dev_err_once(qsdev->dev, "Wait RESET_RESPONSE timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_report_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
+ read_len = input_report_len * sizeof(u32);
+ if (read_len != HIDSPI_INPUT_BODY_SIZE(0)) {
+ dev_err_once(qsdev->dev, "Receive wrong RESET_RESPONSE, len = %u\n",
+ read_len);
+ return -EINVAL;
+ }
+
+ /* Switch to edge trigger matching with HIDSPI protocol definition */
+ thc_int_trigger_type_select(qsdev->thc_hw, true);
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ read_len, &actual_read_len,
+ (u32 *)&reset_response);
+ if (ret || actual_read_len != read_len) {
+ dev_err_once(qsdev->dev, "Read RESET_RESPONSE body failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "RESET_RESPONSE body expected len = %u, actual = %u\n",
+ read_len, actual_read_len);
+ return ret;
+ }
+
+ input_rep_type = FIELD_GET(HIDSPI_IN_REP_BDY_HDR_REP_TYPE, reset_response);
+
+ if (input_rep_type == RESET_RESPONSE) {
+ dev_dbg(qsdev->dev, "RESET_RESPONSE received\n");
+ } else {
+ dev_err_once(qsdev->dev,
+ "Unexpected input report type: %d, expect RESET_RESPONSE\n",
+ input_rep_type);
+ return -EINVAL;
+ }
+
+ qsdev->state = QUICKSPI_RESETED;
+
+ ret = quickspi_get_device_descriptor(qsdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int quickspi_get_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id, void *buf)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = GET_INPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = GET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for GET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write GET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->get_report_cmpl_wq,
+ qsdev->get_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->get_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Get Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->get_report_cmpl = false;
+
+ memcpy(buf, qsdev->report_buf, qsdev->report_len);
+
+ return qsdev->report_len;
+}
+
+int quickspi_set_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id,
+ void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = OUTPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = SET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for SET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
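+ /*
+  * buf[0] holds the HID report ID, which already travels in the output
+  * report header as content_id, so the payload starts at buf + 1.
+  */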
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, buf + 1, buf_len - 1);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->set_report_cmpl_wq,
+ qsdev->set_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->set_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Set Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->set_report_cmpl = false;
+
+ return buf_len;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
new file mode 100644
index 000000000000..775e29c1ed13
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_PROTOCOL_H_
+#define _QUICKSPI_PROTOCOL_H_
+
+#include <linux/hid-over-spi.h>
+
+#define QUICKSPI_ACK_WAIT_TIMEOUT 5 /* seconds */
+
+struct quickspi_device;
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len);
+int quickspi_get_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf);
+int quickspi_set_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf, u32 buf_len);
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev);
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state);
+
+int reset_tic(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
new file mode 100644
index 000000000000..4fc78b5a04b5
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -0,0 +1,1578 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+static int thc_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ *val = ioread32(base + reg);
+ return 0;
+}
+
+static int thc_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ iowrite32(val, base + reg);
+ return 0;
+}
+
+static const struct regmap_range thc_rw_ranges[] = {
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x1000, 0x1320),
+};
+
+static const struct regmap_access_table thc_rw_table = {
+ .yes_ranges = thc_rw_ranges,
+ .n_yes_ranges = ARRAY_SIZE(thc_rw_ranges),
+};
+
+static const struct regmap_config thc_regmap_cfg = {
+ .name = "thc_regmap_common",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1320,
+ .reg_read = thc_regmap_read,
+ .reg_write = thc_regmap_write,
+ .cache_type = REGCACHE_NONE,
+ .fast_io = true,
+ .rd_table = &thc_rw_table,
+ .wr_table = &thc_rw_table,
+ .volatile_table = &thc_rw_table,
+};
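With this configuration, every register access in this file goes through the MMIO callbacks above, and regmap rejects offsets outside thc_rw_ranges. A minimal usage sketch (register name taken from this file; error handling omitted):

	u32 sts;

	regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);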
+
+/**
+ * thc_clear_state - Clear THC hardware state
+ *
+ * @dev: The pointer of THC device structure
+ */
+static void thc_clear_state(const struct thc_device *dev)
+{
+ u32 val;
+
+ /* Clear interrupt cause register */
+ val = THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY |
+ THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR |
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR |
+ THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, val, val);
+
+ /* Clear interrupt error state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS);
+
+ val = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET, val, val);
+
+ val = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, val, val);
+
+ /* Clear RxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS);
+
+ /* Clear TxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL);
+
+ val = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, val, val);
+
+ /* Reset all DMAs count */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DB_CNT_1_OFFSET,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CNT_OFFSET,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+
+ /* Reset THC hardware sequence state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_1_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_2_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_1_OFFSET,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_2_OFFSET,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SWINT_CNT_1_OFFSET,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SWINT_CNT_1_OFFSET,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TX_FRM_CNT_OFFSET,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TXDMA_PKT_CNT_OFFSET,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_1_OFFSET,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_2_OFFSET,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC);
+}
+
+/**
+ * thc_dev_init - Allocate and initialize the THC device structure
+ *
+ * @device: The pointer of device structure
+ * @mem_addr: The pointer of MMIO memory address
+ *
+ * Return: The thc_device pointer on success, ERR_PTR on failure.
+ */
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr)
+{
+ struct thc_device *thc_dev;
+ int ret;
+
+ thc_dev = devm_kzalloc(device, sizeof(*thc_dev), GFP_KERNEL);
+ if (!thc_dev)
+ return ERR_PTR(-ENOMEM);
+
+ thc_dev->dev = device;
+ thc_dev->mmio_addr = mem_addr;
+ thc_dev->thc_regmap = devm_regmap_init(device, NULL, thc_dev, &thc_regmap_cfg);
+ if (IS_ERR(thc_dev->thc_regmap)) {
+ ret = PTR_ERR(thc_dev->thc_regmap);
+ dev_err_once(device, "Failed to init thc_regmap: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_clear_state(thc_dev);
+
+ mutex_init(&thc_dev->thc_bus_lock);
+ init_waitqueue_head(&thc_dev->write_complete_wait);
+ init_waitqueue_head(&thc_dev->swdma_complete_wait);
+
+ thc_dev->dma_ctx = thc_dma_init(thc_dev);
+ if (!thc_dev->dma_ctx) {
+ dev_err_once(device, "DMA context init failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return thc_dev;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dev_init, "INTEL_THC");
+
+static int prepare_pio(const struct thc_device *dev, const u8 pio_op,
+ const u32 address, const u32 size)
+{
+ u32 sts, ctrl, addr, mask;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+
+ /* Check if THC previous PIO still in progress */
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP) {
+ dev_err_once(dev->dev, "THC PIO is still busy!\n");
+ return -EBUSY;
+ }
+
+ /* Clear error bit and complete bit in state register */
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+
+ /* Set PIO data size, opcode and interrupt capability */
+ ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, size) |
+ FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD, pio_op);
+ if (dev->pio_int_supported)
+ ctrl |= THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+
+ mask = THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, mask, ctrl);
+
+ /* Set PIO target address */
+ addr = FIELD_PREP(THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR, address);
+ mask = THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, mask, addr);
+ return 0;
+}
+
+static void pio_start(const struct thc_device *dev,
+ u32 size_in_bytes, const u32 *buffer)
+{
+ if (size_in_bytes && buffer)
+ regmap_bulk_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, size_in_bytes / sizeof(u32));
+
+ /* Enable Start bit */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO);
+}
+
+static int pio_complete(const struct thc_device *dev,
+ u32 *buffer, u32 *size)
+{
+ u32 sts, ctrl;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_ERR) {
+ dev_err_once(dev->dev, "PIO operation error\n");
+ return -EBUSY;
+ }
+
+ if (buffer && size) {
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &ctrl);
+ *size = FIELD_GET(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, ctrl);
+
+ regmap_bulk_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, *size / sizeof(u32));
+ }
+
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR | THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+ return 0;
+}
+
+static int pio_wait(const struct thc_device *dev)
+{
+ u32 sts = 0;
+ int ret;
+
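+ /*
+  * Wait until the command-in-progress (CIP) bit clears and TSSDONE is
+  * set, i.e. the sequencer has finished the PIO transaction.
+  */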
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts,
+ !(sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP ||
+ !(sts & THC_M_PRT_SW_SEQ_STS_TSSDONE)),
+ THC_REGMAP_POLLING_INTERVAL_US, THC_PIO_DONE_TIMEOUT_US);
+ if (ret)
+ dev_err_once(dev->dev, "Timeout while polling PIO operation done\n");
+
+ return ret;
+}
+
+/**
+ * thc_tic_pio_read - Read data from touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: Expected read data size
+ * @actual_size: The pointer of the actual data size read from touch device
+ * @buffer: The pointer of data buffer to store the data read from touch device
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (!size || !actual_size || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, actual_size %p, buffer %p\n",
+ size, actual_size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_READ : THC_PIO_OP_I2C_TIC_READ;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_read, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write - Write data to touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: PIO write data size
+ * @buffer: The pointer of the write data buffer
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (!size || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_WRITE : THC_PIO_OP_I2C_TIC_WRITE;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write_and_read - Write data followed by read data by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @write_size: PIO write data size
+ * @write_buffer: The pointer of the write data buffer
+ * @read_size: Expected PIO read data size
+ * @actual_size: The pointer of the actual read data size
+ * @read_buffer: The pointer of PIO read data buffer
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer)
+{
+ u32 i2c_ctrl, mask;
+ int ret;
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ dev_err(dev->dev, "SPI port type doesn't support pio write and read!");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ /* Config i2c PIO write and read sequence */
+ i2c_ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC, write_size);
+ mask = THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ mask, i2c_ctrl);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN);
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_TIC_WRITE_AND_READ, address, read_size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, write_size, write_buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, read_buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write_and_read, "INTEL_THC");
+
+/**
+ * thc_interrupt_config - Configure THC interrupts
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_interrupt_config(struct thc_device *dev)
+{
+ u32 mbits, mask, r_dma_ctrl_1;
+
+ /* Clear Error reporting interrupt status bits */
+ mbits = THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS |
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_STATUS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Error Reporting Interrupts */
+ mbits = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+
+ /* Clear PIO Interrupt status bits */
+ mbits = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET,
+ mbits, mbits);
+
+ /* Read Interrupts */
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ &r_dma_ctrl_1);
+ /* Disable RxDMA1 */
+ r_dma_ctrl_1 &= ~THC_M_PRT_READ_DMA_CNTRL_IE_EOF;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ /* Ack EOF Interrupt RxDMA1 */
+ mbits = THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS;
+ /* Ack NonDMA Interrupt */
+ mbits |= THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ mbits, mbits);
+
+ /* Ack EOF Interrupt RxDMA2 */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+
+ /* Write Interrupts */
+ /* Disable TxDMA */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ 0);
+
+ /* Clear TxDMA interrupt status bits */
+ mbits = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS;
+ mbits |= THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Non-DMA device inband interrupt */
+ r_dma_ctrl_1 |= THC_M_PRT_READ_DMA_CNTRL_IE_NDDI;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ /* Edge triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN);
+ } else {
+ /* Level triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN, 0);
+
+ mbits = THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+ }
+
+ thc_set_pio_interrupt_support(dev, false);
+
+ /* HIDSPI specific settings */
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET,
+ THC_BIT_OFFSET_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN,
+ THC_BIT_LENGTH_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET,
+ THC_BIT_OFFSET_LAST_FRAGMENT_FLAG) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INVALID_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_1_OFFSET,
+ mask, mbits);
+
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET,
+ THC_BIT_OFFSET_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN,
+ THC_BIT_LENGTH_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT,
+ THC_UNIT_MICROFRAME_SIZE) |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ mask = THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_2_OFFSET,
+ mask, mbits);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_config, "INTEL_THC");
+
+/**
+ * thc_int_trigger_type_select - Select THC interrupt trigger type
+ *
+ * @dev: the pointer of THC private device context
+ * @edge_trigger: true for edge triggered interrupt, false for level triggered
+ */
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ edge_trigger ? THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_trigger_type_select, "INTEL_THC");
+
+/**
+ * thc_interrupt_enable - Enable or disable THC interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_enable: the flag to control THC interrupt enable or disable
+ */
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ THC_M_PRT_INT_EN_GBL_INT_EN,
+ int_enable ? THC_M_PRT_INT_EN_GBL_INT_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_enable, "INTEL_THC");
+
+/**
+ * thc_interrupt_quiesce - Quiesce or unquiesce external touch device interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_quiesce: the flag to determine quiesce or unquiesce device interrupt
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce)
+{
+ u32 ctrl;
+ int ret;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+ if (!(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && !int_quiesce) {
+ dev_warn(dev->dev, "THC interrupt already unquiesce\n");
+ return 0;
+ }
+
+ if ((ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && int_quiesce) {
+ dev_warn(dev->dev, "THC interrupt already quiesce\n");
+ return 0;
+ }
+
+ /* Quiesce device interrupt - Set quiesce bit and waiting for THC HW to ACK */
+ if (int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, ctrl,
+ ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS,
+ THC_REGMAP_POLLING_INTERVAL_US, THC_QUIESCE_EN_TIMEOUT_US);
+ if (ret) {
+ dev_err_once(dev->dev,
+ "Timeout while waiting THC idle, target quiesce state = %s\n",
+ int_quiesce ? "true" : "false");
+ return ret;
+ }
+
+ /* Unquiesce device interrupt - Clear the quiesce bit */
+ if (!int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_quiesce, "INTEL_THC");
+
+/**
+ * thc_set_pio_interrupt_support - Set whether PIO interrupt is supported
+ *
+ * @dev: The pointer of THC private device context
+ * @supported: The flag to enable or disable PIO interrupt
+ */
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported)
+{
+ dev->pio_int_supported = supported;
+}
+EXPORT_SYMBOL_NS_GPL(thc_set_pio_interrupt_support, "INTEL_THC");
+
+/**
+ * thc_ltr_config - Configure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ * @active_ltr_us: active LTR value, unit is us
+ * @lp_ltr_us: low power LTR value, unit is us
+ */
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us)
+{
+ u32 active_ltr_scale, lp_ltr_scale, ltr_ctrl, ltr_mask, orig, tmp;
+
+ if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ active_ltr_scale = THC_LTR_SCALE_3;
+ active_ltr_us = active_ltr_us >> 5;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ active_ltr_scale = THC_LTR_SCALE_4;
+ active_ltr_us = active_ltr_us >> 10;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ active_ltr_scale = THC_LTR_SCALE_5;
+ active_ltr_us = active_ltr_us >> 15;
+ } else {
+ active_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ lp_ltr_scale = THC_LTR_SCALE_3;
+ lp_ltr_us = lp_ltr_us >> 5;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ lp_ltr_scale = THC_LTR_SCALE_4;
+ lp_ltr_us = lp_ltr_us >> 10;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ lp_ltr_scale = THC_LTR_SCALE_5;
+ lp_ltr_us = lp_ltr_us >> 15;
+ } else {
+ lp_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &orig);
+ ltr_ctrl = FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_VAL, active_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE, active_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_VAL, lp_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_SCALE, lp_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ;
+
+ ltr_mask = THC_M_CMN_LTR_CTRL_ACT_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_LP_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN;
+
+ tmp = orig & ~ltr_mask;
+ tmp |= ltr_ctrl & ltr_mask;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_config, "INTEL_THC");
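As a worked example of the bucketing above (the exact window boundaries come from the THC_LTR_MIN/MAX_VAL_SCALE_* constants, so the numbers here are assumptions): an active LTR of 2000 us that lands in the scale-3 window would be encoded as 2000 >> 5 = 62 with THC_LTR_SCALE_3, while a value below every window falls through to THC_LTR_SCALE_2 unshifted.

	/* hypothetical values: 2000 us active LTR, 50000 us low-power LTR */
	thc_ltr_config(dev, 2000, 50000);
	thc_change_ltr_mode(dev, THC_LTR_MODE_ACTIVE);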
+
+/**
+ * thc_change_ltr_mode - Change THC LTR mode
+ *
+ * @dev: The pointer of THC private device context
+ * @ltr_mode: LTR mode(active or low power)
+ */
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode)
+{
+ if (ltr_mode == THC_LTR_MODE_ACTIVE) {
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN);
+ return;
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN);
+}
+EXPORT_SYMBOL_NS_GPL(thc_change_ltr_mode, "INTEL_THC");
+
+/**
+ * thc_ltr_unconfig - Unconfigure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_ltr_unconfig(struct thc_device *dev)
+{
+ u32 ltr_ctrl, bits_clear;
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &ltr_ctrl);
+ bits_clear = THC_M_CMN_LTR_CTRL_LP_LTR_EN |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ;
+
+ ltr_ctrl &= ~bits_clear;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, ltr_ctrl);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_unconfig, "INTEL_THC");
+
+/**
+ * thc_int_cause_read - Read interrupt cause register value
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: The interrupt cause register value
+ */
+u32 thc_int_cause_read(struct thc_device *dev)
+{
+ u32 int_cause;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET, &int_cause);
+
+ return int_cause;
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_cause_read, "INTEL_THC");
+
+static void thc_print_txn_error_cause(const struct thc_device *dev)
+{
+ bool known_error = false;
+ u32 cause = 0;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &cause);
+
+ if (cause & THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR) {
+ dev_err(dev->dev, "TXN Error: Invalid PRD Entry\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR) {
+ dev_err(dev->dev, "TXN Error: THC Buffer Overrun\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR) {
+ dev_err(dev->dev, "TXN Error: Frame Babble\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY) {
+ dev_err(dev->dev, "TXN Error: Invalid Device Register Setting\n");
+ known_error = true;
+ }
+
+ /* Clear interrupt status bits */
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, cause);
+
+ if (!known_error)
+ dev_err(dev->dev, "TXN Error does not match any known value: 0x%X\n",
+ cause);
+}
+
+/**
+ * thc_interrupt_handler - Handle THC interrupts
+ *
+ * THC interrupts include several types: external touch device (TIC) non-DMA
+ * interrupts, PIO completion interrupts, DMA interrupts, I2C subIP raw
+ * interrupts and error interrupts.
+ *
+ * This is a helper function for interrupt processing: it detects the
+ * interrupt type, clears the interrupt status bit and returns the interrupt
+ * type to the caller for further processing.
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: The combined flag for interrupt type
+ */
+int thc_interrupt_handler(struct thc_device *dev)
+{
+ u32 read_sts_1, read_sts_2, read_sts_sw, write_sts;
+ u32 int_sts, err_cause, seq_cntrl, seq_sts;
+ int interrupt_type = 0;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, &read_sts_1);
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS) {
+ dev_dbg(dev->dev, "THC non-DMA device interrupt\n");
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ NONDMA_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_NONDMA_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET, &int_sts);
+
+ if (int_sts & THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS) {
+ dev_err(dev->dev, "THC transaction error, int_sts: 0x%08X\n", int_sts);
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_ERR_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &err_cause);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, &read_sts_2);
+
+ if (err_cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR ||
+ read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS ||
+ read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS) {
+ dev_err(dev->dev, "Buffer overrun or RxDMA engine stalled!\n");
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET,
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ if (int_sts & THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS) {
+ dev_err_once(dev->dev, "THC FATAL error, int_sts: 0x%08X\n", int_sts);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_FATAL_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_FATAL_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &seq_cntrl);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET, &seq_sts);
+
+ if (seq_cntrl & THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE &&
+ seq_sts & THC_M_PRT_SW_SEQ_STS_TSSDONE) {
+ dev_dbg(dev->dev, "THC_SS_CD_IE and TSSDONE are set\n");
+ interrupt_type |= BIT(THC_PIO_DONE_INT);
+ }
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA1 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, read_sts_1);
+
+ interrupt_type |= BIT(THC_RXDMA1_INT);
+ }
+
+ if (read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA2 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, read_sts_2);
+
+ interrupt_type |= BIT(THC_RXDMA2_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, &read_sts_sw);
+
+ if (read_sts_sw & THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS) {
+ dev_dbg(dev->dev, "Got SwDMA Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, read_sts_sw);
+
+ dev->swdma_done = true;
+ wake_up_interruptible(&dev->swdma_complete_wait);
+
+ interrupt_type |= BIT(THC_SWDMA_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, &write_sts);
+
+ if (write_sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS) {
+ dev_dbg(dev->dev, "Got TxDMA Write complete Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, write_sts);
+
+ dev->write_done = true;
+ wake_up_interruptible(&dev->write_complete_wait);
+
+ interrupt_type |= BIT(THC_TXDMA_INT);
+ }
+
+ if (int_sts & THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+
+ if (!interrupt_type)
+ interrupt_type |= BIT(THC_UNKNOWN_INT);
+
+ return interrupt_type;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_handler, "INTEL_THC");
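A minimal sketch of how a caller's interrupt thread might dispatch on the returned bitmask (handler names are hypothetical):

	int type = thc_interrupt_handler(dev);

	if (type & BIT(THC_TXN_ERR_INT))
		recover_bus(dev);		/* hypothetical */
	if (type & BIT(THC_NONDMA_INT))
		handle_device_event(dev);	/* hypothetical */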
+
+/**
+ * thc_port_select - Set THC port type
+ *
+ * @dev: The pointer of THC private device context
+ * @port_type: THC port type to use for current device
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type)
+{
+ u32 ctrl, mask;
+
+ if (port_type == THC_PORT_TYPE_SPI) {
+ dev_dbg(dev->dev, "Set THC port type to SPI\n");
+ dev->port_type = THC_PORT_TYPE_SPI;
+
+ /* Enable delay of CS assertion and set to default value */
+ ctrl = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ FIELD_PREP(THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL,
+ THC_CSA_CK_DELAY_VAL_DEFAULT);
+ mask = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SPI_DUTYC_CFG_OFFSET,
+ mask, ctrl);
+ } else if (port_type == THC_PORT_TYPE_I2C) {
+ dev_dbg(dev->dev, "Set THC port type to I2C\n");
+ dev->port_type = THC_PORT_TYPE_I2C;
+
+ /* Set THC transition arbitration policy to frame boundary for I2C */
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_THC_ARB_POLICY,
+ THC_ARB_POLICY_FRAME_BOUNDARY);
+ mask = THC_M_PRT_CONTROL_THC_ARB_POLICY;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+ } else {
+ dev_err(dev->dev, "unsupported THC port type: %d\n", port_type);
+ return -EINVAL;
+ }
+
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_PORT_TYPE, port_type);
+ mask = THC_M_PRT_CONTROL_PORT_TYPE;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
+
+#define THC_SPI_FREQUENCY_7M 7812500
+#define THC_SPI_FREQUENCY_15M 15625000
+#define THC_SPI_FREQUENCY_17M 17857100
+#define THC_SPI_FREQUENCY_20M 20833000
+#define THC_SPI_FREQUENCY_25M 25000000
+#define THC_SPI_FREQUENCY_31M 31250000
+#define THC_SPI_FREQUENCY_41M 41666700
+
+#define THC_SPI_LOW_FREQUENCY THC_SPI_FREQUENCY_17M
+
+static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
+{
+ int frequency[] = {
+ THC_SPI_FREQUENCY_7M,
+ THC_SPI_FREQUENCY_15M,
+ THC_SPI_FREQUENCY_17M,
+ THC_SPI_FREQUENCY_20M,
+ THC_SPI_FREQUENCY_25M,
+ THC_SPI_FREQUENCY_31M,
+ THC_SPI_FREQUENCY_41M,
+ };
+ u8 frequency_div[] = {
+ THC_SPI_FRQ_DIV_2,
+ THC_SPI_FRQ_DIV_1,
+ THC_SPI_FRQ_DIV_7,
+ THC_SPI_FRQ_DIV_6,
+ THC_SPI_FRQ_DIV_5,
+ THC_SPI_FRQ_DIV_4,
+ THC_SPI_FRQ_DIV_3,
+ };
+ int size = ARRAY_SIZE(frequency);
+ u32 closest_freq;
+ u8 freq_div;
+ int i;
+
+ for (i = size - 1; i >= 0; i--)
+ if (spi_freq_val >= frequency[i])
+ break;
+
+ if (i < 0) {
+ dev_err_once(dev->dev, "Not supported SPI frequency %d\n", spi_freq_val);
+ return THC_SPI_FRQ_RESERVED;
+ }
+
+ closest_freq = frequency[i];
+ freq_div = frequency_div[i];
+
+ dev_dbg(dev->dev,
+ "Setting SPI frequency: spi_freq_val = %u, Closest freq = %u\n",
+ spi_freq_val, closest_freq);
+
+ return freq_div;
+}
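For instance, a 20 MHz request resolves to the closest supported frequency at or below it, while a request below 7812500 Hz matches no table entry and yields THC_SPI_FRQ_RESERVED:

	/* 20833000 > 20000000, so the 17857100 entry is chosen */
	u8 div = thc_get_spi_freq_div_val(dev, 20000000); /* THC_SPI_FRQ_DIV_7 */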
+
+/**
+ * thc_spi_read_config - Configure SPI bus read attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI read frequency value
+ * @io_mode: SPI read IO mode
+ * @opcode: Read opcode
+ * @spi_rd_mps: SPI read max packet size
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCRF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TRMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_RD_MPS, spi_rd_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCRF |
+ THC_M_PRT_SPI_CFG_SPI_TRMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_RD_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, opcode);
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_DMARD_OPCODE_OFFSET, opcode);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_read_config, "INTEL_THC");
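A sketch of how the QuickSPI glue would presumably feed this from the values cached in struct quickspi_device (field names from quickspi-dev.h above; passing spi_packet_size as the max packet size is an assumption):

	ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
				  qsdev->spi_read_io_mode,
				  qsdev->spi_read_opcode,
				  qsdev->spi_packet_size);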
+
+/**
+ * thc_spi_write_config - Configure SPI bus write attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI write frequency value
+ * @io_mode: SPI write IO mode
+ * @opcode: Write opcode
+ * @spi_wr_mps: SPI write max packet size
+ * @perf_limit: Performance limitation in unit of 10us
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps,
+ u32 perf_limit)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCWF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TWMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_WR_MPS, spi_wr_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCWF |
+ THC_M_PRT_SPI_CFG_SPI_TWMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_WR_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_WR_OPCODE_OFFSET, opcode);
+
+ dev->perf_limit = perf_limit;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_write_config, "INTEL_THC");
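+
+/*
+ * Illustrative usage sketch (not part of the driver): a perf_limit of 2
+ * requests a 20us gap between write completion and the next read; the
+ * opcode and packet size are hypothetical ACPI-provided values:
+ *
+ *   ret = thc_spi_write_config(tdev, THC_SPI_FREQUENCY_25M,
+ *                              THC_QUAD_IO, 0x02, SZ_4K, 2);
+ */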
+
+/**
+ * thc_spi_input_output_address_config - Configure SPI input and output addresses
+ *
+ * @dev: the pointer of THC private device context
+ * @input_hdr_addr: input report header address
+ * @input_bdy_addr: input report body address
+ * @output_addr: output report address
+ */
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr)
+{
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET, input_hdr_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_1_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_2_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WR_BULK_ADDR_OFFSET, output_addr);
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_input_output_address_config, "INTEL_THC");
+
+static int thc_i2c_subip_pio_read(struct thc_device *dev, const u32 address,
+ u32 *size, u32 *buffer)
+{
+ int ret;
+
+ if (!size || *size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %p, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_READ, address, *size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, size);
+ if (ret < 0)
+ goto end;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Read THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+static int thc_i2c_subip_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ int ret;
+
+ if (size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_WRITE, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+ if (ret < 0)
+ goto end;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Write THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+#define I2C_SUBIP_CON_DEFAULT 0x663
+#define I2C_SUBIP_INT_MASK_DEFAULT 0x7FFF
+#define I2C_SUBIP_RX_TL_DEFAULT 62
+#define I2C_SUBIP_TX_TL_DEFAULT 0
+#define I2C_SUBIP_DMA_TDLR_DEFAULT 7
+#define I2C_SUBIP_DMA_RDLR_DEFAULT 7
+
+static int thc_i2c_subip_set_speed(struct thc_device *dev, const u32 speed,
+ const u32 hcnt, const u32 lcnt)
+{
+ u32 hcnt_offset, lcnt_offset;
+ u32 val;
+ int ret;
+
+ switch (speed) {
+ case THC_I2C_STANDARD:
+ hcnt_offset = THC_I2C_IC_SS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_SS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_FAST_AND_PLUS:
+ hcnt_offset = THC_I2C_IC_FS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_FS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_HIGH_SPEED:
+ hcnt_offset = THC_I2C_IC_HS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_HS_SCL_LCNT_OFFSET;
+ break;
+
+ default:
+ dev_err_once(dev->dev, "Unsupported i2c speed %d\n", speed);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = thc_i2c_subip_pio_write(dev, hcnt_offset, sizeof(u32), &hcnt);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_write(dev, lcnt_offset, sizeof(u32), &lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_CON_DEFAULT & ~THC_I2C_IC_CON_SPEED;
+ val |= FIELD_PREP(THC_I2C_IC_CON_SPEED, speed);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_CON_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u32 i2c_subip_regs[] = {
+ THC_I2C_IC_CON_OFFSET,
+ THC_I2C_IC_TAR_OFFSET,
+ THC_I2C_IC_INTR_MASK_OFFSET,
+ THC_I2C_IC_RX_TL_OFFSET,
+ THC_I2C_IC_TX_TL_OFFSET,
+ THC_I2C_IC_DMA_CR_OFFSET,
+ THC_I2C_IC_DMA_TDLR_OFFSET,
+ THC_I2C_IC_DMA_RDLR_OFFSET,
+ THC_I2C_IC_SS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_SS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_ENABLE_OFFSET,
+};
+
+/**
+ * thc_i2c_subip_init - Initialize and configure THC I2C subsystem
+ *
+ * @dev: The pointer of THC private device context
+ * @target_address: Slave address of touch device (TIC)
+ * @speed: I2C bus frequency speed mode
+ * @hcnt: I2C clock SCL high count
+ * @lcnt: I2C clock SCL low count
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt)
+{
+ u32 read_size = sizeof(u32);
+ u32 val;
+ int ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_TAR_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_TAR_IC_TAR;
+ val |= FIELD_PREP(THC_I2C_IC_TAR_IC_TAR, target_address);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TAR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_set_speed(dev, speed, hcnt, lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_INT_MASK_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_INTR_MASK_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_RX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_RX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_TX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = THC_I2C_IC_DMA_CR_RDMAE | THC_I2C_IC_DMA_CR_TDMAE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_CR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_TDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_TDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_RDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_RDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val |= THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ dev->i2c_subip_regs = devm_kzalloc(dev->dev, sizeof(i2c_subip_regs), GFP_KERNEL);
+ if (!dev->i2c_subip_regs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_init, "INTEL_THC");
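+
+/*
+ * Illustrative usage sketch (not part of the driver): the target address
+ * and SCL high/low counts below are hypothetical, board-specific values
+ * (normally taken from the device's ACPI description):
+ *
+ *   ret = thc_i2c_subip_init(tdev, 0x2c, THC_I2C_FAST_AND_PLUS,
+ *                            0x56, 0x9d);
+ */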
+
+/**
+ * thc_i2c_subip_regs_save - Save THC I2C subsystem register values to THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_save(struct thc_device *dev)
+{
+ int ret;
+ u32 read_size = sizeof(u32);
+
+	for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+	ret = thc_i2c_subip_pio_read(dev, i2c_subip_regs[i],
+	&read_size, dev->i2c_subip_regs + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_save, "INTEL_THC");
+
+/**
+ * thc_i2c_subip_regs_restore - Restore THC I2C subsystem registers from THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_restore(struct thc_device *dev)
+{
+ int ret;
+ u32 write_size = sizeof(u32);
+
+	for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+	ret = thc_i2c_subip_pio_write(dev, i2c_subip_regs[i],
+	write_size, dev->i2c_subip_regs + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_restore, "INTEL_THC");
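+
+/*
+ * Illustrative PM pairing (not part of the driver): since the I2C
+ * subsystem configuration does not survive low-power transitions, a
+ * caller's suspend path would invoke thc_i2c_subip_regs_save() and the
+ * matching resume path thc_i2c_subip_regs_restore().
+ */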
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) Intel THC Hardware Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
new file mode 100644
index 000000000000..0517fee2c668
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DEV_H_
+#define _INTEL_THC_DEV_H_
+
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "intel-thc-dma.h"
+
+#define THC_REGMAP_COMMON_OFFSET 0x10
+#define THC_REGMAP_MMIO_OFFSET 0x1000
+
+/*
+ * THC Port type
+ * @THC_PORT_TYPE_SPI: This port is used for HIDSPI
+ * @THC_PORT_TYPE_I2C: This port is used for HIDI2C
+ */
+enum thc_port_type {
+ THC_PORT_TYPE_SPI = 0,
+ THC_PORT_TYPE_I2C = 1,
+};
+
+/**
+ * enum thc_int_type - THC interrupt flag
+ * @THC_NONDMA_INT: THC non-DMA interrupt
+ * @THC_RXDMA1_INT: THC RxDMA1 interrupt
+ * @THC_RXDMA2_INT: THC RxDMA2 interrupt
+ * @THC_SWDMA_INT: THC SWDMA interrupt
+ * @THC_TXDMA_INT: THC TXDMA interrupt
+ * @THC_PIO_DONE_INT: THC PIO complete interrupt
+ * @THC_I2CSUBIP_INT: THC I2C subsystem interrupt
+ * @THC_TXN_ERR_INT: THC transfer error interrupt
+ * @THC_FATAL_ERR_INT: THC fatal error interrupt
+ */
+enum thc_int_type {
+ THC_NONDMA_INT = 0,
+ THC_RXDMA1_INT = 1,
+ THC_RXDMA2_INT = 2,
+ THC_SWDMA_INT = 3,
+ THC_TXDMA_INT = 4,
+ THC_PIO_DONE_INT = 5,
+ THC_I2CSUBIP_INT = 6,
+ THC_TXN_ERR_INT = 7,
+ THC_FATAL_ERR_INT = 8,
+ THC_UNKNOWN_INT
+};
+
+/**
+ * struct thc_device - THC private device struct
+ * @dev: the generic device pointer, used for DMA mapping and logging
+ * @thc_regmap: MMIO regmap structure for accessing THC registers
+ * @mmio_addr: MMIO registers address
+ * @thc_bus_lock: mutex locker for THC config
+ * @port_type: port type of THC port instance
+ * @pio_int_supported: PIO interrupt supported flag
+ * @dma_ctx: DMA specific data
+ * @write_complete_wait: signal event for DMA write complete
+ * @swdma_complete_wait: signal event for SWDMA sequence complete
+ * @write_done: bool value that indicates if DMA write is done
+ * @swdma_done: bool value that indicates if SWDMA sequence is done
+ * @perf_limit: the delay between read operation and write operation
+ * @i2c_subip_regs: copy of the THC I2C subsystem registers, saved for restore on resume
+ */
+struct thc_device {
+ struct device *dev;
+ struct regmap *thc_regmap;
+ void __iomem *mmio_addr;
+ struct mutex thc_bus_lock;
+ enum thc_port_type port_type;
+ bool pio_int_supported;
+
+ struct thc_dma_context *dma_ctx;
+
+ wait_queue_head_t write_complete_wait;
+ wait_queue_head_t swdma_complete_wait;
+ bool write_done;
+ bool swdma_done;
+
+ u32 perf_limit;
+
+ u32 *i2c_subip_regs;
+};
+
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr);
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer);
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer);
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer);
+void thc_interrupt_config(struct thc_device *dev);
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger);
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable);
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported);
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce);
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us);
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode);
+void thc_ltr_unconfig(struct thc_device *dev);
+u32 thc_int_cause_read(struct thc_device *dev);
+int thc_interrupt_handler(struct thc_device *dev);
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type);
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps);
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps, u32 perf_limit);
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr);
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt);
+int thc_i2c_subip_regs_save(struct thc_device *dev);
+int thc_i2c_subip_regs_restore(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
new file mode 100644
index 000000000000..eb23bea77686
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+#include <linux/scatterlist.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+#include "intel-thc-hw.h"
+
+static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 addr_high, addr_low;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ addr_high = upper_32_bits(physical_addr);
+ addr_low = lower_32_bits(physical_addr);
+
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
+}
+
+static void dma_set_start_bit(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask, mbits, data, offset;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ switch (dma_config->dma_channel) {
+ case THC_RXDMA1:
+ case THC_RXDMA2:
+ if (dma_config->dma_channel == THC_RXDMA2) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INTERRUPT_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
+ }
+
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ offset = dma_config->dma_channel == THC_RXDMA1 ?
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
+ break;
+
+ case THC_SWDMA:
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ mask, ctrl);
+ break;
+
+ case THC_TXDMA:
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);
+
+ /* Select interrupt or polling method upon Write completion */
+ if (dev->dma_ctx->use_write_interrupts)
+ data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
+ else
+ data = 0;
+
+ data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ mask, data);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA) {
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
+ } else {
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+ ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
+ FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
+ }
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
+}
+
+static void dma_clear_prd_control(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA)
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ else
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
+}
+
+static u8 dma_get_read_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, read_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, read_pointer);
+
+ return read_pointer;
+}
+
+static u8 dma_get_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, write_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, write_pointer);
+
+ return write_pointer;
+}
+
+static void dma_set_write_pointer(struct thc_device *dev, u8 value,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
+ regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
+}
+
+static size_t dma_get_max_packet_size(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ return dma_config->max_packet_size;
+}
+
+static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
+ struct thc_dma_configuration *dma_config)
+{
+ if (size) {
+ dma_config->max_packet_size = ALIGN(size, SZ_4K);
+ dma_config->is_enabled = true;
+ }
+}
+
+static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ unsigned int ind)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ int j;
+
+ prd_tbl = &config->prd_tbls[ind];
+
+ for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
+ prd_tbl->entries[j].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+ prd_tbl->entries[j].len = sg_dma_len(sg);
+ prd_tbl->entries[j].hw_status = 0;
+ prd_tbl->entries[j].end_of_prd = 0;
+ }
+
+ /* Set the end_of_prd flag in the last filled entry */
+ if (j > 0)
+ prd_tbl->entries[j - 1].end_of_prd = 1;
+}
+
+static void thc_copy_sgls_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ unsigned int i;
+
+ memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));
+
+ for (i = 0; i < config->prd_tbl_num; i++)
+ thc_copy_one_sgl_to_prd(dev, config, i);
+}
+
+static int setup_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ enum dma_data_direction dir)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i, nent = PRD_ENTRIES_NUM;
+ dma_addr_t dma_handle;
+ void *cpu_addr;
+ size_t buf_sz;
+ int count;
+
+ if (!config->is_enabled)
+ return 0;
+
+ memset(config->sgls, 0, sizeof(config->sgls));
+ memset(config->sgls_nent, 0, sizeof(config->sgls_nent));
+
+ cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
+ &dma_handle, GFP_KERNEL);
+ if (!cpu_addr)
+ return -ENOMEM;
+
+ config->prd_tbls = cpu_addr;
+ config->prd_tbls_dma_handle = dma_handle;
+
+ buf_sz = dma_get_max_packet_size(dev, config);
+
+ /* Allocate and map the scatter-gather lists, one for each PRD table */
+ for (i = 0; i < config->prd_tbl_num; i++) {
+ config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
+ if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
+ dev_err_once(dev->dev, "sgl_alloc (%uth) failed, nent %u\n",
+ i, nent);
+ return -ENOMEM;
+ }
+ count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
+
+ config->sgls_nent[i] = count;
+ }
+
+ thc_copy_sgls_to_prd(dev, config);
+
+ return 0;
+}
+
+static void thc_reset_dma_settings(struct thc_device *dev)
+{
+ /* Stop all DMA channels and reset DMA read pointers */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+}
+
+static void release_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i;
+
+ if (!config->is_enabled)
+ return;
+
+ for (i = 0; i < config->prd_tbl_num; i++) {
+	if (!config->sgls[i] || !config->sgls_nent[i])
+ continue;
+
+ dma_unmap_sg(dev->dev, config->sgls[i],
+ config->sgls_nent[i],
+ config->dir);
+
+ sgl_free(config->sgls[i]);
+ config->sgls[i] = NULL;
+ }
+
+	if (config->prd_tbls) {
+	memset(config->prd_tbls, 0, prd_tbls_size);
+	dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
+	config->prd_tbls_dma_handle);
+	config->prd_tbls = NULL;
+	config->prd_tbls_dma_handle = 0;
+	}
+}
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx;
+
+ dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
+ if (!dma_ctx)
+ return NULL;
+
+ dev->dma_ctx = dma_ctx;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
+ dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
+ dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
+ dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;
+
+ dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
+ dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
+ dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;
+
+ /* Enable write DMA completion interrupt by default */
+ dma_ctx->use_write_interrupts = 1;
+
+ return dma_ctx;
+}
+
+/**
+ * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ * @mps_read1: RxDMA1 max packet size
+ * @mps_read2: RxDMA2 max packet size
+ * @mps_write: TxDMA max packet size
+ * @mps_swdma: Software DMA max packet size
+ *
+ * A non-zero mps means the corresponding DMA channel is in use, so the
+ * channel's enable flag is set accordingly.
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
+ size_t mps_read2, size_t mps_write,
+ size_t mps_swdma)
+{
+ if (!dev->dma_ctx) {
+ dev_err_once(dev->dev,
+ "Cannot set max packet sizes because DMA context is NULL!\n");
+ return -EINVAL;
+ }
+
+ dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
+ dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+ dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
+ dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");
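+
+/*
+ * Illustrative usage sketch (not part of the driver): passing 0 leaves a
+ * channel disabled, so a configuration that only uses RxDMA2, TxDMA and
+ * SWDMA could look like:
+ *
+ *   ret = thc_dma_set_max_packet_sizes(tdev, 0, SZ_4K, SZ_4K, SZ_4K);
+ */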
+
+/**
+ * thc_dma_allocate - Allocate DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_allocate(struct thc_device *dev)
+{
+ int ret, chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
+ dev->dma_ctx->dma_config[chan].dir);
+ if (ret < 0) {
+ dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
+ goto release_bufs;
+ }
+ }
+
+ return 0;
+
+release_bufs:
+ while (chan--)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");
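+
+/*
+ * Illustrative setup flow (not part of the driver): buffers are allocated
+ * once after the max packet sizes are known, then the engines are
+ * programmed:
+ *
+ *   ret = thc_dma_allocate(tdev);
+ *   if (!ret)
+ *           ret = thc_dma_configure(tdev);
+ */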
+
+/**
+ * thc_dma_release - Release DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_release(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");
+
+static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
+ size_t mes_len, u8 *nent)
+{
+ *nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
+ if (*nent > PRD_ENTRIES_NUM)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
+{
+ size_t mes_len = 0;
+ unsigned int j;
+
+ for (j = 0; j < PRD_ENTRIES_NUM; j++) {
+ mes_len += prd_tbl->entries[j].len;
+ if (prd_tbl->entries[j].end_of_prd)
+ break;
+ }
+
+ *nent = j + 1;
+
+ return mes_len;
+}
+
+/**
+ * thc_dma_configure - Configure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_configure(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx = dev->dma_ctx;
+ int chan;
+
+ thc_reset_dma_settings(dev);
+
+ if (!dma_ctx) {
+ dev_err_once(dev->dev, "Cannot do DMA configure because DMA context is NULL\n");
+ return -EINVAL;
+ }
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev,
+ dma_ctx->dma_config[chan].prd_tbls_dma_handle,
+ &dma_ctx->dma_config[chan]);
+
+ dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
+ dma_ctx->dma_config[chan].prd_tbl_num - 1,
+ &dma_ctx->dma_config[chan]);
+ }
+
+ /* Start read2 DMA engine */
+ dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);
+
+ dev_dbg(dev->dev, "DMA configured successfully!\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");
+
+/**
+ * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_unconfigure(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
+ dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");
+
+static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
+{
+ u32 ctrl_reg, sts_reg, sts;
+ int ret;
+
+ ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
+ THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);
+
+ regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
+ !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
+ THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
+ THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);
+
+ if (ret) {
+ dev_err_once(dev->dev,
+ "Timeout while waiting for DMA %d stop\n", channel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int read_dma_buffer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 prd_table_index, void *read_buff)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ size_t mes_len, ret;
+ u8 nent;
+
+ if (prd_table_index >= read_config->prd_tbl_num) {
+ dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
+ return -EINVAL;
+ }
+
+ prd_tbl = &read_config->prd_tbls[prd_table_index];
+ mes_len = calc_message_len(prd_tbl, &nent);
+	if (mes_len > read_config->max_packet_size) {
+	dev_err(dev->dev,
+	"Message length %zu is bigger than buffer length %zu\n",
+	mes_len, read_config->max_packet_size);
+ return -EMSGSIZE;
+ }
+
+ sg = read_config->sgls[prd_table_index];
+ ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
+ if (ret != mes_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, mes_len);
+ return -EIO;
+ }
+
+ return mes_len;
+}
+
+static void update_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config)
+{
+ u8 write_ptr = dma_get_write_pointer(dev, read_config);
+
+ if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
+ dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
+ else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
+ dma_set_write_pointer(dev, 0, read_config);
+ else
+ dma_set_write_pointer(dev, write_ptr + 1, read_config);
+}
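+
+/*
+ * Derived pointer sequence (for illustration): with the constants from
+ * intel-thc-dma.h the write pointer walks 0x00..0x0f, jumps to the
+ * wraparound window 0x80..0x8f, then returns to 0x00. The toggling top
+ * bit lets HW tell a full ring from an empty one, while the low bits
+ * still index the PRD tables (16 for the RxDMA engines).
+ */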
+
+static bool is_dma_buf_empty(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 *read_ptr, u8 *write_ptr)
+{
+ *read_ptr = dma_get_read_pointer(dev, read_config);
+ *write_ptr = dma_get_write_pointer(dev, read_config);
+
+ if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK))
+ if (*read_ptr != *write_ptr)
+ return true;
+
+ return false;
+}
+
+static int thc_dma_read(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ u8 read_ptr, write_ptr, prd_table_index;
+ int status;
+
+ if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
+ prd_table_index = write_ptr & THC_POINTER_MASK;
+
+ status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
+ if (status <= 0) {
+ dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
+ return -EIO;
+ }
+
+ *read_len = status;
+
+ /* Clear the relevant PRD table */
+ thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);
+
+ /* Increment the write pointer to let the HW know we have processed this PRD */
+ update_write_pointer(dev, read_config);
+ }
+
+ /*
+ * This function only reads one frame from PRD table for each call, so we need to
+ * check if all DMAed data is read out and return the flag to the caller. Caller
+ * should repeatedly call thc_dma_read() until all DMAed data is handled.
+ */
+ if (read_finished)
+ *read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr) ? 1 : 0;
+
+ return 0;
+}
+
+/**
+ * thc_rxdma_read - Read data from RXDMA buffer
+ *
+ * @dev: The pointer of THC private device context
+ * @dma_channel: The RXDMA engine of read data source
+ * @read_buff: The pointer of the read data buffer
+ * @read_len: The pointer of the read data length
+ * @read_finished: The pointer of the flag indicating if all pending data has been read out
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ struct thc_dma_configuration *dma_config;
+
+ dma_config = &dev->dma_ctx->dma_config[dma_channel];
+
+ if (!dma_config->is_enabled) {
+ dev_err_once(dev->dev, "The DMA channel %d is not enabled", dma_channel);
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (dma_channel >= THC_TXDMA) {
+ dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
+ return -EINVAL;
+ }
+
+	return thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
+}
+EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");
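+
+/*
+ * Illustrative caller loop (not part of the driver): each call hands back
+ * at most one frame, so an interrupt handler keeps calling until
+ * read_finished is set. "buf", "len" and consume_frame() are hypothetical
+ * caller-side names; buf must be at least the configured RxDMA2 max
+ * packet size.
+ *
+ *   int done = 0;
+ *
+ *   while (!done) {
+ *           ret = thc_rxdma_read(tdev, THC_RXDMA2, buf, &len, &done);
+ *           if (ret)
+ *                   break;
+ *           consume_frame(buf, len);
+ *   }
+ */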
+
+static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
+ size_t write_len, u32 *prd_tbl_len)
+{
+ u32 mask, val, data0 = 0, data1 = 0;
+ int ret;
+
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ return ret;
+
+ if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
+ return -EIO;
+
+ thc_reset_dma_settings(dev);
+
+ mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
+ THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
+ val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
+ ((!prd_tbl_len) ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
+ mask, val);
+
+ if (prd_tbl_len) {
+ mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
+ val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
+ *prd_tbl_len);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
+ mask, val);
+ }
+
+ if (write_len <= sizeof(u32)) {
+ for (int i = 0; i < write_len; i++)
+ data0 |= *(((u8 *)write_buff) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+ } else if (write_len <= 2 * sizeof(u32)) {
+ data0 = *(u32 *)write_buff;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+
+ for (int i = 0; i < write_len - sizeof(u32); i++)
+ data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
+ }
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+
+static int thc_swdma_read_completion(struct thc_device *dev)
+{
+ int ret;
+
+ ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
+ if (ret)
+ return ret;
+
+ thc_reset_dma_settings(dev);
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+
+ ret = thc_interrupt_quiesce(dev, false);
+
+ return ret;
+}
+
+/**
+ * thc_swdma_read - Use software DMA to read data from touch device
+ *
+ * @dev: The pointer of THC private device context
+ * @write_buff: The pointer of write buffer for SWDMA sequence
+ * @write_len: The write data length for SWDMA sequence
+ * @prd_tbl_len: The prd table length of SWDMA engine, can be set to NULL
+ * @read_buff: The pointer of the read data buffer
+ * @read_len: The pointer of the read data length
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len)
+{
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_SWDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The SWDMA channel is not enabled");
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ dev->swdma_done = false;
+
+ ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
+ if (ret)
+ goto end;
+
+ ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
+ if (ret <= 0 || !dev->swdma_done) {
+ dev_err_once(dev->dev, "timeout for waiting SWDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
+ if (ret)
+ goto end;
+
+ ret = thc_swdma_read_completion(dev);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");
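+
+/*
+ * Illustrative usage sketch (not part of the driver): SWDMA couples a
+ * short command write with the device's response read in one quiesced bus
+ * sequence; "cmd", "cmd_len", "rsp" and "rsp_len" are hypothetical caller
+ * buffers and lengths:
+ *
+ *   ret = thc_swdma_read(tdev, cmd, cmd_len, NULL, rsp, &rsp_len);
+ */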
+
+static int write_dma_buffer(struct thc_device *dev,
+ void *buffer, size_t buf_len)
+{
+ struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ unsigned long len_left;
+ size_t ret;
+ u8 nent;
+ int i;
+
+ /* There is only one PRD table for write */
+ prd_tbl = &write_config->prd_tbls[0];
+
+ if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
+ dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
+ return -EOVERFLOW;
+ }
+
+ sg = write_config->sgls[0];
+ ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
+ if (ret != buf_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, buf_len);
+ return -EIO;
+ }
+
+ prd_tbl = &write_config->prd_tbls[0];
+ len_left = buf_len;
+
+ for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
+ if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
+ dev_err_once(dev->dev, "SGList: zero address or length\n");
+ return -EINVAL;
+ }
+
+ prd_tbl->entries[i].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+
+ if (len_left < sg_dma_len(sg)) {
+ prd_tbl->entries[i].len = len_left;
+ prd_tbl->entries[i].end_of_prd = 1;
+ break;
+ }
+
+ prd_tbl->entries[i].len = sg_dma_len(sg);
+ prd_tbl->entries[i].end_of_prd = 0;
+
+ len_left -= sg_dma_len(sg);
+ }
+
+ dma_set_prd_control(dev, i, 0, write_config);
+
+ return 0;
+}
+
+static void thc_ensure_performance_limitations(struct thc_device *dev)
+{
+ unsigned long delay_usec = 0;
+ /*
+ * Minimum amount of delay the THC / QUICKSPI driver must wait
+ * between end of write operation and begin of read operation.
+ * This value shall be in 10us multiples.
+ */
+ if (dev->perf_limit > 0) {
+ delay_usec = dev->perf_limit * 10;
+ udelay(delay_usec);
+ }
+}
+
+static void thc_dma_write_completion(struct thc_device *dev)
+{
+ thc_ensure_performance_limitations(dev);
+}
+
+/**
+ * thc_dma_write - Use TXDMA to write data to touch device
+ *
+ * @dev: The pointer of THC private device context
+ * @buffer: The pointer of write data buffer
+ * @buf_len: The write data length
+ *
+ * Return: 0 on success, other error codes on failed.
+ */
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
+{
+ bool restore_interrupts = false;
+ u32 sts, ctrl;
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_TXDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!buffer || buf_len <= 0) {
+ dev_err(dev->dev, "Invalid input parameters, buffer %p\n, buf_len %zu\n",
+ buffer, buf_len);
+ return -EINVAL;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
+ dev_err_once(dev->dev, "THC TxDMA is till active and can't start again\n");
+ return -EBUSY;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+
+ ret = write_dma_buffer(dev, buffer, buf_len);
+ if (ret)
+ goto end;
+
+ if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ goto end;
+
+ restore_interrupts = true;
+ }
+
+ dev->write_done = false;
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);
+
+ ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
+ if (ret <= 0 || !dev->write_done) {
+ dev_err_once(dev->dev, "timeout for waiting TxDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ thc_dma_write_completion(dev);
+ mutex_unlock(&dev->thc_bus_lock);
+ return 0;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (restore_interrupts)
+ ret = thc_interrupt_quiesce(dev, false);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");
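+
+/*
+ * Illustrative usage sketch (not part of the driver): output reports are
+ * pushed through TxDMA and the call blocks until the completion interrupt
+ * (or times out after 1s); "report" and "report_len" are hypothetical:
+ *
+ *   ret = thc_dma_write(tdev, report, report_len);
+ */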
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
new file mode 100644
index 000000000000..ca923ff2bef9
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DMA_H_
+#define _INTEL_THC_DMA_H_
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#define THC_POINTER_MASK GENMASK(6, 0)
+#define THC_POINTER_WRAPAROUND 0x80
+#define THC_WRAPAROUND_VALUE_ODD 0x10
+#define THC_WRAPAROUND_VALUE_EVEN 0x90
+#define THC_MIN_BYTES_PER_SG_LIST_ENTRY SZ_4K
+
+#define THC_DEFAULT_RXDMA_POLLING_US_INTERVAL 100
+#define THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT (10 * USEC_PER_MSEC)
+
+/*
+ * THC requires 1KB-aligned DMA addresses: dest_addr holds only bits [63:10]
+ * (54 bits instead of 64), so the lower 10 address bits are never stored.
+ */
+#define THC_ADDRESS_SHIFT 10
+
+/**
+ * enum thc_dma_channel - THC DMA channels
+ * @THC_RXDMA1: legacy channel, reserved for raw data reading
+ * @THC_RXDMA2: DMA to read HID data from touch device
+ * @THC_TXDMA: DMA to write to touch device
+ * @THC_SWDMA: SW triggered DMA to write and read from touch device
+ */
+enum thc_dma_channel {
+ THC_RXDMA1 = 0,
+ THC_RXDMA2 = 1,
+ THC_TXDMA = 2,
+ THC_SWDMA = 3,
+ MAX_THC_DMA_CHANNEL
+};
+
+/**
+ * THC DMA Physical Memory Descriptor (PRD)
+ * @dest_addr: bit[53:0], destination address in system memory
+ * @int_on_completion: bit[63], if set, thc will trigger interrupt to driver
+ * @len: bit[87:64], length of this entry
+ * @end_of_prd: bit[88], if set, this entry is last one of current PRD table
+ * @hw_status: bit[90:89], hw status bits
+ */
+struct thc_prd_entry {
+ u64 dest_addr : 54;
+ u64 reserved1 : 9;
+ u64 int_on_completion : 1;
+ u64 len : 24;
+ u64 end_of_prd : 1;
+ u64 hw_status : 2;
+ u64 reserved2 : 37;
+};
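+
+/*
+ * Note (illustrative suggestion, not in the original): the bit-fields
+ * above add up to exactly 128 bits, so each PRD entry occupies 16 bytes;
+ * a static_assert(sizeof(struct thc_prd_entry) == 16) would be a natural
+ * compile-time guard for this layout.
+ */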
+
+/*
+ * Max OS memory fragmentation is at a 4KB boundary, so addressing 1MB of
+ * virtually contiguous memory requires 256 PRD entries in a single PRD
+ * table, which is the maximum number of entries the HW supports. SW writes
+ * the number of PRD entries for each PRD table into the
+ * THC_M_PRT_RPRD_CNTRL.PTEC register field. Every PRD entry's length must
+ * be a multiple of 4KB except for the last entry in a PRD table. In
+ * practice there are fewer entries in each PRD table (the actual number is
+ * given by the scatter-gather list allocation).
+ */
+#define PRD_ENTRIES_NUM 16
+
+/*
+ * The number of PRD tables equals the number of data buffers.
+ * The max number of PRD tables supported by the HW is 128,
+ * but we allocate only 16.
+ */
+#define PRD_TABLES_NUM 16
+
+/* THC DMA Physical Memory Descriptor Table */
+struct thc_prd_table {
+ struct thc_prd_entry entries[PRD_ENTRIES_NUM];
+};
+
+#define PRD_TABLE_SIZE sizeof(struct thc_prd_table)
+
+/**
+ * struct thc_dma_configuration - THC DMA configure
+ * @dma_channel: DMA channel for current DMA configuration
+ * @prd_tbls_dma_handle: DMA buffer handle
+ * @dir: direction of DMA for this config
+ * @prd_tbls: PRD tables for current DMA
+ * @sgls: array of pointers to scatter-gather lists
+ * @sgls_nent: actual number of entries per sg list
+ * @prd_tbl_num: actual number of PRD tables
+ * @max_packet_size: size of the buffer needed for 1 DMA message (1 PRD table)
+ * @prd_base_addr_high: High 32bits memory address where stores PRD table
+ * @prd_base_addr_low: low 32bits memory address where stores PRD table
+ * @prd_cntrl: PRD control register value
+ * @dma_cntrl: DMA control register value
+ */
+struct thc_dma_configuration {
+ enum thc_dma_channel dma_channel;
+ dma_addr_t prd_tbls_dma_handle;
+ enum dma_data_direction dir;
+ bool is_enabled;
+
+ struct thc_prd_table *prd_tbls;
+ struct scatterlist *sgls[PRD_TABLES_NUM];
+ u8 sgls_nent[PRD_TABLES_NUM];
+ u8 prd_tbl_num;
+
+ size_t max_packet_size;
+ u32 prd_base_addr_high;
+ u32 prd_base_addr_low;
+ u32 prd_cntrl;
+ u32 dma_cntrl;
+};
+
+/*
+ * THC DMA context
+ * Store all THC Channel configures
+ */
+struct thc_dma_context {
+ struct thc_dma_configuration dma_config[MAX_THC_DMA_CHANNEL];
+ u8 use_write_interrupts;
+};
+
+struct thc_device;
+
+int thc_dma_set_max_packet_sizes(struct thc_device *dev,
+ size_t mps_read1, size_t mps_read2,
+ size_t mps_write, size_t mps_swdma);
+int thc_dma_allocate(struct thc_device *dev);
+int thc_dma_configure(struct thc_device *dev);
+void thc_dma_unconfigure(struct thc_device *dev);
+void thc_dma_release(struct thc_device *dev);
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished);
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len);
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len);
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DMA_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
new file mode 100644
index 000000000000..6729c4c25dab
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
@@ -0,0 +1,881 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_HW_H_
+#define _INTEL_THC_HW_H_
+
+#include <linux/bits.h>
+
+/* THC registers offset */
+/* Touch Host Controller Control Register */
+#define THC_M_PRT_CONTROL_OFFSET 0x1008
+/* THC SPI Bus Configuration Register */
+#define THC_M_PRT_SPI_CFG_OFFSET 0x1010
+/* THC SPI Bus Read Opcode Register */
+#define THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET 0x1014
+/* THC SPI Bus DMA Read Opcode Register */
+#define THC_M_PRT_SPI_DMARD_OPCODE_OFFSET 0x1018
+/* THC SPI Bus Write Opcode Register */
+#define THC_M_PRT_SPI_WR_OPCODE_OFFSET 0x101C
+/* THC Interrupt Enable Register */
+#define THC_M_PRT_INT_EN_OFFSET 0x1020
+/* THC Interrupt Status Register */
+#define THC_M_PRT_INT_STATUS_OFFSET 0x1024
+/* THC Error Cause Register */
+#define THC_M_PRT_ERR_CAUSE_OFFSET 0x1028
+/* THC SW sequencing Control */
+#define THC_M_PRT_SW_SEQ_CNTRL_OFFSET 0x1040
+/* THC SW sequencing Status */
+#define THC_M_PRT_SW_SEQ_STS_OFFSET 0x1044
+/* THC SW Sequencing Data DW0 or SPI Address Register */
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET 0x1048
+/* THC SW sequencing Data DW1 */
+#define THC_M_PRT_SW_SEQ_DATA1_OFFSET 0x104C
+/* THC SW sequencing Data DW2 */
+#define THC_M_PRT_SW_SEQ_DATA2_OFFSET 0x1050
+/* THC SW sequencing Data DW3 */
+#define THC_M_PRT_SW_SEQ_DATA3_OFFSET 0x1054
+/* THC SW sequencing Data DW4 */
+#define THC_M_PRT_SW_SEQ_DATA4_OFFSET 0x1058
+/* THC SW sequencing Data DW5 */
+#define THC_M_PRT_SW_SEQ_DATA5_OFFSET 0x105C
+/* THC SW sequencing Data DW6 */
+#define THC_M_PRT_SW_SEQ_DATA6_OFFSET 0x1060
+/* THC SW sequencing Data DW7 */
+#define THC_M_PRT_SW_SEQ_DATA7_OFFSET 0x1064
+/* THC SW sequencing Data DW8 */
+#define THC_M_PRT_SW_SEQ_DATA8_OFFSET 0x1068
+/* THC SW sequencing Data DW9 */
+#define THC_M_PRT_SW_SEQ_DATA9_OFFSET 0x106C
+/* THC SW sequencing Data DW10 */
+#define THC_M_PRT_SW_SEQ_DATA10_OFFSET 0x1070
+/* THC SW sequencing Data DW11 */
+#define THC_M_PRT_SW_SEQ_DATA11_OFFSET 0x1074
+/* THC SW sequencing Data DW12 */
+#define THC_M_PRT_SW_SEQ_DATA12_OFFSET 0x1078
+/* THC SW sequencing Data DW13 */
+#define THC_M_PRT_SW_SEQ_DATA13_OFFSET 0x107C
+/* THC SW sequencing Data DW14 */
+#define THC_M_PRT_SW_SEQ_DATA14_OFFSET 0x1080
+/* THC SW sequencing Data DW15 */
+#define THC_M_PRT_SW_SEQ_DATA15_OFFSET 0x1084
+/* THC SW sequencing Data DW16 */
+#define THC_M_PRT_SW_SEQ_DATA16_OFFSET 0x1088
+/* THC Write PRD Base Address Register Low */
+#define THC_M_PRT_WPRD_BA_LOW_OFFSET 0x1090
+/* THC Write PRD Base Address Register High */
+#define THC_M_PRT_WPRD_BA_HI_OFFSET 0x1094
+/* THC Write DMA Control */
+#define THC_M_PRT_WRITE_DMA_CNTRL_OFFSET 0x1098
+/* THC Write Interrupt Status */
+#define THC_M_PRT_WRITE_INT_STS_OFFSET 0x109C
+/* THC Write DMA Error Register */
+#define THC_M_PRT_WRITE_DMA_ERR_OFFSET 0x10A0
+/* THC device address for the bulk write */
+#define THC_M_PRT_WR_BULK_ADDR_OFFSET 0x10B4
+/* THC Device Interrupt Cause Register Address */
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET 0x10B8
+/* THC Device Interrupt Cause Register Value */
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET 0x10BC
+/* THC TXDMA Frame Count */
+#define THC_M_PRT_TX_FRM_CNT_OFFSET 0x10E0
+/* THC TXDMA Packet Count */
+#define THC_M_PRT_TXDMA_PKT_CNT_OFFSET 0x10E4
+/* THC Device Interrupt Count on this port */
+#define THC_M_PRT_DEVINT_CNT_OFFSET 0x10E8
+/* Touch Device Interrupt Cause register Format Configuration Register 1 */
+#define THC_M_PRT_DEVINT_CFG_1_OFFSET 0x10EC
+/* Touch Device Interrupt Cause register Format Configuration Register 2 */
+#define THC_M_PRT_DEVINT_CFG_2_OFFSET 0x10F0
+/* THC Read PRD Base Address Low for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_1_OFFSET 0x1100
+/* THC Read PRD Base Address High for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_1_OFFSET 0x1104
+/* THC Read PRD Control for the 1st RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_1_OFFSET 0x1108
+/* THC Read DMA Control for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_1_OFFSET 0x110C
+/* THC Read Interrupt Status for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_1_OFFSET 0x1110
+/* THC Read DMA Error Register for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_1_OFFSET 0x1114
+/* Touch Sequencer GuC Tail Offset Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_1_OFFSET 0x1118
+/* Touch Sequencer GuC Tail Offset Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_1_OFFSET 0x111C
+/* Touch Host Controller GuC Work Queue Item Size for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_OFFSET 0x1120
+/* Touch Host Controller GuC Control register for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_1_OFFSET 0x1124
+/* Touch Sequencer Control for the 1st DMA */
+#define THC_M_PRT_TSEQ_CNTRL_1_OFFSET 0x1128
+/* Touch Sequencer GuC Doorbell Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_OFFSET 0x1130
+/* Touch Sequencer GuC Doorbell Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_OFFSET 0x1134
+/* Touch Sequencer GuC Doorbell Data */
+#define THC_M_PRT_GUC_DB_DATA_1_OFFSET 0x1138
+/* Touch Sequencer GuC Tail Offset Initial Value for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_OFFSET 0x1140
+/* THC Device Address for the bulk/touch data read for the 1st RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_1_OFFSET 0x1170
+/* THC Gfx/SW Doorbell Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_1_OFFSET 0x11A0
+/* THC Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_1_OFFSET 0x11A4
+/* THC Micro Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_1_OFFSET 0x11A8
+/* THC Packet Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET 0x11AC
+/*
+ * THC Software Interrupt Count from the 1st Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_1_OFFSET 0x11B0
+/* Touch Sequencer Frame Drop Counter for the 1st RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_1_OFFSET 0x11B4
+/* THC Coalescing 1 */
+#define THC_M_PRT_COALESCE_1_OFFSET 0x11B8
+/* THC Read PRD Base Address Low for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_2_OFFSET 0x1200
+/* THC Read PRD Base Address High for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_2_OFFSET 0x1204
+/* THC Read PRD Control for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_2_OFFSET 0x1208
+/* THC Read DMA Control for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_2_OFFSET 0x120C
+/* THC Read Interrupt Status for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_2_OFFSET 0x1210
+/* THC Read DMA Error Register for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_2_OFFSET 0x1214
+/* Touch Sequencer GuC Tail Offset Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_2_OFFSET 0x1218
+/* Touch Sequencer GuC Tail Offset Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_2_OFFSET 0x121C
+/* Touch Host Controller GuC Work Queue Item Size for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_OFFSET 0x1220
+/* Touch Host Controller GuC Control register for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_2_OFFSET 0x1224
+/* Touch Sequencer Control for the 2nd DMA */
+#define THC_M_PRT_TSEQ_CNTRL_2_OFFSET 0x1228
+/* Touch Sequencer GuC Doorbell Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_OFFSET 0x1230
+/* Touch Sequencer GuC Doorbell Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_OFFSET 0x1234
+/* Touch Sequencer GuC Doorbell Data for PRD2 */
+#define THC_M_PRT_GUC_DB_DATA_2_OFFSET 0x1238
+/* Touch Sequencer GuC Tail Offset Initial Value for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_OFFSET 0x1240
+/* THC Device Address for the bulk/touch data read for the 2nd RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_2_OFFSET 0x1270
+/* THC Gfx/SW Doorbell Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_2_OFFSET 0x12A0
+/* THC Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_2_OFFSET 0x12A4
+/* THC Micro Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_2_OFFSET 0x12A8
+/* THC Packet Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET 0x12AC
+/*
+ * THC Software Interrupt Count from the 2nd Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_2_OFFSET 0x12B0
+/* Touch Sequencer Frame Drop Counter for the 2nd RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_2_OFFSET 0x12B4
+/* THC Coalescing 2 */
+#define THC_M_PRT_COALESCE_2_OFFSET 0x12B8
+/* THC SPARE REGISTER */
+#define THC_M_PRT_SPARE_REG_OFFSET 0x12BC
+/* THC Read PRD Base Address Low for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_SW_OFFSET 0x12C0
+/* THC Read PRD Base Address High for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_SW_OFFSET 0x12C4
+/* THC Read PRD Control for the SW RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_SW_OFFSET 0x12C8
+/* THC Read DMA Control for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET 0x12CC
+/* THC Read Interrupt Status for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET 0x12D0
+/* Touch Sequencer Control for the SW DMA */
+#define THC_M_PRT_TSEQ_CNTRL_SW_OFFSET 0x12D4
+/* Address for the bulk read for SW DMA engine */
+#define THC_M_PRT_RD_BULK_ADDR_SW_OFFSET 0x12D8
+/* THC Frame Count from the SW RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_SW_OFFSET 0x12DC
+/* THC Packet Count from the SW RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_SW_OFFSET 0x12E0
+/* SW DMA PRD Table Length */
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET 0x12E4
+/* THC timing based Frame/Interrupt coalescing control register for 1st RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_1_OFFSET 0x12E8
+/* THC timing based Frame/Interrupt coalescing control register for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_2_OFFSET 0x12EC
+/* Touch Sequencer PRD Table Empty Counter for the 1st RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET 0x12F0
+/* Touch Sequencer PRD Table Empty Counter for the 2nd RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET 0x12F4
+/* THC coalescing status to reflect the current coalescing FSM state for 1st RXDMA */
+#define THC_M_PRT_COALESCE_STS_1_OFFSET 0x12F8
+/* THC coalescing status to reflect the current coalescing FSM state for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_STS_2_OFFSET 0x12FC
+/* THC Register for the SPI Port Duty Cycle Configuration */
+#define THC_M_PRT_SPI_DUTYC_CFG_OFFSET 0x1300
+/* THC Register for SW I2C Write Sequencing control */
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET 0x1304
+/* THC current Timestamp Register for RXDMA1 */
+#define THC_M_PRT_TIMESTAMP_1_OFFSET 0x1308
+/* THC current Timestamp Register for RXDMA2 */
+#define THC_M_PRT_TIMESTAMP_2_OFFSET 0x130C
+/* Current SYNC Event Timestamp Register */
+#define THC_M_PRT_SYNC_TIMESTAMP_OFFSET 0x1310
+/* THC Display Sync Register */
+#define THC_M_PRT_DISP_SYNC_OFFSET 0x1314
+/* THC Display Sync Register 2 */
+#define THC_M_PRT_DISP_SYNC_2_OFFSET 0x1318
+/* THC Register for I2C Configuration */
+#define THC_M_PRT_I2C_CFG_OFFSET 0x131C
+
+/* THC register bits definition */
+#define TXN_ERR_INT_STS_BIT BIT(28)
+#define TXN_FATAL_INT_STS_BIT BIT(30)
+
+#define NONDMA_INT_STS_BIT BIT(4)
+#define EOF_INT_STS_BIT BIT(5)
+
+#define THC_CFG_DID_VID_VID GENMASK(15, 0)
+#define THC_CFG_DID_VID_DID GENMASK(31, 16)
+
+#define THC_CFG_STS_CMD_IOSE BIT(0)
+#define THC_CFG_STS_CMD_MSE BIT(1)
+#define THC_CFG_STS_CMD_BME BIT(2)
+#define THC_CFG_STS_CMD_SPCYC BIT(3)
+#define THC_CFG_STS_CMD_MWRIEN BIT(4)
+#define THC_CFG_STS_CMD_VGAPS BIT(5)
+#define THC_CFG_STS_CMD_PERRR BIT(6)
+#define THC_CFG_STS_CMD_SERREN BIT(8)
+#define THC_CFG_STS_CMD_FBTBEN BIT(9)
+#define THC_CFG_STS_CMD_INTD BIT(10)
+#define THC_CFG_STS_CMD_INTS BIT(19)
+#define THC_CFG_STS_CMD_CAPL BIT(20)
+#define THC_CFG_STS_CMD_MCAP BIT(21)
+#define THC_CFG_STS_CMD_FBTBC BIT(23)
+#define THC_CFG_STS_CMD_MDPE BIT(24)
+#define THC_CFG_STS_CMD_DEVT GENMASK(26, 25)
+#define THC_CFG_STS_CMD_STA BIT(27)
+#define THC_CFG_STS_CMD_RTA BIT(28)
+#define THC_CFG_STS_CMD_RMA BIT(29)
+#define THC_CFG_STS_CMD_SSE BIT(30)
+#define THC_CFG_STS_CMD_DPE BIT(31)
+
+#define THC_CFG_CC_RID_RID GENMASK(7, 0)
+#define THC_CFG_CC_RID_PI GENMASK(15, 8)
+#define THC_CFG_CC_RID_SCC GENMASK(23, 16)
+#define THC_CFG_CC_RID_BCC GENMASK(31, 24)
+
+#define THC_CFG_BIST_HTYPE_LT_CLS_CLSZ GENMASK(7, 0)
+#define THC_CFG_BIST_HTYPE_LT_CLS_LT GENMASK(15, 8)
+#define THC_CFG_BIST_HTYPE_LT_CLS_HTYPE GENMASK(22, 16)
+#define THC_CFG_BIST_HTYPE_LT_CLS_MFD BIT(23)
+
+#define THC_CFG_BAR0_LOW_MEMSPACE BIT(0)
+#define THC_CFG_BAR0_LOW_TYP GENMASK(2, 1)
+#define THC_CFG_BAR0_LOW_PREFETCH BIT(3)
+#define THC_CFG_BAR0_LOW_MEMSIZE GENMASK(14, 4)
+#define THC_CFG_BAR0_LOW_MEMBAR GENMASK(31, 15)
+#define THC_CFG_BAR0_HI_MEMBAR GENMASK(31, 0)
+
+#define THC_CFG_SID_SVID_SSVID GENMASK(15, 0)
+#define THC_CFG_SID_SVID_SSID GENMASK(31, 16)
+
+#define THC_CFG_CAPP_CP GENMASK(7, 0)
+
+#define THC_CFG_INT_ILINE GENMASK(7, 0)
+#define THC_CFG_INT_IPIN GENMASK(15, 8)
+
+#define THC_CFG_UR_STS_CTL_URRE BIT(0)
+#define THC_CFG_UR_STS_CTL_URD BIT(1)
+#define THC_CFG_UR_STS_CTL_FD BIT(2)
+
+#define THC_CFG_MSIMC_MSINP_MSICID_CAPID GENMASK(7, 0)
+#define THC_CFG_MSIMC_MSINP_MSICID_NXTP GENMASK(15, 8)
+#define THC_CFG_MSIMC_MSINP_MSICID_MSIE BIT(16)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMC GENMASK(19, 17)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMEN GENMASK(22, 20)
+#define THC_CFG_MSIMC_MSINP_MSICID_XAC BIT(23)
+#define THC_CFG_MSIMC_MSINP_MSICID_PVMC BIT(24)
+#define THC_CFG_MSIMA_MADDR GENMASK(31, 2)
+#define THC_CFG_MSIMUA_MAUDDR GENMASK(31, 0)
+#define THC_CFG_MSIMD_MDAT GENMASK(15, 0)
+
+#define THC_CFG_PMCAP_PMNP_PMCID_CAPP GENMASK(7, 0)
+#define THC_CFG_PMCAP_PMNP_PMCID_NXTP GENMASK(15, 8)
+#define THC_CFG_PMCAP_PMNP_PMCID_VER GENMASK(18, 16)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMECLK BIT(19)
+#define THC_CFG_PMCAP_PMNP_PMCID_DSI BIT(21)
+#define THC_CFG_PMCAP_PMNP_PMCID_AUXC GENMASK(24, 22)
+#define THC_CFG_PMCAP_PMNP_PMCID_D1S BIT(25)
+#define THC_CFG_PMCAP_PMNP_PMCID_D2S BIT(26)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMES GENMASK(31, 27)
+
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PWRST GENMASK(1, 0)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_NSR BIT(3)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMEEN BIT(8)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DSEL GENMASK(12, 9)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DS GENMASK(14, 13)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMESTS BIT(15)
+
+#define THC_CFG_DEVIDLE_CAPPID GENMASK(7, 0)
+#define THC_CFG_DEVIDLE_NCAPPP GENMASK(15, 8)
+#define THC_CFG_DEVIDLE_LENGTH GENMASK(23, 16)
+#define THC_CFG_DEVIDLE_REV GENMASK(27, 24)
+#define THC_CFG_DEVIDLE_VID GENMASK(31, 28)
+
+#define THC_CFG_VSHDR_VSECID GENMASK(15, 0)
+#define THC_CFG_VSHDR_VSECR GENMASK(19, 16)
+#define THC_CFG_VSHDR_VSECL GENMASK(31, 20)
+
+#define THC_CFG_SWLTRPTR_VALID BIT(0)
+#define THC_CFG_SWLTRPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_SWLTRPTR_SWLTRLOC GENMASK(31, 4)
+
+#define THC_CFG_DEVIDLEPTR_VALID BIT(0)
+#define THC_CFG_DEVIDLEPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_DEVIDLEPTR_DEVIDLELOC GENMASK(31, 4)
+#define THC_CFG_DEVIDLEPOL_POLV GENMASK(9, 0)
+#define THC_CFG_DEVIDLEPOL_POLS GENMASK(12, 10)
+
+#define THC_CFG_PCE_SPE BIT(0)
+#define THC_CFG_PCE_I3E BIT(1)
+#define THC_CFG_PCE_D3HE BIT(2)
+#define THC_CFG_PCE_SE BIT(3)
+#define THC_CFG_PCE_HAE BIT(5)
+
+#define THC_CFG_MANID_PROC GENMASK(7, 0)
+#define THC_CFG_MANID_MID GENMASK(15, 8)
+#define THC_CFG_MANID_MSID GENMASK(23, 16)
+#define THC_CFG_MANID_DOT GENMASK(27, 24)
+
+#define THC_M_CMN_DEVIDLECTRL_CIP BIT(0)
+#define THC_M_CMN_DEVIDLECTRL_IR BIT(1)
+#define THC_M_CMN_DEVIDLECTRL_DEVIDLE BIT(2)
+#define THC_M_CMN_DEVIDLECTRL_RR BIT(3)
+#define THC_M_CMN_DEVIDLECTRL_IRC BIT(4)
+
+#define THC_M_CMN_LTR_CTRL_OFFSET 0x14
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ BIT(0)
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN BIT(1)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_REQ BIT(2)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_EN BIT(3)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_SCALE GENMASK(6, 4)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_VAL GENMASK(16, 7)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE GENMASK(19, 17)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_VAL GENMASK(29, 20)
+#define THC_M_CMN_LTR_CTRL_LAST_LTR_SENT GENMASK(31, 30)
+
+#define THC_M_PRT_CONTROL_TSFTRST BIT(0)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN BIT(1)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS BIT(2)
+#define THC_M_PRT_CONTROL_DEVRST BIT(3)
+#define THC_M_PRT_CONTROL_THC_DRV_LOCK_EN BIT(13)
+#define THC_M_PRT_CONTROL_THC_INSTANCE_INDEX GENMASK(18, 16)
+#define THC_M_PRT_CONTROL_PORT_INDEX GENMASK(22, 20)
+#define THC_M_PRT_CONTROL_THC_ARB_POLICY GENMASK(25, 24)
+#define THC_M_PRT_CONTROL_THC_BIOS_LOCK_EN BIT(27)
+#define THC_M_PRT_CONTROL_PORT_SUPPORTED BIT(28)
+#define THC_M_PRT_CONTROL_SPI_IO_RDY BIT(29)
+#define THC_M_PRT_CONTROL_PORT_TYPE GENMASK(31, 30)
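These port-control fields are single-register masks intended for the kernel's bitfield helpers. Below is a minimal sketch of decoding the port identity; the helper name is hypothetical, and it assumes the THC_M_PRT_CONTROL_OFFSET define from earlier in this header plus a regmap-backed register window:

```c
#include <linux/bitfield.h>
#include <linux/regmap.h>

/*
 * Hypothetical helper: report whether this port is wired up and, if so,
 * its type and hardware index, decoded from THC_M_PRT_CONTROL.
 * THC_M_PRT_CONTROL_OFFSET is assumed from earlier in this header.
 */
static int thc_decode_port(struct regmap *map, u32 *type, u32 *index)
{
	u32 ctrl;
	int ret;

	ret = regmap_read(map, THC_M_PRT_CONTROL_OFFSET, &ctrl);
	if (ret)
		return ret;

	if (!(ctrl & THC_M_PRT_CONTROL_PORT_SUPPORTED))
		return -ENODEV;

	*type = FIELD_GET(THC_M_PRT_CONTROL_PORT_TYPE, ctrl);
	*index = FIELD_GET(THC_M_PRT_CONTROL_PORT_INDEX, ctrl);
	return 0;
}
```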
+
+#define THC_M_PRT_SPI_CFG_SPI_TRDC GENMASK(1, 0)
+#define THC_M_PRT_SPI_CFG_SPI_TRMODE GENMASK(3, 2)
+#define THC_M_PRT_SPI_CFG_SPI_TCRF GENMASK(6, 4)
+#define THC_M_PRT_SPI_CFG_SPI_RD_MPS GENMASK(15, 7)
+#define THC_M_PRT_SPI_CFG_SPI_TWMODE GENMASK(19, 18)
+#define THC_M_PRT_SPI_CFG_SPI_TCWF GENMASK(22, 20)
+#define THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN BIT(23)
+#define THC_M_PRT_SPI_CFG_SPI_WR_MPS GENMASK(31, 24)
+
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO GENMASK(31, 24)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO GENMASK(23, 16)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO GENMASK(15, 8)
+
+#define THC_M_PRT_INT_EN_SIPE BIT(0)
+#define THC_M_PRT_INT_EN_SBO BIT(1)
+#define THC_M_PRT_INT_EN_SIDR BIT(2)
+#define THC_M_PRT_INT_EN_SOFB BIT(3)
+#define THC_M_PRT_INT_EN_INVLD_DEV_ENTRY_INT_EN BIT(9)
+#define THC_M_PRT_INT_EN_FRAME_BABBLE_ERR_INT_EN BIT(10)
+#define THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN BIT(12)
+#define THC_M_PRT_INT_EN_PRD_ENTRY_ERR_INT_EN BIT(13)
+#define THC_M_PRT_INT_EN_DISP_SYNC_EVT_INT_EN BIT(14)
+#define THC_M_PRT_INT_EN_DEV_RAW_INT_EN BIT(15)
+#define THC_M_PRT_INT_EN_FATAL_ERR_INT_EN BIT(16)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN BIT(17)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN BIT(18)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN BIT(19)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN BIT(20)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_EMPTY_INT_EN BIT(21)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN BIT(22)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN BIT(24)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_STOP_DET_INT_EN BIT(25)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_START_DET_INT_EN BIT(26)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN BIT(27)
+#define THC_M_PRT_INT_EN_TXN_ERR_INT_EN BIT(29)
+#define THC_M_PRT_INT_EN_GBL_INT_EN BIT(31)
+
+#define THC_M_PRT_INT_STATUS_DISP_SYNC_EVT_INT_STS BIT(14)
+#define THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS BIT(15)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS BIT(17)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS BIT(18)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS BIT(19)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS BIT(20)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS BIT(21)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS BIT(22)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS BIT(23)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS BIT(24)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS BIT(25)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS BIT(26)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS BIT(27)
+#define THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS BIT(28)
+#define THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS BIT(30)
+
+#define THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY BIT(9)
+#define THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR BIT(10)
+#define THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR BIT(12)
+#define THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR BIT(13)
+#define THC_M_PRT_ERR_CAUSE_FATAL_ERR_CAUSE GENMASK(23, 16)
+
+#define THC_M_PRT_SW_SEQ_CNTRL_TSSGO BIT(0)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE BIT(1)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD GENMASK(15, 8)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC GENMASK(31, 16)
+#define THC_M_PRT_SW_SEQ_STS_TSSDONE BIT(0)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_ERR BIT(1)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_CIP BIT(3)
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR GENMASK(31, 0)
+#define THC_M_PRT_SW_SEQ_DATA1_THC_SW_SEQ_DATA1 GENMASK(31, 0)
+
+#define THC_M_PRT_WPRD_BA_LOW_THC_M_PRT_WPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_WPRD_BA_HI_THC_M_PRT_WPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START BIT(0)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_ERROR BIT(1)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC BIT(2)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL BIT(3)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_UHS BIT(23)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC GENMASK(31, 24)
+
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS BIT(0)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS BIT(1)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS BIT(2)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE BIT(3)
+
+#define THC_M_PRT_WR_BULK_ADDR_THC_M_PRT_WR_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_THC_M_PRT_DEV_INT_CAUSE_ADDR GENMASK(31, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_INTERRUPT_TYPE GENMASK(3, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_MICRO_FRAME_SIZE GENMASK(23, 4)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_BEGINNING_OF_FRAME BIT(29)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_END_OF_FRAME BIT(30)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_FRAME_TYPE BIT(31)
+
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET GENMASK(14, 10)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_SEND_ICR_US_EN BIT(15)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL GENMASK(31, 16)
+
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT GENMASK(15, 12)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE BIT(16)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL BIT(17)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_ADDRINC_DIS BIT(24)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_ADDRINC_DIS BIT(25)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_PKT_STRM_EN BIT(26)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_PKT_STRM_EN BIT(27)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_DEVINT_POL BIT(28)
+
+#define THC_M_PRT_RPRD_BA_LOW_1_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_1_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_RPRD_CNTRL_PCD GENMASK(6, 0)
+#define THC_M_PRT_RPRD_CNTRL_PTEC GENMASK(15, 8)
+#define THC_M_PRT_RPRD_CNTRL_PREFETCH_WM GENMASK(19, 16)
+
+#define THC_M_PRT_READ_DMA_CNTRL_START BIT(0)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_ERROR BIT(1)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_IOC BIT(2)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_STALL BIT(3)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_NDDI BIT(4)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_EOF BIT(5)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL BIT(7)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCRP GENMASK(15, 8)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCWP GENMASK(23, 16)
+#define THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN BIT(28)
+#define THC_M_PRT_READ_DMA_CNTRL_SOO BIT(29)
+#define THC_M_PRT_READ_DMA_CNTRL_UHS BIT(30)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCPR BIT(31)
+
+#define THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS BIT(0)
+#define THC_M_PRT_READ_DMA_INT_STS_ERROR_STS BIT(1)
+#define THC_M_PRT_READ_DMA_INT_STS_IOC_STS BIT(2)
+#define THC_M_PRT_READ_DMA_INT_STS_STALL_STS BIT(3)
+#define THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS BIT(4)
+#define THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS BIT(5)
+#define THC_M_PRT_READ_DMA_INT_STS_ACTIVE BIT(8)
+
+#define THC_M_PRT_READ_DMA_ERR_1_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_1_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_1_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_1_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_1_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_1_RTO BIT(4)
+#define THC_M_PRT_TSEQ_CNTRL_1_EWOG BIT(5)
+#define THC_M_PRT_TSEQ_CNTRL_1_RWOGC BIT(6)
+#define THC_M_PRT_TSEQ_CNTRL_1_RX_DATA_FIFO_WR_WM GENMASK(25, 16)
+#define THC_M_PRT_TSEQ_CNTRL_1_RESET_PREP_CHICKEN BIT(30)
+#define THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN BIT(31)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_GUC_DB_ADDR_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_DB_DATA_1_GUC_DB_DATA GENMASK(31, 0)
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_1_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_1_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_1_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_1_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_RPRD_BA_LOW_2_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_2_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_READ_DMA_ERR_2_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_2_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_2_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_2_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_2_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_2_RTO BIT(4)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_GUC_DB_ADDR_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_DB_DATA_2_GUC_DB_DATA GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_2_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_2_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_2_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_2_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN BIT(23)
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN BIT(23)
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC BIT(31)
+#define THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC BIT(31)
+
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN GENMASK(23, 0)
+
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL GENMASK(3, 0)
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN BIT(25)
+
+/* CS Assertion delay default value */
+#define THC_CSA_CK_DELAY_VAL_DEFAULT 4
+
+/* ARB policy definition */
+/* Arbiter switches on packet boundary */
+#define THC_ARB_POLICY_PACKET_BOUNDARY 0
+/* Arbiter switches on Micro Frame boundary */
+#define THC_ARB_POLICY_UFRAME_BOUNDARY 1
+/* Arbiter switches on Frame boundary */
+#define THC_ARB_POLICY_FRAME_BOUNDARY 2
+
+#define THC_REGMAP_POLLING_INTERVAL_US 10 /* 10us */
+#define THC_PIO_DONE_TIMEOUT_US USEC_PER_SEC /* 1s */
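These two constants pair naturally with regmap's polling helper. A sketch using a hypothetical wrapper follows; the status-register offset is taken as a parameter rather than assumed, and THC_M_PRT_SW_SEQ_STS_TSSDONE is the PIO-done bit defined above:

```c
#include <linux/regmap.h>

/*
 * Hypothetical wrapper: poll the SW sequencer status register every
 * 10us until the PIO-done bit is set, giving up after 1s.
 */
static int thc_wait_pio_done(struct regmap *map, unsigned int sts_offset)
{
	u32 sts;

	return regmap_read_poll_timeout(map, sts_offset, sts,
					sts & THC_M_PRT_SW_SEQ_STS_TSSDONE,
					THC_REGMAP_POLLING_INTERVAL_US,
					THC_PIO_DONE_TIMEOUT_US);
}
```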
+
+/* Default configuration values for HIDSPI */
+#define THC_BIT_OFFSET_INTERRUPT_TYPE 4
+/* input_report_type is 4 bits for HIDSPI */
+#define THC_BIT_LENGTH_INTERRUPT_TYPE 4
+/* Last fragment indicator is bit 15 for HIDSPI */
+#define THC_BIT_OFFSET_LAST_FRAGMENT_FLAG 22
+#define THC_BIT_OFFSET_MICROFRAME_SIZE 8
+/* input_report_length is 14 bits for HIDSPI */
+#define THC_BIT_LENGTH_MICROFRAME_SIZE 14
+/* MFS unit as a power of 2 */
+#define THC_UNIT_MICROFRAME_SIZE 2
+#define THC_BITMASK_INTERRUPT_TYPE_DATA 1
+#define THC_BITMASK_INVALID_TYPE_DATA 2
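These defaults are sized to fit the DEVINT_CFG fields defined earlier; a minimal sketch under that assumption follows. The pairing of the last-fragment offset with the EOF field, and the helper itself, are illustrative rather than taken from the driver:

```c
#include <linux/bitfield.h>

/* Illustrative only: pack the HIDSPI defaults into a DEVINT_CFG_1 value */
static u32 thc_hidspi_devint_cfg1(void)
{
	return FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET,
			  THC_BIT_OFFSET_INTERRUPT_TYPE) |
	       FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN,
			  THC_BIT_LENGTH_INTERRUPT_TYPE) |
	       FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET,
			  THC_BIT_OFFSET_LAST_FRAGMENT_FLAG);
}
```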
+
+/* Interrupt Quiesce default timeout value */
+#define THC_QUIESCE_EN_TIMEOUT_US USEC_PER_SEC /* 1s */
+
+/* LTR definition */
+/*
+ * THC uses a scale to calculate the final LTR value.
+ * The scale is a geometric progression with a step of 2^5, starting from 2^0.
+ * For example, THC_LTR_SCALE_2 (2) means 2^(5 * 2) = 1024; the unit is ns.
+ */
+#define THC_LTR_SCALE_0 0
+#define THC_LTR_SCALE_1 1
+#define THC_LTR_SCALE_2 2
+#define THC_LTR_SCALE_3 3
+#define THC_LTR_SCALE_4 4
+#define THC_LTR_SCALE_5 5
+#define THC_LTR_MODE_ACTIVE 0
+#define THC_LTR_MODE_LP 1
+#define THC_LTR_MIN_VAL_SCALE_3 BIT(10)
+#define THC_LTR_MAX_VAL_SCALE_3 BIT(15)
+#define THC_LTR_MIN_VAL_SCALE_4 BIT(15)
+#define THC_LTR_MAX_VAL_SCALE_4 BIT(20)
+#define THC_LTR_MIN_VAL_SCALE_5 BIT(20)
+#define THC_LTR_MAX_VAL_SCALE_5 BIT(25)
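The encoding rule above reduces to a shift; a hypothetical helper makes the arithmetic concrete (scale 2 with value 1 encodes 1 * 2^10 = 1024 ns, matching the comment):

```c
/* Hypothetical helper: latency in ns encoded by an LTR (scale, value) pair */
static u64 thc_ltr_to_ns(u32 scale, u32 value)
{
	return (u64)value << (5 * scale);
}
```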
+
+/*
+ * THC PIO opcode default values
+ * @THC_PIO_OP_SPI_TIC_READ: THC opcode for SPI PIO read
+ * @THC_PIO_OP_SPI_TIC_WRITE: THC opcode for SPI PIO write
+ * @THC_PIO_OP_I2C_SUBSYSTEM_READ: THC opcode for reading I2C subsystem registers
+ * @THC_PIO_OP_I2C_SUBSYSTEM_WRITE: THC opcode for writing I2C subsystem registers
+ * @THC_PIO_OP_I2C_TIC_READ: THC opcode for reading from an I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE: THC opcode for writing to an I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE_AND_READ: THC opcode for an I2C write followed by a read
+ */
+enum thc_pio_opcode {
+ THC_PIO_OP_SPI_TIC_READ = 0x4,
+ THC_PIO_OP_SPI_TIC_WRITE = 0x6,
+ THC_PIO_OP_I2C_SUBSYSTEM_READ = 0x12,
+ THC_PIO_OP_I2C_SUBSYSTEM_WRITE = 0x13,
+ THC_PIO_OP_I2C_TIC_READ = 0x14,
+ THC_PIO_OP_I2C_TIC_WRITE = 0x18,
+ THC_PIO_OP_I2C_TIC_WRITE_AND_READ = 0x1C,
+};
+
+/**
+ * THC SPI IO mode
+ * @THC_SINGLE_IO: single IO mode, 1(opcode) - 1(address) - 1(data)
+ * @THC_DUAL_IO: dual IO mode, 1(opcode) - 2(address) - 2(data)
+ * @THC_QUAD_IO: quad IO mode, 1(opcode) - 4(address) - 4(data)
+ * @THC_QUAD_PARALLEL_IO: parallel quad IO mode, 4(opcode) - 4(address) - 4(data)
+ */
+enum thc_spi_iomode {
+ THC_SINGLE_IO = 0,
+ THC_DUAL_IO = 1,
+ THC_QUAD_IO = 2,
+ THC_QUAD_PARALLEL_IO = 3,
+};
+
+/**
+ * THC SPI frequency divider
+ *
+ * The final DIV value is determined by the THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN bit.
+ * If THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC takes the DIV value directly;
+ * if THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC takes the DIV value multiplied by 8.
+ *
+ * For example, if the THC input clock is 125MHz:
+ * when THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC_SPI_FRQ_DIV_3 means DIV is 3,
+ * so the final THC clock is 125 / 3 = 41.667MHz;
+ * when THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC_SPI_FRQ_DIV_3 means DIV is 3 * 8,
+ * so the final THC clock is 125 / (3 * 8) = 5.208MHz.
+ */
+enum thc_spi_frq_div {
+ THC_SPI_FRQ_RESERVED = 0,
+ THC_SPI_FRQ_DIV_1 = 1,
+ THC_SPI_FRQ_DIV_2 = 2,
+ THC_SPI_FRQ_DIV_3 = 3,
+ THC_SPI_FRQ_DIV_4 = 4,
+ THC_SPI_FRQ_DIV_5 = 5,
+ THC_SPI_FRQ_DIV_6 = 6,
+ THC_SPI_FRQ_DIV_7 = 7,
+};
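A short sketch of the divider rule described in the comment above; the helper name and the input-clock parameter are assumptions:

```c
/*
 * Hypothetical helper: effective SPI clock in Hz. With a 125MHz input,
 * THC_SPI_FRQ_DIV_3 yields 41.667MHz normally and 5.208MHz when
 * LOW_FREQ_EN is set.
 */
static u32 thc_spi_clk_hz(u32 input_hz, enum thc_spi_frq_div div, bool low_freq_en)
{
	u32 effective_div = low_freq_en ? div * 8 : div;

	return effective_div ? input_hz / effective_div : 0;
}
```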
+
+/* THC I2C sub-system registers */
+#define THC_I2C_IC_CON_OFFSET 0x0
+#define THC_I2C_IC_TAR_OFFSET 0x4
+#define THC_I2C_IC_SAR_OFFSET 0x8
+#define THC_I2C_IC_HS_MADDR_OFFSET 0xC
+#define THC_I2C_IC_DATA_CMD_OFFSET 0x10
+#define THC_I2C_IC_SS_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_UFM_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_SS_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_UFM_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_FS_SCL_HCNT_OFFSET 0x1C
+#define THC_I2C_IC_UFM_TBUF_CNT_OFFSET 0x1C
+#define THC_I2C_IC_FS_SCL_LCNT_OFFSET 0x20
+#define THC_I2C_IC_HS_SCL_HCNT_OFFSET 0x24
+#define THC_I2C_IC_HS_SCL_LCNT_OFFSET 0x28
+#define THC_I2C_IC_INTR_STAT_OFFSET 0x2C
+#define THC_I2C_IC_INTR_MASK_OFFSET 0x30
+#define THC_I2C_IC_RAW_INTR_STAT_OFFSET 0x34
+#define THC_I2C_IC_RX_TL_OFFSET 0x38
+#define THC_I2C_IC_TX_TL_OFFSET 0x3C
+#define THC_I2C_IC_CLR_INTR_OFFSET 0x40
+#define THC_I2C_IC_CLR_RX_UNDER_OFFSET 0x44
+#define THC_I2C_IC_CLR_RX_OVER_OFFSET 0x48
+#define THC_I2C_IC_CLR_TX_OVER_OFFSET 0x4C
+#define THC_I2C_IC_CLR_RD_REQ_OFFSET 0x50
+#define THC_I2C_IC_CLR_TX_ABRT_OFFSET 0x54
+#define THC_I2C_IC_CLR_RX_DONE_OFFSET 0x58
+#define THC_I2C_IC_CLR_ACTIVITY_OFFSET 0x5C
+#define THC_I2C_IC_CLR_STOP_DET_OFFSET 0x60
+#define THC_I2C_IC_CLR_START_DET_OFFSET 0x64
+#define THC_I2C_IC_CLR_GEN_CALL_OFFSET 0x68
+#define THC_I2C_IC_ENABLE_OFFSET 0x6C
+#define THC_I2C_IC_STATUS_OFFSET 0x70
+#define THC_I2C_IC_TXFLR_OFFSET 0x74
+#define THC_I2C_IC_RXFLR_OFFSET 0x78
+#define THC_I2C_IC_SDA_HOLD_OFFSET 0x7C
+#define THC_I2C_IC_TX_ABRT_SOURCE_OFFSET 0x80
+#define THC_I2C_IC_SLV_DATA_NACK_ONLY_OFFSET 0x84
+#define THC_I2C_IC_DMA_CR_OFFSET 0x88
+#define THC_I2C_IC_DMA_TDLR_OFFSET 0x8C
+#define THC_I2C_IC_DMA_RDLR_OFFSET 0x90
+#define THC_I2C_IC_SDA_SETUP_OFFSET 0x94
+#define THC_I2C_IC_ACK_GENERAL_CALL_OFFSET 0x98
+#define THC_I2C_IC_ENABLE_STATUS_OFFSET 0x9C
+#define THC_I2C_IC_FS_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_UFM_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_HS_SPKLEN_OFFSET 0xA4
+#define THC_I2C_IC_CLR_RESTART_DET_OFFSET 0xA8
+#define THC_I2C_IC_SCL_STUCK_AT_LOW_TIMEOUT_OFFSET 0xAC
+#define THC_I2C_IC_SDA_STUCK_AT_LOW_TIMEOUT_OFFSET 0xB0
+#define THC_I2C_IC_CLR_SCL_STUCK_DET_OFFSET 0xB4
+#define THC_I2C_IC_DEVICE_ID_OFFSET 0xB8
+#define THC_I2C_IC_SMBUS_CLK_LOW_SEXT_OFFSET 0xBC
+#define THC_I2C_IC_SMBUS_CLK_LOW_MEXT_OFFSET 0xC0
+#define THC_I2C_IC_SMBUS_THIGH_MAX_IDLE_COUNT_OFFSET 0xC4
+#define THC_I2C_IC_SMBUS_INTR_STAT_OFFSET 0xC8
+#define THC_I2C_IC_SMBUS_INTR_MASK_OFFSET 0xCC
+#define THC_I2C_IC_SMBUS_RAW_INTR_STAT_OFFSET 0xD0
+#define THC_I2C_IC_CLR_SMBUS_INTR_OFFSET 0xD4
+#define THC_I2C_IC_OPTIONAL_SAR_OFFSET 0xD8
+#define THC_I2C_IC_SMBUS_UDID_LSB_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD0_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD1_OFFSET 0xE0
+#define THC_I2C_IC_SMBUS_UDID_WORD2_OFFSET 0xE4
+#define THC_I2C_IC_SMBUS_UDID_WORD3_OFFSET 0xE8
+#define THC_I2C_IC_COMP_PARAM_1_OFFSET 0xF4
+#define THC_I2C_IC_COMP_VERSION_OFFSET 0xF8
+#define THC_I2C_IC_COMP_TYPE_OFFSET 0xFC
+
+/**
+ * THC I2C sub-system supported speed mode
+ */
+enum THC_I2C_SPEED_MODE {
+ THC_I2C_STANDARD = 1,
+ THC_I2C_FAST_AND_PLUS = 2,
+ THC_I2C_HIGH_SPEED = 3,
+};
+
+/* THC I2C sub-system register bits definition */
+#define THC_I2C_IC_ENABLE_ENABLE BIT(0)
+#define THC_I2C_IC_ENABLE_ABORT BIT(1)
+#define THC_I2C_IC_ENABLE_TX_CMD_BLOCK BIT(2)
+#define THC_I2C_IC_ENABLE_SDA_STUCK_RECOVERY_ENABLE BIT(3)
+#define THC_I2C_IC_ENABLE_SMBUS_CLK_RESET BIT(16)
+#define THC_I2C_IC_ENABLE_SMBUS_SUSPEND_EN BIT(17)
+#define THC_I2C_IC_ENABLE_SMBUS_ALERT_EN BIT(18)
+
+#define THC_I2C_IC_CON_MASTER_MODE BIT(0)
+#define THC_I2C_IC_CON_SPEED GENMASK(2, 1)
+#define THC_I2C_IC_CON_IC_10BITADDR_SLAVE BIT(3)
+#define THC_I2C_IC_CON_IC_10BITADDR_MASTER BIT(4)
+#define THC_I2C_IC_CON_IC_RESTART_EN BIT(5)
+#define THC_I2C_IC_CON_IC_SLAVE_DISABLE BIT(6)
+#define THC_I2C_IC_CON_STOP_DET_IFADDRESSED BIT(7)
+#define THC_I2C_IC_CON_TX_EMPTY_CTRL BIT(8)
+#define THC_I2C_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define THC_I2C_IC_CON_STOP_DET_IF_MASTER_ACTIVE BIT(10)
+#define THC_I2C_IC_CON_BUS_CLEAR_FEATURE_CTRL BIT(11)
+#define THC_I2C_IC_CON_OPTIONAL_SAR_CTRL BIT(16)
+#define THC_I2C_IC_CON_SMBUS_SLAVE_QUICK_EN BIT(17)
+#define THC_I2C_IC_CON_SMBUS_ARP_EN BIT(18)
+#define THC_I2C_IC_CON_SMBUS_PERSISTENT_SLV_ADDR_EN BIT(19)
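The speed-mode enum above plugs into the IC_CON SPEED field; here is a minimal sketch of composing an IC_CON value for a Fast-mode master (the bit selection is illustrative, not the driver's actual setup):

```c
#include <linux/bitfield.h>

/* Illustrative only: IC_CON value for a Fast/Fast+ mode I2C master */
static u32 thc_i2c_fast_master_con(void)
{
	return THC_I2C_IC_CON_MASTER_MODE |
	       THC_I2C_IC_CON_IC_SLAVE_DISABLE |
	       THC_I2C_IC_CON_IC_RESTART_EN |
	       FIELD_PREP(THC_I2C_IC_CON_SPEED, THC_I2C_FAST_AND_PLUS);
}
```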
+
+#define THC_I2C_IC_TAR_IC_TAR GENMASK(9, 0)
+#define THC_I2C_IC_TAR_GC_OR_START BIT(10)
+#define THC_I2C_IC_TAR_SPECIAL BIT(11)
+#define THC_I2C_IC_TAR_IC_10BITADDR_MASTER BIT(12)
+#define THC_I2C_IC_TAR_DEVICE_ID BIT(13)
+#define THC_I2C_IC_TAR_SMBUS_QUICK_CMD BIT(16)
+
+#define THC_I2C_IC_INTR_MASK_M_RX_UNDER BIT(0)
+#define THC_I2C_IC_INTR_MASK_M_RX_OVER BIT(1)
+#define THC_I2C_IC_INTR_MASK_M_RX_FULL BIT(2)
+#define THC_I2C_IC_INTR_MASK_M_TX_OVER BIT(3)
+#define THC_I2C_IC_INTR_MASK_M_TX_EMPTY BIT(4)
+#define THC_I2C_IC_INTR_MASK_M_RD_REQ BIT(5)
+#define THC_I2C_IC_INTR_MASK_M_TX_ABRT BIT(6)
+#define THC_I2C_IC_INTR_MASK_M_RX_DONE BIT(7)
+#define THC_I2C_IC_INTR_MASK_M_ACTIVITY BIT(8)
+#define THC_I2C_IC_INTR_MASK_M_STOP_DET BIT(9)
+#define THC_I2C_IC_INTR_MASK_M_START_DET BIT(10)
+#define THC_I2C_IC_INTR_MASK_M_GEN_CALL BIT(11)
+#define THC_I2C_IC_INTR_MASK_M_RESTART_DET BIT(12)
+#define THC_I2C_IC_INTR_MASK_M_MASTER_ON_HOLD BIT(13)
+#define THC_I2C_IC_INTR_MASK_M_SCL_STUCK_AT_LOW BIT(14)
+
+#define THC_I2C_IC_DMA_CR_RDMAE BIT(0)
+#define THC_I2C_IC_DMA_CR_TDMAE BIT(1)
+
+#endif /* _INTEL_THC_HW_H_ */
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
index 7ce9b5d641eb..d0cfd0d29926 100644
--- a/drivers/hid/surface-hid/Kconfig
+++ b/drivers/hid/surface-hid/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
menu "Surface System Aggregator Module HID support"
depends on SURFACE_AGGREGATOR
- depends on INPUT
config SURFACE_HID
tristate "HID transport driver for Surface System Aggregator Module"
@@ -39,4 +38,3 @@ endmenu
config SURFACE_HID_CORE
tristate
- select HID
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7c2032f7f44d..f3194767a45e 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -5,8 +5,7 @@ menu "USB HID support"
config USB_HID
tristate "USB HID transport layer"
default y
- depends on USB && INPUT
- select HID
+ depends on HID
help
Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 6f1443999d1d..1deacb4568cb 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -218,6 +218,14 @@ static inline __u32 wacom_s32tou(s32 value, __u8 n)
return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
}
+static inline u32 wacom_rescale(u32 value, u32 in_max, u32 out_max)
+{
+ if (in_max == 0 || out_max == 0)
+ return 0;
+ value = clamp(value, 0, in_max);
+ return DIV_ROUND_CLOSEST(value * out_max, in_max);
+}
+
extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 9843b52bd017..8125383932ec 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1084,6 +1084,17 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
mutex_lock(&wacom->lock);
*dest = value & 0x7f;
+ for (unsigned int i = 0; i < wacom->led.count; i++) {
+ struct wacom_group_leds *group = &wacom->led.groups[i];
+
+ for (unsigned int j = 0; j < group->count; j++) {
+ if (dest == &wacom->led.llv)
+ group->leds[j].llv = *dest;
+ else if (dest == &wacom->led.hlv)
+ group->leds[j].hlv = *dest;
+ }
+ }
+
err = wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -1302,10 +1313,10 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
struct wacom *wacom = led->wacom;
if (wacom->led.max_hlv)
- return led->hlv * LED_FULL / wacom->led.max_hlv;
+ return wacom_rescale(led->hlv, wacom->led.max_hlv, LED_FULL);
if (wacom->led.max_llv)
- return led->llv * LED_FULL / wacom->led.max_llv;
+ return wacom_rescale(led->llv, wacom->led.max_llv, LED_FULL);
/* device doesn't support brightness tuning */
return LED_FULL;
@@ -1337,8 +1348,8 @@ static int wacom_led_brightness_set(struct led_classdev *cdev,
goto out;
}
- led->llv = wacom->led.llv = wacom->led.max_llv * brightness / LED_FULL;
- led->hlv = wacom->led.hlv = wacom->led.max_hlv * brightness / LED_FULL;
+ led->llv = wacom->led.llv = wacom_rescale(brightness, LED_FULL, wacom->led.max_llv);
+ led->hlv = wacom->led.hlv = wacom_rescale(brightness, LED_FULL, wacom->led.max_hlv);
wacom->led.groups[led->group].select = led->id;
@@ -1370,17 +1381,6 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
if (!name)
return -ENOMEM;
- if (!read_only) {
- led->trigger.name = name;
- error = devm_led_trigger_register(dev, &led->trigger);
- if (error) {
- hid_err(wacom->hdev,
- "failed to register LED trigger %s: %d\n",
- led->cdev.name, error);
- return error;
- }
- }
-
led->group = group;
led->id = id;
led->wacom = wacom;
@@ -1397,6 +1397,19 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
led->cdev.brightness_set = wacom_led_readonly_brightness_set;
}
+ if (!read_only) {
+ led->trigger.name = name;
+ if (id == wacom->led.groups[group].select)
+ led->trigger.brightness = wacom_leds_brightness_get(led);
+ error = devm_led_trigger_register(dev, &led->trigger);
+ if (error) {
+ hid_err(wacom->hdev,
+ "failed to register LED trigger %s: %d\n",
+ led->cdev.name, error);
+ return error;
+ }
+ }
+
error = devm_led_classdev_register(dev, &led->cdev);
if (error) {
hid_err(wacom->hdev,
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 5501a560fb07..b60bfafc6a8f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -4946,6 +4946,10 @@ static const struct wacom_features wacom_features_0x94 =
HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
.driver_data = (kernel_ulong_t)&wacom_features_##prod
+#define PCI_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
+
#define USB_DEVICE_LENOVO(prod) \
HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
.driver_data = (kernel_ulong_t)&wacom_features_##prod
@@ -5115,6 +5119,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ I2C_DEVICE_WACOM(HID_ANY_ID) },
+ { PCI_DEVICE_WACOM(HID_ANY_ID) },
{ BT_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 3c6011a48dab..6e084c207414 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -944,16 +944,6 @@ void vmbus_initiate_unload(bool crash)
vmbus_wait_for_unload();
}
-static void check_ready_for_resume_event(void)
-{
- /*
- * If all the old primary channels have been fixed up, then it's safe
- * to resume.
- */
- if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
- complete(&vmbus_connection.ready_for_resume_event);
-}
-
static void vmbus_setup_channel_state(struct vmbus_channel *channel,
struct vmbus_channel_offer_channel *offer)
{
@@ -1109,8 +1099,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
/* Add the channel back to the array of channels. */
vmbus_channel_map_relid(oldchannel);
- check_ready_for_resume_event();
-
mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
@@ -1296,13 +1284,28 @@ EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
* vmbus_onoffers_delivered -
- * This is invoked when all offers have been delivered.
+ * The CHANNELMSG_ALLOFFERS_DELIVERED message arrives after all
+ * boot-time offers are delivered. A boot-time offer is for the primary
+ * channel for any virtual hardware configured in the VM at the time it boots.
+ * Boot-time offers include offers for physical devices assigned to the VM
+ * via Hyper-V's Discrete Device Assignment (DDA) functionality that are
+ * handled as virtual PCI devices in Linux (e.g., NVMe devices and GPUs).
+ * Boot-time offers do not include offers for VMBus sub-channels. Because
+ * devices can be hot-added to the VM after it is booted, additional channel
+ * offers that aren't boot-time offers can be received at any time after the
+ * all-offers-delivered message.
*
- * Nothing to do here.
+ * SR-IOV NIC Virtual Functions (VFs) assigned to a VM are not considered
+ * to be assigned to the VM at boot-time, and offers for VFs may occur after
+ * the all-offers-delivered message. VFs are optional accelerators to the
+ * synthetic VMBus NIC and are effectively hot-added only after the VMBus
+ * NIC channel is opened (once it knows the guest can support it, via the
+ * sriov bit in the netvsc protocol).
*/
static void vmbus_onoffers_delivered(
struct vmbus_channel_message_header *hdr)
{
+ complete(&vmbus_connection.all_offers_delivered_event);
}
/*
@@ -1578,7 +1581,8 @@ void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
}
/*
- * vmbus_request_offers - Send a request to get all our pending offers.
+ * vmbus_request_offers - Send a request to get all our pending offers
+ * and wait for all boot-time offers to arrive.
*/
int vmbus_request_offers(void)
{
@@ -1596,6 +1600,10 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+ /*
+ * This REQUESTOFFERS message will result in the host sending an all
+ * offers delivered message after all the boot-time offers are sent.
+ */
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
@@ -1607,6 +1615,29 @@ int vmbus_request_offers(void)
goto cleanup;
}
+ /*
+ * Wait for the host to send all boot-time offers.
+ * This is kept as a best-effort mechanism: a warning is
+ * printed if a timeout occurs, and execution resumes.
+ */
+ if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
+ secs_to_jiffies(60))) {
+ pr_warn("timed out waiting for all boot-time offers to be delivered.\n");
+ }
+
+ /*
+ * Flush handling of offer messages (which may initiate work on
+ * other work queues).
+ */
+ flush_workqueue(vmbus_connection.work_queue);
+
+ /*
+ * Flush workqueue for processing the incoming offers. Subchannel
+ * offers and their processing can happen later, so there is no need to
+ * flush that workqueue here.
+ */
+ flush_workqueue(vmbus_connection.handle_primary_chan_wq);
+
cleanup:
kfree(msginfo);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f001ae880e1d..8351360bba16 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -34,8 +34,8 @@ struct vmbus_connection vmbus_connection = {
.ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
- .ready_for_resume_event = COMPLETION_INITIALIZER(
- vmbus_connection.ready_for_resume_event),
+ .all_offers_delivered_event = COMPLETION_INITIALIZER(
+ vmbus_connection.all_offers_delivered_event),
};
EXPORT_SYMBOL_GPL(vmbus_connection);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index a99112e6f0b8..fec2f18679e3 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -28,7 +28,7 @@
#include <linux/sizes.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
@@ -766,16 +766,18 @@ static void hv_online_page(struct page *pg, unsigned int order)
struct hv_hotadd_state *has;
unsigned long pfn = page_to_pfn(pg);
- guard(spinlock_irqsave)(&dm_device.ha_lock);
- list_for_each_entry(has, &dm_device.ha_region_list, list) {
- /* The page belongs to a different HAS. */
- if (pfn < has->start_pfn ||
- (pfn + (1UL << order) > has->end_pfn))
- continue;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /* The page belongs to a different HAS. */
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
+ continue;
- hv_bring_pgs_online(has, pfn, 1UL << order);
- break;
+ hv_bring_pgs_online(has, pfn, 1UL << order);
+ return;
+ }
}
+ generic_online_page(pg, order);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -1586,7 +1588,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
return -ENOSPC;
}
- hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->heat_type = HV_EXTMEM_HEAT_HINT_COLD_DISCARD;
hint->reserved = 0;
for_each_sg(sgl, sg, nents, i) {
union hv_gpa_page_range *range;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 7a35c82976e0..f2e6f55d6ca6 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
/*
@@ -141,7 +141,7 @@ static int sysctl_record_panic_msg = 1;
* sysctl option to allow the user to control whether kmsg data should be
* reported to Hyper-V on panic.
*/
-static struct ctl_table hv_ctl_table[] = {
+static const struct ctl_table hv_ctl_table[] = {
{
.procname = "hyperv_record_panic_msg",
.data = &sysctl_record_panic_msg,
@@ -278,6 +278,11 @@ static void hv_kmsg_dump_register(void)
}
}
+static inline bool hv_output_page_exists(void)
+{
+ return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
int __init hv_common_init(void)
{
int i;
@@ -340,19 +345,19 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_input_arg);
/* Allocate the per-CPU state for output arg for root */
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
hyperv_pcpu_output_arg = alloc_percpu(void *);
BUG_ON(!hyperv_pcpu_output_arg);
}
- hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+ hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
hv_common_free();
return -ENOMEM;
}
- for (i = 0; i < num_possible_cpus(); i++)
+ for (i = 0; i < nr_cpu_ids; i++)
hv_vp_index[i] = VP_INVAL;
return 0;
@@ -435,7 +440,7 @@ int hv_common_cpu_init(unsigned int cpu)
void **inputarg, **outputarg;
u64 msr_vp_index;
gfp_t flags;
- int pgcount = hv_root_partition ? 2 : 1;
+ const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
int ret;
@@ -453,7 +458,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (!mem)
return -ENOMEM;
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 7400a5a4d2bd..62795f6cbb00 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -27,7 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bde637a96c37..2e7f537d53cf 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -12,7 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 52cb744b4d7f..29780f3a7478 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -15,10 +15,10 @@
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
-#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include <hyperv/hvhdk.h>
#include "hv_trace.h"
@@ -287,18 +287,10 @@ struct vmbus_connection {
struct completion ready_for_suspend_event;
/*
- * The number of primary channels that should be "fixed up"
- * upon resume: these channels are re-offered upon resume, and some
- * fields of the channel offers (i.e. child_relid and connection_id)
- * can change, so the old offermsg must be fixed up, before the resume
- * callbacks of the VSC drivers start to further touch the channels.
+ * Completed once the host has offered all boot-time channels.
+ * Note that some channels may still be under process on a workqueue.
*/
- atomic_t nr_chan_fixup_on_resume;
- /*
- * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
- * drop to zero.
- */
- struct completion ready_for_resume_event;
+ struct completion all_offers_delivered_event;
};
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2892b8da20a5..6e55a1a2613d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2262,12 +2262,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
+
+ /*
+ * If all bytes of the MMIO range to be released are within the
+ * special case fb_mmio shadow region, skip releasing the shadow
+ * region since no corresponding __request_region() was done
+ * in vmbus_allocate_mmio().
+ */
+ if (fb_mmio && start >= fb_mmio->start &&
+ (start + size - 1 <= fb_mmio->end))
+ goto skip_shadow_release;
+
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
+
+skip_shadow_release:
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);
@@ -2427,11 +2440,6 @@ static int vmbus_bus_suspend(struct device *dev)
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
- pr_err("Can not suspend due to a previous failed resuming\n");
- return -EBUSY;
- }
-
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2456,22 +2464,18 @@ static int vmbus_bus_suspend(struct device *dev)
pr_err("Sub-channel not deleted!\n");
WARN_ON_ONCE(1);
}
-
- atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
}
mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_initiate_unload(false);
- /* Reset the event for the next resume. */
- reinit_completion(&vmbus_connection.ready_for_resume_event);
-
return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
+ struct vmbus_channel *channel;
struct vmbus_channel_msginfo *msginfo;
size_t msgsize;
int ret;
@@ -2502,13 +2506,23 @@ static int vmbus_bus_resume(struct device *dev)
if (ret != 0)
return ret;
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
-
vmbus_request_offers();
- if (wait_for_completion_timeout(
- &vmbus_connection.ready_for_resume_event, secs_to_jiffies(10)) == 0)
- pr_err("Some vmbus device is missing after suspending?\n");
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ continue;
+
+ /* hvsock channels are not expected to be present. */
+ if (is_hvsock_channel(channel))
+ continue;
+
+ pr_err("channel %pUl/%pUl not present after resume.\n",
+ &channel->offermsg.offer.if_type,
+ &channel->offermsg.offer.if_instance);
+ /* ToDo: Cleanup these channels here */
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index dd376602f3f1..4cbaba15d86e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,7 +324,7 @@ config SENSORS_K8TEMP
config SENSORS_K10TEMP
tristate "AMD Family 10h+ temperature sensor"
- depends on X86 && PCI && AMD_NB
+ depends on X86 && PCI && AMD_NODE
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
@@ -413,7 +413,7 @@ config SENSORS_ASPEED
will be called aspeed_pwm_tacho.
config SENSORS_ASPEED_G6
- tristate "ASPEED g6 PWM and Fan tach driver"
+ tristate "ASPEED G6 PWM and Fan tach driver"
depends on ARCH_ASPEED || COMPILE_TEST
depends on PWM
help
@@ -421,7 +421,7 @@ config SENSORS_ASPEED_G6
controllers.
This driver can also be built as a module. If so, the module
- will be called aspeed_pwm_tacho.
+ will be called aspeed_g6_pwm_tach.
config SENSORS_ATXP1
tristate "Attansic ATXP1 VID controller"
@@ -1412,7 +1412,9 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
+ depends on I3C || !I3C
select REGMAP_I2C
+ select REGMAP_I3C if I3C
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1822,6 +1824,18 @@ config SENSORS_PWM_FAN
This driver can also be built as a module. If so, the module
will be called pwm-fan.
+config SENSORS_QNAP_MCU_HWMON
+ tristate "QNAP MCU hardware monitoring"
+ depends on MFD_QNAP_MCU
+ depends on THERMAL || THERMAL=n
+ help
+ Say yes here to enable support for fan and temperature sensor
+ connected to a QNAP MCU, as found in a number of QNAP network
+ attached storage devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called qnap-mcu-hwmon.
+
config SENSORS_RASPBERRYPI_HWMON
tristate "Raspberry Pi voltage monitor"
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b827b92f2a78..b7ef0f0562d3 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -189,6 +189,7 @@ obj-$(CONFIG_SENSORS_POWERZ) += powerz.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
+obj-$(CONFIG_SENSORS_QNAP_MCU_HWMON) += qnap-mcu-hwmon.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f1c9d97ad21..44afb07409a4 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -84,6 +84,7 @@ struct acpi_power_meter_resource {
u64 power;
u64 cap;
u64 avg_interval;
+ bool power_alarm;
int sensors_valid;
unsigned long sensors_last_updated;
struct sensor_device_attribute sensors[NUM_SENSORS];
@@ -292,8 +293,8 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ unsigned long temp, trip_bk;
int res;
- unsigned long temp;
res = kstrtoul(buf, 10, &temp);
if (res)
@@ -301,13 +302,15 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
temp = DIV_ROUND_CLOSEST(temp, 1000);
- mutex_lock(&resource->lock);
+ guard(mutex)(&resource->lock);
+
+ trip_bk = resource->trip[attr->index - 7];
resource->trip[attr->index - 7] = temp;
res = set_acpi_trip(resource);
- mutex_unlock(&resource->lock);
-
- if (res)
+ if (res) {
+ resource->trip[attr->index - 7] = trip_bk;
return res;
+ }
return count;
}
@@ -396,6 +399,9 @@ static ssize_t show_val(struct device *dev,
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
u64 val = 0;
+ int ret;
+
+ guard(mutex)(&resource->lock);
switch (attr->index) {
case 0:
@@ -423,10 +429,17 @@ static ssize_t show_val(struct device *dev,
val = 0;
break;
case 6:
- if (resource->power > resource->cap)
- val = 1;
- else
- val = 0;
+ ret = update_meter(resource);
+ if (ret)
+ return ret;
+ /* Update the cap here if cap-change notifications are not supported. */
+ if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
+ ret = update_cap(resource);
+ if (ret)
+ return ret;
+ }
+ val = resource->power_alarm || resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
break;
case 7:
case 8:
@@ -682,7 +695,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
/* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res != -ENODEV)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
@@ -847,12 +860,20 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
break;
case METER_NOTIFY_CAP:
+ mutex_lock(&resource->lock);
+ res = update_cap(resource);
+ if (res)
+ dev_err_once(&device->dev, "update cap failed when capping value is changed.\n");
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
break;
case METER_NOTIFY_INTERVAL:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
break;
case METER_NOTIFY_CAPPING:
+ mutex_lock(&resource->lock);
+ resource->power_alarm = true;
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
dev_info(&device->dev, "Capping in progress.\n");
break;
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 7802bbf5f958..59424103f634 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -22,11 +22,13 @@
*/
#define AD7314_TEMP_MASK 0x7FE0
#define AD7314_TEMP_SHIFT 5
+#define AD7314_LEADING_ZEROS_MASK BIT(15)
/*
* ADT7301 and ADT7302 temperature masks
*/
#define ADT7301_TEMP_MASK 0x3FFF
+#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14))
enum ad7314_variant {
adt7301,
@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
+ if (ret & AD7314_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
data = sign_extend32(data, 9);
return sprintf(buf, "%d\n", 250 * data);
case adt7301:
case adt7302:
+ if (ret & ADT7301_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
/*
* Documented as a 13 bit twos complement register
* with a sign bit - which is a 14 bit 2's complement
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 9555366aeaf0..43e54dc513da 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -250,6 +250,8 @@ static const struct ec_sensor_info sensors_family_amd_600[] = {
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -477,6 +479,15 @@ static const struct ec_board_info board_info_zenith_ii_extreme = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
{ \
.matches = { \
@@ -538,6 +549,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
&board_info_zenith_ii_extreme),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS",
+ &board_info_tuf_gaming_x670e_plus),
{},
};
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1dc7e24fe4c5..c80350e499e9 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -17,6 +17,7 @@
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/acpi.h>
+#include <linux/string_choices.h>
#define ATK_HID "ATK0110"
@@ -441,7 +442,7 @@ static void atk_print_sensor(struct atk_data *data, union acpi_object *obj)
flags->integer.value,
name->string.pointer,
limit1->integer.value, limit2->integer.value,
- enable->integer.value ? "enabled" : "disabled");
+ str_enabled_disabled(enable->integer.value));
#endif
}
@@ -1074,8 +1075,7 @@ static int atk_ec_enabled(struct atk_data *data)
err = -EIO;
} else {
err = (buf->value != 0);
- dev_dbg(dev, "EC is %sabled\n",
- err ? "en" : "dis");
+ dev_dbg(dev, "EC is %s\n", str_enabled_disabled(err));
}
ACPI_FREE(obj);
@@ -1096,18 +1096,15 @@ static int atk_ec_ctl(struct atk_data *data, int enable)
obj = atk_sitm(data, &sitm);
if (IS_ERR(obj)) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
return PTR_ERR(obj);
}
ec_ret = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
if (ec_ret->flags == 0) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
err = -EIO;
} else {
- dev_info(dev, "EC %sabled\n",
- enable ? "en" : "dis");
+ dev_info(dev, "EC %s\n", str_enabled_disabled(enable));
}
ACPI_FREE(obj);
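
All three hunks replace hand-rolled "%sable" / "en"/"dis" constructions with helpers from <linux/string_choices.h>. A minimal userspace stand-in with the same semantics (one practical benefit: the full words "enabled"/"disabled" become greppable again):

#include <stdbool.h>
#include <stdio.h>

static inline const char *str_enable_disable(bool v)
{
	return v ? "enable" : "disable";
}

static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	printf("EC is %s\n", str_enabled_disabled(true));
	printf("Failed to %s the EC\n", str_enable_disable(false));
	return 0;
}
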
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
index edf454474f11..9d071f7ca9d2 100644
--- a/drivers/hwmon/chipcap2.c
+++ b/drivers/hwmon/chipcap2.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -556,55 +557,40 @@ static int cc2_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret = 0;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (type) {
case hwmon_temp:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity_min:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
case hwmon_humidity_min_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
case hwmon_humidity_max:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
case hwmon_humidity_max_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
case hwmon_humidity_min_alarm:
- ret = cc2_humidity_min_alarm_status(data, val);
- break;
+ return cc2_humidity_min_alarm_status(data, val);
case hwmon_humidity_max_alarm:
- ret = cc2_humidity_max_alarm_status(data, val);
- break;
+ return cc2_humidity_max_alarm_status(data, val);
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret;
u16 arg;
u8 cmd;
@@ -614,41 +600,28 @@ static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < 0 || val > CC2_RH_MAX)
return -EINVAL;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (attr) {
case hwmon_humidity_min:
cmd = CC2_W_ALARM_L_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_min_hyst:
cmd = CC2_W_ALARM_L_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max:
cmd = CC2_W_ALARM_H_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max_hyst:
cmd = CC2_W_ALARM_H_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_request_ready_irq(struct cc2_data *data, struct device *dev)
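
The conversion above relies on scope-based cleanup from <linux/cleanup.h>: guard(mutex)(&lock) acquires the mutex and releases it automatically when the enclosing scope exits, which is what allows every switch arm to return directly. A minimal kernel-style sketch of the pattern (the demo_* names are illustrative, not from this patch):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static long demo_value;

static int demo_read(int which, long *val)
{
	guard(mutex)(&demo_lock);	/* dropped automatically on every return */

	switch (which) {
	case 0:
		*val = demo_value;
		return 0;
	default:
		return -EOPNOTSUPP;	/* no unlock needed on error paths either */
	}
}
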
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index f5bdf842040e..cd00adaad1b4 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
{
+ .ident = "Dell XPS 13 9370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 9370"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
+ {
.ident = "Dell Optiplex 7000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 6bdd21aa005a..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -165,6 +165,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
enum req_op op;
+ int err;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -192,8 +193,11 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
+ if (err > 0)
+ err = -EIO;
+ return err;
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
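
scsi_execute_cmd() returns a negative errno when the command could not be issued, 0 on success, and a positive SCSI result word when the device reported a failure; the hunk folds the positive case into -EIO so callers see a plain errno, and also raises the command timeout from 1 s (HZ) to 10 s. The normalization reduces to:

#include <errno.h>

/* err < 0: submission errno; err == 0: success; err > 0: SCSI result word */
static inline int normalize_scsi_result(int err)
{
	return err > 0 ? -EIO : err;
}
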
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index bbb9cc44e29f..9703d60e9bbf 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -158,11 +158,6 @@ static umode_t hwmon_is_visible(const struct hwmon_ops *ops,
/* Thermal zone handling */
-/*
- * The complex conditional is necessary to avoid a cyclic dependency
- * between hwmon and thermal_sys modules.
- */
-#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct hwmon_thermal_data *tdata = thermal_zone_device_priv(tz);
@@ -268,6 +263,9 @@ static int hwmon_thermal_register_sensors(struct device *dev)
void *drvdata = dev_get_drvdata(dev);
int i;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return 0;
+
for (i = 1; info[i]; i++) {
int j;
@@ -296,6 +294,9 @@ static void hwmon_thermal_notify(struct device *dev, int index)
struct hwmon_device *hwdev = to_hwmon_device(dev);
struct hwmon_thermal_data *tzdata;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return;
+
list_for_each_entry(tzdata, &hwdev->tzdata, node) {
if (tzdata->index == index) {
thermal_zone_device_update(tzdata->tzd,
@@ -304,16 +305,6 @@ static void hwmon_thermal_notify(struct device *dev, int index)
}
}
-#else
-static int hwmon_thermal_register_sensors(struct device *dev)
-{
- return 0;
-}
-
-static void hwmon_thermal_notify(struct device *dev, int index) { }
-
-#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
-
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
if (type == hwmon_in || type == hwmon_intrusion)
@@ -341,7 +332,7 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
static DEFINE_MUTEX(hwmon_pec_mutex);
-static int hwmon_match_device(struct device *dev, void *data)
+static int hwmon_match_device(struct device *dev, const void *data)
{
return dev->class == &hwmon_class;
}
@@ -1179,6 +1170,12 @@ devm_hwmon_device_register_with_info(struct device *dev, const char *name,
if (!dev)
return ERR_PTR(-EINVAL);
+ if (!name) {
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return ERR_CAST(name);
+ }
+
ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
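
Dropping the #ifdef CONFIG_THERMAL_OF block together with its stub definitions in favour of IS_ENABLED() checks keeps both branches visible to the compiler, so they stay parsed and type-checked in every configuration, while constant folding still discards the dead code at build time. The general shape, sketched with placeholder names:

/* CONFIG_DEMO_FEATURE and demo_feature_register() are placeholders. */
static int demo_feature_register(struct device *dev);

static int demo_register_optional(struct device *dev)
{
	if (!IS_ENABLED(CONFIG_DEMO_FEATURE))
		return 0;	/* folded away, yet still parsed and type-checked */

	return demo_feature_register(dev);
}
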
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index f9edcfd164c2..3f9b4520b53e 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -486,7 +486,7 @@ static int isl28022_probe(struct i2c_client *client)
}
static const struct i2c_device_id isl28022_ids[] = {
- { "isl28022", 0},
+ { "isl28022" },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, isl28022_ids);
@@ -506,8 +506,7 @@ static struct i2c_driver isl28022_driver = {
.id_table = isl28022_ids,
};
-static int __init
-isl28022_init(void)
+static int __init isl28022_init(void)
{
int err;
@@ -519,15 +518,13 @@ isl28022_init(void)
debugfs_remove_recursive(isl28022_debugfs_root);
return err;
}
+module_init(isl28022_init);
-static void __exit
-isl28022_exit(void)
+static void __exit isl28022_exit(void)
{
i2c_del_driver(&isl28022_driver);
debugfs_remove_recursive(isl28022_debugfs_root);
}
-
-module_init(isl28022_init);
module_exit(isl28022_exit);
MODULE_AUTHOR("Carsten Spieß <mail@carsten-spiess.de>");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 7dc19c5d62ac..d0b4cc9a5011 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -150,6 +150,11 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
+static u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) - AMD_NODE0_PCI_SLOT;
+}
+
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
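
The local amd_pci_dev_to_node_id() replaces the helper that used to come with <asm/amd_nb.h>: AMD node devices occupy consecutive PCI device numbers starting at slot 0x18 (AMD_NODE0_PCI_SLOT in <asm/amd_node.h>), so the node id is simply the slot offset. A standalone check of the arithmetic:

#include <stdio.h>

#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define AMD_NODE0_PCI_SLOT	0x18

int main(void)
{
	unsigned int devfn;

	/* function 3 of nodes 0 and 1: devfn 0xc3 and 0xcb */
	for (devfn = 0xc3; devfn <= 0xcb; devfn += 8)
		printf("devfn %#x -> node %u\n", devfn,
		       PCI_SLOT(devfn) - AMD_NODE0_PCI_SLOT);
	return 0;
}
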
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 2c2205aec7d4..d95a3c6c245c 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
+#include <linux/i3c/device.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -38,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ p3t1755,
pct2075,
stds75,
stlm75,
@@ -104,17 +106,15 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
#define LM75_REG_MAX 0x03
#define PCT2075_REG_IDLE 0x04
-/* Each client has this additional data */
struct lm75_data {
- struct i2c_client *client;
struct regmap *regmap;
- struct regulator *vs;
u16 orig_conf;
- u16 current_conf;
u8 resolution; /* In bits, 9 to 16 */
unsigned int sample_time; /* In ms */
enum lm75_type kind;
const struct lm75_params *params;
+ u8 reg_buf[1];
+ u8 val_buf[3];
};
/*-----------------------------------------------------------------------*/
@@ -222,6 +222,13 @@ static const struct lm75_params device_params[] = {
.default_resolution = 9,
.default_sample_time = MSEC_PER_SEC / 18,
},
+ [p3t1755] = {
+ .clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
+ .default_resolution = 12,
+ .default_sample_time = 55,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ },
[pct2075] = {
.default_resolution = 11,
.default_sample_time = MSEC_PER_SEC / 10,
@@ -276,6 +283,7 @@ static const struct lm75_params device_params[] = {
.default_sample_time = 125,
.num_sample_times = 4,
.sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ .alarm = true,
},
[tmp175] = {
.set_mask = 3 << 5, /* 12-bit mode */
@@ -332,41 +340,11 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
-static int lm75_write_config(struct lm75_data *data, u16 set_mask,
- u16 clr_mask)
-{
- unsigned int value;
-
- clr_mask |= LM75_SHUTDOWN << (8 * data->params->config_reg_16bits);
- value = data->current_conf & ~clr_mask;
- value |= set_mask;
-
- if (data->current_conf != value) {
- s32 err;
- if (data->params->config_reg_16bits)
- err = regmap_write(data->regmap, LM75_REG_CONF, value);
- else
- err = i2c_smbus_write_byte_data(data->client,
- LM75_REG_CONF,
- value);
- if (err)
- return err;
- data->current_conf = value;
- }
- return 0;
-}
-
-static int lm75_read_config(struct lm75_data *data)
+static inline int lm75_write_config(struct lm75_data *data, u16 set_mask,
+ u16 clr_mask)
{
- int ret;
- unsigned int status;
-
- if (data->params->config_reg_16bits) {
- ret = regmap_read(data->regmap, LM75_REG_CONF, &status);
- return ret ? ret : status;
- }
-
- return i2c_smbus_read_byte_data(data->client, LM75_REG_CONF);
+ return regmap_update_bits(data->regmap, LM75_REG_CONF,
+ clr_mask | LM75_SHUTDOWN, set_mask);
}
static irqreturn_t lm75_alarm_handler(int irq, void *private)
@@ -418,7 +396,8 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
if (attr == hwmon_temp_alarm) {
switch (data->kind) {
case as6200:
- *val = (regval >> 5) & 0x1;
+ case tmp112:
+ *val = (regval >> 13) & 0x1;
break;
default:
return -EINVAL;
@@ -469,7 +448,6 @@ static int lm75_write_temp(struct device *dev, u32 attr, long temp)
static int lm75_update_interval(struct device *dev, long val)
{
struct lm75_data *data = dev_get_drvdata(dev);
- unsigned int reg;
u8 index;
s32 err;
@@ -489,19 +467,14 @@ static int lm75_update_interval(struct device *dev, long val)
break;
case tmp112:
case as6200:
- err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
- if (err < 0)
- return err;
- reg &= ~0x00c0;
- reg |= (3 - index) << 6;
- err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+ err = regmap_update_bits(data->regmap, LM75_REG_CONF,
+ 0xc000, (3 - index) << 14);
if (err < 0)
return err;
data->sample_time = data->params->sample_times[index];
break;
case pct2075:
- err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
- index + 1);
+ err = regmap_write(data->regmap, PCT2075_REG_IDLE, index + 1);
if (err)
return err;
data->sample_time = data->params->sample_times[index];
@@ -598,6 +571,115 @@ static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
}
+static int lm75_i2c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ if (reg == LM75_REG_CONF) {
+ if (!data->params->config_reg_16bits)
+ ret = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
+ else
+ ret = i2c_smbus_read_word_data(client, LM75_REG_CONF);
+ } else {
+ ret = i2c_smbus_read_word_swapped(client, reg);
+ }
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+}
+
+static int lm75_i2c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits))
+ return i2c_smbus_write_byte_data(client, reg, val);
+ else if (reg == LM75_REG_CONF)
+ return i2c_smbus_write_word_data(client, reg, val);
+ return i2c_smbus_write_word_swapped(client, reg, val);
+}
+
+static const struct regmap_bus lm75_i2c_regmap_bus = {
+ .reg_read = lm75_i2c_reg_read,
+ .reg_write = lm75_i2c_reg_write,
+};
+
+static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 1,
+ .data.out = data->reg_buf,
+ },
+ {
+ .rnw = true,
+ .len = 2,
+ .data.in = data->val_buf,
+ },
+ };
+ int ret;
+
+ data->reg_buf[0] = reg;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ xfers[1].len--;
+
+ ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ if (ret < 0)
+ return ret;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ *val = data->val_buf[0];
+ else if (reg == LM75_REG_CONF)
+ *val = data->val_buf[0] | (data->val_buf[1] << 8);
+ else
+ *val = data->val_buf[1] | (data->val_buf[0] << 8);
+
+ return 0;
+}
+
+static int lm75_i3c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 3,
+ .data.out = data->val_buf,
+ },
+ };
+
+ data->val_buf[0] = reg;
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits)) {
+ xfers[0].len--;
+ data->val_buf[1] = val & 0xff;
+ } else if (reg == LM75_REG_CONF) {
+ data->val_buf[1] = val & 0xff;
+ data->val_buf[2] = (val >> 8) & 0xff;
+ } else {
+ data->val_buf[1] = (val >> 8) & 0xff;
+ data->val_buf[2] = val & 0xff;
+ }
+
+ return i3c_device_do_priv_xfers(i3cdev, xfers, 1);
+}
+
+static const struct regmap_bus lm75_i3c_regmap_bus = {
+ .reg_read = lm75_i3c_reg_read,
+ .reg_write = lm75_i3c_reg_write,
+};
+
static const struct regmap_config lm75_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
@@ -610,46 +692,33 @@ static const struct regmap_config lm75_regmap_config = {
.use_single_write = true,
};
-static void lm75_disable_regulator(void *data)
-{
- struct lm75_data *lm75 = data;
-
- regulator_disable(lm75->vs);
-}
-
static void lm75_remove(void *data)
{
struct lm75_data *lm75 = data;
- struct i2c_client *client = lm75->client;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+ regmap_write(lm75->regmap, LM75_REG_CONF, lm75->orig_conf);
}
-static int lm75_probe(struct i2c_client *client)
+static int lm75_generic_probe(struct device *dev, const char *name,
+ enum lm75_type kind, int irq, struct regmap *regmap)
{
- struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
- return -EIO;
-
data = devm_kzalloc(dev, sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
- data->kind = (uintptr_t)i2c_get_match_data(client);
+ /* needed by custom regmap callbacks */
+ dev_set_drvdata(dev, data);
- data->vs = devm_regulator_get(dev, "vs");
- if (IS_ERR(data->vs))
- return PTR_ERR(data->vs);
+ data->kind = kind;
+ data->regmap = regmap;
- data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
- if (IS_ERR(data->regmap))
- return PTR_ERR(data->regmap);
+ err = devm_regulator_get_enable(dev, "vs");
+ if (err)
+ return err;
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
@@ -661,25 +730,11 @@ static int lm75_probe(struct i2c_client *client)
data->sample_time = data->params->default_sample_time;
data->resolution = data->params->default_resolution;
- /* Enable the power */
- err = regulator_enable(data->vs);
- if (err) {
- dev_err(dev, "failed to enable regulator: %d\n", err);
- return err;
- }
-
- err = devm_add_action_or_reset(dev, lm75_disable_regulator, data);
+ /* Cache original configuration */
+ err = regmap_read(data->regmap, LM75_REG_CONF, &status);
if (err)
return err;
-
- /* Cache original configuration */
- status = lm75_read_config(data);
- if (status < 0) {
- dev_dbg(dev, "Can't read config? %d\n", status);
- return status;
- }
data->orig_conf = status;
- data->current_conf = status;
err = lm75_write_config(data, data->params->set_mask,
data->params->clr_mask);
@@ -690,20 +745,19 @@ static int lm75_probe(struct i2c_client *client)
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
- data, &lm75_chip_info,
- NULL);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, name, data,
+ &lm75_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- if (client->irq) {
+ if (irq) {
if (data->params->alarm) {
err = devm_request_threaded_irq(dev,
- client->irq,
+ irq,
NULL,
&lm75_alarm_handler,
IRQF_ONESHOT,
- client->name,
+ name,
hwmon_dev);
if (err)
return err;
@@ -713,12 +767,29 @@ static int lm75_probe(struct i2c_client *client)
}
}
- dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
+ dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), name);
return 0;
}
-static const struct i2c_device_id lm75_ids[] = {
+static int lm75_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ regmap = devm_regmap_init(dev, &lm75_i2c_regmap_bus, client, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return lm75_generic_probe(dev, client->name, (uintptr_t)i2c_get_match_data(client),
+ client->irq, regmap);
+}
+
+static const struct i2c_device_id lm75_i2c_ids[] = {
{ "adt75", adt75, },
{ "as6200", as6200, },
{ "at30ts74", at30ts74, },
@@ -734,6 +805,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "p3t1755", p3t1755, },
{ "pct2075", pct2075, },
{ "stds75", stds75, },
{ "stlm75", stlm75, },
@@ -750,7 +822,38 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp1075", tmp1075, },
{ /* LIST END */ }
};
-MODULE_DEVICE_TABLE(i2c, lm75_ids);
+MODULE_DEVICE_TABLE(i2c, lm75_i2c_ids);
+
+struct lm75_i3c_device {
+ enum lm75_type type;
+ const char *name;
+};
+
+static const struct lm75_i3c_device lm75_i3c_p3t1755 = {
+ .name = "p3t1755",
+ .type = p3t1755,
+};
+
+static const struct i3c_device_id lm75_i3c_ids[] = {
+ I3C_DEVICE(0x011b, 0x152a, &lm75_i3c_p3t1755),
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i3c, lm75_i3c_ids);
+
+static int lm75_i3c_probe(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+ const struct lm75_i3c_device *id_data;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(dev, &lm75_i3c_regmap_bus, i3cdev, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ id_data = i3c_device_match_id(i3cdev, lm75_i3c_ids)->data;
+
+ return lm75_generic_probe(dev, id_data->name, id_data->type, 0, regmap);
+}
static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
@@ -814,6 +917,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,p3t1755",
+ .data = (void *)p3t1755
+ },
+ {
.compatible = "nxp,pct2075",
.data = (void *)pct2075
},
@@ -972,32 +1079,16 @@ static int lm75_detect(struct i2c_client *new_client,
#ifdef CONFIG_PM
static int lm75_suspend(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status | LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, LM75_SHUTDOWN);
}
static int lm75_resume(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status & ~LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, 0);
}
static const struct dev_pm_ops lm75_dev_pm_ops = {
@@ -1009,20 +1100,28 @@ static const struct dev_pm_ops lm75_dev_pm_ops = {
#define LM75_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct i2c_driver lm75_driver = {
+static struct i2c_driver lm75_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm75",
.of_match_table = of_match_ptr(lm75_of_match),
.pm = LM75_DEV_PM_OPS,
},
- .probe = lm75_probe,
- .id_table = lm75_ids,
+ .probe = lm75_i2c_probe,
+ .id_table = lm75_i2c_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
};
-module_i2c_driver(lm75_driver);
+static struct i3c_driver lm75_i3c_driver = {
+ .driver = {
+ .name = "lm75_i3c",
+ },
+ .probe = lm75_i3c_probe,
+ .id_table = lm75_i3c_ids,
+};
+
+module_i3c_i2c_driver(lm75_i3c_driver, &lm75_i2c_driver)
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
MODULE_DESCRIPTION("LM75 driver");
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
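
The one-character fix above matters because sign_extend32() can yield a negative reading while r_sense_uohm[] is u32: the usual arithmetic conversions then promote the signed dividend to unsigned before the division runs. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t delta_uv = -19075;	/* a negative sense-voltage reading */
	uint32_t r_sense_uohm = 1000;

	printf("%u\n", delta_uv / r_sense_uohm);		/* 4294948: promoted to unsigned */
	printf("%d\n", delta_uv / (int32_t)r_sense_uohm);	/* -19: the intended result */
	return 0;
}
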
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index f71615e06a8f..416ac02e9f74 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -175,9 +175,11 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_MSI2 0x200
#define NCT6683_CUSTOMER_ID_MSI3 0x207
+#define NCT6683_CUSTOMER_ID_MSI4 0x20d
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
+#define NCT6683_CUSTOMER_ID_ASROCK4 0x163e
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1227,12 +1229,16 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI3:
break;
+ case NCT6683_CUSTOMER_ID_MSI4:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
break;
case NCT6683_CUSTOMER_ID_ASROCK3:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK4:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index c243b51837d2..79bc67ffb998 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -42,6 +42,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#undef DEFAULT_SYMBOL_NAMESPACE
+#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -56,9 +59,6 @@
#include "lm75.h"
#include "nct6775.h"
-#undef DEFAULT_SYMBOL_NAMESPACE
-#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
-
#define USE_ALTERNATE
/* used to set data->name = nct6775_device_names[data->sio_kind] */
@@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
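
Moving the DEFAULT_SYMBOL_NAMESPACE definition above the includes is not cosmetic: <linux/export.h>, pulled in via <linux/module.h>, evaluates #ifdef DEFAULT_SYMBOL_NAMESPACE while it is being preprocessed, so a definition placed after the includes is never seen and the exports land in the default namespace. The required order, sketched with a placeholder namespace:

#undef DEFAULT_SYMBOL_NAMESPACE
#define DEFAULT_SYMBOL_NAMESPACE "DEMO_NS"

#include <linux/export.h>
#include <linux/module.h>

int demo_exported(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(demo_exported);	/* tagged with "DEMO_NS" */
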
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index b5352900463f..0d29c8f97ba7 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
};
static const struct ntc_compensation ncpXXxh103[] = {
- { .temp_c = -40, .ohm = 247565 },
- { .temp_c = -35, .ohm = 181742 },
- { .temp_c = -30, .ohm = 135128 },
- { .temp_c = -25, .ohm = 101678 },
- { .temp_c = -20, .ohm = 77373 },
- { .temp_c = -15, .ohm = 59504 },
- { .temp_c = -10, .ohm = 46222 },
- { .temp_c = -5, .ohm = 36244 },
- { .temp_c = 0, .ohm = 28674 },
- { .temp_c = 5, .ohm = 22878 },
- { .temp_c = 10, .ohm = 18399 },
- { .temp_c = 15, .ohm = 14910 },
- { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = -40, .ohm = 195652 },
+ { .temp_c = -35, .ohm = 148171 },
+ { .temp_c = -30, .ohm = 113347 },
+ { .temp_c = -25, .ohm = 87559 },
+ { .temp_c = -20, .ohm = 68237 },
+ { .temp_c = -15, .ohm = 53650 },
+ { .temp_c = -10, .ohm = 42506 },
+ { .temp_c = -5, .ohm = 33892 },
+ { .temp_c = 0, .ohm = 27219 },
+ { .temp_c = 5, .ohm = 22021 },
+ { .temp_c = 10, .ohm = 17926 },
+ { .temp_c = 15, .ohm = 14674 },
+ { .temp_c = 20, .ohm = 12081 },
{ .temp_c = 25, .ohm = 10000 },
- { .temp_c = 30, .ohm = 8271 },
- { .temp_c = 35, .ohm = 6883 },
- { .temp_c = 40, .ohm = 5762 },
- { .temp_c = 45, .ohm = 4851 },
- { .temp_c = 50, .ohm = 4105 },
- { .temp_c = 55, .ohm = 3492 },
- { .temp_c = 60, .ohm = 2985 },
- { .temp_c = 65, .ohm = 2563 },
- { .temp_c = 70, .ohm = 2211 },
- { .temp_c = 75, .ohm = 1915 },
- { .temp_c = 80, .ohm = 1666 },
- { .temp_c = 85, .ohm = 1454 },
- { .temp_c = 90, .ohm = 1275 },
- { .temp_c = 95, .ohm = 1121 },
- { .temp_c = 100, .ohm = 990 },
- { .temp_c = 105, .ohm = 876 },
- { .temp_c = 110, .ohm = 779 },
- { .temp_c = 115, .ohm = 694 },
- { .temp_c = 120, .ohm = 620 },
- { .temp_c = 125, .ohm = 556 },
+ { .temp_c = 30, .ohm = 8315 },
+ { .temp_c = 35, .ohm = 6948 },
+ { .temp_c = 40, .ohm = 5834 },
+ { .temp_c = 45, .ohm = 4917 },
+ { .temp_c = 50, .ohm = 4161 },
+ { .temp_c = 55, .ohm = 3535 },
+ { .temp_c = 60, .ohm = 3014 },
+ { .temp_c = 65, .ohm = 2586 },
+ { .temp_c = 70, .ohm = 2228 },
+ { .temp_c = 75, .ohm = 1925 },
+ { .temp_c = 80, .ohm = 1669 },
+ { .temp_c = 85, .ohm = 1452 },
+ { .temp_c = 90, .ohm = 1268 },
+ { .temp_c = 95, .ohm = 1110 },
+ { .temp_c = 100, .ohm = 974 },
+ { .temp_c = 105, .ohm = 858 },
+ { .temp_c = 110, .ohm = 758 },
+ { .temp_c = 115, .ohm = 672 },
+ { .temp_c = 120, .ohm = 596 },
+ { .temp_c = 125, .ohm = 531 },
};
/*
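
For context, the driver converts a measured resistance to a temperature by locating the bracketing rows of tables such as ncpXXxh103[] and interpolating linearly between them. A simplified standalone sketch of that lookup (the in-kernel fixed-point math is arranged slightly differently):

#include <limits.h>
#include <stdio.h>

struct ntc_compensation { int temp_c; unsigned int ohm; };

static const struct ntc_compensation tbl[] = {	/* excerpt of ncpXXxh103[] */
	{ .temp_c = 20, .ohm = 12081 },
	{ .temp_c = 25, .ohm = 10000 },
	{ .temp_c = 30, .ohm =  8315 },
};

static int ohm_to_mdegc(unsigned int ohm)
{
	unsigned int i;

	for (i = 0; i + 1 < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (ohm <= tbl[i].ohm && ohm >= tbl[i + 1].ohm)
			return tbl[i].temp_c * 1000 +
			       (int)(tbl[i].ohm - ohm) *
			       (tbl[i + 1].temp_c - tbl[i].temp_c) * 1000 /
			       (int)(tbl[i].ohm - tbl[i + 1].ohm);
	return INT_MIN;	/* out of table range */
}

int main(void)
{
	printf("%d\n", ohm_to_mdegc(11000));	/* 22597, i.e. ~22.6 degC */
	return 0;
}
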
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index 89761a9c8892..1e3749dfa598 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -30,7 +30,7 @@ struct p9_sbe_occ {
#define to_p9_sbe_occ(x) container_of((x), struct p9_sbe_occ, occ)
static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *battr, char *buf, loff_t pos,
+ const struct bin_attribute *battr, char *buf, loff_t pos,
size_t count)
{
ssize_t rc = 0;
@@ -48,7 +48,7 @@ static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
+static const BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp,
size_t resp_len)
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index d6762259dd69..fbe82d9852e0 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
return 0;
ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
- if (ret == -ENODATA) /* Use default or previous value */
- return 0;
if (ret)
return ret;
@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index f6d352841953..419469f40ba0 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -51,7 +51,7 @@ config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1281,
+ Devices ADM1075, ADM1272, ADM1273, ADM1275, ADM1276, ADM1278, ADM1281,
ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
@@ -85,6 +85,15 @@ config SENSORS_BPA_RS600
This driver can also be built as a module. If so, the module will
be called bpa-rs600.
+config SENSORS_CRPS
+ tristate "Intel Common Redundant Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the Intel
+ Common Redundant Power Supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called crps.
+
config SENSORS_DELTA_AHE50DC_FAN
tristate "Delta AHE-50DC fan control module"
help
@@ -251,7 +260,7 @@ config SENSORS_MAX15301
tristate "Maxim MAX15301"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX15301, as well as for Flex BMR461.
+ MAX15301, MAX15303, as well as for Flex BMR461.
This driver can also be built as a module. If so, the module will
be called max15301.
@@ -510,6 +519,23 @@ config SENSORS_TDA38640_REGULATOR
If you say yes here you get regulator support for Infineon
TDA38640 as regulator.
+config SENSORS_TPS25990
+ tristate "TI TPS25990"
+ help
+ If you say yes here you get hardware monitoring support for TI
+ TPS25990.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps25990.
+
+config SENSORS_TPS25990_REGULATOR
+ bool "Regulator support for TPS25990 and compatibles"
+ depends on SENSORS_TPS25990 && REGULATOR
+ default SENSORS_TPS25990
+ help
+ If you say yes here you get regulator support for Texas Instruments
+ TPS25990.
+
config SENSORS_TPS40422
tristate "TI TPS40422"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index d00bcc758b97..c7eb7739b7f8 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TDA38640) += tda38640.o
+obj-$(CONFIG_SENSORS_TPS25990) += tps25990.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
@@ -61,3 +62,4 @@ obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_XDPE152) += xdpe152c4.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
obj-$(CONFIG_SENSORS_PIM4328) += pim4328.o
+obj-$(CONFIG_SENSORS_CRPS) += crps.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 127593e10a03..7d175baa5de2 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,7 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -479,6 +479,7 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1272", adm1272 },
+ { "adm1273", adm1273 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
{ "adm1278", adm1278 },
@@ -555,9 +556,9 @@ static int adm1275_probe(struct i2c_client *client)
"Device mismatch: Configured %s, detected %s\n",
client->name, mid->name);
- if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
- mid->driver_data == adm1281 || mid->driver_data == adm1293 ||
- mid->driver_data == adm1294)
+ if (mid->driver_data == adm1272 || mid->driver_data == adm1273 ||
+ mid->driver_data == adm1278 || mid->driver_data == adm1281 ||
+ mid->driver_data == adm1293 || mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -630,6 +631,7 @@ static int adm1275_probe(struct i2c_client *client)
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1272:
+ case adm1273:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
diff --git a/drivers/hwmon/pmbus/crps.c b/drivers/hwmon/pmbus/crps.c
new file mode 100644
index 000000000000..164b33fed312
--- /dev/null
+++ b/drivers/hwmon/pmbus/crps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2024 IBM Corp.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+static const struct i2c_device_id crps_id[] = {
+ { "intel_crps185" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, crps_id);
+
+static struct pmbus_driver_info crps_info = {
+ .pages = 1,
+ /* PSU uses default linear data format. */
+ .func[0] = PMBUS_HAVE_PIN | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+};
+
+static int crps_probe(struct i2c_client *client)
+{
+ int rc;
+ struct device *dev = &client->dev;
+ char buf[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+
+ rc = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (rc < 0)
+ return dev_err_probe(dev, rc, "Failed to read PMBUS_MFR_MODEL\n");
+
+ if (rc != 7 || strncmp(buf, "03NK260", 7)) {
+ buf[rc] = '\0';
+ return dev_err_probe(dev, -ENODEV, "Model '%s' not supported\n", buf);
+ }
+
+ rc = pmbus_do_probe(client, &crps_info);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to probe\n");
+
+ return 0;
+}
+
+static const struct of_device_id crps_of_match[] = {
+ {
+ .compatible = "intel,crps185",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, crps_of_match);
+
+static struct i2c_driver crps_driver = {
+ .driver = {
+ .name = "crps",
+ .of_match_table = crps_of_match,
+ },
+ .probe = crps_probe,
+ .id_table = crps_id,
+};
+
+module_i2c_driver(crps_driver);
+
+MODULE_AUTHOR("Ninad Palsule");
+MODULE_DESCRIPTION("PMBus driver for Intel Common Redundant power supplies");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
index cc5aac9dfdb3..325111a955e6 100644
--- a/drivers/hwmon/pmbus/dps920ab.c
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -190,12 +190,19 @@ static const struct of_device_id __maybe_unused dps920ab_of_match[] = {
MODULE_DEVICE_TABLE(of, dps920ab_of_match);
+static const struct i2c_device_id dps920ab_device_id[] = {
+ { "dps920ab" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dps920ab_device_id);
+
static struct i2c_driver dps920ab_driver = {
.driver = {
.name = "dps920ab",
.of_match_table = of_match_ptr(dps920ab_of_match),
},
.probe = dps920ab_probe,
+ .id_table = dps920ab_device_id,
};
module_i2c_driver(dps920ab_driver);
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index 50dfd477772f..d5810b88ea8d 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -25,6 +25,7 @@
static const struct i2c_device_id max15301_id[] = {
{ "bmr461" },
{ "max15301" },
+ { "max15303" },
{}
};
MODULE_DEVICE_TABLE(i2c, max15301_id);
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 77cf268e7d2d..920cd5408141 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
int page;
+ info->pages = PMBUS_PAGES;
+
for (page = 1; page < PMBUS_PAGES; page++) {
if (pmbus_set_page(client, page, 0xff) < 0)
break;
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d605412a3173..ddb19c9726d6 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -487,6 +487,8 @@ struct pmbus_driver_info {
/* Regulator ops */
extern const struct regulator_ops pmbus_regulator_ops;
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config);
/* Macros for filling in array of struct regulator_desc */
#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step, _min_uV) \
@@ -501,6 +503,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
@@ -516,6 +519,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a1375cb6b648..787683e83db6 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -31,6 +31,9 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+static int wp = -1;
+module_param(wp, int, 0444);
+
struct pmbus_sensor {
struct pmbus_sensor *next;
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
@@ -2665,6 +2668,56 @@ static void pmbus_remove_pec(void *dev)
device_remove_file(dev, &dev_attr_pec);
}
+static void pmbus_init_wp(struct i2c_client *client, struct pmbus_data *data)
+{
+ int ret;
+
+ switch (wp) {
+ case 0:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, 0);
+ break;
+
+ case 1:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_VOUT);
+ break;
+
+ case 2:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_OP);
+ break;
+
+ case 3:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_ALL);
+ break;
+
+ default:
+ /* Ignore the other values */
+ break;
+ }
+
+ ret = _pmbus_read_byte_data(client, -1, PMBUS_WRITE_PROTECT);
+ if (ret < 0)
+ return;
+
+ switch (ret & PB_WP_ANY) {
+ case PB_WP_ALL:
+ data->flags |= PMBUS_OP_PROTECTED;
+ fallthrough;
+ case PB_WP_OP:
+ data->flags |= PMBUS_VOUT_PROTECTED;
+ fallthrough;
+ case PB_WP_VOUT:
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ break;
+
+ default:
+ break;
+ }
+}
+
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
@@ -2718,12 +2771,8 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* faults, and we should not try it. Also, in that case, writes into
* limit registers need to be disabled.
*/
- if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
- ret = _pmbus_read_byte_data(client, -1, PMBUS_WRITE_PROTECT);
-
- if (ret > 0 && (ret & PB_WP_ANY))
- data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
- }
+ if (!(data->flags & PMBUS_NO_WRITE_PROTECT))
+ pmbus_init_wp(client, data);
ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
if (ret >= 0)
@@ -3183,8 +3232,12 @@ static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
int val, low, high;
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ return 0;
+
if (selector >= rdev->desc->n_voltages ||
selector < rdev->desc->linear_min_sel)
return -EINVAL;
@@ -3219,6 +3272,22 @@ const struct regulator_ops pmbus_regulator_ops = {
};
EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, "PMBUS");
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config)
+{
+ struct pmbus_data *data = config->driver_data;
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ if (data->flags & PMBUS_OP_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_STATUS;
+
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_VOLTAGE;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(pmbus_regulator_init_cb, "PMBUS");
+
static int pmbus_regulator_register(struct pmbus_data *data)
{
struct device *dev = data->dev;
@@ -3465,11 +3534,11 @@ static int pmbus_init_debugfs(struct i2c_client *client,
/*
* Allocate the max possible entries we need.
- * 6 entries device-specific
+ * 7 entries device-specific
* 10 entries page-specific
*/
entries = devm_kcalloc(data->dev,
- 6 + data->info->pages * 10, sizeof(*entries),
+ 7 + data->info->pages * 10, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -3482,6 +3551,15 @@ static int pmbus_init_debugfs(struct i2c_client *client,
* assume that values of the following registers are the same for all
* pages and report values only for page 0.
*/
+ if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_REVISION;
+ debugfs_create_file("revision", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
entries[idx].client = client;
entries[idx].page = 0;
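
In pmbus_init_wp() above, the fallthrough cascade encodes that PB_WP_ALL is strictly stronger than PB_WP_OP, which in turn is stronger than PB_WP_VOUT, so each stricter level accumulates the flags of the looser ones. A standalone reduction of that shape (flag names and values are placeholders):

#include <stdio.h>

enum { WP_VOUT = 1 << 0, WP_OP = 1 << 1, WP_ALL = 1 << 2 };
enum { F_WRITE_PROTECTED = 1 << 0, F_VOUT_PROTECTED = 1 << 1, F_OP_PROTECTED = 1 << 2 };

static unsigned int wp_flags(unsigned int wp)
{
	unsigned int flags = 0;

	switch (wp) {
	case WP_ALL:
		flags |= F_OP_PROTECTED;
		/* fall through */
	case WP_OP:
		flags |= F_VOUT_PROTECTED;
		/* fall through */
	case WP_VOUT:
		flags |= F_WRITE_PROTECTED;
		break;
	}
	return flags;
}

int main(void)
{
	printf("%x %x %x\n", wp_flags(WP_VOUT), wp_flags(WP_OP), wp_flags(WP_ALL));
	return 0;	/* prints: 1 3 7 */
}
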
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
new file mode 100644
index 000000000000..0d2655e69549
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2024 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define TPS25990_READ_VAUX 0xd0
+#define TPS25990_READ_VIN_MIN 0xd1
+#define TPS25990_READ_VIN_PEAK 0xd2
+#define TPS25990_READ_IIN_PEAK 0xd4
+#define TPS25990_READ_PIN_PEAK 0xd5
+#define TPS25990_READ_TEMP_AVG 0xd6
+#define TPS25990_READ_TEMP_PEAK 0xd7
+#define TPS25990_READ_VOUT_MIN 0xda
+#define TPS25990_READ_VIN_AVG 0xdc
+#define TPS25990_READ_VOUT_AVG 0xdd
+#define TPS25990_READ_IIN_AVG 0xde
+#define TPS25990_READ_PIN_AVG 0xdf
+#define TPS25990_VIREF 0xe0
+#define TPS25990_PK_MIN_AVG 0xea
+#define PK_MIN_AVG_RST_PEAK BIT(7)
+#define PK_MIN_AVG_RST_AVG BIT(6)
+#define PK_MIN_AVG_RST_MIN BIT(5)
+#define PK_MIN_AVG_AVG_CNT GENMASK(2, 0)
+#define TPS25990_MFR_WRITE_PROTECT 0xf8
+#define TPS25990_UNLOCKED BIT(7)
+
+#define TPS25990_8B_SHIFT 2
+#define TPS25990_VIN_OVF_NUM 525100
+#define TPS25990_VIN_OVF_DIV 10163
+#define TPS25990_VIN_OVF_OFF 155
+#define TPS25990_IIN_OCF_NUM 953800
+#define TPS25990_IIN_OCF_DIV 129278
+#define TPS25990_IIN_OCF_OFF 157
+
+#define PK_MIN_AVG_RST_MASK (PK_MIN_AVG_RST_PEAK | \
+ PK_MIN_AVG_RST_AVG | \
+ PK_MIN_AVG_RST_MIN)
+
+/*
+ * Arbitrary default Rimon value: 1kOhm
+ * This corresponds to an overcurrent limit of 55 A, close to the specified
+ * limit of an un-stacked TPS25990, and makes further calculation easier to
+ * set up in sensor.conf, if necessary
+ */
+#define TPS25990_DEFAULT_RIMON 1000000000
+
+static void tps25990_set_m(int *m, u32 rimon)
+{
+ u64 val = ((u64)*m) * rimon;
+
+ /* Make sure m fits the s32 type */
+ *m = DIV_ROUND_CLOSEST_ULL(val, 1000000);
+}
+
+static int tps25990_mfr_write_protect_set(struct i2c_client *client,
+ u8 protect)
+{
+ u8 val;
+
+ switch (protect) {
+ case 0:
+ val = 0xa2;
+ break;
+ case PB_WP_ALL:
+ val = 0x0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_write_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT,
+ val);
+}
+
+static int tps25990_mfr_write_protect_get(struct i2c_client *client)
+{
+ int ret = pmbus_read_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & TPS25990_UNLOCKED) ? 0 : PB_WP_ALL;
+}
+
+static int tps25990_read_word_data(struct i2c_client *client,
+ int page, int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VAUX);
+ break;
+
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * These registers provide an 8-bit value instead of a
+ * 10-bit one. Shifting the register value left by two is
+ * enough to make the sensor type conversion work, even
+ * if the datasheet provides different m, b and R for
+ * those registers.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret <<= TPS25990_8B_SHIFT;
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_VIN_OVF_NUM,
+ TPS25990_VIN_OVF_DIV);
+ ret += TPS25990_VIN_OVF_OFF;
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ /*
+ * VIREF directly sets the over-current limit at which the eFuse
+ * will turn the FET off and trigger a fault. Expose it through
+ * this generic property instead of a manufacturer specific one.
+ */
+ ret = pmbus_read_byte_data(client, page, TPS25990_VIREF);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_IIN_OCF_NUM,
+ TPS25990_IIN_OCF_DIV);
+ ret += TPS25990_IIN_OCF_OFF;
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ ret = pmbus_read_byte_data(client, page, TPS25990_PK_MIN_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << FIELD_GET(PK_MIN_AVG_AVG_CNT, ret);
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = 0;
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_word_data(struct i2c_client *client,
+ int page, int reg, u16 value)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ value >>= TPS25990_8B_SHIFT;
+ value = clamp_val(value, 0, 0xff);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ value -= TPS25990_VIN_OVF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_VIN_OVF_DIV,
+ TPS25990_VIN_OVF_NUM);
+ value = clamp_val(value, 0, 0xf);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ value -= TPS25990_IIN_OCF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_IIN_OCF_DIV,
+ TPS25990_IIN_OCF_NUM);
+ value = clamp_val(value, 0, 0x3f);
+ ret = pmbus_write_byte_data(client, page, TPS25990_VIREF, value);
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ value = clamp_val(value, 1, 1 << PK_MIN_AVG_AVG_CNT);
+ value = ilog2(value);
+ ret = pmbus_update_byte_data(client, page, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_AVG_CNT,
+ FIELD_PREP(PK_MIN_AVG_AVG_CNT, value));
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ /*
+ * TPS25990 has history resets based on MIN/AVG/PEAK instead of per
+ * sensor type. Exposing this quirk in hwmon is not desirable so
+ * reset MIN, AVG and PEAK together. Even if there is effectively
+ * only one reset, which resets everything, expose all 5 entries so
+ * userspace is not required to map one sensor type to another to
+ * trigger a reset.
+ */
+ ret = pmbus_update_byte_data(client, 0, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_RST_MASK,
+ PK_MIN_AVG_RST_MASK);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_read_byte_data(struct i2c_client *client,
+ int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_get(client);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_byte_data(struct i2c_client *client,
+ int page, int reg, u8 byte)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_set(client, byte);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+static const struct regulator_desc tps25990_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("vout"),
+};
+#endif
+
+static const struct pmbus_driver_info tps25990_base_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 5251,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 5251,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -2,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 140,
+ .b[PSC_TEMPERATURE] = 32100,
+ .R[PSC_TEMPERATURE] = -2,
+ /*
+ * Current and Power measurement depends on the ohm value
+ * of Rimon. m is multiplied by 1000 below to have an integer
+ * and -3 is added to R to compensate.
+ */
+ .format[PSC_CURRENT_IN] = direct,
+ .m[PSC_CURRENT_IN] = 9538,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = -6,
+ .format[PSC_POWER] = direct,
+ .m[PSC_POWER] = 4901,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -7,
+ .func[0] = (PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_VMON |
+ PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_SAMPLES),
+ .read_word_data = tps25990_read_word_data,
+ .write_word_data = tps25990_write_word_data,
+ .read_byte_data = tps25990_read_byte_data,
+ .write_byte_data = tps25990_write_byte_data,
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+ .reg_desc = tps25990_reg_desc,
+ .num_regulators = ARRAY_SIZE(tps25990_reg_desc),
+#endif
+};
+
+static const struct i2c_device_id tps25990_i2c_id[] = {
+ { "tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps25990_i2c_id);
+
+static const struct of_device_id tps25990_of_match[] = {
+ { .compatible = "ti,tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps25990_of_match);
+
+static int tps25990_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ u32 rimon = TPS25990_DEFAULT_RIMON;
+ int ret;
+
+ ret = device_property_read_u32(dev, "ti,rimon-micro-ohms", &rimon);
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "failed to get rimon\n");
+
+ info = devm_kmemdup(dev, &tps25990_base_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Adapt the current and power scale for each instance */
+ tps25990_set_m(&info->m[PSC_CURRENT_IN], rimon);
+ tps25990_set_m(&info->m[PSC_POWER], rimon);
+
+ return pmbus_do_probe(client, info);
+}
+
+static struct i2c_driver tps25990_driver = {
+ .driver = {
+ .name = "tps25990",
+ .of_match_table = tps25990_of_match,
+ },
+ .probe = tps25990_probe,
+ .id_table = tps25990_i2c_id,
+};
+module_i2c_driver(tps25990_driver);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("PMBUS driver for TPS25990 eFuse");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
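
tps25990_set_m() folds the board-specific Rimon into the PMBus direct-format coefficient: the m values in tps25990_base_info are pre-multiplied by 1000 (with R lowered to compensate, as the comment there notes), then scaled by Rimon in micro-ohms over 10^6. A standalone check with the default 1 kOhm Rimon, DIV_ROUND_CLOSEST_ULL open-coded:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int32_t scale_m(int32_t m, uint32_t rimon_uohm)
{
	uint64_t val = (uint64_t)m * rimon_uohm;

	return (int32_t)((val + 500000) / 1000000);	/* DIV_ROUND_CLOSEST_ULL(val, 1000000) */
}

int main(void)
{
	printf("%" PRId32 "\n", scale_m(9538, 1000000000u));	/* 9538000 */
	return 0;
}
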
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 53a1a968d00d..579d31bb9ac7 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -497,7 +497,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct device *hwmon;
int ret;
const struct hwmon_channel_info **channels;
- u32 pwm_min_from_stopped = 0;
+ u32 initial_pwm, pwm_min_from_stopped = 0;
u32 *fan_channel_config;
int channel_count = 1; /* We always have a PWM channel. */
int i;
@@ -545,11 +545,21 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->enable_mode = pwm_disable_reg_enable;
+ ret = pwm_fan_get_cooling_data(dev, ctx);
+ if (ret)
+ return ret;
+
+ /* use maximum cooling level if provided */
+ if (ctx->pwm_fan_cooling_levels)
+ initial_pwm = ctx->pwm_fan_cooling_levels[ctx->pwm_fan_max_state];
+ else
+ initial_pwm = MAX_PWM;
+
/*
* Set duty cycle to maximum allowed and enable PWM output as well as
* the regulator. In case of error nothing is changed
*/
- ret = set_pwm(ctx, MAX_PWM);
+ ret = set_pwm(ctx, initial_pwm);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -638,16 +648,16 @@ static int pwm_fan_probe(struct platform_device *pdev)
channels[1] = &ctx->fan_channel;
}
- ret = of_property_read_u32(dev->of_node, "fan-stop-to-start-percent",
- &pwm_min_from_stopped);
+ ret = device_property_read_u32(dev, "fan-stop-to-start-percent",
+ &pwm_min_from_stopped);
if (!ret && pwm_min_from_stopped) {
ctx->pwm_duty_cycle_from_stopped =
DIV_ROUND_UP_ULL(pwm_min_from_stopped *
(ctx->pwm_state.period - 1),
100);
}
- ret = of_property_read_u32(dev->of_node, "fan-stop-to-start-us",
- &ctx->pwm_usec_from_stopped);
+ ret = device_property_read_u32(dev, "fan-stop-to-start-us",
+ &ctx->pwm_usec_from_stopped);
if (ret)
ctx->pwm_usec_from_stopped = 250000;
@@ -661,10 +671,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwmon);
}
- ret = pwm_fan_get_cooling_data(dev, ctx);
- if (ret)
- return ret;
-
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
cdev = devm_thermal_of_cooling_device_register(dev,
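The two property hunks above swap the DT-only of_property_read_u32() for device_property_read_u32(), which reads from whichever firmware interface backs the device (DT or ACPI). A minimal sketch of the resulting pattern, reusing the driver's property name:

#include <linux/device.h>
#include <linux/property.h>

static u32 fan_stop_percent(struct device *dev)
{
	u32 val;

	/* works for DT nodes and ACPI _DSD alike; fall back to 0
	 * when the property is absent or malformed */
	if (device_property_read_u32(dev, "fan-stop-to-start-percent", &val))
		val = 0;

	return val;
}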
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
new file mode 100644
index 000000000000..29057514739c
--- /dev/null
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for hwmon elements found on QNAP-MCU devices
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/fwnode.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/thermal.h>
+
+struct qnap_mcu_hwmon {
+ struct qnap_mcu *mcu;
+ struct device *dev;
+
+ unsigned int pwm_min;
+ unsigned int pwm_max;
+
+ struct fwnode_handle *fan_node;
+ unsigned int fan_state;
+ unsigned int fan_max_state;
+ unsigned int *fan_cooling_levels;
+
+ struct thermal_cooling_device *cdev;
+ struct hwmon_chip_info info;
+};
+
+static int qnap_mcu_hwmon_get_rpm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'A' };
+ u8 reply[6];
+ int ret;
+
+ /* poll the fan rpm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 2 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 2))
+ return -EIO;
+
+ return reply[4] * 30;
+}
+
+static int qnap_mcu_hwmon_get_pwm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'Z', '0' }; /* 0 = fan-id? */
+ u8 reply[4];
+ int ret;
+
+ /* poll the fan pwm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 3))
+ return -EIO;
+
+ return reply[3];
+}
+
+static int qnap_mcu_hwmon_set_pwm(struct qnap_mcu_hwmon *hwm, u8 pwm)
+{
+ const u8 cmd[] = { '@', 'F', 'W', '0', pwm }; /* 0 = fan-id?, pwm 0-255 */
+
+ /* set the fan pwm */
+ return qnap_mcu_exec_with_ack(hwm->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_hwmon_get_temp(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'T', '3' };
+ u8 reply[4];
+ int ret;
+
+	/* poll the temperature */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+	/* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, sizeof(cmd)))
+ return -EIO;
+
+ /*
+	 * Bit 7 is sometimes set, for unknown reasons.
+	 * Bits [6:0] report the actual temperature, as returned by the
+	 * original QNAP firmware tools, so just drop bit 7 for now.
+ */
+ return (reply[3] & 0x7f) * 1000;
+}
+
+static int qnap_mcu_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ if (val != 0)
+ val = clamp_val(val, hwm->pwm_min, hwm->pwm_max);
+
+ return qnap_mcu_hwmon_set_pwm(hwm, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = qnap_mcu_hwmon_get_pwm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_fan:
+ ret = qnap_mcu_hwmon_get_rpm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ case hwmon_temp:
+ ret = qnap_mcu_hwmon_get_temp(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t qnap_mcu_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ return 0444;
+
+ case hwmon_pwm:
+ return 0644;
+
+ case hwmon_fan:
+ return 0444;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops qnap_mcu_hwmon_hwmon_ops = {
+ .is_visible = qnap_mcu_hwmon_is_visible,
+ .read = qnap_mcu_hwmon_read,
+ .write = qnap_mcu_hwmon_write,
+};
+
+/* thermal cooling device callbacks */
+static int qnap_mcu_hwmon_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_max_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+ int ret;
+
+ if (!hwm || state > hwm->fan_max_state)
+ return -EINVAL;
+
+ if (state == hwm->fan_state)
+ return 0;
+
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->fan_cooling_levels[state]);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = state;
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops qnap_mcu_hwmon_cooling_ops = {
+ .get_max_state = qnap_mcu_hwmon_get_max_state,
+ .get_cur_state = qnap_mcu_hwmon_get_cur_state,
+ .set_cur_state = qnap_mcu_hwmon_set_cur_state,
+};
+
+static void devm_fan_node_release(void *data)
+{
+ struct qnap_mcu_hwmon *hwm = data;
+
+ fwnode_handle_put(hwm->fan_node);
+}
+
+static int qnap_mcu_hwmon_get_cooling_data(struct device *dev, struct qnap_mcu_hwmon *hwm)
+{
+ struct fwnode_handle *fwnode;
+ int num, i, ret;
+
+ fwnode = device_get_named_child_node(dev->parent, "fan-0");
+ if (!fwnode)
+ return 0;
+
+	/* if we found the fan-node, keep it until the device is unbound */
+ hwm->fan_node = fwnode;
+ ret = devm_add_action_or_reset(dev, devm_fan_node_release, hwm);
+ if (ret)
+ return ret;
+
+ num = fwnode_property_count_u32(fwnode, "cooling-levels");
+ if (num <= 0)
+ return dev_err_probe(dev, num ? : -EINVAL,
+ "Failed to count elements in 'cooling-levels'\n");
+
+ hwm->fan_cooling_levels = devm_kcalloc(dev, num, sizeof(u32),
+ GFP_KERNEL);
+ if (!hwm->fan_cooling_levels)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_u32_array(fwnode, "cooling-levels",
+ hwm->fan_cooling_levels, num);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read 'cooling-levels'\n");
+
+ for (i = 0; i < num; i++) {
+ if (hwm->fan_cooling_levels[i] > hwm->pwm_max)
+ return dev_err_probe(dev, -EINVAL, "fan state[%d]:%d > %d\n", i,
+ hwm->fan_cooling_levels[i], hwm->pwm_max);
+ }
+
+ hwm->fan_max_state = num - 1;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const qnap_mcu_hwmon_channels[] = {
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static int qnap_mcu_hwmon_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ const struct qnap_mcu_variant *variant = pdev->dev.platform_data;
+ struct qnap_mcu_hwmon *hwm;
+ struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
+ struct device *hwmon;
+ int ret;
+
+ hwm = devm_kzalloc(dev, sizeof(*hwm), GFP_KERNEL);
+ if (!hwm)
+ return -ENOMEM;
+
+ hwm->mcu = mcu;
+ hwm->dev = &pdev->dev;
+ hwm->pwm_min = variant->fan_pwm_min;
+ hwm->pwm_max = variant->fan_pwm_max;
+
+ platform_set_drvdata(pdev, hwm);
+
+	/* Set duty cycle to maximum allowed. */
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->pwm_max);
+ if (ret)
+ return ret;
+
+ hwm->info.ops = &qnap_mcu_hwmon_hwmon_ops;
+ hwm->info.info = qnap_mcu_hwmon_channels;
+
+ ret = qnap_mcu_hwmon_get_cooling_data(dev, hwm);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = hwm->fan_max_state;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "qnapmcu",
+ hwm, &hwm->info, NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(dev, PTR_ERR(hwmon), "Failed to register hwmon device\n");
+
+ /*
+	 * Only register a cooling device if cooling-levels were found.
+	 * qnap_mcu_hwmon_get_cooling_data() fails on malformed levels and
+	 * succeeds only with either no cooling levels or valid ones.
+ */
+ if (IS_ENABLED(CONFIG_THERMAL) && hwm->fan_cooling_levels) {
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ to_of_node(hwm->fan_node), "qnap-mcu-hwmon",
+ hwm, &qnap_mcu_hwmon_cooling_ops);
+ if (IS_ERR(cdev))
+ return dev_err_probe(dev, PTR_ERR(cdev),
+ "Failed to register qnap-mcu-hwmon as cooling device\n");
+ hwm->cdev = cdev;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_hwmon_driver = {
+ .probe = qnap_mcu_hwmon_probe,
+ .driver = {
+ .name = "qnap-mcu-hwmon",
+ },
+};
+module_platform_driver(qnap_mcu_hwmon_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-hwmon");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU hwmon driver");
+MODULE_LICENSE("GPL");
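A worked example of the '@T3' reply decoding performed by qnap_mcu_hwmon_get_temp() above; the raw byte is made up, since the MCU protocol itself is undocumented:

#include <stdio.h>

/* bit 7 has an unknown meaning and is dropped; bits [6:0] are
 * degrees C, scaled to millidegrees C as hwmon expects */
static int decode_temp(unsigned char raw)
{
	return (raw & 0x7f) * 1000;
}

int main(void)
{
	printf("%d\n", decode_temp(0xaa));	/* 0xaa -> 42 C -> 42000 */
	return 0;
}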
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 10ef1e1f9458..a2938881ccd2 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -128,10 +128,32 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
return 0;
}
+static int rpi_hwmon_suspend(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&data->get_values_poll_work);
+
+ return 0;
+}
+
+static int rpi_hwmon_resume(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ get_values_poll(&data->get_values_poll_work.work);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(rpi_hwmon_pm_ops, rpi_hwmon_suspend,
+ rpi_hwmon_resume);
+
static struct platform_driver rpi_hwmon_driver = {
.probe = rpi_hwmon_probe,
.driver = {
.name = "raspberrypi-hwmon",
+ .pm = pm_ptr(&rpi_hwmon_pm_ops),
},
};
module_platform_driver(rpi_hwmon_driver);
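The suspend handler above quiesces the polling work and the resume handler kicks one synchronous poll to refresh the values. DEFINE_SIMPLE_DEV_PM_OPS() plus pm_ptr() lets the ops drop out of the build when CONFIG_PM is disabled; a minimal sketch for a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* pm_ptr() evaluates to NULL when CONFIG_PM=n */
		.pm = pm_ptr(&foo_pm_ops),
	},
};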
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 6cee48a3e5c3..358152868d96 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -291,12 +291,6 @@ static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types typ
}
}
-static inline bool spd5118_parity8(u8 w)
-{
- w ^= w >> 4;
- return (0x6996 >> (w & 0xf)) & 1;
-}
-
/*
* Bank and vendor id are 8-bit fields with seven data bits and odd parity.
* Vendor IDs 0 and 0x7f are invalid.
@@ -304,7 +298,7 @@ static inline bool spd5118_parity8(u8 w)
*/
static bool spd5118_vendor_valid(u8 bank, u8 id)
{
- if (!spd5118_parity8(bank) || !spd5118_parity8(id))
+ if (parity8(bank) == 0 || parity8(id) == 0)
return false;
id &= 0x7f;
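The spd5118 hunk above replaces a driver-local parity helper with the generic parity8(), which returns 1 when its argument has an odd number of set bits. The bank/ID bytes carry seven data bits plus an odd-parity bit, so a zero (even) result rejects them. A sketch of the classic shift-xor fold:

#include <stdio.h>

static int parity8_sketch(unsigned char w)
{
	w ^= w >> 4;
	w ^= w >> 2;
	w ^= w >> 1;
	return w & 1;	/* 1: odd number of set bits */
}

int main(void)
{
	printf("%d\n", parity8_sketch(0x85));	/* 3 bits set -> 1, accepted */
	printf("%d\n", parity8_sketch(0x03));	/* 2 bits set -> 0, rejected */
	return 0;
}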
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index fbe673009126..a971ff628435 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -8,15 +8,15 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i3c/device.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define DRIVER_NAME "tmp108"
@@ -331,6 +331,10 @@ static int tmp108_common_probe(struct device *dev, struct regmap *regmap, char *
u32 config;
int err;
+ err = devm_regulator_get_enable(dev, "vcc");
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
if (!tmp108)
return -ENOMEM;
@@ -417,25 +421,24 @@ static int tmp108_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "p3t1085" },
{ "tmp108" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
-#ifdef CONFIG_OF
static const struct of_device_id tmp108_of_ids[] = {
{ .compatible = "nxp,p3t1085", },
{ .compatible = "ti,tmp108", },
{}
};
MODULE_DEVICE_TABLE(of, tmp108_of_ids);
-#endif
static struct i2c_driver tmp108_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
- .of_match_table = of_match_ptr(tmp108_of_ids),
+ .of_match_table = tmp108_of_ids,
},
.probe = tmp108_probe,
.id_table = tmp108_i2c_ids,
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 1c2cb12071b8..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
*val = sign_extend32(regval,
reg == TMP51X_SHUNT_CURRENT_RESULT ?
16 - tmp51x_get_pga_shift(data) : 15);
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 1e3bd129a922..7087197383c9 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out;
}
- if (!ctx->pcc_comm_addr) {
+ if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
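The widened xgene check above matters presumably because the PCC shared-memory mapping can hand back an encoded error pointer as well as NULL; IS_ERR_OR_NULL() covers both. A sketch of its semantics (the kernel encodes errnos in the top 4095 pointer values):

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static bool is_err_or_null_sketch(const void *ptr)
{
	/* NULL, or an address in the reserved error range */
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}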
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index ea38ecf26fcb..0a9380350fb5 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -75,22 +75,54 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+static struct coresight_device *coresight_get_source(struct list_head *path)
+{
+ struct coresight_device *csdev;
+
+ if (!path)
+ return NULL;
+
+ csdev = list_first_entry(path, struct coresight_node, link)->csdev;
+ if (!coresight_is_device_source(csdev))
+ return NULL;
+
+ return csdev;
+}
+
+/**
+ * coresight_blocks_source - check whether a connection blocks the source
+ * of the path when the connection is bound to a specific source.
+ * @src: The source device of the trace path
+ * @conn: The connection of one outport
+ *
+ * Return false if the connection doesn't have a source bound, or if the
+ * source of the path matches the source bound to the connection.
+ */
+static bool coresight_blocks_source(struct coresight_device *src,
+ struct coresight_connection *conn)
+{
+ return conn->filter_src_fwnode && (conn->filter_src_dev != src);
+}
+
static struct coresight_connection *
-coresight_find_out_connection(struct coresight_device *src_dev,
- struct coresight_device *dest_dev)
+coresight_find_out_connection(struct coresight_device *csdev,
+ struct coresight_device *out_dev,
+ struct coresight_device *trace_src)
{
int i;
struct coresight_connection *conn;
- for (i = 0; i < src_dev->pdata->nr_outconns; i++) {
- conn = src_dev->pdata->out_conns[i];
- if (conn->dest_dev == dest_dev)
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ conn = csdev->pdata->out_conns[i];
+ if (coresight_blocks_source(trace_src, conn))
+ continue;
+ if (conn->dest_dev == out_dev)
return conn;
}
- dev_err(&src_dev->dev,
- "couldn't find output connection, src_dev: %s, dest_dev: %s\n",
- dev_name(&src_dev->dev), dev_name(&dest_dev->dev));
+ dev_err(&csdev->dev,
+ "couldn't find output connection, csdev: %s, out_dev: %s\n",
+ dev_name(&csdev->dev), dev_name(&out_dev->dev));
return ERR_PTR(-ENODEV);
}
@@ -251,7 +283,8 @@ static void coresight_disable_sink(struct coresight_device *csdev)
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
int link_subtype;
struct coresight_connection *inconn, *outconn;
@@ -259,8 +292,8 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (!parent || !child)
return -EINVAL;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn))
@@ -273,15 +306,16 @@ static int coresight_enable_link(struct coresight_device *csdev,
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
struct coresight_connection *inconn, *outconn;
if (!parent || !child)
return;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_ops(csdev)->disable(csdev, inconn, outconn);
}
@@ -375,7 +409,8 @@ static void coresight_disable_path_from(struct list_head *path,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- coresight_disable_link(csdev, parent, child);
+ coresight_disable_link(csdev, parent, child,
+ coresight_get_source(path));
break;
default:
break;
@@ -418,7 +453,9 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
u32 type;
struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
+ struct coresight_device *source;
+ source = coresight_get_source(path);
list_for_each_entry_reverse(nd, path, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -456,7 +493,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- ret = coresight_enable_link(csdev, parent, child);
+ ret = coresight_enable_link(csdev, parent, child, source);
if (ret)
goto err;
break;
@@ -619,6 +656,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
+ * @source: The trace source device of the path.
* @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
@@ -628,6 +666,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
* the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *source,
struct coresight_device *sink,
struct list_head *path)
{
@@ -641,7 +680,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
- if (_coresight_build_path(sink, sink, path) == 0) {
+ if (_coresight_build_path(sink, source, sink, path) == 0) {
found = true;
goto out;
}
@@ -652,8 +691,12 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *child_dev;
child_dev = csdev->pdata->out_conns[i]->dest_dev;
+
+ if (coresight_blocks_source(source, csdev->pdata->out_conns[i]))
+ continue;
+
if (child_dev &&
- _coresight_build_path(child_dev, sink, path) == 0) {
+ _coresight_build_path(child_dev, source, sink, path) == 0) {
found = true;
break;
}
@@ -698,7 +741,7 @@ struct list_head *coresight_build_path(struct coresight_device *source,
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(source, sink, path);
+ rc = _coresight_build_path(source, source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -927,6 +970,16 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < src_csdev->pdata->nr_outconns; i++) {
conn = src_csdev->pdata->out_conns[i];
+		/* Fix up the filter source device before skipping the port */
+ if (conn->filter_src_fwnode && !conn->filter_src_dev) {
+ if (dst_csdev &&
+ (conn->filter_src_fwnode == dst_csdev->dev.fwnode) &&
+ !WARN_ON_ONCE(!coresight_is_device_source(dst_csdev)))
+ conn->filter_src_dev = dst_csdev;
+ else
+ still_orphan = true;
+ }
+
/* Skip the port if it's already connected. */
if (conn->dest_dev)
continue;
@@ -977,18 +1030,40 @@ static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
csdev, coresight_orphan_match);
}
+static int coresight_clear_filter_source(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *source = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
+ if (csdev->pdata->out_conns[i]->filter_src_dev == source)
+ csdev->pdata->out_conns[i]->filter_src_dev = NULL;
+ }
+ return 0;
+}
+
/* coresight_remove_conns - Remove other device's references to this device */
static void coresight_remove_conns(struct coresight_device *csdev)
{
int i, j;
struct coresight_connection *conn;
+ if (coresight_is_device_source(csdev))
+ bus_for_each_dev(&coresight_bustype, NULL, csdev,
+ coresight_clear_filter_source);
+
/*
* Remove the input connection references from the destination device
* for each output connection.
*/
for (i = 0; i < csdev->pdata->nr_outconns; i++) {
conn = csdev->pdata->out_conns[i];
+ if (conn->filter_src_fwnode) {
+ conn->filter_src_dev = NULL;
+ fwnode_handle_put(conn->filter_src_fwnode);
+ }
+
if (!conn->dest_dev)
continue;
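A compact restatement of the rule coresight_blocks_source() adds above, with illustrative types instead of the real coresight structs: a connection blocks a path only when it carries a filter reference and that filter is bound to a different source.

#include <stdbool.h>

struct conn_sketch {
	const void *filter_src_fwnode;	/* non-NULL: this port is filtered */
	const void *filter_src_dev;	/* the source the filter is bound to */
};

static bool blocks_source(const void *path_src, const struct conn_sketch *c)
{
	/* unfiltered ports never block; filtered ports pass only
	 * the source they are bound to */
	return c->filter_src_fwnode && c->filter_src_dev != path_src;
}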
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index 02ef2b945a0c..9be53be8964b 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -11,10 +11,12 @@
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
struct dummy_drvdata {
struct device *dev;
struct coresight_device *csdev;
+ u8 traceid;
};
DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
@@ -72,6 +74,32 @@ static const struct coresight_ops dummy_sink_cs_ops = {
.sink_ops = &dummy_sink_ops,
};
+/* Users can read the trace ID of the dummy source from this node. */
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct dummy_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->traceid;
+ return sysfs_emit(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(traceid);
+
+static struct attribute *coresight_dummy_attrs[] = {
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_dummy_group = {
+ .attrs = coresight_dummy_attrs,
+};
+
+static const struct attribute_group *coresight_dummy_groups[] = {
+ &coresight_dummy_group,
+ NULL,
+};
+
static int dummy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -79,6 +107,11 @@ static int dummy_probe(struct platform_device *pdev)
struct coresight_platform_data *pdata;
struct dummy_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret = 0, trace_id = 0;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
@@ -90,6 +123,26 @@ static int dummy_probe(struct platform_device *pdev)
desc.subtype.source_subtype =
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &dummy_source_cs_ops;
+ desc.groups = coresight_dummy_groups;
+
+ ret = coresight_get_static_trace_id(dev, &trace_id);
+ if (!ret) {
+ /* Get the static id if id is set in device tree. */
+ ret = coresight_trace_id_get_static_system_id(trace_id);
+ if (ret < 0) {
+ dev_err(dev, "Fail to get static id.\n");
+ return ret;
+ }
+ } else {
+ /* Get next available id if id is not set in device tree. */
+ trace_id = coresight_trace_id_get_system_id();
+ if (trace_id < 0) {
+ ret = trace_id;
+ return ret;
+ }
+ }
+ drvdata->traceid = (u8)trace_id;
+
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
desc.name = coresight_alloc_device_name(&sink_devs, dev);
if (!desc.name)
@@ -104,27 +157,35 @@ static int dummy_probe(struct platform_device *pdev)
}
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto free_id;
+ }
pdev->dev.platform_data = pdata;
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto free_id;
+ }
pm_runtime_enable(dev);
dev_dbg(dev, "Dummy device initialized\n");
- return 0;
+ ret = 0;
+ goto out;
+
+free_id:
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
+
+out:
+ return ret;
}
static void dummy_remove(struct platform_device *pdev)
@@ -132,6 +193,8 @@ static void dummy_remove(struct platform_device *pdev)
struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index dd8c74f893db..2c1a60577728 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -268,10 +269,28 @@ struct etm4_enable_arg {
*/
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
+ u64 trfcr;
+
/* If the CPU doesn't support FEAT_TRF, nothing to do */
if (!drvdata->trfcr)
return;
- cpu_prohibit_trace();
+
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+
+ write_trfcr(trfcr);
+ kvm_tracing_set_el1_configuration(trfcr);
+}
+
+static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata)
+{
+ u64 trfcr = drvdata->trfcr;
+
+ if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+ trfcr &= ~TRFCR_EL1_ExTRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+ trfcr &= ~TRFCR_EL1_E0TRE;
+
+ return trfcr;
}
/*
@@ -286,18 +305,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
*/
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
- u64 trfcr = drvdata->trfcr;
+ u64 trfcr, guest_trfcr;
/* If the CPU doesn't support FEAT_TRF, nothing to do */
- if (!trfcr)
+ if (!drvdata->trfcr)
return;
- if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
- trfcr &= ~TRFCR_ELx_ExTRE;
- if (drvdata->config.mode & ETM_MODE_EXCL_USER)
- trfcr &= ~TRFCR_ELx_E0TRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_HOST)
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ trfcr = etm4x_get_kern_user_filter(drvdata);
write_trfcr(trfcr);
+
+ /* Set filters for guests and pass to KVM */
+ if (drvdata->config.mode & ETM_MODE_EXCL_GUEST)
+ guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ guest_trfcr = etm4x_get_kern_user_filter(drvdata);
+
+ /* TRFCR_EL1 doesn't have CX so mask it out. */
+ guest_trfcr &= ~TRFCR_EL2_CX;
+ kvm_tracing_set_el1_configuration(guest_trfcr);
}
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
@@ -655,6 +684,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
if (attr->exclude_user)
config->mode = ETM_MODE_EXCL_USER;
+ if (attr->exclude_host)
+ config->mode |= ETM_MODE_EXCL_HOST;
+
+ if (attr->exclude_guest)
+ config->mode |= ETM_MODE_EXCL_GUEST;
+
/* Always start from the default config */
etm4_set_default_config(config);
@@ -1141,9 +1176,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
* tracing at the kernel EL and EL0, forcing to use the
* virtual time as the timestamp.
*/
- trfcr = (TRFCR_ELx_TS_VIRTUAL |
- TRFCR_ELx_ExTRE |
- TRFCR_ELx_E0TRE);
+ trfcr = (TRFCR_EL1_TS_VIRTUAL |
+ TRFCR_EL1_ExTRE |
+ TRFCR_EL1_E0TRE);
/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
if (is_kernel_in_hyp_mode())
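A worked sketch of the exclude-filter math added above. The bit values mirror TRFCR_EL1 (E0TRE is bit 0, ExTRE bit 1) but are defined locally so the example stands alone:

#include <stdio.h>

#define E0TRE 0x1UL	/* EL0 (user) trace enable */
#define ExTRE 0x2UL	/* EL1 (kernel) trace enable */

static unsigned long kern_user_filter(unsigned long trfcr,
				      int excl_kern, int excl_user)
{
	if (excl_kern)
		trfcr &= ~ExTRE;
	if (excl_user)
		trfcr &= ~E0TRE;
	return trfcr;
}

int main(void)
{
	/* base 0x3 (trace both ELs), exclude user -> 0x2: kernel only */
	printf("%#lx\n", kern_user_filter(0x3UL, 0, 1));
	return 0;
}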
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a9f19629f3f8..c767f8ae4cf1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2319,11 +2319,11 @@ static ssize_t ts_source_show(struct device *dev,
goto out;
}
- switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
- case TRFCR_ELx_TS_VIRTUAL:
- case TRFCR_ELx_TS_GUEST_PHYSICAL:
- case TRFCR_ELx_TS_PHYSICAL:
- val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
+ switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) {
+ case TRFCR_EL1_TS_VIRTUAL:
+ case TRFCR_EL1_TS_GUEST_PHYSICAL:
+ case TRFCR_EL1_TS_PHYSICAL:
+ val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
break;
default:
val = -1;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 9e9165f62e81..1119762b5cec 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -817,7 +817,7 @@ enum etm_impdef_type {
* @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
- u32 mode;
+ u64 mode;
u32 pe_sel;
u32 cfg;
u32 eventctrl0;
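Widening @mode to u64 above is what makes the new ETM_MODE_EXCL_HOST/GUEST flags (BIT(32)/BIT(33) in coresight-priv.h further down) representable at all; a minimal illustration of the truncation a u32 field would cause:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mode = 1ULL << 32;	/* ETM_MODE_EXCL_HOST-style flag */
	uint32_t truncated = (uint32_t)(1ULL << 32);

	/* prints 0x100000000 and 0: the flag vanishes in 32 bits */
	printf("%#llx %#x\n", (unsigned long long)mode, truncated);
	return 0;
}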
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 33efe1acbef7..8faf51469bb8 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -86,14 +86,14 @@ static int funnel_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
if (!rc)
first_enable = true;
}
if (!rc)
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -130,7 +130,7 @@ static void funnel_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 64e171eaad82..8192ba3279f0 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -243,6 +243,27 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn.dest_fwnode = fwnode_handle_get(rdev_fwnode);
conn.dest_port = rendpoint.port;
+ /*
+ * Get the firmware node of the filter source through the
+	 * reference. This can be used to filter the source when
+	 * building the path.
+ */
+ conn.filter_src_fwnode =
+ fwnode_find_reference(&ep->fwnode, "filter-source", 0);
+ if (IS_ERR(conn.filter_src_fwnode)) {
+ conn.filter_src_fwnode = NULL;
+ } else {
+ conn.filter_src_dev =
+ coresight_find_csdev_by_fwnode(conn.filter_src_fwnode);
+ if (conn.filter_src_dev &&
+ !coresight_is_device_source(conn.filter_src_dev)) {
+ dev_warn(dev, "port %d: Filter handle is not a trace source : %s\n",
+ conn.src_port, dev_name(&conn.filter_src_dev->dev));
+ conn.filter_src_dev = NULL;
+ conn.filter_src_fwnode = NULL;
+ }
+ }
+
new_conn = coresight_add_out_conn(dev, pdata, &conn);
if (IS_ERR_VALUE(new_conn)) {
fwnode_handle_put(conn.dest_fwnode);
@@ -796,6 +817,12 @@ int coresight_get_cpu(struct device *dev)
}
EXPORT_SYMBOL_GPL(coresight_get_cpu);
+int coresight_get_static_trace_id(struct device *dev, u32 *id)
+{
+ return fwnode_property_read_u32(dev_fwnode(dev), "arm,static-trace-id", id);
+}
+EXPORT_SYMBOL_GPL(coresight_get_static_trace_id);
+
struct coresight_platform_data *
coresight_get_platform_data(struct device *dev)
{
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 05f891ca6b5c..76403530f33e 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -42,6 +42,9 @@ extern const struct device_type coresight_dev_type[];
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+#define ETM_MODE_EXCL_HOST BIT(32)
+#define ETM_MODE_EXCL_GUEST BIT(33)
+
struct cs_pair_attribute {
struct device_attribute attr;
u32 lo_off;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 0fba87de6d1a..a1181c9048c0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -126,7 +126,7 @@ static int replicator_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&out->src_refcnt) == 0) {
+ if (out->src_refcnt == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
out->src_port);
@@ -134,7 +134,7 @@ static int replicator_enable(struct coresight_device *csdev,
first_enable = true;
}
if (!rc)
- atomic_inc(&out->src_refcnt);
+ out->src_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -180,7 +180,7 @@ static void replicator_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&out->src_refcnt) == 0) {
+ if (--out->src_refcnt == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
index 53840a2c41f2..303d71911870 100644
--- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
+++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
@@ -21,13 +21,4 @@ static inline void write_trfcr(u64 val)
isb();
}
-static inline u64 cpu_prohibit_trace(void)
-{
- u64 trfcr = read_trfcr();
-
- /* Prohibit tracing at EL0 & the kernel EL */
- write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
- /* Return the original value of the TRFCR */
- return trfcr;
-}
#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index bfca103f9f84..189a4abc2561 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -24,7 +24,7 @@ DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
static bool coresight_device_is_tpdm(struct coresight_device *csdev)
{
- return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return (coresight_is_device_source(csdev)) &&
(csdev->subtype.source_subtype ==
CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
}
@@ -110,6 +110,16 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
csdev->pdata->in_conns[i]->dest_port != inport)
continue;
+ /*
+ * If this port has a hardcoded filter, use the source
+ * device directly.
+ */
+ if (csdev->pdata->in_conns[i]->filter_src_fwnode) {
+ in = csdev->pdata->in_conns[i]->filter_src_dev;
+ if (!in)
+ continue;
+ }
+
if (coresight_device_is_tpdm(in)) {
if (drvdata->dsb_esize || drvdata->cmb_esize)
return -EEXIST;
@@ -124,7 +134,6 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
}
}
-
return rc;
}
@@ -190,10 +199,10 @@ static int tpda_enable(struct coresight_device *csdev,
int ret = 0;
spin_lock(&drvdata->spinlock);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
ret = __tpda_enable(drvdata, in->dest_port);
if (!ret) {
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
csdev->refcnt++;
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
}
@@ -223,7 +232,7 @@ static void tpda_disable(struct coresight_device *csdev,
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
__tpda_disable(drvdata, in->dest_port);
csdev->refcnt--;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index b7d99e91ab84..c38f9701665e 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -640,8 +640,7 @@ static ssize_t dsb_mode_store(struct device *dev,
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
- (val & ~TPDM_DSB_MODE_MASK))
+ if ((kstrtoul(buf, 0, &val)) || (val & ~TPDM_DSB_MODE_MASK))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1308,8 +1307,8 @@ static void tpdm_remove(struct amba_device *adev)
*/
static struct amba_id tpdm_ids[] = {
{
- .id = 0x000f0e00,
- .mask = 0x000fff00,
+ .id = 0x001f0e00,
+ .mask = 0x00ffff00,
},
{ 0, 0, NULL },
};
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index d98e12cb30ec..378af743be45 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -12,6 +12,12 @@
#include "coresight-trace-id.h"
+enum trace_id_flags {
+ TRACE_ID_ANY = 0x0,
+ TRACE_ID_PREFER_ODD = 0x1,
+ TRACE_ID_REQ_STATIC = 0x2,
+};
+
/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
@@ -74,21 +80,25 @@ static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
* Otherwise allocate next available ID.
*/
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
- int preferred_id, bool prefer_odd_id)
+ int preferred_id, unsigned int flags)
{
int id = 0;
/* for backwards compatibility, cpu IDs may use preferred value */
- if (IS_VALID_CS_TRACE_ID(preferred_id) &&
- !test_bit(preferred_id, id_map->used_ids)) {
- id = preferred_id;
- goto trace_id_allocated;
- } else if (prefer_odd_id) {
+ if (IS_VALID_CS_TRACE_ID(preferred_id)) {
+ if (!test_bit(preferred_id, id_map->used_ids)) {
+ id = preferred_id;
+ goto trace_id_allocated;
+ } else if (flags & TRACE_ID_REQ_STATIC)
+ return -EBUSY;
+ } else if (flags & TRACE_ID_PREFER_ODD) {
/* may use odd ids to avoid preferred legacy cpu IDs */
id = coresight_trace_id_find_odd_id(id_map);
if (id)
goto trace_id_allocated;
- }
+ } else if (!IS_VALID_CS_TRACE_ID(preferred_id) &&
+ (flags & TRACE_ID_REQ_STATIC))
+ return -EINVAL;
/*
* skip reserved bit 0, look at bitmap length of
@@ -153,7 +163,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
*/
id = coresight_trace_id_alloc_new_id(id_map,
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
- false);
+ TRACE_ID_ANY);
if (!IS_VALID_CS_TRACE_ID(id))
goto get_cpu_id_out_unlock;
@@ -188,14 +198,14 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
+static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map,
+ int preferred_id, unsigned int traceid_flags)
{
unsigned long flags;
int id;
spin_lock_irqsave(&id_map->lock, flags);
- /* prefer odd IDs for system components to avoid legacy CPU IDS */
- id = coresight_trace_id_alloc_new_id(id_map, 0, true);
+ id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
@@ -255,10 +265,19 @@ EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
int coresight_trace_id_get_system_id(void)
{
- return coresight_trace_id_map_get_system_id(&id_map_default);
+	/* prefer odd IDs for system components to avoid legacy CPU IDs */
+ return coresight_trace_id_map_get_system_id(&id_map_default, 0,
+ TRACE_ID_PREFER_ODD);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
+int coresight_trace_id_get_static_system_id(int trace_id)
+{
+ return coresight_trace_id_map_get_system_id(&id_map_default,
+ trace_id, TRACE_ID_REQ_STATIC);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id);
+
void coresight_trace_id_put_system_id(int id)
{
coresight_trace_id_map_put_system_id(&id_map_default, id);
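A condensed sketch of the outcomes the new flags select in coresight_trace_id_alloc_new_id() above; is_valid()/is_free()/next_free() stand in for IS_VALID_CS_TRACE_ID() and the real bitmap helpers:

#include <errno.h>
#include <stdbool.h>

enum { ID_ANY = 0x0, ID_PREFER_ODD = 0x1, ID_REQ_STATIC = 0x2 };

static int alloc_id_sketch(int preferred, unsigned int flags,
			   bool (*is_valid)(int), bool (*is_free)(int),
			   int (*next_free)(bool odd_only))
{
	if (is_valid(preferred)) {
		if (is_free(preferred))
			return preferred;
		if (flags & ID_REQ_STATIC)
			return -EBUSY;	/* static request for a taken ID */
	} else if (flags & ID_PREFER_ODD) {
		int id = next_free(true);	/* odd IDs dodge legacy CPU IDs */

		if (id > 0)
			return id;
	} else if (flags & ID_REQ_STATIC) {
		return -EINVAL;	/* static request needs a valid ID */
	}
	return next_free(false);	/* otherwise: next free ID */
}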
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 9aae50a553ca..db68e1ec56b6 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -117,6 +117,15 @@ int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *i
int coresight_trace_id_get_system_id(void);
/**
+ * Allocate a CoreSight static trace ID for a system component.
+ *
+ * Used to allocate static IDs for system trace sources such as the dummy source.
+ *
+ * return: Trace ID, or a negative error (-EINVAL or -EBUSY) on failure.
+ */
+int coresight_trace_id_get_static_system_id(int id);
+
+/**
* Release an allocated system trace ID.
*
* Unconditionally release a trace ID allocated to a system component.
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 919804b12a67..fff67aac8418 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include "coresight-self-hosted-trace.h"
@@ -221,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
*/
trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_enable_trbe();
/* Synchronize the TRBE enable event */
isb();
@@ -239,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
*/
trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_disable_trbe();
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
@@ -253,8 +256,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
- trbe_drain_and_disable_local(cpudata);
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
@@ -1110,6 +1113,16 @@ static bool is_perf_trbe(struct perf_output_handle *handle)
return true;
}
+static u64 cpu_prohibit_trace(void)
+{
+ u64 trfcr = read_trfcr();
+
+ /* Prohibit tracing at EL0 & the kernel EL */
+ write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE));
+ /* Return the original value of the TRFCR */
+ return trfcr;
+}
+
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
struct perf_output_handle **handle_ptr = dev;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d72993355473..47d9e6c3bac0 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -857,8 +857,9 @@ static irqreturn_t intel_th_irq(int irq, void *data)
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
+ * @drvdata: data private to the driver
* @devres: resources indexed by th_mmio_idx
- * @irq: irq number
+ * @ndevres: number of entries in the @devres resources
*/
struct intel_th *
intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 66123d684ac9..bf99d79a4192 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -105,23 +105,32 @@ struct msc_iter {
/**
* struct msc - MSC device representation
- * @reg_base: register window base address
+ * @reg_base: register window base address for the entire MSU
+ * @msu_base: register window base address for this MSC
* @thdev: intel_th_device pointer
* @mbuf: MSU buffer, if assigned
- * @mbuf_priv MSU buffer's private data, if @mbuf
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
+ * @work: a work to stop the trace when the buffer is full
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
+ * @switch_on_unlock: window to switch to when it becomes available
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
* @base: buffer's base pointer
* @base_addr: buffer's base address
+ * @orig_addr: MSC0 buffer's base address
+ * @orig_sz: MSC0 buffer's size
* @user_count: number of users of the buffer
* @mmap_count: number of mappings
* @buf_mutex: mutex to serialize access to buffer-related bits
+ * @iter_list: list of open file descriptor iterators
+ * @stop_on_full: stop the trace if the current window is full
* @enabled: MSC is enabled
* @wrap: wrapping is enabled
+ * @do_irq: IRQ resource is available, handle interrupts
+ * @multi_is_broken: multiblock mode is broken on this device (per PCI drvdata)
* @mode: MSC operating mode
* @burst_len: write burst length
* @index: number of this MSC in the MSU
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index e9d8d28e055f..e3def163d5cf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -335,6 +335,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Arrow Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-P/U */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index e9496fe97baa..495eb1dc8ac5 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -81,10 +81,8 @@ static int stm_heartbeat_init(void)
stm_heartbeat[i].data.type = STM_USER;
stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
- hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- stm_heartbeat[i].hrtimer.function =
- stm_heartbeat_hrtimer_handler;
+ hrtimer_setup(&stm_heartbeat[i].hrtimer, stm_heartbeat_hrtimer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
if (ret)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ceb3ecdf884b..fc438f445771 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -503,7 +503,7 @@ config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
depends on ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || \
BMIPS_GENERIC || COMPILE_TEST
- default y
+ default ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || BMIPS_GENERIC
help
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Settop/DSL SoCs.
@@ -756,6 +756,7 @@ config I2C_IMX
config I2C_IMX_LPI2C
tristate "IMX Low Power I2C interface"
depends on ARCH_MXC || COMPILE_TEST
+ select I2C_SLAVE
help
Say Y here if you want to use the Low Power IIC bus controller
on the Freescale i.MX processors.
@@ -910,7 +911,7 @@ config I2C_MXS
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
help
If you say yes to this option, support will be included for the
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 544c94e86b89..1eac35838040 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
"ALI1535 not detected, module not inserted.\n");
@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
"SMBus ALI1535 adapter at %04x", ali1535_offset);
- return i2c_add_adapter(&ali1535_adapter);
+ ret = i2c_add_adapter(&ali1535_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+ return ret;
}
static void ali1535_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 4761c7208102..418d11266671 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
"ALI15X3 not detected, module not inserted.\n");
@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
- return i2c_add_adapter(&ali15x3_adapter);
+ ret = i2c_add_adapter(&ali15x3_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
+ return ret;
}
static void ali15x3_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 7512614bf4b7..93ebec162c6d 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}
+ iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 143165300949..ef7370d3dbea 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -327,13 +327,11 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata);
if (rc) {
pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc);
- goto free_irq_vectors;
+ goto err_dma_mask;
}
return rc;
-free_irq_vectors:
- free_irq(privdata->dev_irq, privdata);
err_dma_mask:
pci_clear_master(pci_dev);
err_pci_enable:
@@ -376,7 +374,6 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
- free_irq(privdata->dev_irq, privdata);
pci_clear_master(pci_dev);
amd_mp2_clear_reg(privdata);
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index fa0d5a2c3732..3621c02f1cba 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -211,7 +211,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE)
- outw_p(data->word, SMB_HOST_DATA); /* TODO: endian???? */
+ outw_p(data->word, SMB_HOST_DATA);
size = AMD756_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -256,7 +256,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
data->byte = inw_p(SMB_HOST_DATA);
break;
case AMD756_WORD_DATA:
- data->word = inw_p(SMB_HOST_DATA); /* TODO: endian???? */
+ data->word = inw_p(SMB_HOST_DATA);
break;
case AMD756_BLOCK_DATA:
data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 71dc0a6688b7..6a909d339681 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -11,23 +11,23 @@
*
* ----------------------------------------------------------------------------
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
+
#include <linux/clk.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/platform_data/i2c-davinci.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
/* ----- global defines ----------------------------------------------- */
@@ -117,6 +117,8 @@
/* timeout for pm runtime autosuspend */
#define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */
+#define DAVINCI_I2C_DEFAULT_BUS_FREQ 100
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -132,13 +134,10 @@ struct davinci_i2c_dev {
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
- struct davinci_i2c_platform_data *pdata;
-};
-
-/* default platform data to use if not supplied in the platform_device */
-static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
- .bus_freq = 100,
- .bus_delay = 0,
+ /* standard bus frequency (kHz) */
+ unsigned int bus_freq;
+ /* Chip has a ICPFUNC register */
+ bool has_pfunc;
};
static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
@@ -168,14 +167,12 @@ static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u16 psc;
u32 clk;
u32 d;
u32 clkh;
u32 clkl;
u32 input_clock = clk_get_rate(dev->clk);
- struct device_node *of_node = dev->dev->of_node;
/* NOTE: I2C Clock divider programming info
* As per I2C specs the following formulas provide prescaler
@@ -209,19 +206,19 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
psc++; /* better to run under spec than over */
d = (psc >= 2) ? 5 : 7 - psc;
- if (of_node && of_device_is_compatible(of_node, "ti,keystone-i2c"))
+ if (device_is_compatible(dev->dev, "ti,keystone-i2c"))
d = 6;
- clk = ((input_clock / (psc + 1)) / (pdata->bus_freq * 1000));
+ clk = ((input_clock / (psc + 1)) / (dev->bus_freq * 1000));
/* Avoid driving the bus too fast because of rounding errors above */
- if (input_clock / (psc + 1) / clk > pdata->bus_freq * 1000)
+ if (input_clock / (psc + 1) / clk > dev->bus_freq * 1000)
clk++;
/*
* According to I2C-BUS Spec 2.1, in FAST-MODE LOW period should be at
* least 1.3uS, which is not the case with a 50% duty cycle. Driving the
* HIGH to LOW ratio as 1 to 2 is safer.
*/
- if (pdata->bus_freq > 100)
+ if (dev->bus_freq > 100)
clkl = (clk << 1) / 3;
else
clkl = (clk >> 1);
@@ -255,8 +252,6 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
*/
static int i2c_davinci_init(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
-
/* put I2C into reset */
davinci_i2c_reset_ctrl(dev, 0);
@@ -274,8 +269,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG));
dev_dbg(dev->dev, "CLKH = %d\n",
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG));
- dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n",
- pdata->bus_freq, pdata->bus_delay);
+ dev_dbg(dev->dev, "bus_freq = %dkHz\n", dev->bus_freq);
/* Take the I2C module out of reset: */
@@ -309,12 +303,6 @@ static void davinci_i2c_unprepare_recovery(struct i2c_adapter *adap)
i2c_davinci_init(dev);
}
-static struct i2c_bus_recovery_info davinci_i2c_gpio_recovery_info = {
- .recover_bus = i2c_generic_scl_recovery,
- .prepare_recovery = davinci_i2c_prepare_recovery,
- .unprepare_recovery = davinci_i2c_unprepare_recovery,
-};
-
static void davinci_i2c_set_scl(struct i2c_adapter *adap, int val)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -414,7 +402,6 @@ static int
i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u32 flag;
u16 w;
unsigned long time_left;
@@ -424,10 +411,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
return -EADDRNOTAVAIL;
}
- /* Introduce a delay, required for some boards (e.g Davinci EVM) */
- if (pdata->bus_delay)
- udelay(pdata->bus_delay);
-
/* set the target address */
davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr);
@@ -758,8 +741,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev;
struct i2c_adapter *adap;
- struct i2c_bus_recovery_info *rinfo;
int r, irq;
+ u32 prop;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -773,29 +756,15 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = irq;
- dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
- if (!dev->pdata && pdev->dev.of_node) {
- u32 prop;
-
- dev->pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct davinci_i2c_platform_data), GFP_KERNEL);
- if (!dev->pdata)
- return -ENOMEM;
-
- memcpy(dev->pdata, &davinci_i2c_platform_data_default,
- sizeof(struct davinci_i2c_platform_data));
- if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &prop))
- dev->pdata->bus_freq = prop / 1000;
-
- dev->pdata->has_pfunc =
- of_property_read_bool(pdev->dev.of_node,
- "ti,has-pfunc");
- } else if (!dev->pdata) {
- dev->pdata = &davinci_i2c_platform_data_default;
- }
+ r = device_property_read_u32(&pdev->dev, "clock-frequency", &prop);
+ if (r)
+ prop = DAVINCI_I2C_DEFAULT_BUS_FREQ;
+
+ dev->bus_freq = prop / 1000;
+
+ dev->has_pfunc = device_property_present(&pdev->dev, "ti,has-pfunc");
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
@@ -841,25 +810,10 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.of_node = dev_of_node(&pdev->dev);
- if (dev->pdata->has_pfunc)
+ if (dev->has_pfunc)
adap->bus_recovery_info = &davinci_i2c_scl_recovery_info;
- else if (dev->pdata->gpio_recovery) {
- rinfo = &davinci_i2c_gpio_recovery_info;
- adap->bus_recovery_info = rinfo;
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
- if (IS_ERR(rinfo->scl_gpiod)) {
- r = PTR_ERR(rinfo->scl_gpiod);
- goto err_unuse_clocks;
- }
- rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- if (IS_ERR(rinfo->sda_gpiod)) {
- r = PTR_ERR(rinfo->sda_gpiod);
- goto err_unuse_clocks;
- }
- }
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
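The davinci conversion above replaces board platform data with generic
device properties. A minimal sketch of the same lookup pattern (hypothetical
foo_ names; only the standard "clock-frequency" property is assumed):

	#include <linux/property.h>

	#define FOO_I2C_DEFAULT_BUS_FREQ_HZ	100000	/* assumed default */

	/* Read the bus frequency in kHz, falling back to a default */
	static unsigned int foo_i2c_bus_freq_khz(struct device *dev)
	{
		u32 hz;

		/* device_property_*() works for both DT and ACPI firmware nodes */
		if (device_property_read_u32(dev, "clock-frequency", &hz))
			hz = FOO_I2C_DEFAULT_BUS_FREQ_HZ;

		return hz / 1000;
	}

One firmware-agnostic call replaces the platform-data and
of_property_read_u32() branches that the probe hunk above removes.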
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 183a35038eef..8eb7bd640f8d 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW_COMMON"
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -29,8 +32,6 @@
#include <linux/types.h>
#include <linux/units.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW_COMMON"
-
#include "i2c-designware-core.h"
static const char *const abort_sources[] = {
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index c8cbe5b1aeb1..2569bf1a72e0 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -22,8 +25,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
-
#include "i2c-designware-core.h"
#define AMD_TIMEOUT_MIN_US 25
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index dc2b788eac5b..5cd4a5f7a472 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -6,6 +6,9 @@
*
* Copyright (C) 2016 Synopsys Inc.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -16,8 +19,6 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
-
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
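All three Designware hunks hoist DEFAULT_SYMBOL_NAMESPACE above the include
block: <linux/export.h> tests the macro with #ifdef at inclusion time, so the
definition must already be visible when that header is (indirectly) pulled
in. A sketch of the pattern with a hypothetical export:

	/* Must precede the includes so <linux/export.h> sees it */
	#define DEFAULT_SYMBOL_NAMESPACE "FOO_NS"

	#include <linux/export.h>
	#include <linux/module.h>

	int foo_do_thing(void)
	{
		return 0;
	}
	/* Lands in the "FOO_NS" namespace via DEFAULT_SYMBOL_NAMESPACE */
	EXPORT_SYMBOL_GPL(foo_do_thing);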
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e330015087ab..6cdd957ea7e4 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -168,6 +168,7 @@ enum i2c_type_exynos {
I2C_TYPE_EXYNOS5,
I2C_TYPE_EXYNOS7,
I2C_TYPE_EXYNOSAUTOV9,
+ I2C_TYPE_EXYNOS8895,
};
struct exynos5_i2c {
@@ -240,6 +241,11 @@ static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = {
.hw = I2C_TYPE_EXYNOSAUTOV9,
};
+static const struct exynos_hsi2c_variant exynos8895_hsi2c_data = {
+ .fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOS8895,
+};
+
static const struct of_device_id exynos5_i2c_match[] = {
{
.compatible = "samsung,exynos5-hsi2c",
@@ -256,6 +262,9 @@ static const struct of_device_id exynos5_i2c_match[] = {
}, {
.compatible = "samsung,exynosautov9-hsi2c",
.data = &exynosautov9_hsi2c_data
+ }, {
+ .compatible = "samsung,exynos8895-hsi2c",
+ .data = &exynos8895_hsi2c_data
}, {},
};
MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
@@ -331,6 +340,14 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
* clk_cycle := TSCLK_L + TSCLK_H
* temp := (CLK_DIV + 1) * (clk_cycle + 2)
*
+ * In case of HSI2C controllers in Exynos8895
+ * FPCLK / FI2C =
+ * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) +
+ * 2 * ((FLT_CYCLE + 3) - (FLT_CYCLE + 3) % (CLK_DIV + 1))
+ *
+ * clk_cycle := TSCLK_L + TSCLK_H
+ * temp := (FPCLK / FI2C) - (FLT_CYCLE + 3) * 2
+ *
* Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510
*
* To split SCL clock into low, high periods appropriately, one
@@ -352,11 +369,19 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
- temp = clkin / op_clk - 8 - t_ftl_cycle;
- if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
- temp -= t_ftl_cycle;
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ temp = clkin / op_clk - (t_ftl_cycle + 3) * 2;
+ else if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
+ temp = clkin / op_clk - 8 - t_ftl_cycle;
+ else
+ temp = clkin / op_clk - 8 - (t_ftl_cycle * 2);
div = temp / 512;
- clk_cycle = temp / (div + 1) - 2;
+
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ clk_cycle = (temp + ((t_ftl_cycle + 3) % (div + 1)) * 2) /
+ (div + 1) - 2;
+ else
+ clk_cycle = temp / (div + 1) - 2;
if (temp < 4 || div >= 256 || clk_cycle < 2) {
dev_err(i2c->dev, "%s clock set-up failed\n",
hs_timings ? "HS" : "FS");
@@ -491,6 +516,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
switch (i2c->variant->hw) {
case I2C_TYPE_EXYNOSAUTOV9:
fallthrough;
+ case I2C_TYPE_EXYNOS8895:
+ fallthrough;
case I2C_TYPE_EXYNOS7:
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
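A worked example of the new Exynos8895 branch, with assumed values (clkin =
100 MHz, op_clk = 400 kHz, FLT_CYCLE = 2; none of these come from the patch):

	temp      = 100000000 / 400000 - (2 + 3) * 2              = 250 - 10 = 240
	div       = 240 / 512                                     = 0
	clk_cycle = (240 + ((2 + 3) % (0 + 1)) * 2) / (0 + 1) - 2 = 238

which passes the sanity check below (temp >= 4, div < 256, clk_cycle >= 2).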
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 75dab01d43a7..171d29d2770e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1162,127 +1162,6 @@ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
}
}
-/* NOTE: Keep this list in sync with drivers/platform/x86/dell-smo8800.c */
-static const char *const acpi_smo8800_ids[] = {
- "SMO8800",
- "SMO8801",
- "SMO8810",
- "SMO8811",
- "SMO8820",
- "SMO8821",
- "SMO8830",
- "SMO8831",
-};
-
-static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
- u32 nesting_level,
- void *context,
- void **return_value)
-{
- struct acpi_device_info *info;
- acpi_status status;
- char *hid;
- int i;
-
- status = acpi_get_object_info(obj_handle, &info);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- if (!(info->valid & ACPI_VALID_HID))
- goto smo88xx_not_found;
-
- hid = info->hardware_id.string;
- if (!hid)
- goto smo88xx_not_found;
-
- i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
- if (i < 0)
- goto smo88xx_not_found;
-
- kfree(info);
-
- *return_value = NULL;
- return AE_CTRL_TERMINATE;
-
-smo88xx_not_found:
- kfree(info);
- return AE_OK;
-}
-
-static bool is_dell_system_with_lis3lv02d(void)
-{
- void *err = ERR_PTR(-ENOENT);
-
- if (!dmi_match(DMI_SYS_VENDOR, "Dell Inc."))
- return false;
-
- /*
- * Check that ACPI device SMO88xx is present and is functioning.
- * Function acpi_get_devices() already filters all ACPI devices
- * which are not present or are not functioning.
- * ACPI device SMO88xx represents our ST microelectronics lis3lv02d
- * accelerometer but unfortunately ACPI does not provide any other
- * information (like I2C address).
- */
- acpi_get_devices(NULL, check_acpi_smo88xx_device, NULL, &err);
-
- return !IS_ERR(err);
-}
-
-/*
- * Accelerometer's I2C address is not specified in DMI nor ACPI,
- * so it is needed to define mapping table based on DMI product names.
- */
-static const struct {
- const char *dmi_product_name;
- unsigned short i2c_addr;
-} dell_lis3lv02d_devices[] = {
- /*
- * Dell platform team told us that these Latitude devices have
- * ST microelectronics accelerometer at I2C address 0x29.
- */
- { "Latitude E5250", 0x29 },
- { "Latitude E5450", 0x29 },
- { "Latitude E5550", 0x29 },
- { "Latitude E6440", 0x29 },
- { "Latitude E6440 ATG", 0x29 },
- { "Latitude E6540", 0x29 },
- /*
- * Additional individual entries were added after verification.
- */
- { "Latitude 5480", 0x29 },
- { "Precision 3540", 0x29 },
- { "Vostro V131", 0x1d },
- { "Vostro 5568", 0x29 },
- { "XPS 15 7590", 0x29 },
-};
-
-static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
-{
- struct i2c_board_info info;
- const char *dmi_product_name;
- int i;
-
- dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
- for (i = 0; i < ARRAY_SIZE(dell_lis3lv02d_devices); ++i) {
- if (strcmp(dmi_product_name,
- dell_lis3lv02d_devices[i].dmi_product_name) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(dell_lis3lv02d_devices)) {
- dev_warn(&priv->pci_dev->dev,
- "Accelerometer lis3lv02d is present on SMBus but its"
- " address is unknown, skipping registration\n");
- return;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
- i2c_new_client_device(&priv->adapter, &info);
-}
-
/* Register optional targets */
static void i801_probe_optional_targets(struct i801_priv *priv)
{
@@ -1302,9 +1181,6 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
if (dmi_name_in_vendors("FUJITSU"))
dmi_walk(dmi_check_onboard_devices, &priv->adapter);
- if (is_dell_system_with_lis3lv02d())
- register_dell_lis3lv02d_i2c_device(priv);
-
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
@@ -1682,13 +1558,16 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!(priv->features & FEATURE_BLOCK_BUFFER))
priv->features &= ~FEATURE_BLOCK_PROC;
- err = pcim_enable_device(dev);
+ /*
+ * Do not call pcim_enable_device(), because the device has to remain
+ * enabled on driver detach. See i801_remove() for the reasoning.
+ */
+ err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
err);
return err;
}
- pcim_pin_device(dev);
/* Determine the address of the SMBus area */
priv->smba = pci_resource_start(dev, SMBBAR);
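The probe change above deliberately avoids the managed pcim_enable_device()
so the SMBus device stays enabled across a driver unbind. A minimal sketch of
the unmanaged pattern (hypothetical foo_ PCI driver, error handling trimmed):

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		/* Unmanaged enable: nothing disables the device on unbind */
		err = pci_enable_device(pdev);
		if (err)
			return err;

		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		/* Intentionally no pci_disable_device(): keep the device enabled */
	}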
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8adf2963d764..0d4b3935e687 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -29,6 +31,7 @@
#define LPI2C_MCR	0x10	/* i2c control register */
#define LPI2C_MSR 0x14 /* i2c status register */
#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MDER 0x1C /* i2c DMA enable */
#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
@@ -40,6 +43,20 @@
#define LPI2C_MTDR 0x60 /* i2c master TX data register */
#define LPI2C_MRDR 0x70 /* i2c master RX data register */
+#define LPI2C_SCR 0x110 /* i2c target control register */
+#define LPI2C_SSR 0x114 /* i2c target status register */
+#define LPI2C_SIER 0x118 /* i2c target interrupt enable */
+#define LPI2C_SDER 0x11C /* i2c target DMA enable */
+#define LPI2C_SCFGR0 0x120 /* i2c target configuration */
+#define LPI2C_SCFGR1 0x124 /* i2c target configuration */
+#define LPI2C_SCFGR2 0x128 /* i2c target configuration */
+#define LPI2C_SAMR 0x140 /* i2c target address match */
+#define LPI2C_SASR 0x150 /* i2c target address status */
+#define LPI2C_STAR 0x154 /* i2c target transmit ACK */
+#define LPI2C_STDR 0x160 /* i2c target transmit data */
+#define LPI2C_SRDR 0x170 /* i2c target receive data */
+#define LPI2C_SRDROR 0x178 /* i2c target receive data read only */
+
/* i2c command */
#define TRAN_DATA 0X00
#define RECV_DATA 0X01
@@ -70,11 +87,50 @@
#define MCFGR1_AUTOSTOP BIT(8)
#define MCFGR1_IGNACK BIT(9)
#define MRDR_RXEMPTY BIT(14)
+#define MDER_TDDE BIT(0)
+#define MDER_RDDE BIT(1)
+
+#define SCR_SEN BIT(0)
+#define SCR_RST BIT(1)
+#define SCR_FILTEN BIT(4)
+#define SCR_RTF BIT(8)
+#define SCR_RRF BIT(9)
+#define SSR_TDF BIT(0)
+#define SSR_RDF BIT(1)
+#define SSR_AVF BIT(2)
+#define SSR_TAF BIT(3)
+#define SSR_RSF BIT(8)
+#define SSR_SDF BIT(9)
+#define SSR_BEF BIT(10)
+#define SSR_FEF BIT(11)
+#define SSR_SBF BIT(24)
+#define SSR_BBF BIT(25)
+#define SSR_CLEAR_BITS (SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
+#define SIER_TDIE BIT(0)
+#define SIER_RDIE BIT(1)
+#define SIER_AVIE BIT(2)
+#define SIER_TAIE BIT(3)
+#define SIER_RSIE BIT(8)
+#define SIER_SDIE BIT(9)
+#define SIER_BEIE BIT(10)
+#define SIER_FEIE BIT(11)
+#define SIER_AM0F BIT(12)
+#define SCFGR1_RXSTALL BIT(1)
+#define SCFGR1_TXDSTALL BIT(2)
+#define SCFGR2_FILTSDA_SHIFT 24
+#define SCFGR2_FILTSCL_SHIFT 16
+#define SCFGR2_CLKHOLD(x) (x)
+#define SCFGR2_FILTSDA(x) ((x) << SCFGR2_FILTSDA_SHIFT)
+#define SCFGR2_FILTSCL(x) ((x) << SCFGR2_FILTSCL_SHIFT)
+#define SASR_READ_REQ 0x1
+#define SLAVE_INT_FLAG (SIER_TDIE | SIER_RDIE | SIER_AVIE | \
+ SIER_SDIE | SIER_BEIE)
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
#define I2C_PM_TIMEOUT 10 /* ms */
+#define I2C_DMA_THRESHOLD 8 /* bytes */
enum lpi2c_imx_mode {
STANDARD, /* 100+Kbps */
@@ -91,6 +147,24 @@ enum lpi2c_imx_pincfg {
FOUR_PIN_PP,
};
+struct lpi2c_imx_dma {
+ bool using_pio_mode;
+ u8 rx_cmd_buf_len;
+ u8 *dma_buf;
+ u16 *rx_cmd_buf;
+ unsigned int dma_len;
+ unsigned int tx_burst_num;
+ unsigned int rx_burst_num;
+ unsigned long dma_msg_flag;
+ resource_size_t phy_addr;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_data_dir;
+ enum dma_transfer_direction dma_transfer_dir;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+};
+
struct lpi2c_imx_struct {
struct i2c_adapter adapter;
int num_clks;
@@ -108,6 +182,9 @@ struct lpi2c_imx_struct {
unsigned int rxfifosize;
enum lpi2c_imx_mode mode;
struct i2c_bus_recovery_info rinfo;
+ bool can_use_dma;
+ struct lpi2c_imx_dma *dma;
+ struct i2c_client *target;
};
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
@@ -305,7 +382,7 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
return 0;
}
-static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
unsigned long time_left;
@@ -451,6 +528,425 @@ static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
}
+static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
+{
+ if (!lpi2c_imx->can_use_dma)
+ return false;
+
+ /*
+	 * When the data length is below I2C_DMA_THRESHOLD, use PIO (CPU)
+	 * mode directly: for such short transfers the DMA setup overhead
+	 * would only hurt performance.
+ */
+ return !(msg->len < I2C_DMA_THRESHOLD);
+}
+
+static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ reinit_completion(&lpi2c_imx->complete);
+
+ if (msg->flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, msg);
+ else
+ lpi2c_imx_write(lpi2c_imx, msg);
+
+ return lpi2c_imx_pio_msg_complete(lpi2c_imx);
+}
+
+static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time = 0;
+	unsigned long time;
+ time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;
+
+	/* Add an extra margin for scheduler-related latency */
+ time += 1;
+
+	/* Scale the calculated time up generously before converting to jiffies */
+ return msecs_to_jiffies(time * MSEC_PER_SEC);
+}
+
+static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ u16 rx_remain = dma->dma_len;
+ int cmd_num;
+ u16 temp;
+
+ /*
+	 * Calculate how many RX command words the DMA TX channel has to
+	 * write into the command register, based on the I2C message length,
+	 * and build the RX command word buffer.
+ */
+ cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
+ dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
+ dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
+
+ if (!dma->rx_cmd_buf) {
+ dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
+ return -ENOMEM;
+ }
+
+	for (int i = 0; i < cmd_num; i++) {
+ temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
+ temp |= (RECV_DATA << 8);
+ rx_remain -= CHUNK_DATA;
+ dma->rx_cmd_buf[i] = temp;
+ }
+
+ return 0;
+}
+
+static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time_left, time;
+
+ time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
+ time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
+ if (time_left == 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
+{
+ struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
+ ? dma->chan_rx : dma->chan_tx;
+
+ dma_unmap_single(chan->device->dev, dma->dma_addr,
+ dma->dma_len, dma->dma_data_dir);
+
+ dma->dma_data_dir = DMA_NONE;
+}
+
+static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
+{
+ dmaengine_terminate_sync(dma->chan_tx);
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+}
+
+static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
+{
+ if (dma->dma_data_dir == DMA_FROM_DEVICE)
+ dmaengine_terminate_sync(dma->chan_rx);
+ else if (dma->dma_data_dir == DMA_TO_DEVICE)
+ dmaengine_terminate_sync(dma->chan_tx);
+
+ lpi2c_dma_unmap(dma);
+}
+
+static void lpi2c_dma_callback(void *data)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;
+
+ complete(&lpi2c_imx->complete);
+}
+
+static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct dma_async_tx_descriptor *rx_cmd_desc;
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_chan *txchan = dma->chan_tx;
+ dma_cookie_t cookie;
+
+ dma->dma_tx_addr = dma_map_single(txchan->device->dev,
+ dma->rx_cmd_buf, dma->rx_cmd_buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rx_cmd_desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ cookie = dmaengine_submit(rx_cmd_desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ dma_async_issue_pending(txchan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ return -EINVAL;
+
+submit_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ dmaengine_desc_free(rx_cmd_desc);
+ return -EINVAL;
+}
+
+static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ chan = dma->chan_rx;
+ dma->dma_data_dir = DMA_FROM_DEVICE;
+ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = dma->chan_tx;
+ dma->dma_data_dir = DMA_TO_DEVICE;
+ dma->dma_transfer_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma->dma_addr = dma_map_single(chan->device->dev,
+ dma->dma_buf, dma->dma_len, dma->dma_data_dir);
+ if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
+ dma->dma_len, dma->dma_transfer_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ reinit_completion(&lpi2c_imx->complete);
+ desc->callback = lpi2c_dma_callback;
+ desc->callback_param = lpi2c_imx;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ /* Can't switch to PIO mode when DMA have started transfer */
+ dma->using_pio_mode = false;
+
+ dma_async_issue_pending(chan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ lpi2c_dma_unmap(dma);
+ return -EINVAL;
+
+submit_err_exit:
+ lpi2c_dma_unmap(dma);
+ dmaengine_desc_free(desc);
+ return -EINVAL;
+}
+
+static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = fifosize / 2; i > 0; i--)
+ if (!(len % i))
+ break;
+
+ return i;
+}
+
+/*
+ * For the highest DMA efficiency, the TX/RX burst numbers should be
+ * calculated according to the FIFO depth.
+ */
+static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ unsigned int cmd_num;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /*
+		 * One RX command word can trigger the DMA to receive at most
+		 * 256 bytes, so the number of RX command words has to be
+		 * calculated from the data length.
+ */
+ cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ cmd_num);
+ dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
+ dma->dma_len);
+ } else {
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ dma->dma_len);
+ }
+}
+
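A worked example of the burst-size search above (FIFO depths assumed, not
taken from the patch): with a 16-word TX FIFO,
lpi2c_imx_find_max_burst_num(16, 24) starts at i = 16 / 2 = 8 and returns 8
immediately, since 24 % 8 == 0. For a 26-byte message it steps down to 2, the
largest divisor of 26 that does not exceed half the FIFO depth.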
+static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_slave_config rx = {}, tx = {};
+ int ret;
+
+ lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+
+ rx.src_addr = dma->phy_addr + LPI2C_MRDR;
+ rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ rx.src_maxburst = dma->rx_burst_num;
+ rx.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &rx);
+ if (ret < 0)
+ return ret;
+ } else {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ /*
+	 * The TX interrupt triggers when the number of words in the
+	 * transmit FIFO is less than or equal to the TX watermark, and the
+	 * RX interrupt triggers when the number of words in the receive
+	 * FIFO is greater than the RX watermark. To trigger the DMA
+	 * requests, the TX watermark must therefore be set equal to the
+	 * DMA TX burst number, while the RX watermark must be set below
+	 * the DMA RX burst number.
+ */
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /* Set I2C TX/RX watermark */
+ writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
+ lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX/RX function */
+ writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
+ } else {
+ /* Set I2C TX watermark */
+ writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX function */
+ writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
+ }
+
+	/* Enable NACK detection */
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
+}
+
+/*
+ * When the LPI2C is in TX DMA mode, one DMA TX channel is enough to
+ * write the data words into the TXFIFO, but RX DMA mode is different.
+ *
+ * The LPI2C MTDR register combines a command field and a transmit data
+ * field: bits 8-10 hold the command and bits 0-7 hold the transmit
+ * data. When the LPI2C master needs to read data, a receive command
+ * word is written to the TXFIFO, with RECV_DATA in the command field
+ * and the number of bytes to read, minus one, in the data field; the
+ * controller then receives (DATA[7:0] + 1) bytes. When more than 256
+ * bytes have to be read, such a receive command word must be written
+ * to the TXFIFO multiple times.
+ *
+ * So in RX DMA mode the TX channel must also be configured, to send
+ * the RX command words, and those command words must be set up before
+ * the transfer starts.
+ */
+static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ int ret;
+
+	/* If DMA setup fails before the transfer starts, PIO mode can be used. */
+ dma->using_pio_mode = true;
+
+ dma->dma_len = msg->len;
+ dma->dma_msg_flag = msg->flags;
+ dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
+ if (!dma->dma_buf)
+ return -ENOMEM;
+
+ ret = lpi2c_dma_config(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ lpi2c_dma_enable(lpi2c_imx);
+
+ ret = lpi2c_dma_submit(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+
+ ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+ }
+
+ ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_all_dma;
+
+	/* If a NACK occurred during the transfer, clean up all DMA transfers */
+	if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+ ret = -EIO;
+ goto disable_cleanup_all_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ lpi2c_dma_unmap(dma);
+
+ goto disable_dma;
+
+disable_cleanup_all_dma:
+ if (dma->dma_msg_flag & I2C_M_RD)
+ lpi2c_cleanup_rx_cmd_dma(dma);
+disable_cleanup_data_dma:
+ lpi2c_cleanup_dma(dma);
+disable_dma:
+ /* Disable I2C DMA function */
+ writel(0, lpi2c_imx->base + LPI2C_MDER);
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ kfree(dma->rx_cmd_buf);
+
+	i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, !ret);
+
+ return ret;
+}
+
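To make the MTDR command-word layout concrete, here is a standalone sketch of
the words lpi2c_imx_alloc_rx_cmd_buf() builds for a hypothetical 300-byte
read, reusing the RECV_DATA (0x01) and CHUNK_DATA (256) values defined
earlier in this file:

	u16 words[2];	/* DIV_ROUND_UP(300, 256) = 2 command words */

	/* First chunk: full 256 bytes -> count field holds 256 - 1 */
	words[0] = (RECV_DATA << 8) | (CHUNK_DATA - 1);	/* 0x01FF */
	/* Second chunk: remaining 44 bytes -> count field holds 44 - 1 */
	words[1] = (RECV_DATA << 8) | (44 - 1);		/* 0x012B */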
static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
@@ -477,12 +973,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
- if (msgs[i].flags & I2C_M_RD)
- lpi2c_imx_read(lpi2c_imx, &msgs[i]);
- else
- lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+ if (is_use_dma(lpi2c_imx, &msgs[i])) {
+ result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
+ if (result && lpi2c_imx->dma->using_pio_mode)
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ } else {
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ }
- result = lpi2c_imx_msg_complete(lpi2c_imx);
if (result)
goto stop;
@@ -510,9 +1008,56 @@ disable:
return (result < 0) ? result : num;
}
-static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
+ u32 ssr, u32 sier_filter)
+{
+ u8 value;
+ u32 sasr;
+
+ /* Arbitration lost */
+ if (sier_filter & SSR_BEF) {
+ writel(0, lpi2c_imx->base + LPI2C_SIER);
+ return IRQ_HANDLED;
+ }
+
+ /* Address detected */
+ if (sier_filter & SSR_AVF) {
+ sasr = readl(lpi2c_imx->base + LPI2C_SASR);
+ if (SASR_READ_REQ & sasr) {
+ /* Read request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ goto ret;
+ } else {
+ /* Write request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
+ }
+ }
+
+ if (sier_filter & SSR_SDF)
+ /* STOP */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);
+
+ if (sier_filter & SSR_TDF) {
+ /* Target send data */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ }
+
+ if (sier_filter & SSR_RDF) {
+ /* Target receive data */
+ value = readl(lpi2c_imx->base + LPI2C_SRDR);
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
+ }
+
+ret:
+ /* Clear SSR */
+ writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
{
- struct lpi2c_imx_struct *lpi2c_imx = dev_id;
unsigned int enabled;
unsigned int temp;
@@ -532,6 +1077,124 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+
+ if (lpi2c_imx->target) {
+ u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
+ u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
+ u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
+
+ /*
+ * The target is enabled and an interrupt has been triggered.
+ * Enter the target's irq handler.
+ */
+ if ((scr & SCR_SEN) && sier_filter)
+ return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
+ }
+
+ /*
+ * Otherwise the interrupt has been triggered by the master.
+ * Enter the master's irq handler.
+ */
+ return lpi2c_imx_master_isr(lpi2c_imx);
+}
+
+static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u32 temp;
+
+ /* reset target module */
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Set target address */
+ writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);
+
+ /*
+ * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
+ *
+	 * FILTSCL/FILTSDA can eliminate signal skew. They should generally
+	 * be set to the same value, and to at least 50ns.
+	 *
+	 * CLKHOLD is only used when clock stretching is enabled, but it will
+	 * extend the clock stretching to ensure there is an additional delay
+	 * between the target driving SDA and the target releasing the SCL pin.
+	 *
+	 * The CLKHOLD setting is crucial for the LPI2C target. When the
+	 * master reads data from the target, a delay between two bytes of
+	 * one message (caused by CPU idle, excessive load, or other
+	 * latencies) leads to a short interval between the target driving
+	 * the SDA signal and releasing the SCL signal. The LPI2C master
+	 * then mistakes this for a STOP condition, which results in an
+	 * arbitration failure. Setting CLKHOLD avoids this issue.
+	 *
+	 * To ensure that the LPI2C works normally at a speed as low as
+	 * 100kHz, CLKHOLD should be set to 3; this value is also compatible
+	 * with higher clock frequencies such as 400kHz and 1MHz.
+ */
+ temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
+ writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);
+
+ /*
+ * Enable module:
+	 * SCR_FILTEN enables the digital filter and the output delay counter
+	 * for LPI2C target mode, so SCR_FILTEN must be asserted whenever the
+	 * SDA/SCL filters and CLKHOLD are used.
+ */
+ writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Enable interrupt from i2c module */
+ writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
+}
+
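With the values chosen above, the register write amounts to
SCFGR2 = (2 << 24) | (2 << 16) | 3 = 0x02020003.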
+static int lpi2c_imx_register_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (lpi2c_imx->target)
+ return -EBUSY;
+
+ lpi2c_imx->target = client;
+
+ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0) {
+		dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller\n");
+ return ret;
+ }
+
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_imx_unregister_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (!lpi2c_imx->target)
+ return -EINVAL;
+
+ /* Reset target address. */
+ writel(0, lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ lpi2c_imx->target = NULL;
+
+ ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0)
+		dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller\n");
+
+ return ret;
+}
+
static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
struct platform_device *pdev)
{
@@ -546,6 +1209,58 @@ static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
return 0;
}
+static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
+{
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+
+ devm_kfree(dev, dma);
+}
+
+static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ struct lpi2c_imx_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->phy_addr = phy_addr;
+
+ /* Prepare for TX DMA: */
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
+ dma->chan_tx = NULL;
+ goto dma_exit;
+ }
+
+ /* Prepare for RX DMA: */
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
+ dma->chan_rx = NULL;
+ goto dma_exit;
+ }
+
+ lpi2c_imx->can_use_dma = true;
+ lpi2c_imx->dma = dma;
+ return 0;
+
+dma_exit:
+ dma_exit(dev, dma);
+ return ret;
+}
+
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
@@ -555,6 +1270,8 @@ static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm lpi2c_imx_algo = {
.master_xfer = lpi2c_imx_xfer,
.functionality = lpi2c_imx_func,
+ .reg_target = lpi2c_imx_register_target,
+ .unreg_target = lpi2c_imx_unregister_target,
};
static const struct of_device_id lpi2c_imx_of_match[] = {
@@ -566,6 +1283,8 @@ MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
static int lpi2c_imx_probe(struct platform_device *pdev)
{
struct lpi2c_imx_struct *lpi2c_imx;
+ struct resource *res;
+ dma_addr_t phy_addr;
unsigned int temp;
int irq, ret;
@@ -573,7 +1292,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (!lpi2c_imx)
return -ENOMEM;
- lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0);
+ lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(lpi2c_imx->base))
return PTR_ERR(lpi2c_imx->base);
@@ -587,6 +1306,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
+ phy_addr = (dma_addr_t)res->start;
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
if (ret < 0)
@@ -598,7 +1318,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
- ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
pdev->name, lpi2c_imx);
if (ret)
return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
@@ -640,6 +1360,14 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto rpm_disable;
+ /* Init DMA */
+ ret = lpi2c_dma_init(&pdev->dev, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
+ dev_info(&pdev->dev, "use pio mode\n");
+ }
+
ret = i2c_add_adapter(&lpi2c_imx->adapter);
if (ret)
goto rpm_disable;
@@ -694,9 +1422,68 @@ static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
return 0;
}
+static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the I2C module powers down during system suspend,
+ * the register values will be lost. Therefore, reinitialize
+ * the target when the system resumes.
+ */
+ if (lpi2c_imx->target)
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+	 * Finally, the resume() callback re-enables autosuspend, ensuring
+	 * the I2C controller remains available from resume_noirq() until
+	 * the system next enters suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int lpi2c_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops lpi2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
+ lpi2c_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
lpi2c_runtime_resume, NULL)
};
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 5c9a8dfbc4a0..9e5d454d8318 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -397,17 +397,16 @@ static void i2c_imx_reset_regs(struct imx_i2c_struct *i2c_imx)
}
/* Functions for DMA support */
-static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
- struct device *dev = &i2c_imx->adapter.dev;
+ struct device *dev = i2c_imx->adapter.dev.parent;
int ret;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return;
+ return -ENOMEM;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
@@ -452,7 +451,7 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return;
+ return 0;
fail_rx:
dma_release_channel(dma->chan_rx);
@@ -460,6 +459,8 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
+
+ return ret;
}
static void i2c_imx_dma_callback(void *arg)
@@ -621,8 +622,8 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
return 0;
}
-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
- unsigned int i2c_clk_rate)
+static int i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+ unsigned int i2c_clk_rate)
{
struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
unsigned int div;
@@ -637,7 +638,11 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
- return;
+ return 0;
+
+	/* Make sure the divisor in the calculation below can never be zero. */
+ if (!(i2c_clk_rate / 2))
+ return -EINVAL;
i2c_imx->cur_clk = i2c_clk_rate;
@@ -668,6 +673,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
dev_dbg(&i2c_imx->adapter.dev, "IFDR[IC]=0x%x, REAL DIV=%d\n",
i2c_clk_div[i].val, i2c_clk_div[i].div);
#endif
+
+ return 0;
}
static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
@@ -677,11 +684,12 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
clk_change_nb);
+ int ret = 0;
if (action & POST_RATE_CHANGE)
- i2c_imx_set_clk(i2c_imx, ndata->new_rate);
+ ret = i2c_imx_set_clk(i2c_imx, ndata->new_rate);
- return NOTIFY_OK;
+ return notifier_from_errno(ret);
}
static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
@@ -1715,8 +1723,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&i2c_imx->slave_lock);
- hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+ hrtimer_setup(&i2c_imx->slave_timer, i2c_imx_slave_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
match = device_get_match_data(&pdev->dev);
if (match)
@@ -1760,7 +1768,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Request IRQ */
- ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx);
+ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED | IRQF_NO_SUSPEND,
+ pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
@@ -1780,7 +1789,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->bitrate = pdata->bitrate;
i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call;
clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb);
- i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ ret = i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get I2C clock\n");
+ goto clk_notifier_unregister;
+ }
i2c_imx_reset_regs(i2c_imx);
@@ -1790,6 +1803,22 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto clk_notifier_unregister;
+ /*
+	 * DMA mode is optional for I2C, so a DMA error is no reason to fail
+	 * the I2C probe. Just print a warning reporting the DMA error and
+	 * fall back to PIO mode, to keep the I2C bus available whenever
+	 * possible.
+ */
+ ret = i2c_imx_dma_request(i2c_imx, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_notifier_unregister;
+ else if (ret == -ENODEV)
+ dev_dbg(&pdev->dev, "Only use PIO mode\n");
+ else
+ dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
+ ERR_PTR(ret));
+ }
+
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
if (ret < 0)
@@ -1804,9 +1833,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.name);
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
- /* Init DMA config if supported */
- i2c_imx_dma_request(i2c_imx, phy_addr);
-
return 0; /* Return OK */
clk_notifier_unregister:
@@ -1858,8 +1884,7 @@ static int i2c_imx_runtime_suspend(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
clk_disable(i2c_imx->clk);
-
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int i2c_imx_runtime_resume(struct device *dev)
@@ -1867,6 +1892,10 @@ static int i2c_imx_runtime_resume(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
ret = clk_enable(i2c_imx->clk);
if (ret)
dev_err(dev, "can't enable I2C clock, ret=%d\n", ret);
@@ -1874,7 +1903,43 @@ static int i2c_imx_runtime_resume(struct device *dev)
return ret;
}
+static int i2c_imx_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+	 * Finally, the resume() callback re-enables autosuspend, ensuring
+	 * the I2C controller remains available from resume_noirq() until
+	 * the system next enters suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int i2c_imx_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops i2c_imx_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SYSTEM_SLEEP_PM_OPS(i2c_imx_suspend, i2c_imx_resume)
RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL)
};
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 2b3b65ef2900..a2ac992f9cb0 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -291,9 +291,9 @@ static int smbus_sch_probe(struct platform_device *pdev)
/* Set up the sysfs linkage to our parent device */
priv->adapter.dev.parent = dev;
- priv->adapter.owner = THIS_MODULE,
- priv->adapter.class = I2C_CLASS_HWMON,
- priv->adapter.algo = &smbus_algorithm,
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON;
+ priv->adapter.algo = &smbus_algorithm;
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
"SMBus SCH adapter at %04x", (unsigned short)res->start);
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
index 759732a07ef0..7b9ed2592f5b 100644
--- a/drivers/i2c/busses/i2c-keba.c
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -464,12 +464,8 @@ static void ki2c_unregister_devices(struct ki2c *ki2c)
{
int i;
- for (i = 0; i < ki2c->client_size; i++) {
- struct i2c_client *client = ki2c->client[i];
-
- if (client)
- i2c_unregister_device(client);
- }
+ for (i = 0; i < ki2c->client_size; i++)
+ i2c_unregister_device(ki2c->client[i]);
}
static int ki2c_register_devices(struct ki2c *ki2c)
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
index 8821cac3897b..b475dd27b7af 100644
--- a/drivers/i2c/busses/i2c-ls2x.c
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>
/* I2C Registers */
-#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
+#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
+ u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;
- /* Calculate and set i2c frequency. */
- writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
- priv->base + I2C_LS2X_PRER);
+ /*
+ * According to the chip manual, we can only access the registers as bytes,
+ * otherwise the high bits will be truncated.
+	 * So set the I2C frequency with two sequential writeb() calls
+	 * instead of a single writew().
+ */
+ val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
+ writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
+ writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}
static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 482a0074d448..de713b5747fe 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -263,6 +263,265 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define I2C_FREQ_MIN_HZ 10000
#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ
+struct smb_timing_t {
+ u32 core_clk;
+ u8 hldt;
+ u8 dbcnt;
+ u16 sclfrq;
+ u8 scllt;
+ u8 sclht;
+ bool fast_mode;
+};
+
+static struct smb_timing_t smb_timing_100khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x4,
+ .sclfrq = 0xFB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x9D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x7E, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x79, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x65, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x4C, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x49, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x42, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x33, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x29, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x23, .dbcnt = 0x1,
+ .sclfrq = 0x26, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x1D, .dbcnt = 0x1,
+ .sclfrq = 0x21, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x1F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x18, .dbcnt = 0x1,
+ .sclfrq = 0x1A, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 9000000, .hldt = 0x16, .dbcnt = 0x1,
+ .sclfrq = 0x17, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 8090000, .hldt = 0x14, .dbcnt = 0x1,
+ .sclfrq = 0x15, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 7500000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x13, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 6500000, .hldt = 0xE, .dbcnt = 0x1,
+ .sclfrq = 0x11, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 4000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0xB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+};
+
+static struct smb_timing_t smb_timing_400khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x47, .sclht = 0x35,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0x2C, .sclht = 0x22,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x21, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x1B,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x1E, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x19,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x1E, .sclht = 0x14,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0x11,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x11, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x15, .sclht = 0x10,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x10, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0xF, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0xD, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x12, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0xB, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xF, .sclht = 0xA,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0xA, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x8,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x6, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+};
+
+static struct smb_timing_t smb_timing_1000khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x15, .dbcnt = 0x4,
+ .sclfrq = 0x0, .scllt = 0x1C, .sclht = 0x15,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0xF, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x11, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0xA, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xE, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xD, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 41000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x8, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x7, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x6, .sclht = 0x4,
+ .fast_mode = true,
+ },
+};
+
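+/*
+ * Note: each table above is sorted by descending core_clk;
+ * npcm_i2c_init_clk() picks the first row whose core_clk does not
+ * exceed the APB clock, so an exact frequency match is not required.
+ */
+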
struct npcm_i2c_data {
u8 fifo_size;
u32 segctl_init_val;
@@ -1666,6 +1925,12 @@ static int npcm_i2c_int_master_handler(struct npcm_i2c *bus)
(FIELD_GET(NPCM_I2CCST3_EO_BUSY,
ioread8(bus->reg + NPCM_I2CCST3)))) {
npcm_i2c_irq_handle_eob(bus);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* re-enable the slave if it was previously enabled */
+ if (bus->slave)
+ iowrite8(bus->slave->addr | NPCM_I2CADDR_SAEN,
+ bus->reg + NPCM_I2CADDR1);
+#endif
return 0;
}
@@ -1805,102 +2070,45 @@ static void npcm_i2c_recovery_init(struct i2c_adapter *_adap)
*/
static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
{
- u32 k1 = 0;
- u32 k2 = 0;
- u8 dbnct = 0;
- u32 sclfrq = 0;
- u8 hldt = 7;
+ struct smb_timing_t *smb_timing;
+ u8 scl_table_cnt = 0, table_size = 0;
u8 fast_mode = 0;
- u32 src_clk_khz;
- u32 bus_freq_khz;
- src_clk_khz = bus->apb_clk / 1000;
- bus_freq_khz = bus_freq_hz / 1000;
bus->bus_freq = bus_freq_hz;
- /* 100KHz and below: */
- if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) {
- sclfrq = src_clk_khz / (bus_freq_khz * 4);
-
- if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX)
- return -EDOM;
-
- if (src_clk_khz >= 40000)
- hldt = 17;
- else if (src_clk_khz >= 12500)
- hldt = 15;
- else
- hldt = 7;
- }
-
- /* 400KHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) {
- sclfrq = 0;
+ switch (bus_freq_hz) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ smb_timing = smb_timing_100khz;
+ table_size = ARRAY_SIZE(smb_timing_100khz);
+ break;
+ case I2C_MAX_FAST_MODE_FREQ:
+ smb_timing = smb_timing_400khz;
+ table_size = ARRAY_SIZE(smb_timing_400khz);
fast_mode = I2CCTL3_400K_MODE;
-
- if (src_clk_khz < 7500)
- /* 400KHZ cannot be supported for core clock < 7.5MHz */
- return -EDOM;
-
- else if (src_clk_khz >= 50000) {
- k1 = 80;
- k2 = 48;
- hldt = 12;
- dbnct = 7;
- }
-
- /* Master or Slave with frequency > 25MHz */
- else if (src_clk_khz > 25000) {
- hldt = clk_coef(src_clk_khz, 300) + 7;
- k1 = clk_coef(src_clk_khz, 1600);
- k2 = clk_coef(src_clk_khz, 900);
- }
- }
-
- /* 1MHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
- sclfrq = 0;
+ break;
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ smb_timing = smb_timing_1000khz;
+ table_size = ARRAY_SIZE(smb_timing_1000khz);
fast_mode = I2CCTL3_400K_MODE;
-
- /* 1MHZ cannot be supported for core clock < 24 MHz */
- if (src_clk_khz < 24000)
- return -EDOM;
-
- k1 = clk_coef(src_clk_khz, 620);
- k2 = clk_coef(src_clk_khz, 380);
-
- /* Core clk > 40 MHz */
- if (src_clk_khz > 40000) {
- /*
- * Set HLDT:
- * SDA hold time: (HLDT-7) * T(CLK) >= 120
- * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7
- */
- hldt = clk_coef(src_clk_khz, 120) + 7;
- } else {
- hldt = 7;
- dbnct = 2;
- }
+ break;
+ default:
+ return -EINVAL;
}
- /* Frequency larger than 1 MHz is not supported */
- else
- return -EINVAL;
+ for (scl_table_cnt = 0; scl_table_cnt < table_size; scl_table_cnt++)
+ if (bus->apb_clk >= smb_timing[scl_table_cnt].core_clk)
+ break;
- if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) {
- k1 = round_up(k1, 2);
- k2 = round_up(k2 + 1, 2);
- if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX ||
- k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX)
- return -EDOM;
- }
+ if (scl_table_cnt == table_size)
+ return -EINVAL;
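+
+ /*
+ * Illustrative example: with apb_clk = 45 MHz and a 400 kHz target,
+ * the loop above selects the 40 MHz row, the first entry whose
+ * core_clk does not exceed the APB clock.
+ */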
/* write sclfrq value. bits [6:0] are in I2CCTL2 reg */
- iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F),
+ iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, smb_timing[scl_table_cnt].sclfrq & 0x7F),
bus->reg + NPCM_I2CCTL2);
/* bits [8:7] are in I2CCTL3 reg */
- iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3),
+ iowrite8(FIELD_PREP(I2CCTL3_SCLFRQ8_7, (smb_timing[scl_table_cnt].sclfrq >> 7) & 0x3) |
+ fast_mode,
bus->reg + NPCM_I2CCTL3);
/* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */
@@ -1912,13 +2120,13 @@ static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
* k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2
 * k2 = 2 * SCLHT7-0 -> High Time = k2 / 2
*/
- iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT);
- iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT);
+ iowrite8(smb_timing[scl_table_cnt].scllt, bus->reg + NPCM_I2CSCLLT);
+ iowrite8(smb_timing[scl_table_cnt].sclht, bus->reg + NPCM_I2CSCLHT);
- iowrite8(dbnct, bus->reg + NPCM_I2CCTL5);
+ iowrite8(smb_timing[scl_table_cnt].dbcnt, bus->reg + NPCM_I2CCTL5);
}
- iowrite8(hldt, bus->reg + NPCM_I2CCTL4);
+ iowrite8(smb_timing[scl_table_cnt].hldt, bus->reg + NPCM_I2CCTL4);
/* Return to Bank 1, and stay there by default: */
npcm_i2c_select_bank(bus, I2C_BANK_1);
@@ -2035,7 +2243,7 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
- u8 slave_addr, u16 nwrite, u16 nread,
+ u16 nwrite, u16 nread,
u8 *write_data, u8 *read_data,
bool use_PEC, bool use_read_block)
{
@@ -2043,7 +2251,6 @@ static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
bus->cmd_err = -EBUSY;
return false;
}
- bus->dest_addr = slave_addr << 1;
bus->wr_buf = write_data;
bus->wr_size = nwrite;
bus->wr_ind = 0;
@@ -2086,7 +2293,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
unsigned long time_left, flags;
u16 nwrite, nread;
u8 *write_data, *read_data;
- u8 slave_addr;
unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
@@ -2099,7 +2305,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
msg0 = &msgs[0];
- slave_addr = msg0->addr;
if (msg0->flags & I2C_M_RD) { /* read */
nwrite = 0;
write_data = NULL;
@@ -2132,19 +2337,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
- /*
- * Adaptive TimeOut: estimated time in usec + 100% margin:
- * 2: double the timeout for clock stretching case
- * 9: bits per transaction (including the ack/nack)
- */
- timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + timeout + 1;
+ time_left = jiffies + bus->adap.timeout / bus->adap.retries + 1;
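+ /*
+ * Illustrative: with adap.timeout = 2 * HZ and adap.retries = 3,
+ * each bus-ownership poll window is about 667 ms.
+ */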
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2163,6 +2361,21 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
} while (time_is_after_jiffies(time_left) && bus_busy);
/*
+ * Store the address early in bus->dest_addr so it is available to a
+ * potential i2c_recover_bus() call, which uses the address in the
+ * write direction to recover the bus after an error condition.
+ *
+ * Clear the I2C_M_RD flag from the address: npcm_i2c_master_start_xmit()
+ * handles the read/write direction internally.
+ */
+ bus->dest_addr = i2c_8bit_addr_from_msg(msg0) & ~I2C_M_RD;
+
+ /*
 * Check the BER (bus error) state. When ber_state is true, the module has
 * detected a bus error caused by some disturbance, such as electrical
 * noise on the bus. Under this condition, the module is reset and the bus
@@ -2179,7 +2392,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
npcm_i2c_init_params(bus);
- bus->dest_addr = slave_addr;
bus->msgs = msgs;
bus->msgs_num = num;
bus->cmd_err = 0;
@@ -2189,9 +2401,17 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
npcm_i2c_int_enable(bus, true);
- if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ if (npcm_i2c_master_start_xmit(bus, nwrite, nread,
write_data, read_data, read_PEC,
read_block)) {
+ /*
+ * Adaptive TimeOut: estimated time in usec + 100% margin:
+ * 2: double the timeout for clock stretching case
+ * 9: bits per transaction (including the ack/nack)
+ */
+ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
+ timeout = max_t(unsigned long, bus->adap.timeout / bus->adap.retries,
+ usecs_to_jiffies(timeout_usec));
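+ /*
+ * Worked example (illustrative): a 2-byte write at 100 kHz gives
+ * timeout_usec = (2 * 9 * 1000000 / 100000) * (2 + 0 + 2) = 720 us;
+ * the larger of this and the per-retry adapter timeout is used.
+ */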
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
@@ -2317,7 +2537,12 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = msecs_to_jiffies(35);
+ /*
+ * Users may connect many masters to the same bus, so this timeout
+ * bounds how long it takes to acquire bus ownership. Transactions
+ * can be very long, and waiting 35 ms is not enough.
+ */
+ adap->timeout = 2 * HZ;
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
@@ -2329,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * Disable the interrupt so that stale asynchronous interrupt status
+ * does not trigger the handler spuriously; the machine might have
+ * warm-reset during the last SMBus/I2C transfer session.
+ */
+ npcm_i2c_int_enable(bus, false);
+
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 92faf03d64cf..f18c3e74b076 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
return 0;
}
-static irqreturn_t
-omap_i2c_isr(int irq, void *dev_id)
-{
- struct omap_i2c_dev *omap = dev_id;
- irqreturn_t ret = IRQ_HANDLED;
- u16 mask;
- u16 stat;
-
- stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
- return ret;
-}
-
static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
u16 bits;
@@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
}
if (stat & OMAP_I2C_STAT_NACK) {
- err |= OMAP_I2C_STAT_NACK;
+ omap->cmd_err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
+
+ if (!(stat & ~OMAP_I2C_STAT_NACK)) {
+ err = -EAGAIN;
+ break;
+ }
}
if (stat & OMAP_I2C_STAT_AL) {
@@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, pdev->name, omap);
else
r = devm_request_threaded_irq(&pdev->dev, omap->irq,
- omap_i2c_isr, omap_i2c_isr_thread,
+ NULL, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
pdev->name, omap);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7a22e1f46e60..7bbd478171e0 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -823,11 +823,9 @@ static int geni_i2c_probe(struct platform_device *pdev)
return gi2c->irq;
ret = geni_i2c_clk_map_idx(gi2c);
- if (ret) {
- dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
- gi2c->clk_freq_out, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid clk frequency %d Hz\n",
+ gi2c->clk_freq_out);
gi2c->adap.algo = &geni_i2c_algo;
init_completion(&gi2c->done);
@@ -837,11 +835,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* Keep interrupts disabled initially to allow for low-power modes */
ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
dev_name(dev), gi2c);
- if (ret) {
- dev_err(dev, "Request_irq failed:%d: err:%d\n",
- gi2c->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Request_irq failed: %d\n", gi2c->irq);
+
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
@@ -870,16 +867,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
- dev_err(dev, "Error turning on resources %d\n", ret);
- clk_disable_unprepare(gi2c->core_clk);
- return ret;
+ dev_err_probe(dev, ret, "Error turning on resources\n");
+ goto err_clk;
}
proto = geni_se_read_proto(&gi2c->se);
if (proto != GENI_SE_I2C) {
- dev_err(dev, "Invalid proto %d\n", proto);
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return -ENXIO;
+ ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
+ goto err_resources;
}
if (desc && desc->no_dma_support)
@@ -891,11 +885,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret) {
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
- }
+ if (ret)
+ goto err_resources;
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
@@ -907,10 +898,9 @@ static int geni_i2c_probe(struct platform_device *pdev)
tx_depth = desc->tx_fifo_depth;
if (!tx_depth) {
- dev_err(dev, "Invalid TX FIFO depth\n");
- geni_se_resources_off(&gi2c->se);
- clk_disable_unprepare(gi2c->core_clk);
- return -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL,
+ "Invalid TX FIFO depth\n");
+ goto err_resources;
}
gi2c->tx_wm = tx_depth - 1;
@@ -924,7 +914,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
clk_disable_unprepare(gi2c->core_clk);
ret = geni_se_resources_off(&gi2c->se);
if (ret) {
- dev_err(dev, "Error turning off resources %d\n", ret);
+ dev_err_probe(dev, ret, "Error turning off resources\n");
goto err_dma;
}
@@ -940,17 +930,25 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&gi2c->adap);
if (ret) {
- dev_err(dev, "Error adding i2c adapter %d\n", ret);
+ dev_err_probe(dev, ret, "Error adding i2c adapter\n");
pm_runtime_disable(gi2c->se.dev);
goto err_dma;
}
dev_dbg(dev, "Geni-I2C adaptor successfully added\n");
- return 0;
+ return ret;
+
+err_resources:
+ geni_se_resources_off(&gi2c->se);
+err_clk:
+ clk_disable_unprepare(gi2c->core_clk);
+
+ return ret;
err_dma:
release_gpi_dma(gi2c);
+
return ret;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7b77d14ee86..5693a38da7b5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -130,6 +130,8 @@
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 27)
+#define ID_SLAVE_NACK BIT(0)
+
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
int irq;
struct i2c_client *host_notify_client;
+ u8 slave_flags;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
{
u32 ssr_raw, ssr_filtered;
u8 value;
+ int ret;
ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ priv->slave_flags &= ~ID_SLAVE_NACK;
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
/* master wants to write to us */
if (ssr_filtered & SDR) {
- int ret;
-
value = rcar_i2c_read(priv, ICRXTX);
ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
- /* Send NACK in case of error */
- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
+ /* Send NACK in case of error, but it will come 1 byte late :( */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS |
+ (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 9264adc97ca9..d7dddd6c296a 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -34,46 +34,51 @@
* Also check the comments in the interrupt routines for some gory details.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/time.h>
-#define ICCR1_ICE 0x80
-#define ICCR1_IICRST 0x40
-#define ICCR1_SOWP 0x10
+#define ICCR1_ICE BIT(7)
+#define ICCR1_IICRST BIT(6)
+#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLI BIT(1)
+#define ICCR1_SDAI BIT(0)
-#define ICCR2_BBSY 0x80
-#define ICCR2_SP 0x08
-#define ICCR2_RS 0x04
-#define ICCR2_ST 0x02
+#define ICCR2_BBSY BIT(7)
+#define ICCR2_SP BIT(3)
+#define ICCR2_RS BIT(2)
+#define ICCR2_ST BIT(1)
-#define ICMR1_CKS_MASK 0x70
-#define ICMR1_BCWP 0x08
+#define ICMR1_CKS_MASK GENMASK(6, 4)
+#define ICMR1_BCWP BIT(3)
#define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP)
-#define ICMR3_RDRFS 0x20
-#define ICMR3_ACKWP 0x10
-#define ICMR3_ACKBT 0x08
+#define ICMR3_RDRFS BIT(5)
+#define ICMR3_ACKWP BIT(4)
+#define ICMR3_ACKBT BIT(3)
-#define ICFER_FMPE 0x80
+#define ICFER_FMPE BIT(7)
-#define ICIER_TIE 0x80
-#define ICIER_TEIE 0x40
-#define ICIER_RIE 0x20
-#define ICIER_NAKIE 0x10
-#define ICIER_SPIE 0x08
+#define ICIER_TIE BIT(7)
+#define ICIER_TEIE BIT(6)
+#define ICIER_RIE BIT(5)
+#define ICIER_NAKIE BIT(4)
+#define ICIER_SPIE BIT(3)
-#define ICSR2_NACKF 0x10
+#define ICSR2_NACKF BIT(4)
-#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
+#define ICBR_RESERVED GENMASK(7, 5) /* Should be 1 on writes */
#define RIIC_INIT_MSG -1
@@ -134,6 +139,27 @@ static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u
riic_writeb(riic, (riic_readb(riic, reg) & ~clear) | set, reg);
}
+static int riic_bus_barrier(struct riic_dev *riic)
+{
+ int ret;
+ u8 val;
+
+ /*
+ * The SDA line can still be low even when BBSY = 0. Therefore, after checking
+ * the BBSY flag, also verify that the SDA and SCL lines are not being held low.
+ */
+ ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
+ !(val & ICCR2_BBSY), 10, riic->adapter.timeout);
+ if (ret)
+ return ret;
+
+ if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
+ (ICCR1_SDAI | ICCR1_SCLI))
+ return -EBUSY;
+
+ return 0;
+}
+
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
@@ -146,13 +172,11 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (ret)
return ret;
- if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
- riic->err = -EBUSY;
+ riic->err = riic_bus_barrier(riic);
+ if (riic->err)
goto out;
- }
reinit_completion(&riic->msg_done);
- riic->err = 0;
riic_writeb(riic, 0, RIIC_ICSR2);
@@ -312,6 +336,7 @@ static int riic_init_hw(struct riic_dev *riic)
{
int ret;
unsigned long rate;
+ unsigned long ns_per_tick;
int total_ticks, cks, brl, brh;
struct i2c_timings *t = &riic->i2c_t;
struct device *dev = riic->adapter.dev.parent;
@@ -320,7 +345,7 @@ static int riic_init_hw(struct riic_dev *riic)
: I2C_MAX_FAST_MODE_FREQ;
if (t->bus_freq_hz > max_freq)
- return dev_err_probe(&riic->adapter.dev, -EINVAL,
+ return dev_err_probe(dev, -EINVAL,
"unsupported bus speed %uHz (%u max)\n",
t->bus_freq_hz, max_freq);
@@ -356,11 +381,9 @@ static int riic_init_hw(struct riic_dev *riic)
rate /= 2;
}
- if (brl > (0x1F + 3)) {
- dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
- (unsigned long)t->bus_freq_hz);
- return -EINVAL;
- }
+ if (brl > (0x1F + 3))
+ return dev_err_probe(dev, -EINVAL, "invalid speed (%uHz). Too slow.\n",
+ t->bus_freq_hz);
brh = total_ticks - brl;
@@ -377,8 +400,9 @@ static int riic_init_hw(struct riic_dev *riic)
* Remove clock ticks for rise and fall times. Convert ns to clock
* ticks.
*/
- brl -= t->scl_fall_ns / (1000000000 / rate);
- brh -= t->scl_rise_ns / (1000000000 / rate);
+ ns_per_tick = NSEC_PER_SEC / rate;
+ brl -= t->scl_fall_ns / ns_per_tick;
+ brh -= t->scl_rise_ns / ns_per_tick;
/* Adjust for min register values for when SCLE=1 and NFE=1 */
if (brl < 1)
@@ -388,8 +412,7 @@ static int riic_init_hw(struct riic_dev *riic)
pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
- t->scl_fall_ns / (1000000000 / rate),
- t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+ t->scl_fall_ns / ns_per_tick, t->scl_rise_ns / ns_per_tick, cks, brl, brh);
ret = pm_runtime_resume_and_get(dev);
if (ret)
@@ -416,7 +439,7 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
-static struct riic_irq_desc riic_irqs[] = {
+static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
@@ -424,11 +447,6 @@ static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
-static void riic_reset_control_assert(void *data)
-{
- reset_control_assert(data);
-}
-
static int riic_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -445,35 +463,27 @@ static int riic_i2c_probe(struct platform_device *pdev)
return PTR_ERR(riic->base);
riic->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(riic->clk)) {
- dev_err(dev, "missing controller clock");
- return PTR_ERR(riic->clk);
- }
+ if (IS_ERR(riic->clk))
+ return dev_err_probe(dev, PTR_ERR(riic->clk),
+ "missing controller clock");
- riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ riic->rstc = devm_reset_control_get_optional_exclusive_deasserted(dev, NULL);
if (IS_ERR(riic->rstc))
return dev_err_probe(dev, PTR_ERR(riic->rstc),
- "Error: missing reset ctrl\n");
-
- ret = reset_control_deassert(riic->rstc);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc);
- if (ret)
- return ret;
+ "failed to acquire deasserted reset\n");
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
- ret = platform_get_irq(pdev, riic_irqs[i].res_num);
- if (ret < 0)
- return ret;
+ int irq;
+
+ irq = platform_get_irq(pdev, riic_irqs[i].res_num);
+ if (irq < 0)
+ return irq;
- ret = devm_request_irq(dev, ret, riic_irqs[i].isr,
+ ret = devm_request_irq(dev, irq, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
- if (ret) {
- dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %s\n",
+ riic_irqs[i].name);
}
riic->info = of_device_get_match_data(dev);
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 3505cf29cedd..a19c3d251804 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (sis630_setup(dev)) {
dev_err(&dev->dev,
"SIS630 compatible bus not detected, "
@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
- return i2c_add_adapter(&sis630_adapter);
+ ret = i2c_add_adapter(&sis630_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ return ret;
}
static void sis630_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index c4d3eb02da09..dc1e46d834dc 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/spinlock.h>
#define DRIVER_NAME "xiic-i2c"
#define DYNAMIC_MODE_READ_BROKEN_BIT BIT(0)
@@ -74,6 +76,9 @@ enum i2c_scl_freq {
* @smbus_block_read: Flag to handle block read
* @input_clk: Input clock to I2C controller
* @i2c_clk: I2C SCL frequency
+ * @atomic: True when the current transfer runs in atomic (polling) mode
+ * @atomic_lock: Lock protecting atomic transfer mode
+ * @atomic_xfer_state: Transfer state in atomic mode, one of the STATE_* values
*/
struct xiic_i2c {
struct device *dev;
@@ -96,6 +101,9 @@ struct xiic_i2c {
bool smbus_block_read;
unsigned long input_clk;
unsigned int i2c_clk;
+ bool atomic;
+ spinlock_t atomic_lock; /* Lock for atomic transfer mode */
+ enum xilinx_i2c_state atomic_xfer_state;
};
struct xiic_version_data {
@@ -224,6 +232,8 @@ static const struct timing_regs timing_reg_values[] = {
#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/* timeout waiting for the controller finish transfers */
#define XIIC_XFER_TIMEOUT (msecs_to_jiffies(10000))
+/* timeout waiting for the controller to finish transfers, in microseconds */
+#define XIIC_XFER_TIMEOUT_US 10000000
/*
* The following constant is used for the device global interrupt enable
@@ -238,6 +248,29 @@ static const struct timing_regs timing_reg_values[] = {
static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_i2c_runtime_suspend(struct device *dev)
+{
+ struct xiic_i2c *i2c = dev_get_drvdata(dev);
+
+ clk_disable(i2c->clk);
+
+ return 0;
+}
+
+static int xiic_i2c_runtime_resume(struct device *dev)
+{
+ struct xiic_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* For the register read and write functions, a little-endian and big-endian
* version are necessary. Endianness is detected during the probe function.
@@ -374,9 +407,10 @@ static int xiic_setclk(struct xiic_i2c *i2c)
unsigned int index = 0;
u32 reg_val;
- dev_dbg(i2c->adap.dev.parent,
- "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n",
- __func__, i2c->input_clk, i2c->i2c_clk);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n",
+ __func__, i2c->input_clk, i2c->i2c_clk);
/* If not specified in DT, do not configure in SW. Rely only on Vivado design */
if (!i2c->i2c_clk || !i2c->input_clk)
@@ -467,7 +501,8 @@ static int xiic_reinit(struct xiic_i2c *i2c)
return ret;
/* Enable interrupts */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ if (!i2c->atomic)
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
@@ -549,11 +584,12 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
- dev_dbg(i2c->adap.dev.parent,
- "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
- __func__, bytes_in_fifo, xiic_rx_space(i2c),
- xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
+ __func__, bytes_in_fifo, xiic_rx_space(i2c),
+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
if (bytes_in_fifo > xiic_rx_space(i2c))
bytes_in_fifo = xiic_rx_space(i2c);
@@ -612,6 +648,26 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
}
}
+static bool xiic_error_check(struct xiic_i2c *i2c)
+{
+ bool status = false;
+ u32 pend, isr, ier;
+
+ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+ ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+ pend = isr & ier;
+
+ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
+ xiic_reinit(i2c);
+ status = true;
+ if (i2c->tx_msg || i2c->rx_msg)
+ i2c->atomic_xfer_state = STATE_ERROR;
+ }
+ return status;
+}
+
static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
/* return the actual space left in the FIFO */
@@ -625,8 +681,9 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
len = (len > fifo_space) ? fifo_space : len;
- dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
- __func__, len, fifo_space);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
+ __func__, len, fifo_space);
while (len--) {
u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
@@ -649,9 +706,13 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
~XIIC_CR_MSMS_MASK);
}
- dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
}
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+
+ if (i2c->atomic && xiic_error_check(i2c))
+ return;
}
}
@@ -854,22 +915,51 @@ static int xiic_wait_not_busy(struct xiic_i2c *i2c)
*/
err = xiic_bus_busy(i2c);
while (err && tries--) {
- msleep(1);
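+ /*
+ * udelay() busy-waits and is safe with interrupts disabled,
+ * whereas usleep_range() may schedule and must only be used on
+ * the non-atomic path.
+ */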
+ if (i2c->atomic)
+ udelay(1000);
+ else
+ usleep_range(1000, 1100);
err = xiic_bus_busy(i2c);
}
return err;
}
+static void xiic_recv_atomic(struct xiic_i2c *i2c)
+{
+ while (xiic_rx_space(i2c)) {
+ if (xiic_getreg32(i2c, XIIC_IISR_OFFSET) & XIIC_INTR_RX_FULL_MASK) {
+ xiic_read_rx(i2c);
+
+ /* Clear Rx full and Tx error interrupts. */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+ }
+ if (xiic_error_check(i2c))
+ return;
+ }
+
+ i2c->rx_msg = NULL;
+ xiic_irq_clr_en(i2c, XIIC_INTR_TX_ERROR_MASK);
+
+ /* send next message if this wasn't the last. */
+ if (i2c->nmsgs > 1) {
+ i2c->nmsgs--;
+ i2c->tx_msg++;
+ __xiic_start_xfer(i2c);
+ }
+}
+
static void xiic_start_recv(struct xiic_i2c *i2c)
{
u16 rx_watermark;
u8 cr = 0, rfd_set = 0;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
- dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
/* Disable Tx interrupts */
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK);
@@ -967,9 +1057,10 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
XIIC_CR_MSMS_MASK)
& ~(XIIC_CR_DIR_IS_TX_MASK));
}
- dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
}
if (i2c->nmsgs == 1)
@@ -979,10 +1070,55 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
/* the message is tx:ed */
i2c->tx_pos = msg->len;
+ i2c->prev_msg_tx = false;
+
/* Enable interrupts */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ if (!i2c->atomic)
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+ else
+ xiic_recv_atomic(i2c);
+}
- i2c->prev_msg_tx = false;
+static void xiic_send_rem_atomic(struct xiic_i2c *i2c)
+{
+ while (xiic_tx_space(i2c)) {
+ if (xiic_tx_fifo_space(i2c)) {
+ u16 data;
+
+ data = i2c->tx_msg->buf[i2c->tx_pos];
+ i2c->tx_pos++;
+ if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
+ /* last message in transfer -> STOP */
+ if (i2c->dynamic) {
+ data |= XIIC_TX_DYN_STOP_MASK;
+ } else {
+ u8 cr;
+ int status;
+
+ /* Wait till FIFO is empty so STOP is sent last */
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+
+ /* Write to CR to stop */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
+ }
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ if (xiic_error_check(i2c))
+ return;
+ }
+
+ if (i2c->nmsgs > 1) {
+ i2c->nmsgs--;
+ i2c->tx_msg++;
+ __xiic_start_xfer(i2c);
+ } else {
+ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
+ }
}
static void xiic_start_send(struct xiic_i2c *i2c)
@@ -991,11 +1127,13 @@ static void xiic_start_send(struct xiic_i2c *i2c)
u16 data;
struct i2c_msg *msg = i2c->tx_msg;
- dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
- __func__, msg, msg->len);
- dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
- __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
- xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ if (!i2c->atomic) {
+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
+ __func__, msg, msg->len);
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ }
if (i2c->dynamic) {
/* write the address */
@@ -1060,19 +1198,27 @@ static void xiic_start_send(struct xiic_i2c *i2c)
XIIC_INTR_TX_ERROR_MASK |
XIIC_INTR_BNB_MASK);
}
+
i2c->prev_msg_tx = true;
+
+ if (i2c->atomic && !i2c->atomic_xfer_state)
+ xiic_send_rem_atomic(i2c);
}
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
int fifo_space = xiic_tx_fifo_space(i2c);
- dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
- __func__, i2c->tx_msg, fifo_space);
+ if (!i2c->atomic)
+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
+ __func__, i2c->tx_msg, fifo_space);
if (!i2c->tx_msg)
return;
+ if (i2c->atomic && xiic_error_check(i2c))
+ return;
+
i2c->rx_pos = 0;
i2c->tx_pos = 0;
i2c->state = STATE_START;
@@ -1089,7 +1235,10 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
bool broken_read, max_read_len, smbus_blk_read;
int ret, count;
- mutex_lock(&i2c->lock);
+ if (i2c->atomic)
+ spin_lock(&i2c->atomic_lock);
+ else
+ mutex_lock(&i2c->lock);
if (i2c->tx_msg || i2c->rx_msg) {
dev_err(i2c->adap.dev.parent,
@@ -1098,6 +1247,8 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
goto out;
}
+ i2c->atomic_xfer_state = STATE_DONE;
+
/* In single master mode bus can only be busy, when in use by this
* driver. If the register indicates bus being busy for some reason we
* should ignore it, since bus will never be released and i2c will be
@@ -1124,7 +1275,9 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->rx_msg = NULL;
i2c->nmsgs = num;
- init_completion(&i2c->completion);
+
+ if (!i2c->atomic)
+ init_completion(&i2c->completion);
/* Decide standard mode or Dynamic mode */
i2c->dynamic = true;
@@ -1159,7 +1312,10 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
__xiic_start_xfer(i2c);
out:
- mutex_unlock(&i2c->lock);
+ if (i2c->atomic)
+ spin_unlock(&i2c->atomic_lock);
+ else
+ mutex_unlock(&i2c->lock);
return ret;
}
@@ -1198,6 +1354,44 @@ out:
return err;
}
+static int xiic_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
+ u32 status_reg;
+ int err;
+
+ err = xiic_i2c_runtime_resume(i2c->dev);
+ if (err)
+ return err;
+
+ i2c->atomic = true;
+ err = xiic_start_xfer(i2c, msgs, num);
+ if (err < 0)
+ return err;
+
+ err = readl_poll_timeout_atomic(i2c->base + XIIC_SR_REG_OFFSET,
+ status_reg, !(status_reg & XIIC_SR_BUS_BUSY_MASK),
+ 1, XIIC_XFER_TIMEOUT_US);
+
+ if (err) /* Timeout */
+ err = -ETIMEDOUT;
+
+ spin_lock(&i2c->atomic_lock);
+ if (err || i2c->state) {
+ i2c->tx_msg = NULL;
+ i2c->rx_msg = NULL;
+ i2c->nmsgs = 0;
+ }
+
+ err = (i2c->atomic_xfer_state == STATE_DONE) ? num : -EIO;
+ spin_unlock(&i2c->atomic_lock);
+
+ i2c->atomic = false;
+ xiic_i2c_runtime_suspend(i2c->dev);
+
+ return err;
+}
+
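+/*
+ * Note (illustrative): the i2c core selects master_xfer_atomic instead
+ * of master_xfer when a transfer must run with interrupts disabled,
+ * e.g. on the shutdown/poweroff path, hence the busy-polling above
+ * instead of completion waits.
+ */
+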
static u32 xiic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
@@ -1205,6 +1399,7 @@ static u32 xiic_func(struct i2c_adapter *adap)
static const struct i2c_algorithm xiic_algorithm = {
.master_xfer = xiic_xfer,
+ .master_xfer_atomic = xiic_xfer_atomic,
.functionality = xiic_func,
};
@@ -1268,6 +1463,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
DRIVER_NAME " %s", pdev->name);
mutex_init(&i2c->lock);
+ spin_lock_init(&i2c->atomic_lock);
i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk))
@@ -1365,29 +1561,6 @@ static void xiic_i2c_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
}
-static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
-{
- struct xiic_i2c *i2c = dev_get_drvdata(dev);
-
- clk_disable(i2c->clk);
-
- return 0;
-}
-
-static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
-{
- struct xiic_i2c *i2c = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(i2c->clk);
- if (ret) {
- dev_err(dev, "Cannot enable clock.\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dev_pm_ops xiic_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
xiic_i2c_runtime_resume, NULL)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index b7c10ced5a43..8fe9ddff8e96 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 14ae0cfc325e..d2499f302b50 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -355,6 +355,25 @@ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
{}
};
+static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
+ /*
+ * When a 400KHz freq is used on this model of ELAN touchpad in Linux,
+ * excessive smoothing (similar to when the touchpad's firmware detects
+ * a noisy signal) is sometimes applied. As some devices' (e.g., Lenovo
+ * V15 G4) ACPI tables specify a 400KHz frequency for this device and
+ * some I2C busses (e.g., Designware I2C) default to a 400KHz freq,
+ * force the speed to 100KHz as a workaround.
+ *
+ * For future investigation: This problem may be related to the default
+ * HCNT/LCNT values given by some busses' drivers, because they are not
+ * specified in the aforementioned devices' ACPI tables, and because
+ * the device works without issues on Windows at what is expected to be
+ * a 400KHz frequency. The root cause of the issue is not known.
+ */
+ { "ELAN06FA", 0 },
+ {}
+};
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -373,6 +392,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
+ if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0)
+ lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ;
+
return AE_OK;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7c810893bfa3..7ad1ad5c8c3f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -583,6 +583,9 @@ static int i2c_device_probe(struct device *dev)
goto err_detach_pm_domain;
}
+ client->debugfs = debugfs_create_dir(dev_name(&client->dev),
+ client->adapter->debugfs);
+
if (driver->probe)
status = driver->probe(client);
else
@@ -602,6 +605,7 @@ static int i2c_device_probe(struct device *dev)
return 0;
err_release_driver_resources:
+ debugfs_remove_recursive(client->debugfs);
devres_release_group(&client->dev, client->devres_group_id);
err_detach_pm_domain:
dev_pm_domain_detach(&client->dev, do_power_on);
@@ -627,6 +631,8 @@ static void i2c_device_remove(struct device *dev)
driver->remove(client);
}
+ debugfs_remove_recursive(client->debugfs);
+
devres_release_group(&client->dev, client->devres_group_id);
dev_pm_domain_detach(&client->dev, true);
@@ -1058,6 +1064,7 @@ void i2c_unregister_device(struct i2c_client *client)
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
}
@@ -1562,6 +1569,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
+ put_device(&adap->dev);
goto out_list;
}
@@ -2519,9 +2527,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
return 0;
/* Set up a temporary client to help detect callback */
- temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ temp_client = kzalloc(sizeof(*temp_client), GFP_KERNEL);
if (!temp_client)
return -ENOMEM;
+
temp_client->adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
@@ -2535,6 +2544,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
}
kfree(temp_client);
+
return err;
}
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 5946c0d0aef9..275d1d0e910f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -91,7 +91,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
}
static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
struct eeprom_data *eeprom;
unsigned long flags;
@@ -106,7 +106,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
}
static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
struct eeprom_data *eeprom;
unsigned long flags;
@@ -165,8 +165,8 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client)
sysfs_bin_attr_init(&eeprom->bin);
eeprom->bin.attr.name = "slave-eeprom";
eeprom->bin.attr.mode = S_IRUSR | S_IWUSR;
- eeprom->bin.read = i2c_slave_eeprom_bin_read;
- eeprom->bin.write = i2c_slave_eeprom_bin_write;
+ eeprom->bin.read_new = i2c_slave_eeprom_bin_read;
+ eeprom->bin.write_new = i2c_slave_eeprom_bin_write;
eeprom->bin.size = size;
ret = sysfs_create_bin_file(&client->dev.kobj, &eeprom->bin);
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 0d6fbaa48248..6de4307050dd 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -38,6 +38,7 @@ enum testunit_regs {
enum testunit_flags {
TU_FLAG_IN_PROCESS,
+ TU_FLAG_NACK,
};
struct testunit_data {
@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (tu->flags & (BIT(TU_FLAG_IN_PROCESS) | BIT(TU_FLAG_NACK))) {
+ ret = -EBUSY;
+ break;
+ }
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (tu->flags & (BIT(TU_FLAG_IN_PROCESS) | BIT(TU_FLAG_NACK))) {
+ ret = -EBUSY;
+ break;
+ }
if (tu->reg_idx < TU_NUM_REGS)
tu->regs[tu->reg_idx] = *val;
@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
* here because we still need them in the workqueue!
*/
tu->reg_idx = 0;
+
+ clear_bit(TU_FLAG_NACK, &tu->flags);
break;
case I2C_SLAVE_READ_PROCESSED:
@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
}
+ /* If an error occurred at some point, NACK everything until the next STOP */
+ if (ret)
+ set_bit(TU_FLAG_NACK, &tu->flags);
+
return ret;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index dce18f763a09..77a740561fd7 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
}
/*
- * Check if there are pinctrl states at all. Note: we cant' use
+ * Check if there are pinctrl states at all. Note: we can't use
* devm_pinctrl_get_select() because we need to distinguish between
* the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
*/
@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
pm_runtime_no_callbacks(&pdev->dev);
/* switch to first parent as active master */
- i2c_demux_activate_master(priv, 0);
+ err = i2c_demux_activate_master(priv, 0);
+ if (err)
+ goto err_rollback;
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 42310c9a00c2..d5dc4180afbc 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1919,7 +1919,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
if (master->ops->set_speed) {
- master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
if (ret)
goto err_bus_cleanup;
}
@@ -2486,7 +2486,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
- int ret;
+ int ret, id = -ENODEV;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
@@ -2497,7 +2497,15 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->timeout = 1000;
adap->retries = 3;
- ret = i2c_add_adapter(adap);
+ if (master->dev.of_node)
+ id = of_alias_get_id(master->dev.of_node, "i2c");
+
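+ /*
+ * Illustrative DT snippet (hypothetical label) pinning the bus
+ * number of the I2C adapter exposed by this I3C master:
+ *
+ * aliases { i2c5 = &i3c0; };
+ */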
+ if (id >= 0) {
+ adap->nr = id;
+ ret = i2c_add_numbered_adapter(adap);
+ } else {
+ ret = i2c_add_adapter(adap);
+ }
if (ret)
return ret;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 90dee3ec5520..77da199c7413 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -57,3 +57,14 @@ config MIPI_I3C_HCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci.
+
+config MIPI_I3C_HCI_PCI
+ tristate "MIPI I3C Host Controller Interface PCI support"
+ depends on MIPI_I3C_HCI
+ depends on PCI
+ help
+ Support for MIPI I3C Host Controller Interface compatible hardware
+ on the PCI bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci-pci.
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index d4b80eb8cecd..2fbf8b2addd0 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -251,14 +251,6 @@ struct dw_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
-static u8 even_parity(u8 p)
-{
- p ^= p >> 4;
- p &= 0xf;
-
- return (0x9669 >> p) & 1;
-}
-
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
@@ -848,7 +840,7 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
u32 olddevs, newdevs;
- u8 p, last_addr = 0;
+ u8 last_addr = 0;
int ret, pos;
ret = pm_runtime_resume_and_get(master->dev);
@@ -873,9 +865,9 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
}
master->devs[pos].addr = ret;
- p = even_parity(ret);
last_addr = ret;
- ret |= (p << 7);
+
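+ /*
+ * parity8() is the XOR of all bits; the dynamic-address parity
+ * bit must make the 8-bit value odd parity, so set BIT(7) only
+ * when the 7-bit address already has even parity.
+ */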
+ ret |= parity8(ret) ? 0 : BIT(7);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
@@ -1647,6 +1639,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
void dw_i3c_common_remove(struct dw_i3c_master *master)
{
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
pm_runtime_disable(master->dev);
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 06c0592487d3..fedbe6624a1c 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -889,8 +889,7 @@ static u32 prepare_rr0_dev_address(u32 addr)
ret |= (addr & GENMASK(9, 7)) << 6;
/* RR0[0] = ~XOR(addr[6:0]) */
- if (!(hweight8(addr & 0x7f) & 1))
- ret |= 1;
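+ /* parity8() == XOR of the bits, so BIT(0) implements ~XOR(addr[6:0]) */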
+ ret |= parity8(addr & 0x7f) ? 0 : BIT(0);
return ret;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index 1f8cd5c48fde..e3d3ef757035 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -5,3 +5,4 @@ mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
dat_v1.o dct_v1.o \
hci_quirks.o
+obj-$(CONFIG_MIPI_I3C_HCI_PCI) += mipi-i3c-hci-pci.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 47b9b4d4ed3f..85c4916972e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -40,15 +40,6 @@
#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
-static inline bool dynaddr_parity(unsigned int addr)
-{
- addr |= 1 << 7;
- addr += addr >> 4;
- addr += addr >> 2;
- addr += addr >> 1;
- return (addr & 1);
-}
-
static int hci_dat_v1_init(struct i3c_hci *hci)
{
unsigned int dat_idx;
@@ -123,7 +114,7 @@ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
- (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ (parity8(address) ? 0 : DAT_0_DYNADDR_PARITY);
dat_w0_write(dat_idx, dat_w0);
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index e8e56a8d2057..491dfe70b660 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -758,9 +758,26 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
complete(&rh->op_done);
if (status & INTR_TRANSFER_ABORT) {
+ u32 ring_status;
+
dev_notice_ratelimited(&hci->master.dev,
"ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
+ ring_status = rh_reg_read(RING_STATUS);
+ if (!(ring_status & RING_STATUS_RUNNING) &&
+ status & INTR_TRANSFER_COMPLETION &&
+ status & INTR_TRANSFER_ERR) {
+ /*
+ * A ring stop followed by a run is an
+ * Intel-specific quirk required after
+ * resuming the halted controller. Apply it
+ * only when the ring is not in the running
+ * state after a transfer error.
+ */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
+ }
}
if (status & INTR_WARN_INS_STOP_MODE)
dev_warn_ratelimited(&hci->master.dev,
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
new file mode 100644
index 000000000000..c6c3a3ec11ea
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI glue code for MIPI I3C HCI driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ */
+#include <linux/acpi.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+struct mipi_i3c_hci_pci_info {
+ int (*init)(struct pci_dev *pci);
+};
+
+#define INTEL_PRIV_OFFSET 0x2b0
+#define INTEL_PRIV_SIZE 0x28
+#define INTEL_PRIV_RESETS 0x04
+#define INTEL_PRIV_RESETS_RESET BIT(0)
+#define INTEL_PRIV_RESETS_RESET_DONE BIT(1)
+
+static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+
+static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
+{
+ unsigned long timeout;
+ void __iomem *priv;
+
+ priv = devm_ioremap(&pci->dev,
+ pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
+ INTEL_PRIV_SIZE);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Assert reset, wait for completion and release reset */
+ writel(0, priv + INTEL_PRIV_RESETS);
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (!(readl(priv + INTEL_PRIV_RESETS) &
+ INTEL_PRIV_RESETS_RESET_DONE)) {
+ if (time_after(jiffies, timeout))
+ break;
+ cpu_relax();
+ }
+ writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
+
+ return 0;
+}
+
+static struct mipi_i3c_hci_pci_info intel_info = {
+ .init = mipi_i3c_hci_pci_intel_init,
+};
+
+static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct mipi_i3c_hci_pci_info *info;
+ struct platform_device *pdev;
+ struct resource res[2];
+ int dev_id, ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = pci->irq;
+ res[1].end = pci->irq;
+
+ dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
+ if (dev_id < 0)
+ return dev_id;
+
+ pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->dev.parent = &pci->dev;
+ device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
+ if (info && info->init) {
+ ret = info->init(pci);
+ if (ret)
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ pci_set_drvdata(pci, pdev);
+
+ return 0;
+
+err:
+ platform_device_put(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+ return ret;
+}
+
+static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
+{
+ struct platform_device *pdev = pci_get_drvdata(pci);
+ int dev_id = pdev->id;
+
+ platform_device_unregister(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+}
+
+static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Panther Lake-H */
+ { PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
+ /* Panther Lake-P */
+ { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
+
+static struct pci_driver mipi_i3c_hci_pci_driver = {
+ .name = "mipi_i3c_hci_pci",
+ .id_table = mipi_i3c_hci_pci_devices,
+ .probe = mipi_i3c_hci_pci_probe,
+ .remove = mipi_i3c_hci_pci_remove,
+};
+
+module_pci_driver(mipi_i3c_hci_pci_driver);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 0a3c37510079..a34af1ba09bd 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ac4d8faa3886..5687089e406a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -51,11 +51,14 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
+#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>
+#include <asm/smp.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -227,6 +230,15 @@ static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
return 0;
}
+static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
+{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ mwait_play_dead(eax);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
@@ -1651,6 +1663,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf),
{}
};
@@ -1797,7 +1810,11 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle");
+
state->enter = intel_idle;
+ state->enter_dead = intel_idle_enter_dead;
state->enter_s2idle = intel_idle_s2idle;
}
}
@@ -2147,6 +2164,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
!cpuidle_state_table[cstate].enter_s2idle)
break;
+ if (!cpuidle_state_table[cstate].enter_dead)
+ cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead;
+
/* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
@@ -2316,10 +2336,7 @@ static int __init intel_idle_init(void)
return -ENODEV;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
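 
For context on the cpuid(CPUID_LEAF_MWAIT, ...) call above: EDX (mwait_substates) packs the number of MWAIT sub-states for each C-state in four-bit groups, C0 in bits 3:0, C1 in bits 7:4, and so on. A minimal decoding sketch (illustrative only):

/* Illustrative sketch: sub-state count for C-state "cstate" (C0 == 0). */
static unsigned int mwait_num_substates(unsigned int mwait_substates,
					unsigned int cstate)
{
	return (mwait_substates >> (cstate * 4)) & 0xf;
}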
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index 3d5c8719db3d..517e494ba555 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -9,37 +9,93 @@
#define _ADXL345_H_
#define ADXL345_REG_DEVID 0x00
+#define ADXL345_REG_THRESH_TAP 0x1D
#define ADXL345_REG_OFSX 0x1E
#define ADXL345_REG_OFSY 0x1F
#define ADXL345_REG_OFSZ 0x20
#define ADXL345_REG_OFS_AXIS(index) (ADXL345_REG_OFSX + (index))
+
+/* Tap duration */
+#define ADXL345_REG_DUR 0x21
+/* Tap latency */
+#define ADXL345_REG_LATENT 0x22
+/* Tap window */
+#define ADXL345_REG_WINDOW 0x23
+/* Activity threshold */
+#define ADXL345_REG_THRESH_ACT 0x24
+/* Inactivity threshold */
+#define ADXL345_REG_THRESH_INACT 0x25
+/* Inactivity time */
+#define ADXL345_REG_TIME_INACT 0x26
+/* Axis enable control for activity and inactivity detection */
+#define ADXL345_REG_ACT_INACT_CTRL 0x27
+/* Free-fall threshold */
+#define ADXL345_REG_THRESH_FF 0x28
+/* Free-fall time */
+#define ADXL345_REG_TIME_FF 0x29
+/* Axis control for single tap or double tap */
+#define ADXL345_REG_TAP_AXIS 0x2A
+/* Source of single tap or double tap */
+#define ADXL345_REG_ACT_TAP_STATUS 0x2B
+/* Data rate and power mode control */
#define ADXL345_REG_BW_RATE 0x2C
#define ADXL345_REG_POWER_CTL 0x2D
+#define ADXL345_REG_INT_ENABLE 0x2E
+#define ADXL345_REG_INT_MAP 0x2F
+#define ADXL345_REG_INT_SOURCE 0x30
+#define ADXL345_REG_INT_SOURCE_MSK 0xFF
#define ADXL345_REG_DATA_FORMAT 0x31
-#define ADXL345_REG_DATAX0 0x32
-#define ADXL345_REG_DATAY0 0x34
-#define ADXL345_REG_DATAZ0 0x36
-#define ADXL345_REG_DATA_AXIS(index) \
- (ADXL345_REG_DATAX0 + (index) * sizeof(__le16))
+#define ADXL345_REG_XYZ_BASE 0x32
+#define ADXL345_REG_DATA_AXIS(index) \
+ (ADXL345_REG_XYZ_BASE + (index) * sizeof(__le16))
+
+#define ADXL345_REG_FIFO_CTL 0x38
+#define ADXL345_FIFO_CTL_SAMPLES_MSK GENMASK(4, 0)
+/* 0: INT1, 1: INT2 */
+#define ADXL345_FIFO_CTL_TRIGGER_MSK BIT(5)
+#define ADXL345_FIFO_CTL_MODE_MSK GENMASK(7, 6)
+#define ADXL345_REG_FIFO_STATUS 0x39
+#define ADXL345_REG_FIFO_STATUS_MSK 0x3F
+#define ADXL345_INT_OVERRUN BIT(0)
+#define ADXL345_INT_WATERMARK BIT(1)
+#define ADXL345_INT_FREE_FALL BIT(2)
+#define ADXL345_INT_INACTIVITY BIT(3)
+#define ADXL345_INT_ACTIVITY BIT(4)
+#define ADXL345_INT_DOUBLE_TAP BIT(5)
+#define ADXL345_INT_SINGLE_TAP BIT(6)
+#define ADXL345_INT_DATA_READY BIT(7)
+
+/*
+ * BW_RATE bits - Bandwidth and output data rate. The default value is
+ * 0x0A, which translates to a 100 Hz output data rate.
+ */
#define ADXL345_BW_RATE GENMASK(3, 0)
+#define ADXL345_BW_LOW_POWER BIT(4)
#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
-#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
+#define ADXL345_POWER_CTL_WAKEUP GENMASK(1, 0)
+#define ADXL345_POWER_CTL_SLEEP BIT(2)
+#define ADXL345_POWER_CTL_MEASURE BIT(3)
+#define ADXL345_POWER_CTL_AUTO_SLEEP BIT(4)
+#define ADXL345_POWER_CTL_LINK BIT(5)
-#define ADXL345_DATA_FORMAT_RANGE GENMASK(1, 0) /* Set the g range */
-#define ADXL345_DATA_FORMAT_JUSTIFY BIT(2) /* Left-justified (MSB) mode */
-#define ADXL345_DATA_FORMAT_FULL_RES BIT(3) /* Up to 13-bits resolution */
-#define ADXL345_DATA_FORMAT_SPI_3WIRE BIT(6) /* 3-wire SPI mode */
-#define ADXL345_DATA_FORMAT_SELF_TEST BIT(7) /* Enable a self test */
-
+/* Set the g range */
+#define ADXL345_DATA_FORMAT_RANGE GENMASK(1, 0)
+/* Data is left justified */
+#define ADXL345_DATA_FORMAT_JUSTIFY BIT(2)
+/* Up to 13-bits resolution */
+#define ADXL345_DATA_FORMAT_FULL_RES BIT(3)
+#define ADXL345_DATA_FORMAT_SPI_3WIRE BIT(6)
+#define ADXL345_DATA_FORMAT_SELF_TEST BIT(7)
#define ADXL345_DATA_FORMAT_2G 0
#define ADXL345_DATA_FORMAT_4G 1
#define ADXL345_DATA_FORMAT_8G 2
#define ADXL345_DATA_FORMAT_16G 3
#define ADXL345_DEVID 0xE5
+#define ADXL345_FIFO_SIZE 32
/*
* In full-resolution mode, scale factor is maintained at ~4 mg/LSB
@@ -62,6 +118,7 @@ struct adxl345_chip_info {
};
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ bool fifo_delay_default,
int (*setup)(struct device*, struct regmap*));
#endif /* _ADXL345_H_ */
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index b1efab0f6404..d1b2d3985a40 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -7,6 +7,8 @@
* Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL345.pdf
*/
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -14,36 +16,92 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
#include "adxl345.h"
-struct adxl345_data {
+#define ADXL345_FIFO_BYPASS 0
+#define ADXL345_FIFO_FIFO 1
+#define ADXL345_FIFO_STREAM 2
+
+#define ADXL345_DIRS 3
+
+#define ADXL345_INT_NONE 0xff
+#define ADXL345_INT1 0
+#define ADXL345_INT2 1
+
+struct adxl345_state {
const struct adxl345_chip_info *info;
struct regmap *regmap;
+ bool fifo_delay; /* FIFO read delay needed for fast SPI hosts */
+ int irq;
+ u8 intio;
+ u8 int_map;
+ u8 watermark;
+ u8 fifo_mode;
+ __le16 fifo_buf[ADXL345_DIRS * ADXL345_FIFO_SIZE + 1] __aligned(IIO_DMA_MINALIGN);
};
-#define ADXL345_CHANNEL(index, axis) { \
+#define ADXL345_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .address = index, \
+ .address = (reg), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (index), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
}
+enum adxl345_chans {
+ chan_x, chan_y, chan_z,
+};
+
static const struct iio_chan_spec adxl345_channels[] = {
- ADXL345_CHANNEL(0, X),
- ADXL345_CHANNEL(1, Y),
- ADXL345_CHANNEL(2, Z),
+ ADXL345_CHANNEL(0, chan_x, X),
+ ADXL345_CHANNEL(1, chan_y, Y),
+ ADXL345_CHANNEL(2, chan_z, Z),
};
+static const unsigned long adxl345_scan_masks[] = {
+ BIT(chan_x) | BIT(chan_y) | BIT(chan_z),
+ 0
+};
+
+static int adxl345_set_interrupts(struct adxl345_state *st)
+{
+ int ret;
+ unsigned int int_enable = st->int_map;
+ unsigned int int_map;
+
+ /*
+ * Bits set to 0 in the INT map register route their respective interrupts
+ * to the INT1 pin, while bits set to 1 route them to the INT2 pin. Invert
+ * the enable map as needed so that the enabled sources end up on the pin
+ * selected by intio.
+ */
+ int_map = FIELD_GET(ADXL345_REG_INT_SOURCE_MSK,
+ st->intio ? st->int_map : ~st->int_map);
+
+ ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, int_map);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, ADXL345_REG_INT_ENABLE, int_enable);
+}
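+
+A worked example of the inversion above: with st->int_map = ADXL345_INT_WATERMARK | ADXL345_INT_OVERRUN (0x03) and intio == ADXL345_INT1, INT_MAP is written as ~0x03 & 0xFF = 0xFC, leaving the two enabled sources at 0 and therefore routed to INT1. With intio == ADXL345_INT2, INT_MAP is written as 0x03, routing the same sources to INT2. INT_ENABLE always receives the raw st->int_map.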
+
static int adxl345_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- struct adxl345_data *data = iio_priv(indio_dev);
+ struct adxl345_state *st = iio_priv(indio_dev);
__le16 accel;
long long samp_freq_nhz;
unsigned int regval;
@@ -56,7 +114,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
* ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
* and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
*/
- ret = regmap_bulk_read(data->regmap,
+ ret = regmap_bulk_read(st->regmap,
ADXL345_REG_DATA_AXIS(chan->address),
&accel, sizeof(accel));
if (ret < 0)
@@ -66,10 +124,10 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = data->info->uscale;
+ *val2 = st->info->uscale;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
- ret = regmap_read(data->regmap,
+ ret = regmap_read(st->regmap,
ADXL345_REG_OFS_AXIS(chan->address), &regval);
if (ret < 0)
return ret;
@@ -81,7 +139,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = regmap_read(data->regmap, ADXL345_REG_BW_RATE, &regval);
+ ret = regmap_read(st->regmap, ADXL345_REG_BW_RATE, &regval);
if (ret < 0)
return ret;
@@ -99,7 +157,7 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- struct adxl345_data *data = iio_priv(indio_dev);
+ struct adxl345_state *st = iio_priv(indio_dev);
s64 n;
switch (mask) {
@@ -108,14 +166,14 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
* 8-bit resolution at +/- 2g, that is 4x accel data scale
* factor
*/
- return regmap_write(data->regmap,
+ return regmap_write(st->regmap,
ADXL345_REG_OFS_AXIS(chan->address),
val / 4);
case IIO_CHAN_INFO_SAMP_FREQ:
n = div_s64(val * NANOHZ_PER_HZ + val2,
ADXL345_BASE_RATE_NANO_HZ);
- return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
+ return regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE,
clamp_val(ilog2(n), 0,
ADXL345_BW_RATE));
@@ -124,6 +182,24 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int fifo_mask = 0x1F;
+ int ret;
+
+ value = min(value, ADXL345_FIFO_SIZE - 1);
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_FIFO_CTL, fifo_mask, value);
+ if (ret)
+ return ret;
+
+ st->watermark = value;
+ st->int_map |= ADXL345_INT_WATERMARK;
+
+ return 0;
+}
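+
+For reference, FIFO_CTL[4:0] holds the watermark in samples, so the largest programmable value is ADXL345_FIFO_SIZE - 1 = 31: a request for, say, 40 samples is clamped to 31, and the watermark interrupt asserts once that many x/y/z entries are queued.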
+
static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
@@ -138,6 +214,33 @@ static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
+/**
+ * adxl345_set_measure_en() - Enable and disable measuring.
+ *
+ * @st: The device data.
+ * @en: Enable measurements, else standby mode.
+ *
+ * For lowest power operation, standby mode can be used. In standby mode,
+ * current consumption is reduced to 0.1 uA (typical). In this
+ * mode no measurements are made. Placing the device into standby mode
+ * preserves the contents of the FIFO.
+ *
+ * Return: Returns 0 if successful, or a negative error value.
+ */
+static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
+{
+ unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
+
+ return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
+}
+
+static void adxl345_powerdown(void *ptr)
+{
+ struct adxl345_state *st = ptr;
+
+ adxl345_set_measure_en(st, false);
+}
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"0.09765625 0.1953125 0.390625 0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600 3200"
);
@@ -151,37 +254,244 @@ static const struct attribute_group adxl345_attrs_group = {
.attrs = adxl345_attrs,
};
-static const struct iio_info adxl345_info = {
- .attrs = &adxl345_attrs_group,
- .read_raw = adxl345_read_raw,
- .write_raw = adxl345_write_raw,
- .write_raw_get_fmt = adxl345_write_raw_get_fmt,
+static int adxl345_set_fifo(struct adxl345_state *st)
+{
+ int ret;
+
+ /* FIFO should only be configured while in standby mode */
+ ret = adxl345_set_measure_en(st, false);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
+ FIELD_PREP(ADXL345_FIFO_CTL_SAMPLES_MSK,
+ st->watermark) |
+ FIELD_PREP(ADXL345_FIFO_CTL_TRIGGER_MSK,
+ st->intio) |
+ FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
+ st->fifo_mode));
+ if (ret < 0)
+ return ret;
+
+ return adxl345_set_measure_en(st, true);
+}
+
+/**
+ * adxl345_get_samples() - Read number of FIFO entries.
+ * @st: The initialized state instance of this driver.
+ *
+ * The sensor does not support treating any axis individually, or exclude them
+ * from measuring.
+ *
+ * Return: negative error, or value.
+ */
+static int adxl345_get_samples(struct adxl345_state *st)
+{
+ unsigned int regval = 0;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_FIFO_STATUS, &regval);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(ADXL345_REG_FIFO_STATUS_MSK, regval);
+}
+
+/**
+ * adxl345_fifo_transfer() - Read a given number of FIFO entries.
+ * @st: The instance of the state object of this sensor.
+ * @samples: The number of FIFO entries (x/y/z sample sets) to read.
+ *
+ * It is recommended that a multiple-byte read of all registers be performed to
+ * prevent a change in data between reads of sequential registers. That is to
+ * read out the data registers X0, X1, Y0, Y1, Z0, Z1, i.e. 6 bytes at once.
+ *
+ * Return: 0 or error value.
+ */
+static int adxl345_fifo_transfer(struct adxl345_state *st, int samples)
+{
+ size_t count;
+ int i, ret = 0;
+
+ /* count is 3x the fifo_buf element size, hence 6 bytes */
+ count = sizeof(st->fifo_buf[0]) * ADXL345_DIRS;
+ for (i = 0; i < samples; i++) {
+ /* read 3x 2 byte elements from base address into next fifo_buf position */
+ ret = regmap_bulk_read(st->regmap, ADXL345_REG_XYZ_BASE,
+ st->fifo_buf + (i * count / 2), count);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * To ensure that the FIFO has completely popped, there must be at least 5
+ * us between the end of reading the data registers, signified by the
+ * transition to register 0x38 from 0x37 or the CS pin going high, and the
+ * start of new reads of the FIFO or reading the FIFO_STATUS register. For
+ * SPI operation at 1.5 MHz or lower, the register addressing portion of the
+ * transmission is sufficient delay to ensure the FIFO has completely
+ * popped. For SPI operation greater than 1.5 MHz, it is necessary to
+ * de-assert the CS pin so that a total of 5 us elapses; up to 3.4 us of
+ * that must come from extra delay at 5 MHz operation.
+ */
+ if (st->fifo_delay && samples > 1)
+ udelay(3);
+ }
+ return ret;
+}
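+
+On the index arithmetic above: one FIFO entry is ADXL345_DIRS (3) __le16 words, so count = 6 bytes and entry i lands at fifo_buf + (i * count / 2) = &fifo_buf[3 * i], packing entries back to back with no padding between them.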
+
+/**
+ * adxl345_fifo_reset() - Empty the FIFO in an error condition.
+ * @st: The instance of the state object of the sensor.
+ *
+ * Read out all elements of the FIFO. Reading the interrupt source register
+ * clears any latched interrupt conditions.
+ */
+static void adxl345_fifo_reset(struct adxl345_state *st)
+{
+ int regval;
+ int samples;
+
+ adxl345_set_measure_en(st, false);
+
+ samples = adxl345_get_samples(st);
+ if (samples > 0)
+ adxl345_fifo_transfer(st, samples);
+
+ regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
+
+ adxl345_set_measure_en(st, true);
+}
+
+static int adxl345_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adxl345_set_interrupts(st);
+ if (ret < 0)
+ return ret;
+
+ st->fifo_mode = ADXL345_FIFO_STREAM;
+ return adxl345_set_fifo(st);
+}
+
+static int adxl345_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->fifo_mode = ADXL345_FIFO_BYPASS;
+ ret = adxl345_set_fifo(st);
+ if (ret < 0)
+ return ret;
+
+ st->int_map = 0x00;
+ return adxl345_set_interrupts(st);
+}
+
+static const struct iio_buffer_setup_ops adxl345_buffer_ops = {
+ .postenable = adxl345_buffer_postenable,
+ .predisable = adxl345_buffer_predisable,
};
-static int adxl345_powerup(void *regmap)
+static int adxl345_get_status(struct adxl345_state *st)
{
- return regmap_write(regmap, ADXL345_REG_POWER_CTL, ADXL345_POWER_CTL_MEASURE);
+ int ret;
+ unsigned int regval;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(ADXL345_REG_INT_SOURCE_MSK, regval);
}
-static void adxl345_powerdown(void *regmap)
+static int adxl345_fifo_push(struct iio_dev *indio_dev,
+ int samples)
{
- regmap_write(regmap, ADXL345_REG_POWER_CTL, ADXL345_POWER_CTL_STANDBY);
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int i, ret;
+
+ if (samples <= 0)
+ return -EINVAL;
+
+ ret = adxl345_fifo_transfer(st, samples);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ADXL345_DIRS * samples; i += ADXL345_DIRS)
+ iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+
+ return 0;
+}
+
+/**
+ * adxl345_irq_handler() - Handle irqs of the ADXL345.
+ * @irq: The irq being handled.
+ * @p: The struct iio_dev pointer for the device.
+ *
+ * Return: IRQ_HANDLED if an interrupt was serviced, IRQ_NONE otherwise.
+ */
+static irqreturn_t adxl345_irq_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int int_stat;
+ int samples;
+
+ int_stat = adxl345_get_status(st);
+ if (int_stat <= 0)
+ return IRQ_NONE;
+
+ if (int_stat & ADXL345_INT_OVERRUN)
+ goto err;
+
+ if (int_stat & ADXL345_INT_WATERMARK) {
+ samples = adxl345_get_samples(st);
+ if (samples < 0)
+ goto err;
+
+ if (adxl345_fifo_push(indio_dev, samples) < 0)
+ goto err;
+ }
+ return IRQ_HANDLED;
+
+err:
+ adxl345_fifo_reset(st);
+
+ return IRQ_HANDLED;
}
+static const struct iio_info adxl345_info = {
+ .attrs = &adxl345_attrs_group,
+ .read_raw = adxl345_read_raw,
+ .write_raw = adxl345_write_raw,
+ .write_raw_get_fmt = adxl345_write_raw_get_fmt,
+ .hwfifo_set_watermark = adxl345_set_watermark,
+};
+
/**
- * adxl345_core_probe() - probe and setup for the adxl345 accelerometer,
- * also covers the adlx375 accelerometer
+ * adxl345_core_probe() - Probe and setup for the accelerometer.
* @dev: Driver model representation of the device
* @regmap: Regmap instance for the device
+ * @fifo_delay_default: True if FIFO reads over SPI need an extra delay
* @setup: Setup routine to be executed right before the standard device
* setup
*
+ * For SPI operation faster than 1.5 MHz, it is necessary to deassert the CS
+ * pin between FIFO reads to ensure a total delay of 5 us; the register
+ * addressing alone is then too short, leaving up to 3.4 us of extra delay
+ * needed at 5 MHz operation. This is not a concern when using I2C mode
+ * because the communication rate is low enough to ensure a sufficient delay
+ * between FIFO reads.
+ * Ref: "Retrieving Data from FIFO", p. 21 of 36, Data Sheet ADXL345 Rev. G
+ *
* Return: 0 on success, negative errno on error
*/
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ bool fifo_delay_default,
int (*setup)(struct device*, struct regmap*))
{
- struct adxl345_data *data;
+ struct adxl345_state *st;
struct iio_dev *indio_dev;
u32 regval;
unsigned int data_format_mask = (ADXL345_DATA_FORMAT_RANGE |
@@ -190,30 +500,32 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
ADXL345_DATA_FORMAT_SELF_TEST);
int ret;
- indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
- data = iio_priv(indio_dev);
- data->regmap = regmap;
- data->info = device_get_match_data(dev);
- if (!data->info)
+ st = iio_priv(indio_dev);
+ st->regmap = regmap;
+ st->info = device_get_match_data(dev);
+ if (!st->info)
return -ENODEV;
+ st->fifo_delay = fifo_delay_default;
- indio_dev->name = data->info->name;
+ indio_dev->name = st->info->name;
indio_dev->info = &adxl345_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adxl345_channels;
indio_dev->num_channels = ARRAY_SIZE(adxl345_channels);
+ indio_dev->available_scan_masks = adxl345_scan_masks;
if (setup) {
/* Perform optional initial bus specific configuration */
- ret = setup(dev, data->regmap);
+ ret = setup(dev, st->regmap);
if (ret)
return ret;
/* Enable full-resolution mode */
- ret = regmap_update_bits(data->regmap, ADXL345_REG_DATA_FORMAT,
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
data_format_mask,
ADXL345_DATA_FORMAT_FULL_RES);
if (ret)
@@ -222,14 +534,14 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
} else {
/* Enable full-resolution mode (init all data_format bits) */
- ret = regmap_write(data->regmap, ADXL345_REG_DATA_FORMAT,
+ ret = regmap_write(st->regmap, ADXL345_REG_DATA_FORMAT,
ADXL345_DATA_FORMAT_FULL_RES);
if (ret)
return dev_err_probe(dev, ret,
"Failed to set data range\n");
}
- ret = regmap_read(data->regmap, ADXL345_REG_DEVID, &regval);
+ ret = regmap_read(st->regmap, ADXL345_REG_DEVID, &regval);
if (ret < 0)
return dev_err_probe(dev, ret, "Error reading device ID\n");
@@ -238,14 +550,43 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
regval, ADXL345_DEVID);
/* Enable measurement mode */
- ret = adxl345_powerup(data->regmap);
+ ret = adxl345_set_measure_en(st, true);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to enable measurement mode\n");
- ret = devm_add_action_or_reset(dev, adxl345_powerdown, data->regmap);
+ ret = devm_add_action_or_reset(dev, adxl345_powerdown, st);
if (ret < 0)
return ret;
+ st->intio = ADXL345_INT1;
+ st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
+ if (st->irq < 0) {
+ st->intio = ADXL345_INT2;
+ st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT2");
+ if (st->irq < 0)
+ st->intio = ADXL345_INT_NONE;
+ }
+
+ if (st->intio != ADXL345_INT_NONE) {
+ /* FIFO_STREAM mode is going to be activated later */
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &adxl345_buffer_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, st->irq, NULL,
+ &adxl345_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
+ FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
+ ADXL345_FIFO_BYPASS));
+ if (ret < 0)
+ return ret;
+ }
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(adxl345_core_probe, "IIO_ADXL345");
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index cb23fb11fcd7..8c385dd6c01d 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -27,7 +27,7 @@ static int adxl345_i2c_probe(struct i2c_client *client)
if (IS_ERR(regmap))
return dev_err_probe(&client->dev, PTR_ERR(regmap), "Error initializing regmap\n");
- return adxl345_core_probe(&client->dev, regmap, NULL);
+ return adxl345_core_probe(&client->dev, regmap, false, NULL);
}
static const struct adxl345_chip_info adxl345_i2c_info = {
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index 968e7b390d4b..7e518aea17bf 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -12,6 +12,7 @@
#include "adxl345.h"
#define ADXL345_MAX_SPI_FREQ_HZ 5000000
+#define ADXL345_MAX_FREQ_NO_FIFO_DELAY 1500000
static const struct regmap_config adxl345_spi_regmap_config = {
.reg_bits = 8,
@@ -28,6 +29,7 @@ static int adxl345_spi_setup(struct device *dev, struct regmap *regmap)
static int adxl345_spi_probe(struct spi_device *spi)
{
struct regmap *regmap;
+ bool needs_delay;
/* Bail out if max_speed_hz exceeds 5 MHz */
if (spi->max_speed_hz > ADXL345_MAX_SPI_FREQ_HZ)
@@ -38,10 +40,11 @@ static int adxl345_spi_probe(struct spi_device *spi)
if (IS_ERR(regmap))
return dev_err_probe(&spi->dev, PTR_ERR(regmap), "Error initializing regmap\n");
+ needs_delay = spi->max_speed_hz > ADXL345_MAX_FREQ_NO_FIFO_DELAY;
if (spi->mode & SPI_3WIRE)
- return adxl345_core_probe(&spi->dev, regmap, adxl345_spi_setup);
+ return adxl345_core_probe(&spi->dev, regmap, needs_delay, adxl345_spi_setup);
else
- return adxl345_core_probe(&spi->dev, regmap, NULL);
+ return adxl345_core_probe(&spi->dev, regmap, needs_delay, NULL);
}
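 
A worked example of the threshold: a board limited to spi-max-frequency = 1 MHz yields needs_delay = false, since the register-addressing time alone then satisfies the 5 us FIFO-pop requirement; at the 5 MHz maximum, needs_delay = true and adxl345_fifo_transfer() adds the extra 3 us delay (plus the CS toggle) between entry reads.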
static const struct adxl345_chip_info adxl345_spi_info = {
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 009e6243c6cb..96ba028157ee 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -66,7 +66,7 @@ struct bma220_data {
struct {
s8 chans[3];
/* Ensure timestamp is naturally aligned. */
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 65aac60f1245..987212a7c038 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -129,6 +129,8 @@
#define FXLS8962AF_DEVICE_ID 0x62
#define FXLS8964AF_DEVICE_ID 0x84
+#define FXLS8974CF_DEVICE_ID 0x86
+#define FXLS8967AF_DEVICE_ID 0x87
/* Raw temp channel offset */
#define FXLS8962AF_TEMP_CENTER_VAL 25
@@ -766,6 +768,18 @@ static const struct fxls8962af_chip_info fxls_chip_info_table[] = {
.channels = fxls8962af_channels,
.num_channels = ARRAY_SIZE(fxls8962af_channels),
},
+ [fxls8967af] = {
+ .chip_id = FXLS8967AF_DEVICE_ID,
+ .name = "fxls8967af",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
+ [fxls8974cf] = {
+ .chip_id = FXLS8974CF_DEVICE_ID,
+ .name = "fxls8974cf",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
};
static const struct iio_info fxls8962af_info = {
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index 2e1bb43ef2a1..1b9156b6b2e3 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -30,6 +30,8 @@ static int fxls8962af_probe(struct i2c_client *client)
static const struct i2c_device_id fxls8962af_id[] = {
{ "fxls8962af", fxls8962af },
{ "fxls8964af", fxls8964af },
+ { "fxls8967af", fxls8967af },
+ { "fxls8974cf", fxls8974cf },
{}
};
MODULE_DEVICE_TABLE(i2c, fxls8962af_id);
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index 6eaa2803b26f..1c9adfc8c0dc 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -11,6 +11,8 @@ struct device;
enum {
fxls8962af,
fxls8964af,
+ fxls8967af,
+ fxls8974cf,
};
int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
diff --git a/drivers/iio/accel/kionix-kx022a-i2c.c b/drivers/iio/accel/kionix-kx022a-i2c.c
index b39a43ecadff..42388636ca31 100644
--- a/drivers/iio/accel/kionix-kx022a-i2c.c
+++ b/drivers/iio/accel/kionix-kx022a-i2c.c
@@ -38,7 +38,9 @@ static int kx022a_i2c_probe(struct i2c_client *i2c)
static const struct i2c_device_id kx022a_i2c_id[] = {
{ .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
{ .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx134-1211", .driver_data = (kernel_ulong_t)&kx134_chip_info },
{ .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
+ { .name = "kx134acr-lbz", .driver_data = (kernel_ulong_t)&kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, kx022a_i2c_id);
@@ -46,7 +48,9 @@ MODULE_DEVICE_TABLE(i2c, kx022a_i2c_id);
static const struct of_device_id kx022a_of_match[] = {
{ .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
{ .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "kionix,kx134-1211", .data = &kx134_chip_info },
{ .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
+ { .compatible = "rohm,kx134acr-lbz", .data = &kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
diff --git a/drivers/iio/accel/kionix-kx022a-spi.c b/drivers/iio/accel/kionix-kx022a-spi.c
index c38a47806a00..e30d21083dc8 100644
--- a/drivers/iio/accel/kionix-kx022a-spi.c
+++ b/drivers/iio/accel/kionix-kx022a-spi.c
@@ -38,7 +38,9 @@ static int kx022a_spi_probe(struct spi_device *spi)
static const struct spi_device_id kx022a_id[] = {
{ .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
{ .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx134-1211", .driver_data = (kernel_ulong_t)&kx134_chip_info },
{ .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
+ { .name = "kx134acr-lbz", .driver_data = (kernel_ulong_t)&kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, kx022a_id);
@@ -46,7 +48,9 @@ MODULE_DEVICE_TABLE(spi, kx022a_id);
static const struct of_device_id kx022a_of_match[] = {
{ .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
{ .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "kionix,kx134-1211", .data = &kx134_chip_info },
{ .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
+ { .compatible = "rohm,kx134acr-lbz", .data = &kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 670bac21965b..5aeb3b951ac5 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -5,6 +5,7 @@
* ROHM/KIONIX accelerometer driver
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -407,11 +408,21 @@ static const int kx022a_scale_table[][2] = {
{ 0, 4788403 },
};
+/* KX134ACR-LBZ ranges are (+/-) 8, 16, 32, 64 G */
+static const int kx134acr_lbz_scale_table[][2] = {
+ { 0, 2394202 },
+ { 0, 4788403 },
+ { 0, 9576807 },
+ { 0, 19153613 },
+};
+
static int kx022a_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
+ struct kx022a_data *data = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*vals = (const int *)kx022a_accel_samp_freq_table;
@@ -420,9 +431,8 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SCALE:
- *vals = (const int *)kx022a_scale_table;
- *length = ARRAY_SIZE(kx022a_scale_table) *
- ARRAY_SIZE(kx022a_scale_table[0]);
+ *vals = (const int *)data->chip_info->scale_table;
+ *length = data->chip_info->scale_table_size;
*type = IIO_VAL_INT_PLUS_NANO;
return IIO_AVAIL_LIST;
default:
@@ -438,17 +448,17 @@ static void kx022a_reg2freq(unsigned int val, int *val1, int *val2)
*val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1];
}
-static void kx022a_reg2scale(unsigned int val, unsigned int *val1,
- unsigned int *val2)
+static void kx022a_reg2scale(struct kx022a_data *data, unsigned int val,
+ unsigned int *val1, unsigned int *val2)
{
val &= KX022A_MASK_GSEL;
val >>= KX022A_GSEL_SHIFT;
- *val1 = kx022a_scale_table[val][0];
- *val2 = kx022a_scale_table[val][1];
+ *val1 = data->chip_info->scale_table[val][0];
+ *val2 = data->chip_info->scale_table[val][1];
}
-static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
+static int __kx022a_turn_on_off(struct kx022a_data *data, bool on)
{
int ret;
@@ -469,7 +479,7 @@ static int kx022a_turn_off_lock(struct kx022a_data *data)
int ret;
mutex_lock(&data->mutex);
- ret = kx022a_turn_on_off_unlocked(data, false);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
mutex_unlock(&data->mutex);
@@ -480,7 +490,7 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
{
int ret;
- ret = kx022a_turn_on_off_unlocked(data, true);
+ ret = __kx022a_turn_on_off(data, true);
mutex_unlock(&data->mutex);
return ret;
@@ -543,11 +553,11 @@ static int kx022a_write_raw(struct iio_dev *idev,
kx022a_turn_on_unlock(data);
break;
case IIO_CHAN_INFO_SCALE:
- n = ARRAY_SIZE(kx022a_scale_table);
+ n = data->chip_info->scale_table_size / 2;
while (n-- > 0)
- if (val == kx022a_scale_table[n][0] &&
- val2 == kx022a_scale_table[n][1])
+ if (val == data->chip_info->scale_table[n][0] &&
+ val2 == data->chip_info->scale_table[n][1])
break;
if (n < 0) {
ret = -EINVAL;
@@ -642,7 +652,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
if (ret < 0)
return ret;
- kx022a_reg2scale(regval, val, val2);
+ kx022a_reg2scale(data, regval, val, val2);
return IIO_VAL_INT_PLUS_NANO;
}
@@ -912,18 +922,19 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
{
int ret = 0;
- ret = kx022a_turn_off_lock(data);
+ guard(mutex)(&data->mutex);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
return ret;
ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI);
if (ret)
- goto unlock_out;
+ return ret;
ret = regmap_clear_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state &= ~KX022A_STATE_FIFO;
@@ -931,12 +942,7 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
kfree(data->fifo_buffer);
- return kx022a_turn_on_unlock(data);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
static int kx022a_buffer_predisable(struct iio_dev *idev)
@@ -959,33 +965,29 @@ static int kx022a_fifo_enable(struct kx022a_data *data)
if (!data->fifo_buffer)
return -ENOMEM;
- ret = kx022a_turn_off_lock(data);
+ guard(mutex)(&data->mutex);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
return ret;
/* Update watermark to HW */
ret = kx022a_fifo_set_wmi(data);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable buffer */
ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state |= KX022A_STATE_FIFO;
ret = regmap_set_bits(data->regmap, data->ien_reg,
KX022A_MASK_WMI);
if (ret)
- goto unlock_out;
-
- return kx022a_turn_on_unlock(data);
-
-unlock_out:
- mutex_unlock(&data->mutex);
+ return ret;
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
static int kx022a_buffer_postenable(struct iio_dev *idev)
@@ -1053,7 +1055,7 @@ static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
struct kx022a_data *data = iio_priv(idev);
irqreturn_t ret = IRQ_NONE;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled) {
iio_trigger_poll_nested(data->trig);
@@ -1068,8 +1070,6 @@ static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
ret = IRQ_HANDLED;
}
- mutex_unlock(&data->mutex);
-
return ret;
}
@@ -1079,32 +1079,26 @@ static int kx022a_trigger_set_state(struct iio_trigger *trig,
struct kx022a_data *data = iio_trigger_get_drvdata(trig);
int ret = 0;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled == state)
- goto unlock_out;
+ return 0;
if (data->state & KX022A_STATE_FIFO) {
dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
- ret = -EBUSY;
- goto unlock_out;
+ return -EBUSY;
}
- ret = kx022a_turn_on_off_unlocked(data, false);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
- goto unlock_out;
+ return ret;
data->trigger_enabled = state;
ret = kx022a_set_drdy_irq(data, state);
if (ret)
- goto unlock_out;
-
- ret = kx022a_turn_on_off_unlocked(data, true);
-
-unlock_out:
- mutex_unlock(&data->mutex);
+ return ret;
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
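 
The conversions in this file rely on the scoped-lock helper from <linux/cleanup.h>: guard(mutex)(&m) acquires the mutex and releases it automatically on every return path, which is what allows the goto-based unlock labels to disappear. A minimal sketch of the pattern (illustrative only):

/* Illustrative sketch, assuming <linux/cleanup.h> and <linux/mutex.h>. */
static int example_update(struct kx022a_data *data, bool on)
{
	guard(mutex)(&data->mutex);	/* dropped on any return below */

	if (data->state & KX022A_STATE_FIFO)
		return -EBUSY;		/* early return, no explicit unlock */

	return __kx022a_turn_on_off(data, on);
}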
static const struct iio_trigger_ops kx022a_trigger_ops = {
@@ -1121,10 +1115,15 @@ static int kx022a_chip_init(struct kx022a_data *data)
return ret;
/*
- * I've seen I2C read failures if we poll too fast after the sensor
- * reset. Slight delay gives I2C block the time to recover.
+ * According to the power-on procedure documents, a delay of (at least)
+ * 2 ms is required after the software reset. This should be the same for
+ * all of the KX022ACR-Z, KX132-1211, KX132ACR-LBZ and KX134ACR-LBZ.
+ *
+ * https://fscdn.rohm.com/kionix/en/document/AN010_KX022ACR-Z_Power-on_Procedure_E.pdf
+ * https://fscdn.rohm.com/kionix/en/document/TN027-Power-On-Procedure.pdf
+ * https://fscdn.rohm.com/kionix/en/document/AN011_KX134ACR-LBZ_Power-on_Procedure_E.pdf
*/
- msleep(1);
+ msleep(2);
ret = regmap_read_poll_timeout(data->regmap, data->chip_info->cntl2, val,
!(val & KX022A_MASK_SRST),
@@ -1158,6 +1157,9 @@ const struct kx022a_chip_info kx022a_chip_info = {
.regmap_config = &kx022a_regmap_config,
.channels = kx022a_channels,
.num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX022A_FIFO_LENGTH,
.who = KX022A_REG_WHO,
.id = KX022A_ID,
@@ -1183,6 +1185,9 @@ const struct kx022a_chip_info kx132_chip_info = {
.regmap_config = &kx132_regmap_config,
.channels = kx132_channels,
.num_channels = ARRAY_SIZE(kx132_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX132_FIFO_LENGTH,
.who = KX132_REG_WHO,
.id = KX132_ID,
@@ -1204,6 +1209,35 @@ const struct kx022a_chip_info kx132_chip_info = {
};
EXPORT_SYMBOL_NS_GPL(kx132_chip_info, "IIO_KX022A");
+const struct kx022a_chip_info kx134_chip_info = {
+ .name = "kx134-1211",
+ .regmap_config = &kx132_regmap_config,
+ .channels = kx132_channels,
+ .num_channels = ARRAY_SIZE(kx132_channels),
+ .scale_table = kx134acr_lbz_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx134acr_lbz_scale_table) *
+ ARRAY_SIZE(kx134acr_lbz_scale_table[0]),
+ .fifo_length = KX132_FIFO_LENGTH,
+ .who = KX132_REG_WHO,
+ .id = KX134_1211_ID,
+ .cntl = KX132_REG_CNTL,
+ .cntl2 = KX132_REG_CNTL2,
+ .odcntl = KX132_REG_ODCNTL,
+ .buf_cntl1 = KX132_REG_BUF_CNTL1,
+ .buf_cntl2 = KX132_REG_BUF_CNTL2,
+ .buf_clear = KX132_REG_BUF_CLEAR,
+ .buf_status1 = KX132_REG_BUF_STATUS_1,
+ .buf_smp_lvl_mask = KX132_MASK_BUF_SMP_LVL,
+ .buf_read = KX132_REG_BUF_READ,
+ .inc1 = KX132_REG_INC1,
+ .inc4 = KX132_REG_INC4,
+ .inc5 = KX132_REG_INC5,
+ .inc6 = KX132_REG_INC6,
+ .xout_l = KX132_REG_XOUT_L,
+ .get_fifo_bytes_available = kx132_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx134_chip_info, "IIO_KX022A");
+
/*
* Despite the naming, KX132ACR-LBZ is not similar to KX132-1211 but it is
* exact subset of KX022A. KX132ACR-LBZ is meant to be used for industrial
@@ -1216,6 +1250,9 @@ const struct kx022a_chip_info kx132acr_chip_info = {
.regmap_config = &kx022a_regmap_config,
.channels = kx022a_channels,
.num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX022A_FIFO_LENGTH,
.who = KX022A_REG_WHO,
.id = KX132ACR_LBZ_ID,
@@ -1236,6 +1273,34 @@ const struct kx022a_chip_info kx132acr_chip_info = {
};
EXPORT_SYMBOL_NS_GPL(kx132acr_chip_info, "IIO_KX022A");
+const struct kx022a_chip_info kx134acr_chip_info = {
+ .name = "kx134acr-lbz",
+ .regmap_config = &kx022a_regmap_config,
+ .channels = kx022a_channels,
+ .num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx134acr_lbz_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx134acr_lbz_scale_table) *
+ ARRAY_SIZE(kx134acr_lbz_scale_table[0]),
+ .fifo_length = KX022A_FIFO_LENGTH,
+ .who = KX022A_REG_WHO,
+ .id = KX134ACR_LBZ_ID,
+ .cntl = KX022A_REG_CNTL,
+ .cntl2 = KX022A_REG_CNTL2,
+ .odcntl = KX022A_REG_ODCNTL,
+ .buf_cntl1 = KX022A_REG_BUF_CNTL1,
+ .buf_cntl2 = KX022A_REG_BUF_CNTL2,
+ .buf_clear = KX022A_REG_BUF_CLEAR,
+ .buf_status1 = KX022A_REG_BUF_STATUS_1,
+ .buf_read = KX022A_REG_BUF_READ,
+ .inc1 = KX022A_REG_INC1,
+ .inc4 = KX022A_REG_INC4,
+ .inc5 = KX022A_REG_INC5,
+ .inc6 = KX022A_REG_INC6,
+ .xout_l = KX022A_REG_XOUT_L,
+ .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx134acr_chip_info, "IIO_KX022A");
+
int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chip_info)
{
static const char * const regulator_names[] = {"io-vdd", "vdd"};
diff --git a/drivers/iio/accel/kionix-kx022a.h b/drivers/iio/accel/kionix-kx022a.h
index 7060438ad88c..0ed54f584223 100644
--- a/drivers/iio/accel/kionix-kx022a.h
+++ b/drivers/iio/accel/kionix-kx022a.h
@@ -14,6 +14,7 @@
#define KX022A_REG_WHO 0x0f
#define KX022A_ID 0xc8
#define KX132ACR_LBZ_ID 0xd8
+#define KX134ACR_LBZ_ID 0xcc
#define KX022A_REG_CNTL2 0x19
#define KX022A_MASK_SRST BIT(7)
@@ -77,6 +78,7 @@
#define KX132_REG_WHO 0x13
#define KX132_ID 0x3d
+#define KX134_1211_ID 0x46
#define KX132_FIFO_LENGTH 86
@@ -135,6 +137,14 @@ struct kx022a_data;
*
* @name: name of the device
* @regmap_config: pointer to register map configuration
+ * @scale_table: Table of scaling factors, one entry per
+ * supported acceleration measurement range.
+ * Each entry is a pair of integers: the first
+ * is the integer part of the scale and the
+ * second is the fractional part in parts per
+ * billion (nano units).
+ * @scale_table_size: Total number of integers in the table.
* @channels: pointer to iio_chan_spec array
* @num_channels: number of iio_chan_spec channels
* @fifo_length: number of 16-bit samples in a full buffer
@@ -161,6 +171,8 @@ struct kx022a_data;
struct kx022a_chip_info {
const char *name;
const struct regmap_config *regmap_config;
+ const int (*scale_table)[2];
+ const int scale_table_size;
const struct iio_chan_spec *channels;
unsigned int num_channels;
unsigned int fifo_length;
@@ -187,6 +199,8 @@ int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chi
extern const struct kx022a_chip_info kx022a_chip_info;
extern const struct kx022a_chip_info kx132_chip_info;
+extern const struct kx022a_chip_info kx134_chip_info;
extern const struct kx022a_chip_info kx132acr_chip_info;
+extern const struct kx022a_chip_info kx134acr_chip_info;
#endif
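 
A worked example of the scale-table format documented above: the KX134ACR-LBZ +/-8 G entry { 0, 2394202 }, reported with IIO_VAL_INT_PLUS_NANO, means 0.002394202 m/s^2 per LSB (8 * 9.80665 / 32768 for a 16-bit reading), so a raw value of 1000 corresponds to roughly 2.39 m/s^2.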
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
index b3b82535f5c1..1d556a842a68 100644
--- a/drivers/iio/adc/ad4000.c
+++ b/drivers/iio/adc/ad4000.c
@@ -35,10 +35,6 @@
#define AD4000_SCALE_OPTIONS 2
-#define AD4000_TQUIET1_NS 190
-#define AD4000_TQUIET2_NS 60
-#define AD4000_TCONV_NS 320
-
#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \
{ \
.type = IIO_VOLTAGE, \
@@ -49,6 +45,7 @@
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
@@ -62,6 +59,12 @@
__AD4000_DIFF_CHANNEL((_sign), (_real_bits), \
((_real_bits) > 16 ? 32 : 16), (_reg_access))
+#define AD4000_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
+{ \
+ AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+}
+
#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\
{ \
.type = IIO_VOLTAGE, \
@@ -71,6 +74,7 @@
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
@@ -84,6 +88,12 @@
__AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \
((_real_bits) > 16 ? 32 : 16), (_reg_access))
+#define AD4000_PSEUDO_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
+{ \
+ AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+}
+
static const char * const ad4000_power_supplies[] = {
"vdd", "vio"
};
@@ -108,111 +118,280 @@ static const int ad4000_gains[] = {
454, 909, 1000, 1900,
};
+struct ad4000_time_spec {
+ int t_conv_ns;
+ int t_quiet2_ns;
+};
+
+/*
+ * Same timing specifications for all of AD4000, AD4001, ..., AD4008, AD4010,
+ * ADAQ4001, and ADAQ4003.
+ */
+static const struct ad4000_time_spec ad4000_t_spec = {
+ .t_conv_ns = 320,
+ .t_quiet2_ns = 60,
+};
+
+/* AD4020, AD4021, AD4022 */
+static const struct ad4000_time_spec ad4020_t_spec = {
+ .t_conv_ns = 350,
+ .t_quiet2_ns = 60,
+};
+
+/* AD7983, AD7984 */
+static const struct ad4000_time_spec ad7983_t_spec = {
+ .t_conv_ns = 500,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7980, AD7982 */
+static const struct ad4000_time_spec ad7980_t_spec = {
+ .t_conv_ns = 800,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7946, AD7686, AD7688, AD7988-5, AD7693 */
+static const struct ad4000_time_spec ad7686_t_spec = {
+ .t_conv_ns = 1600,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7690 */
+static const struct ad4000_time_spec ad7690_t_spec = {
+ .t_conv_ns = 2100,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7942, AD7685, AD7687 */
+static const struct ad4000_time_spec ad7687_t_spec = {
+ .t_conv_ns = 3200,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7691 */
+static const struct ad4000_time_spec ad7691_t_spec = {
+ .t_conv_ns = 3700,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7988-1 */
+static const struct ad4000_time_spec ad7988_1_t_spec = {
+ .t_conv_ns = 9500,
+ .t_quiet2_ns = 0,
+};
+
struct ad4000_chip_info {
const char *dev_name;
- struct iio_chan_spec chan_spec;
- struct iio_chan_spec reg_access_chan_spec;
+ struct iio_chan_spec chan_spec[2];
+ struct iio_chan_spec reg_access_chan_spec[2];
+ const struct ad4000_time_spec *time_spec;
bool has_hardware_gain;
};
static const struct ad4000_chip_info ad4000_chip_info = {
.dev_name = "ad4000",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4001_chip_info = {
.dev_name = "ad4001",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4002_chip_info = {
.dev_name = "ad4002",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4003_chip_info = {
.dev_name = "ad4003",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4004_chip_info = {
.dev_name = "ad4004",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4005_chip_info = {
.dev_name = "ad4005",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4006_chip_info = {
.dev_name = "ad4006",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4007_chip_info = {
.dev_name = "ad4007",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4008_chip_info = {
.dev_name = "ad4008",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4010_chip_info = {
.dev_name = "ad4010",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4011_chip_info = {
.dev_name = "ad4011",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4020_chip_info = {
.dev_name = "ad4020",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info ad4021_chip_info = {
.dev_name = "ad4021",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info ad4022_chip_info = {
.dev_name = "ad4022",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info adaq4001_chip_info = {
.dev_name = "adaq4001",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
};
static const struct ad4000_chip_info adaq4003_chip_info = {
.dev_name = "adaq4003",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
};
+static const struct ad4000_chip_info ad7685_chip_info = {
+ .dev_name = "ad7685",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7686_chip_info = {
+ .dev_name = "ad7686",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7687_chip_info = {
+ .dev_name = "ad7687",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7688_chip_info = {
+ .dev_name = "ad7688",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7690_chip_info = {
+ .dev_name = "ad7690",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7690_t_spec,
+};
+
+static const struct ad4000_chip_info ad7691_chip_info = {
+ .dev_name = "ad7691",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7691_t_spec,
+};
+
+static const struct ad4000_chip_info ad7693_chip_info = {
+ .dev_name = "ad7693",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7942_chip_info = {
+ .dev_name = "ad7942",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7946_chip_info = {
+ .dev_name = "ad7946",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7980_chip_info = {
+ .dev_name = "ad7980",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7980_t_spec,
+};
+
+static const struct ad4000_chip_info ad7982_chip_info = {
+ .dev_name = "ad7982",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7980_t_spec,
+};
+
+static const struct ad4000_chip_info ad7983_chip_info = {
+ .dev_name = "ad7983",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7983_t_spec,
+};
+
+static const struct ad4000_chip_info ad7984_chip_info = {
+ .dev_name = "ad7984",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7983_t_spec,
+};
+
+static const struct ad4000_chip_info ad7988_1_chip_info = {
+ .dev_name = "ad7988-1",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7988_1_t_spec,
+};
+
+static const struct ad4000_chip_info ad7988_5_chip_info = {
+ .dev_name = "ad7988-5",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
struct ad4000_state {
struct spi_device *spi;
struct gpio_desc *cnv_gpio;
@@ -224,6 +403,7 @@ struct ad4000_state {
bool span_comp;
u16 gain_milli;
int scale_tbl[AD4000_SCALE_OPTIONS][2];
+ const struct ad4000_time_spec *time_spec;
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
@@ -234,7 +414,7 @@ struct ad4000_state {
__be16 sample_buf16;
__be32 sample_buf32;
} data;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan __aligned(IIO_DMA_MINALIGN);
u8 tx_buf[2];
u8 rx_buf[2];
@@ -488,16 +668,15 @@ static const struct iio_info ad4000_info = {
static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int cnv_pulse_time = AD4000_TCONV_NS;
struct spi_transfer *xfers = st->xfers;
xfers[0].cs_change = 1;
- xfers[0].cs_change_delay.value = cnv_pulse_time;
+ xfers[0].cs_change_delay.value = st->time_spec->t_conv_ns;
xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &st->scan.data;
xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
- xfers[1].delay.value = AD4000_TQUIET2_NS;
+ xfers[1].delay.value = st->time_spec->t_quiet2_ns;
xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
spi_message_init_with_transfers(&st->msg, st->xfers, 2);
@@ -515,7 +694,6 @@ static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int cnv_to_sdi_time = AD4000_TCONV_NS;
struct spi_transfer *xfers = st->xfers;
/*
@@ -523,7 +701,7 @@ static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
* going low.
*/
xfers[0].cs_off = 1;
- xfers[0].delay.value = cnv_to_sdi_time;
+ xfers[0].delay.value = st->time_spec->t_conv_ns;
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &st->scan.data;
@@ -562,6 +740,7 @@ static int ad4000_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->spi = spi;
+ st->time_spec = chip->time_spec;
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies),
ad4000_power_supplies);
@@ -591,7 +770,7 @@ static int ad4000_probe(struct spi_device *spi)
switch (st->sdi_pin) {
case AD4000_SDI_MOSI:
indio_dev->info = &ad4000_reg_access_info;
- indio_dev->channels = &chip->reg_access_chan_spec;
+ indio_dev->channels = chip->reg_access_chan_spec;
/*
* In "3-wire mode", the ADC SDI line must be kept high when
@@ -603,7 +782,7 @@ static int ad4000_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
@@ -614,16 +793,16 @@ static int ad4000_probe(struct spi_device *spi)
break;
case AD4000_SDI_VIO:
indio_dev->info = &ad4000_info;
- indio_dev->channels = &chip->chan_spec;
- ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ indio_dev->channels = chip->chan_spec;
+ ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
break;
case AD4000_SDI_CS:
indio_dev->info = &ad4000_info;
- indio_dev->channels = &chip->chan_spec;
- ret = ad4000_prepare_4wire_mode_message(st, indio_dev->channels);
+ indio_dev->channels = chip->chan_spec;
+ ret = ad4000_prepare_4wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
@@ -637,7 +816,7 @@ static int ad4000_probe(struct spi_device *spi)
}
indio_dev->name = chip->dev_name;
- indio_dev->num_channels = 1;
+ indio_dev->num_channels = 2;
ret = devm_mutex_init(dev, &st->lock);
if (ret)
@@ -658,7 +837,7 @@ static int ad4000_probe(struct spi_device *spi)
}
}
- ad4000_fill_scale_tbl(st, indio_dev->channels);
+ ad4000_fill_scale_tbl(st, &indio_dev->channels[0]);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
@@ -686,6 +865,21 @@ static const struct spi_device_id ad4000_id[] = {
{ "ad4022", (kernel_ulong_t)&ad4022_chip_info },
{ "adaq4001", (kernel_ulong_t)&adaq4001_chip_info },
{ "adaq4003", (kernel_ulong_t)&adaq4003_chip_info },
+ { "ad7685", (kernel_ulong_t)&ad7685_chip_info },
+ { "ad7686", (kernel_ulong_t)&ad7686_chip_info },
+ { "ad7687", (kernel_ulong_t)&ad7687_chip_info },
+ { "ad7688", (kernel_ulong_t)&ad7688_chip_info },
+ { "ad7690", (kernel_ulong_t)&ad7690_chip_info },
+ { "ad7691", (kernel_ulong_t)&ad7691_chip_info },
+ { "ad7693", (kernel_ulong_t)&ad7693_chip_info },
+ { "ad7942", (kernel_ulong_t)&ad7942_chip_info },
+ { "ad7946", (kernel_ulong_t)&ad7946_chip_info },
+ { "ad7980", (kernel_ulong_t)&ad7980_chip_info },
+ { "ad7982", (kernel_ulong_t)&ad7982_chip_info },
+ { "ad7983", (kernel_ulong_t)&ad7983_chip_info },
+ { "ad7984", (kernel_ulong_t)&ad7984_chip_info },
+ { "ad7988-1", (kernel_ulong_t)&ad7988_1_chip_info },
+ { "ad7988-5", (kernel_ulong_t)&ad7988_5_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad4000_id);
@@ -707,6 +901,21 @@ static const struct of_device_id ad4000_of_match[] = {
{ .compatible = "adi,ad4022", .data = &ad4022_chip_info },
{ .compatible = "adi,adaq4001", .data = &adaq4001_chip_info },
{ .compatible = "adi,adaq4003", .data = &adaq4003_chip_info },
+ { .compatible = "adi,ad7685", .data = &ad7685_chip_info },
+ { .compatible = "adi,ad7686", .data = &ad7686_chip_info },
+ { .compatible = "adi,ad7687", .data = &ad7687_chip_info },
+ { .compatible = "adi,ad7688", .data = &ad7688_chip_info },
+ { .compatible = "adi,ad7690", .data = &ad7690_chip_info },
+ { .compatible = "adi,ad7691", .data = &ad7691_chip_info },
+ { .compatible = "adi,ad7693", .data = &ad7693_chip_info },
+ { .compatible = "adi,ad7942", .data = &ad7942_chip_info },
+ { .compatible = "adi,ad7946", .data = &ad7946_chip_info },
+ { .compatible = "adi,ad7980", .data = &ad7980_chip_info },
+ { .compatible = "adi,ad7982", .data = &ad7982_chip_info },
+ { .compatible = "adi,ad7983", .data = &ad7983_chip_info },
+ { .compatible = "adi,ad7984", .data = &ad7984_chip_info },
+ { .compatible = "adi,ad7988-1", .data = &ad7988_1_chip_info },
+ { .compatible = "adi,ad7988-5", .data = &ad7988_5_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, ad4000_of_match);
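For reference (illustrative, not in the patch), both ID tables point at the same chip_info objects so that probe can recover them with spi_get_device_match_data() regardless of whether the device matched by OF compatible or by SPI modalias:

static int example_probe(struct spi_device *spi)
{
	const struct ad4000_chip_info *chip;

	/* Resolves .data/.driver_data from either table above. */
	chip = spi_get_device_match_data(spi);
	if (!chip)
		return -EINVAL;

	dev_dbg(&spi->dev, "matched %s\n", chip->dev_name);
	return 0;
}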
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index 595ec4158e73..b79d135a5471 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -30,7 +30,7 @@
#include <linux/spi/spi.h>
#include <linux/units.h>
-#include <dt-bindings/iio/adi,ad4695.h>
+#include <dt-bindings/iio/adc/adi,ad4695.h>
/* AD4695 registers */
#define AD4695_REG_SPI_CONFIG_A 0x0000
@@ -91,6 +91,7 @@
#define AD4695_T_WAKEUP_SW_MS 3
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
+#define AD4695_T_SCK_CNV_DELAY_NS 80
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -132,8 +133,13 @@ struct ad4695_state {
unsigned int vref_mv;
/* Common mode input pin voltage. */
unsigned int com_mv;
- /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */
- struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2];
+ /*
+ * 2 per voltage and temperature chan plus 1 xfer to trigger 1st
+ * CNV. Excluding the trigger xfer, every 2nd xfer only serves
+ * to control CS and add a delay between the last SCLK and next
+ * CNV rising edges.
+ */
+ struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS * 2 + 3];
struct spi_message buf_read_msg;
/* Raw conversion data received. */
u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
@@ -423,7 +429,7 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
u32 bit, num_xfer, num_slots;
u32 temp_en = 0;
- int ret;
+ int ret, rx_buf_offset = 0;
/*
* We are using the advanced sequencer since it is the only way to read
@@ -449,11 +455,9 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
iio_for_each_active_channel(indio_dev, bit) {
xfer = &st->buf_read_xfer[num_xfer];
xfer->bits_per_word = 16;
- xfer->rx_buf = &st->buf[(num_xfer - 1) * 2];
+ xfer->rx_buf = &st->buf[rx_buf_offset];
xfer->len = 2;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ rx_buf_offset += xfer->len;
if (bit == temp_chan_bit) {
temp_en = 1;
@@ -468,21 +472,44 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
}
num_xfer++;
+
+ /*
+	 * We need to add a blank xfer after each data read to meet the
+	 * timing requirement of a minimum delay between the last SCLK
+	 * rising edge and the CS deassert.
+ */
+ xfer = &st->buf_read_xfer[num_xfer];
+ xfer->delay.value = AD4695_T_SCK_CNV_DELAY_NS;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ num_xfer++;
}
/*
* The advanced sequencer requires that at least 2 slots are enabled.
* Since slot 0 is always used for other purposes, we need only 1
- * enabled voltage channel to meet this requirement. If the temperature
- * channel is the only enabled channel, we need to add one more slot
- * in the sequence but not read from it.
+ * enabled voltage channel to meet this requirement. If the temperature
+ * channel is the only enabled channel, we need to add one more slot in
+ * the sequence but not read from it. This is because the temperature
+ * sensor is sampled at the end of the channel sequence in advanced
+ * sequencer mode (see datasheet page 38).
+ *
+ * From the iio_for_each_active_channel() block above, we now have an
+ * xfer with data followed by a blank xfer to allow us to meet the
+ * timing spec, so move both of those up before adding an extra to
+ * handle the temperature-only case.
*/
if (num_slots < 2) {
- /* move last xfer so we can insert one more xfer before it */
- st->buf_read_xfer[num_xfer] = *xfer;
+ /* Move last two xfers */
+ st->buf_read_xfer[num_xfer] = st->buf_read_xfer[num_xfer - 1];
+ st->buf_read_xfer[num_xfer - 1] = st->buf_read_xfer[num_xfer - 2];
num_xfer++;
- /* modify 2nd to last xfer for extra slot */
+ /* Modify inserted xfer for extra slot. */
+ xfer = &st->buf_read_xfer[num_xfer - 3];
memset(xfer, 0, sizeof(*xfer));
xfer->cs_change = 1;
xfer->delay.value = st->chip_info->t_acq_ns;
@@ -499,6 +526,12 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
return ret;
num_slots++;
+
+ /*
+ * We still want to point at the last xfer when finished, so
+ * update the pointer.
+ */
+ xfer = &st->buf_read_xfer[num_xfer - 1];
}
/*
@@ -583,8 +616,20 @@ out:
*/
static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
{
- struct spi_transfer xfer[2] = { };
- int ret, i = 0;
+ struct spi_transfer xfers[2] = {
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .bits_per_word = 16,
+ .tx_buf = &st->cnv_cmd,
+ .len = 2,
+ },
+ {
+ /* Required delay between last SCLK and CNV/CS */
+ .delay.value = AD4695_T_SCK_CNV_DELAY_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ }
+ };
+ int ret;
ret = ad4695_set_single_cycle_mode(st, address);
if (ret)
@@ -592,29 +637,22 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
/*
* Setting the first channel to the temperature channel isn't supported
- * in single-cycle mode, so we have to do an extra xfer to read the
- * temperature.
+ * in single-cycle mode, so we have to do an extra conversion to read
+ * the temperature.
*/
if (address == AD4695_CMD_TEMP_CHAN) {
- /* We aren't reading, so we can make this a short xfer. */
- st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3;
- xfer[0].tx_buf = &st->cnv_cmd2;
- xfer[0].len = 1;
- xfer[0].cs_change = 1;
- xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- i = 1;
+ st->cnv_cmd = AD4695_CMD_TEMP_CHAN << 11;
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
}
/* Then read the result and exit conversion mode. */
st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11;
- xfer[i].bits_per_word = 16;
- xfer[i].tx_buf = &st->cnv_cmd;
- xfer[i].rx_buf = &st->raw_data;
- xfer[i].len = 2;
+ xfers[0].rx_buf = &st->raw_data;
- return spi_sync_transfer(st->spi, xfer, i + 1);
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_read_raw(struct iio_dev *indio_dev,
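A standalone sketch (not part of the patch) of the trailing delay-only transfer idiom introduced above; the 80 ns figure mirrors AD4695_T_SCK_CNV_DELAY_NS but is written out here as an assumption:

static int example_read_with_trailing_delay(struct spi_device *spi,
					    __be16 *cmd, __be16 *result)
{
	struct spi_transfer xfers[2] = {
		{
			.bits_per_word = 16,
			.tx_buf = cmd,
			.rx_buf = result,
			.len = 2,
		}, {
			/* No data; only enforces the SCLK-to-CS delay. */
			.delay.value = 80,	/* assumed, in ns */
			.delay.unit = SPI_DELAY_UNIT_NSECS,
		},
	};

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}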
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 7314fb32bdec..6ae27cdd3250 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -95,6 +95,10 @@
#define AD7124_MAX_CONFIGS 8
#define AD7124_MAX_CHANNELS 16
+/* AD7124 input sources */
+#define AD7124_INPUT_TEMPSENSOR 16
+#define AD7124_INPUT_AVSS 17
+
enum ad7124_ids {
ID_AD7124_4,
ID_AD7124_8,
@@ -360,20 +364,21 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
return free_cfg_slot;
}
+/* Only called during probe, so dev_err_probe() can be used */
static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channel_config *cfg)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int refsel = cfg->refsel;
switch (refsel) {
case AD7124_REFIN1:
case AD7124_REFIN2:
case AD7124_AVDD_REF:
- if (IS_ERR(st->vref[refsel])) {
- dev_err(&st->sd.spi->dev,
- "Error, trying to use external voltage reference without a %s regulator.\n",
- ad7124_ref_names[refsel]);
- return PTR_ERR(st->vref[refsel]);
- }
+ if (IS_ERR(st->vref[refsel]))
+ return dev_err_probe(dev, PTR_ERR(st->vref[refsel]),
+ "Error, trying to use external voltage reference without a %s regulator.\n",
+ ad7124_ref_names[refsel]);
+
cfg->vref_mv = regulator_get_voltage(st->vref[refsel]);
/* Conversion from uV to mV */
cfg->vref_mv /= 1000;
@@ -384,8 +389,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
return 0;
default:
- dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Invalid reference %d\n", refsel);
}
}
@@ -571,6 +575,7 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.data_reg = AD7124_DATA,
.num_slots = 8,
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 64,
};
static int ad7124_read_raw(struct iio_dev *indio_dev,
@@ -588,26 +593,59 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->cfgs_lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&st->cfgs_lock);
- idx = st->channels[chan->address].cfg.pga_bits;
- *val = st->channels[chan->address].cfg.vref_mv;
- if (st->channels[chan->address].cfg.bipolar)
- *val2 = chan->scan_type.realbits - 1 + idx;
- else
- *val2 = chan->scan_type.realbits + idx;
+ idx = st->channels[chan->address].cfg.pga_bits;
+ *val = st->channels[chan->address].cfg.vref_mv;
+ if (st->channels[chan->address].cfg.bipolar)
+ *val2 = chan->scan_type.realbits - 1 + idx;
+ else
+ *val2 = chan->scan_type.realbits + idx;
+
+ mutex_unlock(&st->cfgs_lock);
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_TEMP:
+ /*
+ * According to the data sheet
+ * Temperature (°C)
+ * = ((Conversion − 0x800000)/13584) − 272.5
+ * = (Conversion − 0x800000 - 13584 * 272.5) / 13584
+ * = (Conversion − 12090248) / 13584
+			 * So scale with 1000/13584 to yield m°C. Reducing the
+			 * fraction by 8 gives 125/1698.
+ */
+ *val = 125;
+ *val2 = 1698;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
- mutex_unlock(&st->cfgs_lock);
- return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
- mutex_lock(&st->cfgs_lock);
- if (st->channels[chan->address].cfg.bipolar)
- *val = -(1 << (chan->scan_type.realbits - 1));
- else
- *val = 0;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&st->cfgs_lock);
+ if (st->channels[chan->address].cfg.bipolar)
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ else
+ *val = 0;
+
+ mutex_unlock(&st->cfgs_lock);
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ /* see calculation above */
+ *val = -12090248;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
- mutex_unlock(&st->cfgs_lock);
- return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
mutex_lock(&st->cfgs_lock);
*val = st->channels[chan->address].cfg.odr;
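To see the scale/offset pair above in action (illustrative only), a raw code converts to milli-degrees Celsius via the usual IIO formula value = (raw + offset) * scale; feeding in raw = 0x800000 yields -272500 m°C, i.e. the -272.5 °C intercept of the datasheet equation:

static s64 example_ad7124_temp_mdegc(u32 raw)
{
	/* (raw + offset) * scale = (raw - 12090248) * 125 / 1698 */
	return div_s64(((s64)raw - 12090248) * 125, 1698);
}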
@@ -751,12 +789,14 @@ static const struct iio_info ad7124_info = {
.attrs = &ad7124_attrs_group,
};
+/* Only called during probe, so dev_err_probe() can be used */
static int ad7124_soft_reset(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int readval, timeout;
int ret;
- ret = ad_sd_reset(&st->sd, 64);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
return ret;
@@ -765,7 +805,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
do {
ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Error reading status register\n");
if (!(readval & AD7124_STATUS_POR_FLAG_MSK))
return 0;
@@ -774,39 +814,47 @@ static int ad7124_soft_reset(struct ad7124_state *st)
usleep_range(100, 2000);
} while (--timeout);
- dev_err(&st->sd.spi->dev, "Soft reset failed\n");
-
- return -EIO;
+ return dev_err_probe(dev, -EIO, "Soft reset failed\n");
}
static int ad7124_check_chip_id(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int readval, chip_id, silicon_rev;
int ret;
ret = ad_sd_read_reg(&st->sd, AD7124_ID, 1, &readval);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failure to read ID register\n");
chip_id = AD7124_DEVICE_ID_GET(readval);
silicon_rev = AD7124_SILICON_REV_GET(readval);
- if (chip_id != st->chip_info->chip_id) {
- dev_err(&st->sd.spi->dev,
- "Chip ID mismatch: expected %u, got %u\n",
- st->chip_info->chip_id, chip_id);
- return -ENODEV;
- }
+ if (chip_id != st->chip_info->chip_id)
+ return dev_err_probe(dev, -ENODEV,
+ "Chip ID mismatch: expected %u, got %u\n",
+ st->chip_info->chip_id, chip_id);
- if (silicon_rev == 0) {
- dev_err(&st->sd.spi->dev,
- "Silicon revision empty. Chip may not be present\n");
- return -ENODEV;
- }
+ if (silicon_rev == 0)
+ return dev_err_probe(dev, -ENODEV,
+ "Silicon revision empty. Chip may not be present\n");
return 0;
}
+/*
+ * Input specifiers 8 - 15 are explicitly reserved for ad7124-4
+ * while they are fine for ad7124-8. Values above 31 don't fit
+ * into the 5-bit register field and so are always invalid.
+ */
+static bool ad7124_valid_input_select(unsigned int ain, const struct ad7124_chip_info *info)
+{
+ if (ain >= info->num_inputs && ain < 16)
+ return false;
+
+ return ain <= FIELD_MAX(AD7124_CHANNEL_AINM_MSK);
+}
+
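Some example outcomes (assuming num_inputs is 8 on ad7124-4 and 16 on ad7124-8, per the comment above):

/*
 * Illustrative results of ad7124_valid_input_select():
 *
 *   ain = 5   valid on both parts
 *   ain = 10  invalid on ad7124-4 (reserved range), valid on ad7124-8
 *   ain = 17  valid on both (internal AVSS input)
 *   ain = 35  invalid everywhere: exceeds the 5-bit register field
 */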
static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
struct device *dev)
{
@@ -815,11 +863,23 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
struct ad7124_channel *channels;
struct iio_chan_spec *chan;
unsigned int ain[2], channel = 0, tmp;
+ unsigned int num_channels;
int ret;
- st->num_channels = device_get_child_node_count(dev);
- if (!st->num_channels)
- return dev_err_probe(dev, -ENODEV, "no channel children\n");
+ num_channels = device_get_child_node_count(dev);
+
+ /*
+	 * The driver statically assigns one channel register to each logical
+	 * channel defined in the device tree. So accept at most 16 such
+	 * logical channels to avoid treating CONFIG_0 (i.e. the register
+	 * following CHANNEL_15) as an additional channel register. The driver
+	 * could be improved to lift this limitation.
+ */
+ if (num_channels > AD7124_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL, "Too many channels defined\n");
+
+ /* Add one for temperature */
+ st->num_channels = min(num_channels + 1, AD7124_MAX_CHANNELS);
chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
sizeof(*chan), GFP_KERNEL);
@@ -838,16 +898,23 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
device_for_each_child_node_scoped(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &channel);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to parse reg property of %pfwP\n", child);
- if (channel >= indio_dev->num_channels)
+ if (channel >= num_channels)
return dev_err_probe(dev, -EINVAL,
- "Channel index >= number of channels\n");
+ "Channel index >= number of channels in %pfwP\n", child);
ret = fwnode_property_read_u32_array(child, "diff-channels",
ain, 2);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to parse diff-channels property of %pfwP\n", child);
+
+ if (!ad7124_valid_input_select(ain[0], st->chip_info) ||
+ !ad7124_valid_input_select(ain[1], st->chip_info))
+ return dev_err_probe(dev, -EINVAL,
+ "diff-channels property of %pfwP contains invalid data\n", child);
st->channels[channel].nr = channel;
st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
@@ -874,17 +941,49 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
chan[channel].channel2 = ain[1];
}
+ if (num_channels < AD7124_MAX_CHANNELS) {
+ st->channels[num_channels] = (struct ad7124_channel) {
+ .nr = num_channels,
+ .ain = AD7124_CHANNEL_AINP(AD7124_INPUT_TEMPSENSOR) |
+ AD7124_CHANNEL_AINM(AD7124_INPUT_AVSS),
+ .cfg = {
+ .bipolar = true,
+ },
+ };
+
+ chan[num_channels] = (struct iio_chan_spec) {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ /*
+ * You might find it strange that a bipolar
+ * measurement yields an unsigned value, but
+ * this matches the device's manual.
+ */
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ .address = num_channels,
+ .scan_index = num_channels,
+ };
+ }
+
return 0;
}
static int ad7124_setup(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int fclk, power_mode;
int i, ret;
fclk = clk_get_rate(st->mclk);
if (!fclk)
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Failed to get mclk rate\n");
/* The power mode changes the master clock frequency */
power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
@@ -893,7 +992,7 @@ static int ad7124_setup(struct ad7124_state *st)
if (fclk != ad7124_master_clk_freq_hz[power_mode]) {
ret = clk_set_rate(st->mclk, fclk);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to set mclk rate\n");
}
/* Set the power mode */
@@ -917,11 +1016,14 @@ static int ad7124_setup(struct ad7124_state *st)
* set all channels to this default value.
*/
ad7124_set_channel_odr(st, i, 10);
+
+ /* Disable all channels to prevent unintended conversions. */
+ ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
}
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to setup CONTROL register\n");
return ret;
}
@@ -934,13 +1036,14 @@ static void ad7124_reg_disable(void *r)
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
+ struct device *dev = &spi->dev;
struct ad7124_state *st;
struct iio_dev *indio_dev;
int i, ret;
info = spi_get_device_match_data(spi);
if (!info)
- return -ENODEV;
+ return dev_err_probe(dev, -ENODEV, "Failed to get match data\n");
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -975,17 +1078,17 @@ static int ad7124_probe(struct spi_device *spi)
ret = regulator_enable(st->vref[i]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to enable regulator #%d\n", i);
ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
st->vref[i]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to register disable handler for regulator #%d\n", i);
}
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
- return PTR_ERR(st->mclk);
+ return dev_err_probe(dev, PTR_ERR(st->mclk), "Failed to get mclk\n");
ret = ad7124_soft_reset(st);
if (ret < 0)
@@ -1001,10 +1104,13 @@ static int ad7124_probe(struct spi_device *spi)
ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to setup triggers\n");
- return devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register iio device\n");
+ return 0;
}
static const struct of_device_id ad7124_of_match[] = {
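The conversions in this file all follow the same dev_err_probe() idiom: it logs at error level except for -EPROBE_DEFER (which it records quietly) and returns the error code, so message and return fit in one statement. A minimal sketch with assumed names, not taken from the patch:

static int example_get_mclk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get_enabled(dev, "mclk");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Failed to get mclk\n");

	return 0;
}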
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 8a0c931ca83a..6c4ed10ae580 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -150,6 +150,11 @@
#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
+#define AD7173_MODE_CAL_INT_ZERO 0x4 /* Internal Zero-Scale Calibration */
+#define AD7173_MODE_CAL_INT_FULL 0x5 /* Internal Full-Scale Calibration */
+#define AD7173_MODE_CAL_SYS_ZERO 0x6 /* System Zero-Scale Calibration */
+#define AD7173_MODE_CAL_SYS_FULL 0x7 /* System Full-Scale Calibration */
+
struct ad7173_device_info {
const unsigned int *sinc5_data_rates;
unsigned int num_sinc5_data_rates;
@@ -175,6 +180,7 @@ struct ad7173_device_info {
bool has_input_buf;
bool has_int_ref;
bool has_ref2;
+ bool has_internal_fs_calibration;
bool higher_gpio_bits;
u8 num_gpios;
};
@@ -193,13 +199,14 @@ struct ad7173_channel_config {
};
struct ad7173_channel {
- unsigned int chan_reg;
unsigned int ain;
struct ad7173_channel_config cfg;
+ u8 syscalib_mode;
};
struct ad7173_state {
struct ad_sigma_delta sd;
+ struct ad_sigma_delta_info sigma_delta_info;
const struct ad7173_device_info *info;
struct ad7173_channel *channels;
struct regulator_bulk_data regulators[3];
@@ -272,6 +279,7 @@ static const struct ad7173_device_info ad4111_device_info = {
.has_input_buf = true,
.has_current_inputs = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -291,6 +299,7 @@ static const struct ad7173_device_info ad4112_device_info = {
.has_input_buf = true,
.has_current_inputs = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -326,6 +335,7 @@ static const struct ad7173_device_info ad4114_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -343,6 +353,7 @@ static const struct ad7173_device_info ad4115_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 8 * HZ_PER_MHZ,
.sinc5_data_rates = ad4115_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
@@ -360,6 +371,7 @@ static const struct ad7173_device_info ad4116_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 4 * HZ_PER_MHZ,
.sinc5_data_rates = ad4116_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
@@ -505,6 +517,105 @@ static const struct regmap_config ad7173_regmap_config = {
.read_flag_mask = BIT(6),
};
+enum {
+ AD7173_SYSCALIB_ZERO_SCALE,
+ AD7173_SYSCALIB_FULL_SCALE,
+};
+
+static const char * const ad7173_syscalib_modes[] = {
+ [AD7173_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7173_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7173_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ st->channels[chan->channel].syscalib_mode = mode;
+
+ return 0;
+}
+
+static int ad7173_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->channel].syscalib_mode;
+}
+
+static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret, mode;
+
+ ret = kstrtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ mode = st->channels[chan->channel].syscalib_mode;
+ if (sys_calib) {
+ if (mode == AD7173_SYSCALIB_ZERO_SCALE)
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_SYS_ZERO,
+ chan->address);
+ else
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_SYS_FULL,
+ chan->address);
+ }
+
+ return ret ? : len;
+}
+
+static const struct iio_enum ad7173_syscalib_mode_enum = {
+ .items = ad7173_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7173_syscalib_modes),
+ .set = ad7173_set_syscalib_mode,
+ .get = ad7173_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7173_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7173_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7173_syscalib_mode_enum),
+ { }
+};
+
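With these entries, each voltage channel gains per-channel sys_calibration and sys_calibration_mode attributes plus a shared mode-list attribute; a typical sequence, with attribute names assumed for a hypothetical differential channel 0-1:

/*
 * Assumed sysfs usage, from the IIO device's directory:
 *
 *   echo zero_scale > in_voltage0-voltage1_sys_calibration_mode
 *   echo 1 > in_voltage0-voltage1_sys_calibration
 */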
+static int ad7173_calibrate_all(struct ad7173_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < st->num_channels; i++) {
+ if (indio_dev->channels[i].type != IIO_VOLTAGE)
+ continue;
+
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_INT_ZERO, st->channels[i].ain);
+ if (ret < 0)
+ return ret;
+
+ if (st->info->has_internal_fs_calibration) {
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_INT_FULL,
+ st->channels[i].ain);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ad7173_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask)
@@ -753,7 +864,7 @@ static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
}
-static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
@@ -764,6 +875,7 @@ static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.data_reg = AD7173_REG_DATA,
+ .num_resetclks = 64,
};
static int ad7173_setup(struct iio_dev *indio_dev)
@@ -801,6 +913,10 @@ static int ad7173_setup(struct iio_dev *indio_dev)
if (!st->config_cnts)
return -ENOMEM;
+ ret = ad7173_calibrate_all(st, indio_dev);
+ if (ret)
+ return ret;
+
/* All channels are enabled by default after a reset */
return ad7173_disable_all(&st->sd);
}
@@ -1023,6 +1139,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
+ .ext_info = ad7173_calibsys_ext_info,
};
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
@@ -1316,7 +1433,6 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->address = chan_index;
chan->scan_index = chan_index;
chan->channel = ain[0];
- chan_st_priv->chan_reg = chan_index;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
@@ -1403,7 +1519,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
if (ret < 0)
return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
- ad7173_sigma_delta_info.irq_line = ret;
+ st->sigma_delta_info.irq_line = ret;
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1436,8 +1552,9 @@ static int ad7173_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi_setup(spi);
- ad7173_sigma_delta_info.num_slots = st->info->num_configs;
- ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7173_sigma_delta_info);
+ st->sigma_delta_info = ad7173_sigma_delta_info;
+ st->sigma_delta_info.num_slots = st->info->num_configs;
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 1c87db0e0460..cfaf8f7e0a07 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -361,6 +361,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.status_ch_mask = GENMASK(3, 0),
.num_slots = 4,
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 40,
};
static const struct ad_sigma_delta_info ad7194_sigma_delta_info = {
@@ -373,6 +374,7 @@ static const struct ad_sigma_delta_info ad7194_sigma_delta_info = {
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 40,
};
static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
@@ -565,7 +567,7 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
int i, ret, id;
/* reset the serial interface */
- ret = ad_sd_reset(&st->sd, 48);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
return ret;
usleep_range(500, 1000); /* Wait for at least 500us */
@@ -1082,7 +1084,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
conf &= ~AD7192_CONF_CHAN_MASK;
for_each_set_bit(i, scan_mask, 8)
- conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
+ conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
if (ret < 0)
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index e35d55d03d86..d39354afd539 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -175,17 +175,17 @@ static const struct iio_chan_spec ad7616_channels[] = {
AD7606_CHANNEL(15, 16),
};
-static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7607_chan_scale_setup(struct ad7606_state *st,
+static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7608_chan_scale_setup(struct ad7606_state *st,
+static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7609_chan_scale_setup(struct ad7606_state *st,
+static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
const struct ad7606_chip_info ad7605_4_info = {
@@ -323,9 +323,10 @@ int ad7606_reset(struct ad7606_state *st)
}
EXPORT_SYMBOL_NS_GPL(ad7606_reset, "IIO_AD7606");
-static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
if (!st->sw_mode_en) {
@@ -345,10 +346,12 @@ static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
+static int ad7606_get_chan_config(struct iio_dev *indio_dev, int ch,
bool *bipolar, bool *differential)
{
- unsigned int num_channels = st->chip_info->num_channels - 1;
+ struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int num_channels = st->chip_info->num_adc_channels;
+ unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
struct device *dev = st->dev;
int ret;
@@ -364,7 +367,7 @@ static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
continue;
/* channel number (here) is from 1 to num_channels */
- if (reg == 0 || reg > num_channels) {
+ if (reg < offset || reg > num_channels) {
dev_warn(dev,
"Invalid channel number (ignoring): %d\n", reg);
continue;
@@ -399,9 +402,10 @@ static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
return 0;
}
-static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
bool bipolar, differential;
int ret;
@@ -413,7 +417,7 @@ static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
- ret = ad7606_get_chan_config(st, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
if (ret)
return ret;
@@ -455,9 +459,10 @@ static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
bool bipolar, differential;
int ret;
@@ -469,7 +474,7 @@ static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
- ret = ad7606_get_chan_config(st, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
if (ret)
return ret;
@@ -512,9 +517,10 @@ static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7607_chan_scale_setup(struct ad7606_state *st,
+static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -523,9 +529,10 @@ static int ad7607_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7608_chan_scale_setup(struct ad7606_state *st,
+static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -534,9 +541,10 @@ static int ad7608_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7609_chan_scale_setup(struct ad7606_state *st,
+static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -1039,7 +1047,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,
cs = &st->chan_scales[ch];
*vals = (int *)cs->scale_avail;
- *length = cs->num_scales;
+ *length = cs->num_scales * 2;
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
@@ -1146,8 +1154,8 @@ static int ad7606_sw_mode_setup(struct iio_dev *indio_dev)
static int ad7606_chan_scales_setup(struct iio_dev *indio_dev)
{
- unsigned int num_channels = indio_dev->num_channels - 1;
struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
struct iio_chan_spec *chans;
size_t size;
int ch, ret;
@@ -1161,8 +1169,8 @@ static int ad7606_chan_scales_setup(struct iio_dev *indio_dev)
memcpy(chans, indio_dev->channels, size);
indio_dev->channels = chans;
- for (ch = 0; ch < num_channels; ch++) {
- ret = st->chip_info->scale_setup_cb(st, &chans[ch + 1], ch);
+ for (ch = 0; ch < st->chip_info->num_adc_channels; ch++) {
+ ret = st->chip_info->scale_setup_cb(indio_dev, &chans[ch + offset], ch);
if (ret)
return ret;
}
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 998814a92b82..8778ffe515b3 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -69,7 +69,7 @@
struct ad7606_state;
-typedef int (*ad7606_scale_setup_cb_t)(struct ad7606_state *st,
+typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
/**
diff --git a/drivers/iio/adc/ad7625.c b/drivers/iio/adc/ad7625.c
index aefe3bf75c91..afa9bf4ddf3c 100644
--- a/drivers/iio/adc/ad7625.c
+++ b/drivers/iio/adc/ad7625.c
@@ -477,12 +477,12 @@ static int devm_ad7625_pwm_get(struct device *dev,
ref_clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ref_clk))
return dev_err_probe(dev, PTR_ERR(ref_clk),
- "failed to get ref_clk");
+ "failed to get ref_clk\n");
ref_clk_rate_hz = clk_get_rate(ref_clk);
if (!ref_clk_rate_hz)
return dev_err_probe(dev, -EINVAL,
- "failed to get ref_clk rate");
+ "failed to get ref_clk rate\n");
st->ref_clk_rate_hz = ref_clk_rate_hz;
@@ -533,7 +533,7 @@ static int devm_ad7625_regulator_setup(struct device *dev,
if (!st->info->has_internal_vref && !st->have_refin && !ref_mv)
return dev_err_probe(dev, -EINVAL,
- "Need either REFIN or REF");
+ "Need either REFIN or REF\n");
if (st->have_refin && ref_mv)
return dev_err_probe(dev, -EINVAL,
@@ -623,7 +623,7 @@ static int ad7625_probe(struct platform_device *pdev)
st->back = devm_iio_backend_get(dev, NULL);
if (IS_ERR(st->back))
return dev_err_probe(dev, PTR_ERR(st->back),
- "failed to get IIO backend");
+ "failed to get IIO backend\n");
ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
if (ret)
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index e1bf13fe2cd7..76118fe22db8 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -254,6 +254,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.addr_shift = 4,
.read_mask = BIT(3),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 32,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index d55c71566707..1b50d9643a63 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,6 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
.addr_shift = 3,
.read_mask = BIT(6),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 32,
};
static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
@@ -265,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
- ret = ad_sd_reset(&st->sd, 32);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index a5aea4e9f1a7..0ec9cda10f5f 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -75,7 +75,7 @@ struct ad7944_adc {
u16 u16;
u32 u32;
} raw;
- u64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} sample __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index d358958ab310..f30119b42ba0 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -895,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
return 0;
}
-static struct iio_info ad9467_info = {
+static const struct iio_info ad9467_info = {
.read_raw = ad9467_read_raw,
.write_raw = ad9467_write_raw,
.update_scan_mode = ad9467_update_scan_mode,
@@ -903,6 +903,14 @@ static struct iio_info ad9467_info = {
.read_avail = ad9467_read_avail,
};
+/* Same as above, but without .read_avail */
+static const struct iio_info ad9467_info_no_read_avail = {
+ .read_raw = ad9467_read_raw,
+ .write_raw = ad9467_write_raw,
+ .update_scan_mode = ad9467_update_scan_mode,
+ .debugfs_reg_access = ad9467_reg_access,
+};
+
static int ad9467_scale_fill(struct ad9467_state *st)
{
const struct ad9467_chip_info *info = st->info;
@@ -1214,11 +1222,12 @@ static int ad9467_probe(struct spi_device *spi)
}
if (st->info->num_scales > 1)
- ad9467_info.read_avail = ad9467_read_avail;
+ indio_dev->info = &ad9467_info;
+ else
+ indio_dev->info = &ad9467_info_no_read_avail;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
- indio_dev->info = &ad9467_info;
ret = ad9467_iio_backend_get(st);
if (ret)
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 3fd200b34161..d5d81581ab34 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -29,8 +29,11 @@
#define AD_SD_COMM_CHAN_MASK 0x3
#define AD_SD_REG_COMM 0x00
+#define AD_SD_REG_STATUS 0x00
#define AD_SD_REG_DATA 0x03
+#define AD_SD_REG_STATUS_RDY 0x80
+
/**
* ad_sd_set_comm() - Set communications register
*
@@ -109,7 +112,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
}, {
.rx_buf = val,
.len = size,
- .cs_change = sigma_delta->bus_locked,
+ .cs_change = sigma_delta->keep_cs_asserted,
},
};
struct spi_message m;
@@ -178,13 +181,12 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_read_reg, "IIO_AD_SIGMA_DELTA");
* ad_sd_reset() - Reset the serial interface
*
* @sigma_delta: The sigma delta device
- * @reset_length: Number of SCLKs with DIN = 1
*
* Returns 0 on success, an error code otherwise.
**/
-int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
- unsigned int reset_length)
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta)
{
+ unsigned int reset_length = sigma_delta->info->num_resetclks;
uint8_t *buf;
unsigned int size;
int ret;
@@ -202,6 +204,107 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_NS_GPL(ad_sd_reset, "IIO_AD_SIGMA_DELTA");
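With num_resetclks now carried in ad_sigma_delta_info, callers drop the per-call length argument; a minimal sketch of the new shape (example_info is hypothetical and would be passed to ad_sd_init()):

static const struct ad_sigma_delta_info example_info = {
	/* 64 SCLKs with DIN held high reset the interface. */
	.num_resetclks = 64,
};

static int example_setup(struct ad_sigma_delta *sd)
{
	/* The reset length is taken from sd->info->num_resetclks. */
	return ad_sd_reset(sd);
}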
+static bool ad_sd_disable_irq(struct ad_sigma_delta *sigma_delta)
+{
+ guard(spinlock_irqsave)(&sigma_delta->irq_lock);
+
+ /* It's already off, return false to indicate nothing was changed */
+ if (sigma_delta->irq_dis)
+ return false;
+
+ sigma_delta->irq_dis = true;
+ disable_irq_nosync(sigma_delta->irq_line);
+ return true;
+}
+
+static void ad_sd_enable_irq(struct ad_sigma_delta *sigma_delta)
+{
+ guard(spinlock_irqsave)(&sigma_delta->irq_lock);
+
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->irq_line);
+}
+
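Both helpers rely on the scope-based guard() from linux/cleanup.h, which takes the lock for the remainder of the enclosing scope and drops it automatically. An equivalent open-coded version of the disable helper, shown only to make the locking explicit:

static bool example_disable_irq(struct ad_sigma_delta *sd)
{
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&sd->irq_lock, flags);
	if (!sd->irq_dis) {
		sd->irq_dis = true;
		disable_irq_nosync(sd->irq_line);
		changed = true;
	}
	spin_unlock_irqrestore(&sd->irq_lock, flags);

	return changed;
}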
+#define AD_SD_CLEAR_DATA_BUFLEN 9
+
+/* Called with `sigma_delta->bus_locked == true` only. */
+static int ad_sigma_delta_clear_pending_event(struct ad_sigma_delta *sigma_delta)
+{
+ bool pending_event;
+ unsigned int data_read_len = BITS_TO_BYTES(sigma_delta->info->num_resetclks);
+ u8 *data;
+ struct spi_transfer t[] = {
+ {
+ .len = 1,
+ }, {
+ .len = data_read_len,
+ }
+ };
+ struct spi_message m;
+ int ret;
+
+ /*
+ * Read R̅D̅Y̅ pin (if possible) or status register to check if there is an
+ * old event.
+ */
+ if (sigma_delta->rdy_gpiod) {
+ pending_event = gpiod_get_value(sigma_delta->rdy_gpiod);
+ } else {
+ unsigned int status_reg;
+
+ ret = ad_sd_read_reg(sigma_delta, AD_SD_REG_STATUS, 1, &status_reg);
+ if (ret)
+ return ret;
+
+ pending_event = !(status_reg & AD_SD_REG_STATUS_RDY);
+ }
+
+ if (!pending_event)
+ return 0;
+
+ /*
+ * In general the size of the data register is unknown. It varies from
+ * device to device, might be one byte longer if CONTROL.DATA_STATUS is
+ * set and even varies on some devices depending on which input is
+ * selected. So send one byte to start reading the data register and
+ * then just clock for some bytes with DIN (aka MOSI) high to not
+ * confuse the register access state machine after the data register was
+ * completely read. Note however that the sequence length must be
+ * shorter than the reset procedure.
+ */
+
+ data = kzalloc(data_read_len + 1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_message_init(&m);
+ if (sigma_delta->info->has_registers) {
+ unsigned int data_reg = sigma_delta->info->data_reg ?: AD_SD_REG_DATA;
+
+ data[0] = data_reg << sigma_delta->info->addr_shift;
+ data[0] |= sigma_delta->info->read_mask;
+ data[0] |= sigma_delta->comm;
+ t[0].tx_buf = data;
+ spi_message_add_tail(&t[0], &m);
+ }
+
+ /*
+ * The first transferred byte is part of the real data register,
+	 * so this doesn't need to be 0xff. The remaining
+	 * `data_read_len - 1` bytes contain fewer than num_resetclks ones,
+	 * so they cannot trigger an accidental interface reset.
+ */
+ t[1].tx_buf = data + 1;
+ data[1] = 0x00;
+ memset(data + 2, 0xff, data_read_len - 1);
+ spi_message_add_tail(&t[1], &m);
+
+ ret = spi_sync_locked(sigma_delta->spi, &m);
+
+ kfree(data);
+
+ return ret;
+}
+
int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
@@ -217,16 +320,18 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto out;
+
ret = ad_sigma_delta_set_mode(sigma_delta, mode);
if (ret < 0)
goto out;
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
time_left = wait_for_completion_timeout(&sigma_delta->completion, 2 * HZ);
if (time_left == 0) {
- sigma_delta->irq_dis = true;
- disable_irq_nosync(sigma_delta->irq_line);
+ ad_sd_disable_irq(sigma_delta);
ret = -EIO;
} else {
ret = 0;
@@ -292,10 +397,13 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto out_unlock;
+
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
ret = wait_for_completion_interruptible_timeout(
&sigma_delta->completion, HZ);
@@ -314,14 +422,13 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
&raw_sample);
out:
- if (!sigma_delta->irq_dis) {
- disable_irq_nosync(sigma_delta->irq_line);
- sigma_delta->irq_dis = true;
- }
+ ad_sd_disable_irq(sigma_delta);
- sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
ad_sigma_delta_disable_one(sigma_delta, chan->address);
+
+out_unlock:
+ sigma_delta->keep_cs_asserted = false;
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
iio_device_release_direct_mode(indio_dev);
@@ -392,12 +499,15 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
sigma_delta->bus_locked = true;
sigma_delta->keep_cs_asserted = true;
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto err_unlock;
+
ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
if (ret)
goto err_unlock;
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
return 0;
@@ -414,10 +524,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
reinit_completion(&sigma_delta->completion);
wait_for_completion_timeout(&sigma_delta->completion, HZ);
- if (!sigma_delta->irq_dis) {
- disable_irq_nosync(sigma_delta->irq_line);
- sigma_delta->irq_dis = true;
- }
+ ad_sd_disable_irq(sigma_delta);
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
@@ -516,8 +623,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
irq_handled:
iio_trigger_notify_done(indio_dev->trig);
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
return IRQ_HANDLED;
}
@@ -539,12 +645,31 @@ static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
{
struct ad_sigma_delta *sigma_delta = private;
- complete(&sigma_delta->completion);
- disable_irq_nosync(irq);
- sigma_delta->irq_dis = true;
- iio_trigger_poll(sigma_delta->trig);
+ /*
+ * AD7124 and a few others use the same physical line for interrupt
+ * reporting (R̅D̅Y̅) and MISO.
+ * As MISO toggles when reading a register, this likely results in a
+ * pending interrupt. This has two consequences: a) The irq might
+ * trigger immediately after it's enabled even though the conversion
+ * isn't done yet; and b) checking the STATUS register's R̅D̅Y̅ flag is
+ * off-limits as reading that would trigger another irq event.
+ *
+	 * So read the MISO line as GPIO (if available) and only trigger the irq
+ * if the line is active. Without such a GPIO assume this is a valid
+ * interrupt.
+ *
+ * Also as disable_irq_nosync() is used to disable the irq, only act if
+ * the irq wasn't disabled before.
+ */
+ if ((!sigma_delta->rdy_gpiod || gpiod_get_value(sigma_delta->rdy_gpiod)) &&
+ ad_sd_disable_irq(sigma_delta)) {
+ complete(&sigma_delta->completion);
+ iio_trigger_poll(sigma_delta->trig);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
}
/**
@@ -674,11 +799,24 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
}
}
+ spin_lock_init(&sigma_delta->irq_lock);
+
if (info->irq_line)
sigma_delta->irq_line = info->irq_line;
else
sigma_delta->irq_line = spi->irq;
+ sigma_delta->rdy_gpiod = devm_gpiod_get_optional(&spi->dev, "rdy", GPIOD_IN);
+ if (IS_ERR(sigma_delta->rdy_gpiod))
+ return dev_err_probe(&spi->dev, PTR_ERR(sigma_delta->rdy_gpiod),
+ "Failed to find rdy gpio\n");
+
+ if (sigma_delta->rdy_gpiod && !sigma_delta->irq_line) {
+ sigma_delta->irq_line = gpiod_to_irq(sigma_delta->rdy_gpiod);
+ if (sigma_delta->irq_line < 0)
+ return sigma_delta->irq_line;
+ }
+
iio_device_set_drvdata(indio_dev, sigma_delta);
return 0;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8e5aaf15a921..c3a1dea2aa82 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 'u', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
-#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)
+
+#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
@@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)
+
#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
{ \
.type = IIO_POSITIONRELATIVE, \
@@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
};
static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
- AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
- AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
- AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
- AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
- AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
- AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
- AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
- AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
- AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
- AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
- AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
- AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
- AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
- AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
- AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
- AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
- AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
- AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
- AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
- AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
- AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
- AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
- AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
- AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
+ AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
+ AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
+ AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
+ AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
+ AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
+ AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
+ AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
+ AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
+ AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
+ AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
+ AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
+ AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
+ AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
+ AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
+ AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
+ AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
+ AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
+ AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
+ AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
+ AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
+ AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
+ AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index a3f0a2321666..5927756b749a 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -979,7 +979,7 @@ static int at91_ts_register(struct iio_dev *idev,
return ret;
err:
- input_free_device(st->ts_input);
+ input_free_device(input);
return ret;
}
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 30328626d9be..221a5fdc1eaa 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -66,8 +66,6 @@ struct dln2_adc {
/* Demux table */
unsigned int demux_count;
struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
- /* Precomputed timestamp padding offset and length */
- unsigned int ts_pad_offset, ts_pad_length;
};
struct dln2_adc_port_chan {
@@ -111,8 +109,6 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
if (iio_get_masklength(indio_dev) &&
(*indio_dev->active_scan_mask & 0xff) == 0xff) {
dln2_adc_add_demux(dln2, 0, 0, 16);
- dln2->ts_pad_offset = 0;
- dln2->ts_pad_length = 0;
return;
}
@@ -127,16 +123,6 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
out_loc += 2;
in_loc += 2;
}
-
- if (indio_dev->scan_timestamp) {
- size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
-
- dln2->ts_pad_offset = out_loc;
- dln2->ts_pad_length = ts_offset * sizeof(int64_t) - out_loc;
- } else {
- dln2->ts_pad_offset = 0;
- dln2->ts_pad_length = 0;
- }
}
static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
@@ -494,6 +480,8 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
if (ret < 0)
goto done;
+ memset(&data, 0, sizeof(data));
+
/* Demux operation */
for (i = 0; i < dln2->demux_count; ++i) {
t = &dln2->demux[i];
@@ -501,11 +489,6 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
(void *)dev_data.values + t->from, t->length);
}
- /* Zero padding space between values and timestamp */
- if (dln2->ts_pad_length)
- memset((void *)data.values + dln2->ts_pad_offset,
- 0, dln2->ts_pad_length);
-
iio_push_to_buffers_with_timestamp(indio_dev, &data,
iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 48c95e12e791..40d14faa71c5 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -150,7 +150,7 @@ struct ina2xx_chip_info {
/* data buffer needs space for channel data and timestamp */
struct {
u16 chan[4];
- u64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
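The aligned_s64 conversions in this and the following drivers all serve the same layout contract: iio_push_to_buffers_with_timestamp() stores the timestamp as a naturally aligned s64 in the last 8 bytes of the scan, so the struct must pad accordingly. A minimal sketch of such a scan layout:

struct example_scan {
	u16 chan[4];	/* channel data, packed at the front */
	aligned_s64 ts;	/* timestamp slot; 8-byte alignment guaranteed */
};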
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 3d0a7d0eb7ee..565ca2e21c0c 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -39,7 +39,7 @@ struct max1118 {
/* Ensure natural alignment of buffer elements */
struct {
u8 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
u8 data __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index f0dc4b460903..76abafd47404 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -143,7 +143,7 @@ struct max11410_state {
int irq;
struct {
u32 data __aligned(IIO_DMA_MINALIGN);
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 9a0baea08ab6..e8d731bc34e0 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -161,6 +161,7 @@ struct max1363_chip_info {
* @vref_uv: Actual (external or internal) reference voltage
* @send: function used to send data to the chip
* @recv: function used to receive data from the chip
+ * @data: buffer to store channel data and timestamp
*/
struct max1363_state {
struct i2c_client *client;
@@ -186,6 +187,10 @@ struct max1363_state {
const char *buf, int count);
int (*recv)(const struct i2c_client *client,
char *buf, int count);
+ struct {
+ u8 buf[MAX1363_MAX_CHANNELS * 2];
+ aligned_s64 ts;
+ } data;
};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
@@ -1462,22 +1467,10 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1363_state *st = iio_priv(indio_dev);
- __u8 *rxbuf;
int b_sent;
- size_t d_size;
unsigned long numvals = bitmap_weight(st->current_mode->modemask,
MAX1363_MAX_CHANNELS);
- /* Ensure the timestamp is 8 byte aligned */
- if (st->chip_info->bits != 8)
- d_size = numvals*2;
- else
- d_size = numvals;
- if (indio_dev->scan_timestamp) {
- d_size += sizeof(s64);
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
- }
/* Monitor mode prevents reading. Whilst not currently implemented,
* we might as well have this test here in the meantime, as it does
* no harm.
@@ -1485,21 +1478,16 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (numvals == 0)
goto done;
- rxbuf = kmalloc(d_size, GFP_KERNEL);
- if (rxbuf == NULL)
- goto done;
if (st->chip_info->bits != 8)
- b_sent = st->recv(st->client, rxbuf, numvals * 2);
+ b_sent = st->recv(st->client, st->data.buf, numvals * 2);
else
- b_sent = st->recv(st->client, rxbuf, numvals);
+ b_sent = st->recv(st->client, st->data.buf, numvals);
if (b_sent < 0)
- goto done_free;
+ goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, rxbuf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
iio_get_time_ns(indio_dev));
-done_free:
- kfree(rxbuf);
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index b097f04172c8..6748b44d568d 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -122,7 +122,7 @@ struct mcp3911 {
const struct mcp3911_chip_info *chip;
struct {
u32 channels[MCP39XX_MAX_NUM_CHANNELS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
u8 tx_buf __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2d475b43e717..997def4a4d2f 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -155,10 +155,10 @@
*/
#define MESON_SAR_ADC_REG11 0x2c
#define MESON_SAR_ADC_REG11_BANDGAP_EN BIT(13)
- #define MESON_SAR_ADC_REG11_CMV_SEL BIT(6)
- #define MESON_SAR_ADC_REG11_VREF_VOLTAGE BIT(5)
- #define MESON_SAR_ADC_REG11_EOC BIT(1)
- #define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
+ #define MESON_SAR_ADC_REG11_CMV_SEL BIT(6)
+ #define MESON_SAR_ADC_REG11_VREF_VOLTAGE BIT(5)
+ #define MESON_SAR_ADC_REG11_EOC BIT(1)
+ #define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
#define MESON_SAR_ADC_REG13 0x34
#define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8)
@@ -315,19 +315,17 @@ static const struct iio_chan_spec meson_sar_adc_and_temp_iio_channels[] = {
struct meson_sar_adc_param {
bool has_bl30_integration;
unsigned long clock_rate;
- u32 bandgap_reg;
unsigned int resolution;
const struct regmap_config *regmap_config;
u8 temperature_trimming_bits;
unsigned int temperature_multiplier;
unsigned int temperature_divider;
u8 disable_ring_counter;
- bool has_reg11;
bool has_vref_select;
u8 vref_select;
u8 cmv_select;
u8 adc_eoc;
- enum meson_sar_adc_vref_sel vref_volatge;
+ enum meson_sar_adc_vref_sel vref_voltage;
};
struct meson_sar_adc_data {
@@ -976,7 +974,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN,
regval);
- if (priv->param->has_reg11) {
+ if (priv->param->regmap_config->max_register >= MESON_SAR_ADC_REG11) {
regval = FIELD_PREP(MESON_SAR_ADC_REG11_EOC, priv->param->adc_eoc);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_EOC, regval);
@@ -989,7 +987,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
}
regval = FIELD_PREP(MESON_SAR_ADC_REG11_VREF_VOLTAGE,
- priv->param->vref_volatge);
+ priv->param->vref_voltage);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_VREF_VOLTAGE, regval);
@@ -1013,16 +1011,15 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- const struct meson_sar_adc_param *param = priv->param;
- u32 enable_mask;
- if (param->bandgap_reg == MESON_SAR_ADC_REG11)
- enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+ if (priv->param->regmap_config->max_register >= MESON_SAR_ADC_REG11)
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_BANDGAP_EN,
+ on_off ? MESON_SAR_ADC_REG11_BANDGAP_EN : 0);
else
- enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
-
- regmap_update_bits(priv->regmap, param->bandgap_reg, enable_mask,
- on_off ? enable_mask : 0);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_VBG_EN,
+ on_off ? MESON_SAR_ADC_DELTA_10_TS_VBG_EN : 0);
}
static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
@@ -1186,7 +1183,6 @@ static const struct iio_info meson_sar_adc_iio_info = {
static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
.has_bl30_integration = false,
.clock_rate = 1150000,
- .bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.temperature_trimming_bits = 4,
@@ -1197,7 +1193,6 @@ static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
static const struct meson_sar_adc_param meson_sar_adc_meson8b_param = {
.has_bl30_integration = false,
.clock_rate = 1150000,
- .bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.temperature_trimming_bits = 5,
@@ -1208,35 +1203,29 @@ static const struct meson_sar_adc_param meson_sar_adc_meson8b_param = {
static const struct meson_sar_adc_param meson_sar_adc_gxbb_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
.cmv_select = 1,
@@ -1245,11 +1234,9 @@ static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.has_bl30_integration = false,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
.adc_eoc = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
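
The meson_saradc rework above drops two pieces of per-SoC data (has_reg11 and bandgap_reg) that only restated what each SoC's regmap_config already encodes: REG11 exists exactly when the register window extends that far. A condensed sketch of the replacement test, assuming the driver's existing types (helper name hypothetical):

    static bool foo_has_reg11(const struct meson_sar_adc_param *param)
    {
            /* REG11 is present iff the SoC's register map reaches it */
            return param->regmap_config->max_register >= MESON_SAR_ADC_REG11;
    }
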
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index b0f6727cfe38..63f518215156 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -12,6 +12,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/limits.h>
#include <linux/regmap.h>
#include <linux/units.h>
@@ -67,6 +68,14 @@ enum pac1921_mxsl {
#define PAC1921_DEFAULT_DI_GAIN 0 /* 2^(value): 1x gain (HW default) */
#define PAC1921_DEFAULT_NUM_SAMPLES 0 /* 2^(value): 1 sample (HW default) */
+#define PAC1921_ACPI_GET_uOHMS_VALS 0
+#define PAC1921_ACPI_GET_LABEL 1
+
+/* f7bb9932-86ee-4516-a236-7a7a742e55cb */
+static const guid_t pac1921_guid =
+ GUID_INIT(0xf7bb9932, 0x86ee, 0x4516, 0xa2,
+ 0x36, 0x7a, 0x7a, 0x74, 0x2e, 0x55, 0xcb);
+
/*
* Pre-computed scale factors for BUS voltage
* format: IIO_VAL_INT_PLUS_NANO
@@ -200,7 +209,7 @@ struct pac1921_priv {
struct {
u16 chan[PAC1921_NUM_MEAS_CHANS];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
@@ -782,7 +791,7 @@ static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
const char *buf, size_t len)
{
struct pac1921_priv *priv = iio_priv(indio_dev);
- u64 rshunt_uohm;
+ u32 rshunt_uohm;
int val, val_fract;
int ret;
@@ -793,10 +802,17 @@ static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
if (ret)
return ret;
- rshunt_uohm = val * MICRO + val_fract;
- if (rshunt_uohm == 0 || rshunt_uohm > INT_MAX)
+ /*
+ * Validate that the shunt is neither zero nor greater than INT_MAX
+ * micro-ohms. The check is done before the calculation so that
+ * val * MICRO cannot overflow.
+ */
+ if ((!val && !val_fract) || val > INT_MAX / MICRO ||
+ (val == INT_MAX / MICRO && val_fract > INT_MAX % MICRO))
return -EINVAL;
+ rshunt_uohm = val * MICRO + val_fract;
+
guard(mutex)(&priv->lock);
priv->rshunt_uohm = rshunt_uohm;
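
The rewritten bound is worth spelling out: with MICRO == 1000000 and a 32-bit int, INT_MAX / MICRO == 2147 and INT_MAX % MICRO == 483647, so val * MICRO + val_fract fits in an int exactly when val < 2147, or val == 2147 and val_fract <= 483647. Testing those conditions first means the multiplication itself can never overflow. A standalone restatement of the check (helper name hypothetical):

    #include <linux/limits.h>
    #include <linux/units.h>

    static bool foo_shunt_in_range(int val, int val_fract)
    {
            if (!val && !val_fract)
                    return false;   /* zero ohms is not a valid shunt */
            if (val > INT_MAX / MICRO)
                    return false;   /* val * MICRO alone would overflow */
            if (val == INT_MAX / MICRO && val_fract > INT_MAX % MICRO)
                    return false;   /* the fractional part tips it over */
            return true;
    }
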
@@ -1151,6 +1167,61 @@ static void pac1921_regulator_disable(void *data)
regulator_disable(regulator);
}
+/*
+ * Documentation related to the ACPI device definition
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC193X-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static int pac1921_match_acpi_device(struct iio_dev *indio_dev)
+{
+ acpi_handle handle;
+ union acpi_object *status;
+ char *label;
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->client->dev;
+
+ handle = ACPI_HANDLE(dev);
+
+ status = acpi_evaluate_dsm(handle, &pac1921_guid, 1,
+ PAC1921_ACPI_GET_uOHMS_VALS, NULL);
+ if (!status)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not read shunt from ACPI table\n");
+
+ priv->rshunt_uohm = status->package.elements[0].integer.value;
+ ACPI_FREE(status);
+
+ status = acpi_evaluate_dsm(handle, &pac1921_guid, 1,
+ PAC1921_ACPI_GET_LABEL, NULL);
+ if (!status)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not read label from ACPI table\n");
+
+ label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
+ GFP_KERNEL);
+ ACPI_FREE(status);
+ if (!label)
+ return -ENOMEM;
+
+ indio_dev->label = label;
+
+ return 0;
+}
+
+static int pac1921_parse_of_fw(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->client->dev;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &priv->rshunt_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot read shunt resistor property\n");
+
+ return 0;
+}
+
static int pac1921_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1179,11 +1250,14 @@ static int pac1921_probe(struct i2c_client *client)
priv->di_gain = PAC1921_DEFAULT_DI_GAIN;
priv->n_samples = PAC1921_DEFAULT_NUM_SAMPLES;
- ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
- &priv->rshunt_uohm);
+ if (is_acpi_device_node(dev->fwnode))
+ ret = pac1921_match_acpi_device(indio_dev);
+ else
+ ret = pac1921_parse_of_fw(indio_dev);
if (ret)
return dev_err_probe(dev, ret,
- "Cannot read shunt resistor property\n");
+ "Parameter parsing error\n");
+
if (priv->rshunt_uohm == 0 || priv->rshunt_uohm > INT_MAX)
return dev_err_probe(dev, -EINVAL,
"Invalid shunt resistor: %u\n",
@@ -1246,11 +1320,18 @@ static const struct of_device_id pac1921_of_match[] = {
};
MODULE_DEVICE_TABLE(of, pac1921_of_match);
+static const struct acpi_device_id pac1921_acpi_match[] = {
+ { "MCHP1921" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pac1921_acpi_match);
+
static struct i2c_driver pac1921_driver = {
.driver = {
.name = "pac1921",
.pm = pm_sleep_ptr(&pac1921_pm_ops),
.of_match_table = pac1921_of_match,
+ .acpi_match_table = pac1921_acpi_match,
},
.probe = pac1921_probe,
.id_table = pac1921_id,
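
For context, the _DSM protocol also reserves function 0 for a bitmask of the function indices the firmware implements. The patch does not query it; a hypothetical hardening step (not in this series) could reject firmware lacking the two functions before evaluating them:

    /* hypothetical: function 0 of the _DSM reports supported functions */
    if (!acpi_check_dsm(handle, &pac1921_guid, 1,
                        BIT(PAC1921_ACPI_GET_uOHMS_VALS) |
                        BIT(PAC1921_ACPI_GET_LABEL)))
            return dev_err_probe(dev, -ENODEV,
                                 "required _DSM functions missing\n");
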
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 240cfa391674..a29e54754c8f 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -363,11 +363,13 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
*/
struct {
u16 values[SARADC_MAX_CHANNELS];
- int64_t timestamp;
+ aligned_s64 timestamp;
} data;
int ret;
int i, j = 0;
+ memset(&data, 0, sizeof(data));
+
mutex_lock(&info->lock);
iio_for_each_active_channel(i_dev, i) {
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index 56ed948a8ae1..337bc8b31b2c 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -634,7 +634,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
struct device *dev = priv->dev;
struct {
u16 vals[RTQ6056_MAX_CHANNEL];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} data;
unsigned int raw;
int i = 0, bit, ret;
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index cd3a7e46ea53..883c167c0670 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -8,12 +8,13 @@
*/
#include <linux/bitfield.h>
-#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -32,20 +33,15 @@
#define RZG2L_ADM1_MS BIT(2)
#define RZG2L_ADM1_BS BIT(4)
#define RZG2L_ADM1_EGA_MASK GENMASK(13, 12)
-#define RZG2L_ADM2_CHSEL_MASK GENMASK(7, 0)
#define RZG2L_ADM3_ADIL_MASK GENMASK(31, 24)
#define RZG2L_ADM3_ADCMP_MASK GENMASK(23, 16)
-#define RZG2L_ADM3_ADCMP_E FIELD_PREP(RZG2L_ADM3_ADCMP_MASK, 0xe)
-#define RZG2L_ADM3_ADSMP_MASK GENMASK(15, 0)
#define RZG2L_ADINT 0x20
-#define RZG2L_ADINT_INTEN_MASK GENMASK(7, 0)
#define RZG2L_ADINT_CSEEN BIT(16)
#define RZG2L_ADINT_INTS BIT(31)
#define RZG2L_ADSTS 0x24
#define RZG2L_ADSTS_CSEST BIT(16)
-#define RZG2L_ADSTS_INTST_MASK GENMASK(7, 0)
#define RZG2L_ADIVC 0x28
#define RZG2L_ADIVC_DIVADC_MASK GENMASK(8, 0)
@@ -56,12 +52,28 @@
#define RZG2L_ADCR(n) (0x30 + ((n) * 0x4))
#define RZG2L_ADCR_AD_MASK GENMASK(11, 0)
-#define RZG2L_ADSMP_DEFAULT_SAMPLING 0x578
-
-#define RZG2L_ADC_MAX_CHANNELS 8
-#define RZG2L_ADC_CHN_MASK 0x7
+#define RZG2L_ADC_MAX_CHANNELS 9
#define RZG2L_ADC_TIMEOUT usecs_to_jiffies(1 * 4)
+/**
+ * struct rzg2l_adc_hw_params - ADC hardware specific parameters
+ * @default_adsmp: default ADC sampling period (see ADM3 register); index 0 is
+ * used for voltage channels, index 1 for the temperature channel
+ * @adsmp_mask: ADC sampling period mask (see ADM3 register)
+ * @adint_inten_mask: conversion end interrupt mask (see ADINT register)
+ * @default_adcmp: default ADC compare value (see ADM3 register)
+ * @num_channels: number of supported channels
+ * @adivc: specifies if the ADIVC register is available
+ */
+struct rzg2l_adc_hw_params {
+ u16 default_adsmp[2];
+ u16 adsmp_mask;
+ u16 adint_inten_mask;
+ u8 default_adcmp;
+ u8 num_channels;
+ bool adivc;
+};
+
struct rzg2l_adc_data {
const struct iio_chan_spec *channels;
u8 num_channels;
@@ -69,25 +81,36 @@ struct rzg2l_adc_data {
struct rzg2l_adc {
void __iomem *base;
- struct clk *pclk;
- struct clk *adclk;
struct reset_control *presetn;
struct reset_control *adrstn;
- struct completion completion;
const struct rzg2l_adc_data *data;
+ const struct rzg2l_adc_hw_params *hw_params;
+ struct completion completion;
struct mutex lock;
u16 last_val[RZG2L_ADC_MAX_CHANNELS];
+ bool was_rpm_active;
+};
+
+/**
+ * struct rzg2l_adc_channel - ADC channel descriptor
+ * @name: ADC channel name
+ * @type: ADC channel type
+ */
+struct rzg2l_adc_channel {
+ const char * const name;
+ enum iio_chan_type type;
};
-static const char * const rzg2l_adc_channel_name[] = {
- "adc0",
- "adc1",
- "adc2",
- "adc3",
- "adc4",
- "adc5",
- "adc6",
- "adc7",
+static const struct rzg2l_adc_channel rzg2l_adc_channels[] = {
+ { "adc0", IIO_VOLTAGE },
+ { "adc1", IIO_VOLTAGE },
+ { "adc2", IIO_VOLTAGE },
+ { "adc3", IIO_VOLTAGE },
+ { "adc4", IIO_VOLTAGE },
+ { "adc5", IIO_VOLTAGE },
+ { "adc6", IIO_VOLTAGE },
+ { "adc7", IIO_VOLTAGE },
+ { "adc8", IIO_TEMP },
};
static unsigned int rzg2l_adc_readl(struct rzg2l_adc *adc, u32 reg)
@@ -115,7 +138,7 @@ static void rzg2l_adc_pwr(struct rzg2l_adc *adc, bool on)
static void rzg2l_adc_start_stop(struct rzg2l_adc *adc, bool start)
{
- int timeout = 5;
+ int ret;
u32 reg;
reg = rzg2l_adc_readl(adc, RZG2L_ADM(0));
@@ -128,15 +151,10 @@ static void rzg2l_adc_start_stop(struct rzg2l_adc *adc, bool start)
if (start)
return;
- do {
- usleep_range(100, 200);
- reg = rzg2l_adc_readl(adc, RZG2L_ADM(0));
- timeout--;
- if (!timeout) {
- pr_err("%s stopping ADC timed out\n", __func__);
- break;
- }
- } while (((reg & RZG2L_ADM0_ADBSY) || (reg & RZG2L_ADM0_ADCE)));
+ ret = read_poll_timeout(rzg2l_adc_readl, reg, !(reg & (RZG2L_ADM0_ADBSY | RZG2L_ADM0_ADCE)),
+ 200, 1000, true, adc, RZG2L_ADM(0));
+ if (ret)
+ pr_err("%s stopping ADC timed out\n", __func__);
}
static void rzg2l_set_trigger(struct rzg2l_adc *adc)
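
read_poll_timeout(), from <linux/iopoll.h>, captures the open-coded loop deleted above: it repeatedly calls op(args...) into val, sleeping sleep_us between reads, until cond becomes true or timeout_us expires, and returns 0 on success or -ETIMEDOUT. The sixth argument requests one sleep before the first read, preserving the old loop's ordering. The generic shape, with hypothetical register names:

    #include <linux/iopoll.h>

    u32 reg;
    int ret;

    /* poll every 200us, give up after 1000us, sleep once before reading */
    ret = read_poll_timeout(readl, reg, !(reg & FOO_BUSY),
                            200, 1000, true, base + FOO_STATUS);
    if (ret)
            dev_err(dev, "device stuck busy\n");
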
@@ -158,8 +176,18 @@ static void rzg2l_set_trigger(struct rzg2l_adc *adc)
rzg2l_adc_writel(adc, RZG2L_ADM(1), reg);
}
+static u8 rzg2l_adc_ch_to_adsmp_index(u8 ch)
+{
+ if (rzg2l_adc_channels[ch].type == IIO_VOLTAGE)
+ return 0;
+
+ return 1;
+}
+
static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
+ u8 index = rzg2l_adc_ch_to_adsmp_index(ch);
u32 reg;
if (rzg2l_adc_readl(adc, RZG2L_ADM(0)) & RZG2L_ADM0_ADBSY)
@@ -169,10 +197,15 @@ static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
/* Select analog input channel subjected to conversion. */
reg = rzg2l_adc_readl(adc, RZG2L_ADM(2));
- reg &= ~RZG2L_ADM2_CHSEL_MASK;
+ reg &= ~GENMASK(hw_params->num_channels - 1, 0);
reg |= BIT(ch);
rzg2l_adc_writel(adc, RZG2L_ADM(2), reg);
+ reg = rzg2l_adc_readl(adc, RZG2L_ADM(3));
+ reg &= ~hw_params->adsmp_mask;
+ reg |= hw_params->default_adsmp[index];
+ rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
+
/*
* Setup ADINT
* INTS[31] - Select pulse signal
@@ -181,36 +214,26 @@ static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
*/
reg = rzg2l_adc_readl(adc, RZG2L_ADINT);
reg &= ~RZG2L_ADINT_INTS;
- reg &= ~RZG2L_ADINT_INTEN_MASK;
+ reg &= ~hw_params->adint_inten_mask;
reg |= (RZG2L_ADINT_CSEEN | BIT(ch));
rzg2l_adc_writel(adc, RZG2L_ADINT, reg);
return 0;
}
-static int rzg2l_adc_set_power(struct iio_dev *indio_dev, bool on)
-{
- struct device *dev = indio_dev->dev.parent;
-
- if (on)
- return pm_runtime_resume_and_get(dev);
-
- return pm_runtime_put_sync(dev);
-}
-
static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc, u8 ch)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
+ struct device *dev = indio_dev->dev.parent;
int ret;
- ret = rzg2l_adc_set_power(indio_dev, true);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
ret = rzg2l_adc_conversion_setup(adc, ch);
- if (ret) {
- rzg2l_adc_set_power(indio_dev, false);
- return ret;
- }
+ if (ret)
+ goto rpm_put;
reinit_completion(&adc->completion);
@@ -218,13 +241,16 @@ static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc
if (!wait_for_completion_timeout(&adc->completion, RZG2L_ADC_TIMEOUT)) {
rzg2l_adc_writel(adc, RZG2L_ADINT,
- rzg2l_adc_readl(adc, RZG2L_ADINT) & ~RZG2L_ADINT_INTEN_MASK);
- rzg2l_adc_start_stop(adc, false);
- rzg2l_adc_set_power(indio_dev, false);
- return -ETIMEDOUT;
+ rzg2l_adc_readl(adc, RZG2L_ADINT) & ~hw_params->adint_inten_mask);
+ ret = -ETIMEDOUT;
}
- return rzg2l_adc_set_power(indio_dev, false);
+ rzg2l_adc_start_stop(adc, false);
+
+rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
}
static int rzg2l_adc_read_raw(struct iio_dev *indio_dev,
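
The conversion path now uses the canonical runtime-PM bracket: take a reference that resumes the device, do the work, then record activity and drop the reference with the autosuspend variant, so the hardware is powered down only after the autosuspend delay passes with no further use. Stripped to its skeleton:

    ret = pm_runtime_resume_and_get(dev);
    if (ret)
            return ret;

    /* ... program and run the conversion ... */

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
    return ret;
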
@@ -233,24 +259,22 @@ static int rzg2l_adc_read_raw(struct iio_dev *indio_dev,
{
struct rzg2l_adc *adc = iio_priv(indio_dev);
int ret;
- u8 ch;
switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type != IIO_VOLTAGE)
+ case IIO_CHAN_INFO_RAW: {
+ if (chan->type != IIO_VOLTAGE && chan->type != IIO_TEMP)
return -EINVAL;
- mutex_lock(&adc->lock);
- ch = chan->channel & RZG2L_ADC_CHN_MASK;
- ret = rzg2l_adc_conversion(indio_dev, adc, ch);
- if (ret) {
- mutex_unlock(&adc->lock);
+ guard(mutex)(&adc->lock);
+
+ ret = rzg2l_adc_conversion(indio_dev, adc, chan->channel);
+ if (ret)
return ret;
- }
- *val = adc->last_val[ch];
- mutex_unlock(&adc->lock);
+
+ *val = adc->last_val[chan->channel];
return IIO_VAL_INT;
+ }
default:
return -EINVAL;
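
guard(mutex)(...), from <linux/cleanup.h>, is what lets the reworked read_raw() above drop its explicit unlock calls: the mutex is released automatically when the guarded scope exits, on every return path. A minimal sketch with hypothetical names:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    struct foo_state {
            struct mutex lock;
            bool valid;
            int last_val;
    };

    static int foo_get_last(struct foo_state *st, int *val)
    {
            guard(mutex)(&st->lock);

            if (!st->valid)
                    return -EAGAIN; /* unlocked automatically here too */

            *val = st->last_val;
            return 0;
    }
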
@@ -261,7 +285,7 @@ static int rzg2l_adc_read_label(struct iio_dev *iio_dev,
const struct iio_chan_spec *chan,
char *label)
{
- return sysfs_emit(label, "%s\n", rzg2l_adc_channel_name[chan->channel]);
+ return sysfs_emit(label, "%s\n", rzg2l_adc_channels[chan->channel].name);
}
static const struct iio_info rzg2l_adc_iio_info = {
@@ -272,6 +296,7 @@ static const struct iio_info rzg2l_adc_iio_info = {
static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
{
struct rzg2l_adc *adc = dev_id;
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
unsigned long intst;
u32 reg;
int ch;
@@ -284,11 +309,11 @@ static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- intst = reg & RZG2L_ADSTS_INTST_MASK;
+ intst = reg & GENMASK(hw_params->num_channels - 1, 0);
if (!intst)
return IRQ_NONE;
- for_each_set_bit(ch, &intst, RZG2L_ADC_MAX_CHANNELS)
+ for_each_set_bit(ch, &intst, hw_params->num_channels)
adc->last_val[ch] = rzg2l_adc_readl(adc, RZG2L_ADCR(ch)) & RZG2L_ADCR_AD_MASK;
/* clear the channel interrupt */
@@ -301,6 +326,7 @@ static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l_adc *adc)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
struct iio_chan_spec *chan_array;
struct rzg2l_adc_data *data;
unsigned int channel;
@@ -313,15 +339,12 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
return -ENOMEM;
num_channels = device_get_child_node_count(&pdev->dev);
- if (!num_channels) {
- dev_err(&pdev->dev, "no channel children\n");
- return -ENODEV;
- }
+ if (!num_channels)
+ return dev_err_probe(&pdev->dev, -ENODEV, "no channel children\n");
- if (num_channels > RZG2L_ADC_MAX_CHANNELS) {
- dev_err(&pdev->dev, "num of channel children out of range\n");
- return -EINVAL;
- }
+ if (num_channels > hw_params->num_channels)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "num of channel children out of range\n");
chan_array = devm_kcalloc(&pdev->dev, num_channels, sizeof(*chan_array),
GFP_KERNEL);
@@ -334,14 +357,14 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
if (ret)
return ret;
- if (channel >= RZG2L_ADC_MAX_CHANNELS)
+ if (channel >= hw_params->num_channels)
return -EINVAL;
- chan_array[i].type = IIO_VOLTAGE;
+ chan_array[i].type = rzg2l_adc_channels[channel].type;
chan_array[i].indexed = 1;
chan_array[i].channel = channel;
chan_array[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- chan_array[i].datasheet_name = rzg2l_adc_channel_name[channel];
+ chan_array[i].datasheet_name = rzg2l_adc_channels[channel].name;
i++;
}
@@ -352,13 +375,13 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
return 0;
}
-static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
+static int rzg2l_adc_hw_init(struct device *dev, struct rzg2l_adc *adc)
{
- int timeout = 5;
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
u32 reg;
int ret;
- ret = clk_prepare_enable(adc->pclk);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
@@ -367,21 +390,19 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
reg |= RZG2L_ADM0_SRESB;
rzg2l_adc_writel(adc, RZG2L_ADM(0), reg);
- while (!(rzg2l_adc_readl(adc, RZG2L_ADM(0)) & RZG2L_ADM0_SRESB)) {
- if (!timeout) {
- ret = -EBUSY;
- goto exit_hw_init;
- }
- timeout--;
- usleep_range(100, 200);
+ ret = read_poll_timeout(rzg2l_adc_readl, reg, reg & RZG2L_ADM0_SRESB,
+ 200, 1000, false, adc, RZG2L_ADM(0));
+ if (ret)
+ goto exit_hw_init;
+
+ if (hw_params->adivc) {
+ /* Only division by 4 can be set */
+ reg = rzg2l_adc_readl(adc, RZG2L_ADIVC);
+ reg &= ~RZG2L_ADIVC_DIVADC_MASK;
+ reg |= RZG2L_ADIVC_DIVADC_4;
+ rzg2l_adc_writel(adc, RZG2L_ADIVC, reg);
}
- /* Only division by 4 can be set */
- reg = rzg2l_adc_readl(adc, RZG2L_ADIVC);
- reg &= ~RZG2L_ADIVC_DIVADC_MASK;
- reg |= RZG2L_ADIVC_DIVADC_4;
- rzg2l_adc_writel(adc, RZG2L_ADIVC, reg);
-
/*
* Setup ADM3
* ADIL[31:24] - Should be always set to 0
@@ -391,35 +412,18 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
reg = rzg2l_adc_readl(adc, RZG2L_ADM(3));
reg &= ~RZG2L_ADM3_ADIL_MASK;
reg &= ~RZG2L_ADM3_ADCMP_MASK;
- reg &= ~RZG2L_ADM3_ADSMP_MASK;
- reg |= (RZG2L_ADM3_ADCMP_E | RZG2L_ADSMP_DEFAULT_SAMPLING);
+ reg &= ~hw_params->adsmp_mask;
+ reg |= FIELD_PREP(RZG2L_ADM3_ADCMP_MASK, hw_params->default_adcmp) |
+ hw_params->default_adsmp[0];
+
rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
exit_hw_init:
- clk_disable_unprepare(adc->pclk);
-
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
-static void rzg2l_adc_pm_runtime_disable(void *data)
-{
- struct device *dev = data;
-
- pm_runtime_disable(dev->parent);
-}
-
-static void rzg2l_adc_pm_runtime_set_suspended(void *data)
-{
- struct device *dev = data;
-
- pm_runtime_set_suspended(dev->parent);
-}
-
-static void rzg2l_adc_reset_assert(void *data)
-{
- reset_control_assert(data);
-}
-
static int rzg2l_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -434,6 +438,10 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
adc = iio_priv(indio_dev);
+ adc->hw_params = device_get_match_data(dev);
+ if (!adc->hw_params || adc->hw_params->num_channels > RZG2L_ADC_MAX_CHANNELS)
+ return -EINVAL;
+
ret = rzg2l_adc_parse_properties(pdev, adc);
if (ret)
return ret;
@@ -444,63 +452,28 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(adc->pclk)) {
- dev_err(dev, "Failed to get pclk");
- return PTR_ERR(adc->pclk);
- }
+ adc->adrstn = devm_reset_control_get_exclusive_deasserted(dev, "adrst-n");
+ if (IS_ERR(adc->adrstn))
+ return dev_err_probe(dev, PTR_ERR(adc->adrstn),
+ "failed to get/deassert adrst-n\n");
- adc->adclk = devm_clk_get(dev, "adclk");
- if (IS_ERR(adc->adclk)) {
- dev_err(dev, "Failed to get adclk");
- return PTR_ERR(adc->adclk);
- }
+ adc->presetn = devm_reset_control_get_exclusive_deasserted(dev, "presetn");
+ if (IS_ERR(adc->presetn))
+ return dev_err_probe(dev, PTR_ERR(adc->presetn),
+ "failed to get/deassert presetn\n");
- adc->adrstn = devm_reset_control_get_exclusive(dev, "adrst-n");
- if (IS_ERR(adc->adrstn)) {
- dev_err(dev, "failed to get adrstn\n");
- return PTR_ERR(adc->adrstn);
- }
-
- adc->presetn = devm_reset_control_get_exclusive(dev, "presetn");
- if (IS_ERR(adc->presetn)) {
- dev_err(dev, "failed to get presetn\n");
- return PTR_ERR(adc->presetn);
- }
-
- ret = reset_control_deassert(adc->adrstn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert adrstn pin, %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_reset_assert, adc->adrstn);
- if (ret) {
- dev_err(&pdev->dev, "failed to register adrstn assert devm action, %d\n",
- ret);
+ pm_runtime_set_autosuspend_delay(dev, 300);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
return ret;
- }
- ret = reset_control_deassert(adc->presetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert presetn pin, %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_reset_assert, adc->presetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to register presetn assert devm action, %d\n",
- ret);
- return ret;
- }
+ platform_set_drvdata(pdev, indio_dev);
- ret = rzg2l_adc_hw_init(adc);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize ADC HW, %d\n", ret);
- return ret;
- }
+ ret = rzg2l_adc_hw_init(dev, adc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to initialize ADC HW\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -513,72 +486,130 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
init_completion(&adc->completion);
- platform_set_drvdata(pdev, indio_dev);
-
indio_dev->name = DRIVER_NAME;
indio_dev->info = &rzg2l_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adc->data->channels;
indio_dev->num_channels = adc->data->num_channels;
- pm_runtime_set_suspended(dev);
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_pm_runtime_set_suspended, &indio_dev->dev);
- if (ret)
- return ret;
-
- pm_runtime_enable(dev);
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_pm_runtime_disable, &indio_dev->dev);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
+static const struct rzg2l_adc_hw_params rzg2l_hw_params = {
+ .num_channels = 8,
+ .default_adcmp = 0xe,
+ .default_adsmp = { 0x578 },
+ .adsmp_mask = GENMASK(15, 0),
+ .adint_inten_mask = GENMASK(7, 0),
+ .adivc = true
+};
+
+static const struct rzg2l_adc_hw_params rzg3s_hw_params = {
+ .num_channels = 9,
+ .default_adcmp = 0x1d,
+ .default_adsmp = { 0x7f, 0xff },
+ .adsmp_mask = GENMASK(7, 0),
+ .adint_inten_mask = GENMASK(11, 0),
+};
+
static const struct of_device_id rzg2l_adc_match[] = {
- { .compatible = "renesas,rzg2l-adc",},
+ { .compatible = "renesas,r9a08g045-adc", .data = &rzg3s_hw_params },
+ { .compatible = "renesas,rzg2l-adc", .data = &rzg2l_hw_params },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_adc_match);
-static int __maybe_unused rzg2l_adc_pm_runtime_suspend(struct device *dev)
+static int rzg2l_adc_pm_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct rzg2l_adc *adc = iio_priv(indio_dev);
rzg2l_adc_pwr(adc, false);
- clk_disable_unprepare(adc->adclk);
- clk_disable_unprepare(adc->pclk);
return 0;
}
-static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
+static int rzg2l_adc_pm_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct rzg2l_adc *adc = iio_priv(indio_dev);
+
+ rzg2l_adc_pwr(adc, true);
+
+ return 0;
+}
+
+static int rzg2l_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct rzg2l_adc *adc = iio_priv(indio_dev);
+ struct reset_control_bulk_data resets[] = {
+ { .rstc = adc->presetn },
+ { .rstc = adc->adrstn },
+ };
int ret;
- ret = clk_prepare_enable(adc->pclk);
+ if (pm_runtime_suspended(dev)) {
+ adc->was_rpm_active = false;
+ } else {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ adc->was_rpm_active = true;
+ }
+
+ ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
if (ret)
- return ret;
+ goto rpm_restore;
+
+ return 0;
+
+rpm_restore:
+ if (adc->was_rpm_active)
+ pm_runtime_force_resume(dev);
+
+ return ret;
+}
+
+static int rzg2l_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct rzg2l_adc *adc = iio_priv(indio_dev);
+ struct reset_control_bulk_data resets[] = {
+ { .rstc = adc->adrstn },
+ { .rstc = adc->presetn },
+ };
+ int ret;
- ret = clk_prepare_enable(adc->adclk);
- if (ret) {
- clk_disable_unprepare(adc->pclk);
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(resets), resets);
+ if (ret)
return ret;
+
+ if (adc->was_rpm_active) {
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto resets_restore;
}
- rzg2l_adc_pwr(adc, true);
+ ret = rzg2l_adc_hw_init(dev, adc);
+ if (ret)
+ goto rpm_restore;
return 0;
+
+rpm_restore:
+ if (adc->was_rpm_active) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+resets_restore:
+ reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
+ return ret;
}
static const struct dev_pm_ops rzg2l_adc_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_adc_pm_runtime_suspend,
- rzg2l_adc_pm_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(rzg2l_adc_pm_runtime_suspend, rzg2l_adc_pm_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(rzg2l_adc_suspend, rzg2l_adc_resume)
};
static struct platform_driver rzg2l_adc_driver = {
@@ -586,7 +617,7 @@ static struct platform_driver rzg2l_adc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = rzg2l_adc_match,
- .pm = &rzg2l_adc_pm_ops,
+ .pm = pm_ptr(&rzg2l_adc_pm_ops),
},
};
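
The dev_pm_ops rework follows the current idiom: RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS() replace the SET_* macros, the __maybe_unused annotations go away, and pm_ptr() evaluates to NULL when CONFIG_PM is disabled so the whole ops structure and its callbacks can be discarded by the compiler. The skeleton, with hypothetical names:

    static const struct dev_pm_ops foo_pm_ops = {
            RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
            SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = pm_ptr(&foo_pm_ops),      /* NULL if !CONFIG_PM */
            },
    };
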
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 1f9eca2fb2bf..fe11b0d8eab3 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -691,11 +691,14 @@ static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
return -EINVAL;
}
- ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- " Error parsing 'label' for idx %d\n", ch->channel);
- return ret;
+ if (fwnode_property_present(node, "label")) {
+ /* label is optional */
+ ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'label' for idx %d\n", ch->channel);
+ return ret;
+ }
}
df_ch = &dfsdm->ch_list[ch->channel];
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 6c2cb3dabbbf..1af9be071d8d 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -37,7 +37,7 @@ struct adc081c {
/* Ensure natural alignment of buffer elements */
struct {
u16 channel;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index bf98f9bf942a..da16876c32ae 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -29,7 +29,7 @@ struct adc084s021 {
/* Buffer used to align data */
struct {
__be16 channels[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
/*
* DMA (thus cache coherency maintenance) may require the
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 47fe8e16aee4..4355726b373a 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -448,7 +448,7 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
/* Ensure natural alignment of timestamp */
struct {
s16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
int chan, ret, res;
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index e9d9d4d46d38..de019b3faa48 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -500,12 +500,14 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct ads1119_state *st = iio_priv(indio_dev);
struct {
- unsigned int sample;
- s64 timestamp __aligned(8);
+ s16 sample;
+ aligned_s64 timestamp;
} scan;
unsigned int index;
int ret;
+ memset(&scan, 0, sizeof(scan));
+
if (!iio_trigger_using_own(indio_dev)) {
index = find_first_bit(indio_dev->active_scan_mask,
iio_get_masklength(indio_dev));
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 425b48d8986f..f452f57f11c9 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -183,9 +183,9 @@ static int ads124s_reset(struct iio_dev *indio_dev)
struct ads124s_private *priv = iio_priv(indio_dev);
if (priv->reset_gpio) {
- gpiod_set_value(priv->reset_gpio, 0);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
udelay(200);
- gpiod_set_value(priv->reset_gpio, 1);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
} else {
return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
}
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 36d43495f603..03f762415fa5 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -613,6 +613,8 @@ static int ads1298_init(struct iio_dev *indio_dev)
}
indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
indio_dev->num_channels, suffix);
+ if (!indio_dev->name)
+ return -ENOMEM;
/* Enable internal test signal, double amplitude, double frequency */
ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 31f1f229d97a..91a79ebc4bde 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -102,7 +102,7 @@ struct ads131e08_state {
struct completion completion;
struct {
u8 data[ADS131E08_NUM_DATA_BYTES_MAX];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} tmp_buf;
u8 tx_buf[3] __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 9b1814f1965a..a31658b760a4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -381,7 +381,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
/* Ensure naturally aligned timestamp */
- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8) = { };
int i, j = 0;
iio_for_each_active_channel(indio_dev, i) {
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index 169e3591320b..1e4a78677fe5 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -199,7 +199,7 @@ static irqreturn_t lmp92064_trigger_handler(int irq, void *p)
struct lmp92064_adc_priv *priv = iio_priv(indio_dev);
struct {
u16 values[2];
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
} data;
int ret;
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index b56f2503f14c..49560059f4b7 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -157,7 +157,7 @@ struct tsc2046_adc_priv {
/* Scan data for each channel */
u16 data[TI_TSC2046_MAX_CHAN];
/* Timestamp */
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan_buf;
/*
@@ -812,9 +812,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
spin_lock_init(&priv->state_lock);
priv->state = TSC2046_STATE_SHUTDOWN;
- hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- priv->trig_timer.function = tsc2046_adc_timer;
+ hrtimer_setup(&priv->trig_timer, tsc2046_adc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
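
hrtimer_setup() folds the old two-step initialization into one call, so the callback can no longer be forgotten between hrtimer_init() and first arming. The conversion pattern in general (timer field and callback names hypothetical):

    /* before */
    hrtimer_init(&st->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
    st->timer.function = foo_timer_fn;

    /* after */
    hrtimer_setup(&st->timer, foo_timer_fn, CLOCK_MONOTONIC,
                  HRTIMER_MODE_REL_SOFT);
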
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 4d83c12975c5..513365d42aa5 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -173,10 +173,14 @@ struct vf610_adc {
/* Ensure the timestamp is naturally aligned */
struct {
u16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
+struct vf610_chip_info {
+ u8 num_channels;
+};
+
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
static const u32 vf610_lst_adder[] = { 3, 5, 7, 9, 13, 17, 21, 25 };
@@ -808,14 +812,31 @@ static const struct iio_info vf610_adc_iio_info = {
.attrs = &vf610_attribute_group,
};
+static const struct vf610_chip_info vf610_chip_info = {
+ .num_channels = ARRAY_SIZE(vf610_adc_iio_channels),
+};
+
+static const struct vf610_chip_info imx6sx_chip_info = {
+ .num_channels = 4,
+};
+
static const struct of_device_id vf610_adc_match[] = {
- { .compatible = "fsl,vf610-adc", },
+ { .compatible = "fsl,imx6sx-adc", .data = &imx6sx_chip_info},
+ { .compatible = "fsl,vf610-adc", .data = &vf610_chip_info},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vf610_adc_match);
+static void vf610_adc_action_remove(void *d)
+{
+ struct vf610_adc *info = d;
+
+ regulator_disable(info->vref);
+}
+
static int vf610_adc_probe(struct platform_device *pdev)
{
+ const struct vf610_chip_info *chip_info;
struct device *dev = &pdev->dev;
struct vf610_adc *info;
struct iio_dev *indio_dev;
@@ -823,10 +844,8 @@ static int vf610_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed allocating iio device\n");
- return -ENOMEM;
- }
+ if (!indio_dev)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "Failed allocating iio device\n");
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
@@ -835,6 +854,8 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
+ chip_info = device_get_match_data(dev);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -842,17 +863,12 @@ static int vf610_adc_probe(struct platform_device *pdev)
ret = devm_request_irq(info->dev, irq,
vf610_adc_isr, 0,
dev_name(&pdev->dev), indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed requesting irq, irq = %d\n", irq);
- info->clk = devm_clk_get(&pdev->dev, "adc");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
- PTR_ERR(info->clk));
- return PTR_ERR(info->clk);
- }
+ info->clk = devm_clk_get_enabled(&pdev->dev, "adc");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed getting clock\n");
info->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(info->vref))
@@ -862,6 +878,10 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, vf610_adc_action_remove, info);
+ if (ret)
+ return ret;
+
info->vref_uv = regulator_get_voltage(info->vref);
device_property_read_u32_array(dev, "fsl,adck-max-frequency", info->max_adck_rate, 3);
@@ -877,54 +897,23 @@ static int vf610_adc_probe(struct platform_device *pdev)
indio_dev->info = &vf610_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = vf610_adc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);
-
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not prepare or enable the clock.\n");
- goto error_adc_clk_enable;
- }
+ indio_dev->num_channels = chip_info->num_channels;
vf610_adc_cfg_init(info);
vf610_adc_hw_init(info);
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, &iio_triggered_buffer_setup_ops);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't initialise the buffer\n");
- goto error_iio_device_register;
- }
+ ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev, &iio_pollfunc_store_time,
+ NULL, &iio_triggered_buffer_setup_ops);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't initialise the buffer\n");
mutex_init(&info->lock);
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't register the device.\n");
- goto error_adc_buffer_init;
- }
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't register the device.\n");
return 0;
-
-error_adc_buffer_init:
- iio_triggered_buffer_cleanup(indio_dev);
-error_iio_device_register:
- clk_disable_unprepare(info->clk);
-error_adc_clk_enable:
- regulator_disable(info->vref);
-
- return ret;
-}
-
-static void vf610_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct vf610_adc *info = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(info->vref);
- clk_disable_unprepare(info->clk);
}
static int vf610_adc_suspend(struct device *dev)
@@ -972,7 +961,6 @@ static DEFINE_SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend,
static struct platform_driver vf610_adc_driver = {
.probe = vf610_adc_probe,
- .remove = vf610_adc_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = vf610_adc_match,
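
The vf610 conversion deletes the remove() callback by making every acquisition managed: devm_clk_get_enabled() bundles get, prepare_enable and automatic disable, and the one step with no devm variant (the enabled regulator) gets a devm action, which devres runs in reverse registration order on unbind or on a later probe failure. The action pattern in isolation:

    static void foo_disable_vref(void *data)
    {
            regulator_disable(data);
    }

    /* in probe(), immediately after the regulator is enabled: */
    ret = regulator_enable(info->vref);
    if (ret)
            return ret;

    ret = devm_add_action_or_reset(dev, foo_disable_vref, info->vref);
    if (ret)
            return ret;     /* _or_reset already ran the action */
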
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index d2e1529ad8fd..614e1c4189a9 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -206,7 +206,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: Parent device for the buffer
+ * @dev: DMA channel consumer device
* @channel: DMA channel name, typically "rx".
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
@@ -288,6 +288,21 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+/**
+ * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: DMA channel consumer device
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
@@ -321,7 +336,7 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer)
/**
* devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
- * @dev: Parent device for the buffer
+ * @dev: Device for devm ownership and DMA channel consumer device
* @indio_dev: IIO device to which to attach this buffer.
* @channel: DMA channel name, typically "rx".
* @dir: Direction of buffer (in or out)
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 00ab89b3138b..7d86ed8b02e6 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -2,6 +2,7 @@
#ifndef BME680_H_
#define BME680_H_
+#include <linux/pm.h>
#include <linux/regmap.h>
#define BME680_REG_CHIP_ID 0xD0
@@ -80,6 +81,7 @@
#define BME680_CALIB_RANGE_3_LEN 5
extern const struct regmap_config bme680_regmap_config;
+extern const struct dev_pm_ops bme680_dev_pm_ops;
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index d12270409c8a..9d73fd2cf52c 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -14,7 +14,10 @@
#include <linux/device.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
@@ -111,6 +114,8 @@ enum bme680_scan {
BME680_GAS,
};
+static const char *const bme680_supply_names[] = { "vdd", "vddio" };
+
struct bme680_data {
struct regmap *regmap;
struct bme680_calib bme680;
@@ -817,9 +822,9 @@ static int bme680_read_gas(struct bme680_data *data, int *comp_gas_res)
return 0;
}
-static int bme680_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+static int __bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
int chan_val, ret;
@@ -874,11 +879,11 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_TEMP:
- ret = bme680_read_temp(data, (s16 *)&chan_val);
+ ret = bme680_read_temp(data, &temp_chan_val);
if (ret)
return ret;
- *val = chan_val;
+ *val = temp_chan_val;
return IIO_VAL_INT;
case IIO_PRESSURE:
ret = bme680_read_press(data, &chan_val);
@@ -932,14 +937,33 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
}
}
+static int bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __bme680_read_raw(indio_dev, chan, val, val2, mask);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
static bool bme680_is_valid_oversampling(int rate)
{
return (rate > 0 && rate <= 16 && is_power_of_2(rate));
}
-static int bme680_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
@@ -984,6 +1008,25 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
}
}
+static int bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __bme680_write_raw(indio_dev, chan, val, val2, mask);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
static const char bme680_oversampling_ratio_show[] = "1 2 4 8 16";
static IIO_CONST_ATTR(oversampling_ratio_available,
@@ -1084,6 +1127,29 @@ out:
return IRQ_HANDLED;
}
+static int bme680_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int bme680_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bme680_buffer_setup_ops = {
+ .preenable = bme680_buffer_preenable,
+ .postdisable = bme680_buffer_postdisable,
+};
+
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name)
{
@@ -1114,6 +1180,14 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
data->heater_dur = 150; /* milliseconds */
data->preheat_curr_mA = 0;
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(bme680_supply_names),
+ bme680_supply_names);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get and enable supplies.\n");
+
+ fsleep(BME680_STARTUP_TIME_US);
+
ret = regmap_write(regmap, BME680_REG_SOFT_RESET, BME680_CMD_SOFTRESET);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to reset chip\n");
@@ -1149,15 +1223,47 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
bme680_trigger_handler,
- NULL);
+ &bme680_buffer_setup_ops);
if (ret)
return dev_err_probe(dev, ret,
"iio triggered buffer setup failed\n");
+ /* Enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, BME680_STARTUP_TIME_US);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(bme680_core_probe, "IIO_BME680");
+static int bme680_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ return bme680_set_mode(data, BME680_MODE_SLEEP);
+}
+
+static int bme680_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bme680_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = bme680_chip_config(data);
+ if (ret)
+ return ret;
+
+ return bme680_gas_config(data);
+}
+
+EXPORT_RUNTIME_DEV_PM_OPS(bme680_dev_pm_ops, bme680_runtime_suspend,
+ bme680_runtime_resume, NULL);
+
MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
MODULE_DESCRIPTION("Bosch BME680 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 7a949228b4a6..ac7763f98a6a 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -51,6 +51,7 @@ static struct i2c_driver bme680_i2c_driver = {
.driver = {
.name = "bme680_i2c",
.of_match_table = bme680_of_i2c_match,
+ .pm = pm_ptr(&bme680_dev_pm_ops),
},
.probe = bme680_i2c_probe,
.id_table = bme680_i2c_id,
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index 3916a51ba68e..ecb24ba0ebc9 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -154,6 +154,7 @@ static struct spi_driver bme680_spi_driver = {
.driver = {
.name = "bme680_spi",
.of_match_table = bme680_of_spi_match,
+ .pm = pm_ptr(&bme680_dev_pm_ops),
},
.probe = bme680_spi_probe,
.id_table = bme680_spi_id,
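
The bme680 PM wiring is split across core and transports: the core defines and exports the dev_pm_ops with EXPORT_RUNTIME_DEV_PM_OPS() (discarded when CONFIG_PM is off), the shared header declares them extern, and each bus driver points its .pm at the instance through pm_ptr(). Condensed, with a generic name:

    /* foo.h */
    extern const struct dev_pm_ops foo_dev_pm_ops;

    /* foo_core.c */
    EXPORT_RUNTIME_DEV_PM_OPS(foo_dev_pm_ops, foo_runtime_suspend,
                              foo_runtime_resume, NULL);

    /* foo_i2c.c and foo_spi.c */
    .driver = {
            .name = "foo_i2c",
            .pm = pm_ptr(&foo_dev_pm_ops),
    },
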
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 17d1bc518bf2..451fb65dbe60 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -81,7 +81,7 @@ struct ccs811_data {
/* Ensures correct alignment of timestamp if present */
struct {
s16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 4a89cd5894d9..48d5ad2075b6 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -60,7 +60,7 @@ struct ens160_data {
struct mutex mutex;
struct {
__le16 chans[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan __aligned(IIO_DMA_MINALIGN);
u8 fw_version[3];
__le16 buf;
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index ac3080929f0b..d613c54cb28d 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -594,7 +594,7 @@ static irqreturn_t scd30_trigger_handler(int irq, void *p)
struct scd30_state *state = iio_priv(indio_dev);
struct {
int data[SCD30_MEAS_COUNT];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 52cad54e8572..50e3ac44422b 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -665,7 +665,7 @@ static irqreturn_t scd4x_trigger_handler(int irq, void *p)
struct scd4x_state *state = iio_priv(indio_dev);
struct {
uint16_t data[3];
- int64_t ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index c081b5caa475..97526ba87b93 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -109,8 +109,8 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
{
- const int64_t period_min = ts->min_period * ts->mult;
- const int64_t period_max = ts->max_period * ts->mult;
+ const int64_t period_min = (int64_t)ts->min_period * ts->mult;
+ const int64_t period_max = (int64_t)ts->max_period * ts->mult;
int64_t add_max, sub_max;
int64_t delta, jitter;
int64_t adjust;
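
This one-cast fix matters more than it looks: the operands are 32-bit, so the product used to be computed in 32-bit arithmetic and only then widened, silently wrapping for large periods. Casting one operand first forces a 64-bit multiply. A worked illustration:

    u32 a = 2000000, b = 4000;

    int64_t wrong = a * b;          /* 32-bit multiply: 8e9 wraps mod 2^32 */
    int64_t right = (int64_t)a * b; /* widened first: exactly 8000000000 */
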
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
index caa404edd9d0..78ac689de2fe 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -8,6 +8,8 @@
#include <linux/iio/kfifo_buf.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
#include "ssp_iio_sensor.h"
/**
@@ -70,8 +72,7 @@ EXPORT_SYMBOL_NS(ssp_common_buffer_postdisable, "IIO_SSP_SENSORS");
int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
unsigned int len, int64_t timestamp)
{
- __le32 time;
- int64_t calculated_time = 0;
+ int64_t calculated_time;
struct ssp_sensor_data *spd = iio_priv(indio_dev);
if (indio_dev->scan_bytes == 0)
@@ -82,11 +83,8 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
*/
memcpy(spd->buffer, buf, len);
- if (indio_dev->scan_timestamp) {
- memcpy(&time, &((char *)buf)[len], SSP_TIME_SIZE);
- calculated_time =
- timestamp + (int64_t)le32_to_cpu(time) * 1000000;
- }
+ calculated_time = timestamp +
+ (int64_t)get_unaligned_le32(buf + len) * MEGA;
return iio_push_to_buffers_with_timestamp(indio_dev, spd->buffer,
calculated_time);
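The rewrite folds a bounce buffer into one helper call:
get_unaligned_le32() reads a little-endian 32-bit value from any address
regardless of alignment, and the bare 1000000 becomes MEGA from
linux/units.h. A sketch of the equivalence (SSP_TIME_SIZE in the removed
lines is assumed to be the 4 bytes the helper now reads):

	#include <linux/unaligned.h>
	#include <linux/units.h>

	/* Old shape: copy into a local, then convert. */
	__le32 time;
	memcpy(&time, buf + len, sizeof(time));
	calculated_time = timestamp + (int64_t)le32_to_cpu(time) * 1000000;

	/* New shape: one unaligned load; MEGA == 1000000. */
	calculated_time = timestamp +
			  (int64_t)get_unaligned_le32(buf + len) * MEGA;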
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 5d01ba4edbf3..5690a37267d8 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -80,7 +80,7 @@ config AD5421
depends on SPI
help
Say yes here to build support for Analog Devices AD5421 loop-powered
- digital-to-analog convertors (DAC).
+ digital-to-analog converters (DAC).
To compile this driver as a module, choose M here: the module will be called
ad5421.
@@ -348,6 +348,14 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config BD79703
+ tristate "ROHM Semiconductor BD79703 DAC driver"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for ROHM Semiconductor BD79703 Digital
+ to Analog Converter (DAC).
+
config CIO_DAC
tristate "Measurement Computing CIO-DAC IIO driver"
depends on X86 && (ISA_BUS || PC104)
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 414c152be779..8dd6cce81ed1 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_AD8460) += ad8460.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_AD9739A) += ad9739a.o
obj-$(CONFIG_ADI_AXI_DAC) += adi-axi-dac.o
+obj-$(CONFIG_BD79703) += rohm-bd79703.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_DS4424) += ds4424.o
diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
index 0f495df2e5ce..03e0864f5084 100644
--- a/drivers/iio/dac/ad3552r-common.c
+++ b/drivers/iio/dac/ad3552r-common.c
@@ -22,11 +22,10 @@ EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, "IIO_AD3552R");
const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
- [AD3542R_CH_OUTPUT_RANGE_0__3V] = { 0, 3000 },
[AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
- [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 },
- [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 }
+ [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
+ [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 }
};
EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, "IIO_AD3552R");
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index 216c634f3eaf..8974df625670 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -329,6 +329,12 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
dev_info(st->dev, "Chip ID error. Expected 0x%x, Read 0x%x\n",
AD3552R_ID, id);
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS, 1);
+ if (ret)
+ return ret;
+
ret = st->data->bus_reg_write(st->back,
AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
0, 1);
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index e7206af53af6..7944f5c1d264 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -410,6 +410,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
return ret;
}
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS);
+ if (ret)
+ return ret;
+
return ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION,
diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
index fd5a3dfd1d1c..4b5581039ae9 100644
--- a/drivers/iio/dac/ad3552r.h
+++ b/drivers/iio/dac/ad3552r.h
@@ -131,7 +131,7 @@
#define AD3552R_CH1_ACTIVE BIT(1)
#define AD3552R_MAX_RANGES 5
-#define AD3542R_MAX_RANGES 6
+#define AD3542R_MAX_RANGES 5
#define AD3552R_QUAD_SPI 2
extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
@@ -189,16 +189,14 @@ enum ad3552r_ch_vref_select {
enum ad3542r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__2P5V,
- /* Range from 0 V to 3 V. Requires Rfb1x connection */
- AD3542R_CH_OUTPUT_RANGE_0__3V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_0__10V,
- /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
- AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
+ /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
};
enum ad3552r_ch_output_range {
diff --git a/drivers/iio/dac/ad5624r.h b/drivers/iio/dac/ad5624r.h
index 14a439b06eb6..098fb5a7683d 100644
--- a/drivers/iio/dac/ad5624r.h
+++ b/drivers/iio/dac/ad5624r.h
@@ -41,11 +41,9 @@ struct ad5624r_chip_info {
};
/**
- * struct ad5446_state - driver instance specific data
- * @indio_dev: the industrial I/O device
+ * struct ad5624r_state - driver instance specific data
* @us: spi_device
* @chip_info: chip model specific constants, available modes etc
- * @reg: supply regulator
* @vref_mv: actual reference voltage used
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 39b5dad0d6a5..9c727aa6ea18 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -95,11 +95,6 @@ static int ad5686_spi_probe(struct spi_device *spi)
ad5686_spi_write, ad5686_spi_read);
}
-static void ad5686_spi_remove(struct spi_device *spi)
-{
- ad5686_remove(&spi->dev);
-}
-
static const struct spi_device_id ad5686_spi_id[] = {
{"ad5310r", ID_AD5310R},
{"ad5672r", ID_AD5672R},
@@ -126,7 +121,6 @@ static struct spi_driver ad5686_spi_driver = {
.name = "ad5686",
},
.probe = ad5686_spi_probe,
- .remove = ad5686_spi_remove,
.id_table = ad5686_spi_id,
};
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 8dc578b08784..763af690c444 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -455,39 +455,28 @@ int ad5686_probe(struct device *dev,
struct ad5686_state *st;
struct iio_dev *indio_dev;
unsigned int val, ref_bit_msk;
+ bool has_external_vref;
u8 cmd;
- int ret, i, voltage_uv = 0;
+ int ret, i;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- dev_set_drvdata(dev, indio_dev);
st->dev = dev;
st->write = write;
st->read = read;
- st->reg = devm_regulator_get_optional(dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0)
- goto error_disable_reg;
-
- voltage_uv = ret;
- }
-
st->chip_info = &ad5686_chip_info_tbl[chip_type];
- if (voltage_uv)
- st->vref_mv = voltage_uv / 1000;
- else
- st->vref_mv = st->chip_info->int_vref_mv;
+ ret = devm_regulator_get_enable_read_voltage(dev, "vcc");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
+
+ has_external_vref = ret != -ENODEV;
+ st->vref_mv = has_external_vref ? ret / 1000 : st->chip_info->int_vref_mv;
/* Set all the power down mode for all channels to 1K pulldown */
for (i = 0; i < st->chip_info->num_channels; i++)
@@ -505,12 +494,12 @@ int ad5686_probe(struct device *dev,
case AD5310_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5310_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
case AD5683_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5683_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
case AD5686_REGMAP:
cmd = AD5686_CMD_INTERNAL_REFER_SETUP;
@@ -519,43 +508,22 @@ int ad5686_probe(struct device *dev,
case AD5693_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5693_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
default:
- ret = -EINVAL;
- goto error_disable_reg;
+ return -EINVAL;
}
- val = (voltage_uv | ref_bit_msk);
+ val = (has_external_vref | ref_bit_msk);
ret = st->write(st, cmd, 0, !!val);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- return 0;
+ return ret;
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad5686_probe, "IIO_AD5686");
-void ad5686_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5686_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-}
-EXPORT_SYMBOL_NS_GPL(ad5686_remove, "IIO_AD5686");
-
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 760f852911df..e7d36bae3e59 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -115,10 +115,9 @@ struct ad5686_chip_info {
};
/**
- * struct ad5446_state - driver instance specific data
+ * struct ad5686_state - driver instance specific data
* @spi: spi_device
* @chip_info: chip model specific constants, available modes etc
- * @reg: supply regulator
* @vref_mv: actual reference voltage used
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
@@ -130,7 +129,6 @@ struct ad5686_chip_info {
struct ad5686_state {
struct device *dev;
const struct ad5686_chip_info *chip_info;
- struct regulator *reg;
unsigned short vref_mv;
unsigned int pwr_down_mask;
unsigned int pwr_down_mode;
@@ -157,7 +155,5 @@ int ad5686_probe(struct device *dev,
const char *name, ad5686_write_func write,
ad5686_read_func read);
-void ad5686_remove(struct device *dev);
-
#endif /* __DRIVERS_IIO_DAC_AD5686_H__ */
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index bbcda246c547..0156f32c12c8 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -65,11 +65,6 @@ static int ad5686_i2c_probe(struct i2c_client *i2c)
ad5686_i2c_write, ad5686_i2c_read);
}
-static void ad5686_i2c_remove(struct i2c_client *i2c)
-{
- ad5686_remove(&i2c->dev);
-}
-
static const struct i2c_device_id ad5686_i2c_id[] = {
{"ad5311r", ID_AD5311R},
{"ad5337r", ID_AD5337R},
@@ -116,7 +111,6 @@ static struct i2c_driver ad5686_i2c_driver = {
.of_match_table = ad5686_of_match,
},
.probe = ad5686_i2c_probe,
- .remove = ad5686_i2c_remove,
.id_table = ad5686_i2c_id,
};
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
index 1d4032670482..d3f49b5337d2 100644
--- a/drivers/iio/dac/ad7293.c
+++ b/drivers/iio/dac/ad7293.c
@@ -141,8 +141,6 @@ struct ad7293_state {
/* Protect against concurrent accesses to the device, page selection and data content */
struct mutex lock;
struct gpio_desc *gpio_reset;
- struct regulator *reg_avdd;
- struct regulator *reg_vdrive;
u8 page_select;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
@@ -777,6 +775,15 @@ static int ad7293_reset(struct ad7293_state *st)
static int ad7293_properties_parse(struct ad7293_state *st)
{
struct spi_device *spi = st->spi;
+ int ret;
+
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "failed to enable AVDD\n");
+
+ ret = devm_regulator_get_enable(&spi->dev, "vdrive");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "failed to enable VDRIVE\n");
st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
GPIOD_OUT_HIGH);
@@ -784,24 +791,9 @@ static int ad7293_properties_parse(struct ad7293_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_reset),
"failed to get the reset GPIO\n");
- st->reg_avdd = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->reg_avdd))
- return dev_err_probe(&spi->dev, PTR_ERR(st->reg_avdd),
- "failed to get the AVDD voltage\n");
-
- st->reg_vdrive = devm_regulator_get(&spi->dev, "vdrive");
- if (IS_ERR(st->reg_vdrive))
- return dev_err_probe(&spi->dev, PTR_ERR(st->reg_vdrive),
- "failed to get the VDRIVE voltage\n");
-
return 0;
}
-static void ad7293_reg_disable(void *data)
-{
- regulator_disable(data);
-}
-
static int ad7293_init(struct ad7293_state *st)
{
int ret;
@@ -816,48 +808,6 @@ static int ad7293_init(struct ad7293_state *st)
if (ret)
return ret;
- ret = regulator_enable(st->reg_avdd);
- if (ret) {
- dev_err(&spi->dev,
- "Failed to enable specified AVDD Voltage!\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
- st->reg_avdd);
- if (ret)
- return ret;
-
- ret = regulator_enable(st->reg_vdrive);
- if (ret) {
- dev_err(&spi->dev,
- "Failed to enable specified VDRIVE Voltage!\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
- st->reg_vdrive);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg_avdd);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read avdd regulator: %d\n", ret);
- return ret;
- }
-
- if (ret > 5500000 || ret < 4500000)
- return -EINVAL;
-
- ret = regulator_get_voltage(st->reg_vdrive);
- if (ret < 0) {
- dev_err(&spi->dev,
- "Failed to read vdrive regulator: %d\n", ret);
- return ret;
- }
- if (ret > 5500000 || ret < 1700000)
- return -EINVAL;
-
/* Check Chip ID */
ret = __ad7293_spi_read(st, AD7293_REG_DEVICE_ID, &chip_id);
if (ret)
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 919e8c880697..8a362fae2eca 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -23,8 +23,6 @@ struct ad8801_state {
unsigned char dac_cache[8]; /* Value write on each channel */
unsigned int vrefh_mv;
unsigned int vrefl_mv;
- struct regulator *vrefh_reg;
- struct regulator *vrefl_reg;
__be16 data __aligned(IIO_DMA_MINALIGN);
};
@@ -122,86 +120,34 @@ static int ad8801_probe(struct spi_device *spi)
state->spi = spi;
id = spi_get_device_id(spi);
- state->vrefh_reg = devm_regulator_get(&spi->dev, "vrefh");
- if (IS_ERR(state->vrefh_reg))
- return dev_err_probe(&spi->dev, PTR_ERR(state->vrefh_reg),
- "Vrefh regulator not specified\n");
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vrefh");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to get Vrefh voltage\n");
- ret = regulator_enable(state->vrefh_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vrefh regulator: %d\n",
- ret);
- return ret;
- }
-
- ret = regulator_get_voltage(state->vrefh_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vrefh regulator: %d\n",
- ret);
- goto error_disable_vrefh_reg;
- }
state->vrefh_mv = ret / 1000;
if (id->driver_data == ID_AD8803) {
- state->vrefl_reg = devm_regulator_get(&spi->dev, "vrefl");
- if (IS_ERR(state->vrefl_reg)) {
- ret = dev_err_probe(&spi->dev, PTR_ERR(state->vrefl_reg),
- "Vrefl regulator not specified\n");
- goto error_disable_vrefh_reg;
- }
-
- ret = regulator_enable(state->vrefl_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vrefl regulator: %d\n",
- ret);
- goto error_disable_vrefh_reg;
- }
-
- ret = regulator_get_voltage(state->vrefl_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vrefl regulator: %d\n",
- ret);
- goto error_disable_vrefl_reg;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vrefl");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to get Vrefl voltage\n");
+
state->vrefl_mv = ret / 1000;
- } else {
- state->vrefl_mv = 0;
- state->vrefl_reg = NULL;
}
- spi_set_drvdata(spi, indio_dev);
indio_dev->info = &ad8801_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad8801_channels;
indio_dev->num_channels = ARRAY_SIZE(ad8801_channels);
indio_dev->name = id->name;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n",
- ret);
- goto error_disable_vrefl_reg;
- }
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to register iio device\n");
return 0;
-
-error_disable_vrefl_reg:
- if (state->vrefl_reg)
- regulator_disable(state->vrefl_reg);
-error_disable_vrefh_reg:
- regulator_disable(state->vrefh_reg);
- return ret;
-}
-
-static void ad8801_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad8801_state *state = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (state->vrefl_reg)
- regulator_disable(state->vrefl_reg);
- regulator_disable(state->vrefh_reg);
}
static const struct spi_device_id ad8801_ids[] = {
@@ -216,7 +162,6 @@ static struct spi_driver ad8801_driver = {
.name = "ad8801",
},
.probe = ad8801_probe,
- .remove = ad8801_remove,
.id_table = ad8801_ids,
};
module_spi_driver(ad8801_driver);
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index a4fb2509c950..999348836d87 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -41,13 +41,11 @@ struct ltc2632_chip_info {
* @spi_dev: pointer to the spi_device struct
* @powerdown_cache_mask: used to show current channel powerdown state
* @vref_mv: used reference voltage (internal or external)
- * @vref_reg: regulator for the reference voltage
*/
struct ltc2632_state {
struct spi_device *spi_dev;
unsigned int powerdown_cache_mask;
int vref_mv;
- struct regulator *vref_reg;
};
enum ltc2632_supported_device_ids {
@@ -310,6 +308,7 @@ static int ltc2632_probe(struct spi_device *spi)
struct ltc2632_state *st;
struct iio_dev *indio_dev;
struct ltc2632_chip_info *chip_info;
+ bool has_external_vref;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -318,49 +317,31 @@ static int ltc2632_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
st->spi_dev = spi;
chip_info = (struct ltc2632_chip_info *)
spi_get_device_id(spi)->driver_data;
- st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (PTR_ERR(st->vref_reg) == -ENODEV) {
- /* use internal reference voltage */
- st->vref_reg = NULL;
- st->vref_mv = chip_info->vref_mv;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get vref regulator voltage\n");
- ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER,
- 0, 0, 0);
- if (ret) {
- dev_err(&spi->dev,
- "Set internal reference command failed, %d\n",
- ret);
- return ret;
- }
- } else if (IS_ERR(st->vref_reg)) {
- dev_err(&spi->dev,
- "Error getting voltage reference regulator\n");
- return PTR_ERR(st->vref_reg);
- } else {
- /* use external reference voltage */
- ret = regulator_enable(st->vref_reg);
- if (ret) {
- dev_err(&spi->dev,
- "enable reference regulator failed, %d\n",
- ret);
- return ret;
- }
- st->vref_mv = regulator_get_voltage(st->vref_reg) / 1000;
+ has_external_vref = ret != -ENODEV;
+ st->vref_mv = has_external_vref ? ret / 1000 : chip_info->vref_mv;
+ if (has_external_vref) {
ret = ltc2632_spi_write(spi, LTC2632_CMD_EXTERNAL_REFER,
- 0, 0, 0);
- if (ret) {
- dev_err(&spi->dev,
- "Set external reference command failed, %d\n",
- ret);
- return ret;
- }
+ 0, 0, 0);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Set external reference command failed\n");
+ } else {
+ ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER,
+ 0, 0, 0);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Set internal reference command failed\n");
}
indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name;
@@ -369,18 +350,7 @@ static int ltc2632_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
- return iio_device_register(indio_dev);
-}
-
-static void ltc2632_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ltc2632_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (st->vref_reg)
- regulator_disable(st->vref_reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ltc2632_id[] = {
@@ -472,7 +442,6 @@ static struct spi_driver ltc2632_driver = {
.of_match_table = ltc2632_of_match,
},
.probe = ltc2632_probe,
- .remove = ltc2632_remove,
.id_table = ltc2632_id,
};
module_spi_driver(ltc2632_driver);
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 376dca163c91..bdc857c7fa6d 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -842,7 +842,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st)
return 0;
}
-static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
+static int ltc2688_setup(struct ltc2688_state *st, bool has_external_vref)
{
struct device *dev = &st->spi->dev;
struct gpio_desc *gpio;
@@ -881,18 +881,13 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
if (ret)
return ret;
- if (!vref)
+ if (!has_external_vref)
return 0;
return regmap_set_bits(st->regmap, LTC2688_CMD_CONFIG,
LTC2688_CONFIG_EXT_REF);
}
-static void ltc2688_disable_regulator(void *regulator)
-{
- regulator_disable(regulator);
-}
-
static bool ltc2688_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -947,8 +942,8 @@ static int ltc2688_probe(struct spi_device *spi)
static const char * const regulators[] = { "vcc", "iovcc" };
struct ltc2688_state *st;
struct iio_dev *indio_dev;
- struct regulator *vref_reg;
struct device *dev = &spi->dev;
+ bool has_external_vref;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
@@ -973,34 +968,15 @@ static int ltc2688_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to enable regulators\n");
- vref_reg = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(vref_reg)) {
- if (PTR_ERR(vref_reg) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(vref_reg),
- "Failed to get vref regulator");
-
- vref_reg = NULL;
- /* internal reference */
- st->vref = 4096;
- } else {
- ret = regulator_enable(vref_reg);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to enable vref regulators\n");
-
- ret = devm_add_action_or_reset(dev, ltc2688_disable_regulator,
- vref_reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(vref_reg);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to get vref\n");
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "Failed to get vref regulator voltage\n");
- st->vref = ret / 1000;
- }
+ has_external_vref = ret != -ENODEV;
+ st->vref = has_external_vref ? ret / 1000 : 4096;
- ret = ltc2688_setup(st, vref_reg);
+ ret = ltc2688_setup(st, has_external_vref);
if (ret)
return ret;
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 18ba3eaaad75..b062a18be5e7 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -32,7 +32,6 @@ enum max5821_device_ids {
struct max5821_data {
struct i2c_client *client;
- struct regulator *vref_reg;
unsigned short vref_mv;
bool powerdown[MAX5821_MAX_DAC_CHANNELS];
u8 powerdown_mode[MAX5821_MAX_DAC_CHANNELS];
@@ -295,11 +294,6 @@ static const struct iio_info max5821_info = {
.write_raw = max5821_write_raw,
};
-static void max5821_regulator_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int max5821_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -321,32 +315,10 @@ static int max5821_probe(struct i2c_client *client)
data->powerdown_mode[tmp] = MAX5821_100KOHM_TO_GND;
}
- data->vref_reg = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(data->vref_reg))
- return dev_err_probe(&client->dev, PTR_ERR(data->vref_reg),
- "Failed to get vref regulator\n");
-
- ret = regulator_enable(data->vref_reg);
- if (ret) {
- dev_err(&client->dev,
- "Failed to enable vref regulator: %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&client->dev, max5821_regulator_disable,
- data->vref_reg);
- if (ret) {
- dev_err(&client->dev,
- "Failed to add action to managed regulator: %d\n", ret);
- return ret;
- }
-
- ret = regulator_get_voltage(data->vref_reg);
- if (ret < 0) {
- dev_err(&client->dev,
- "Failed to get voltage on regulator: %d\n", ret);
- return ret;
- }
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vref");
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to get vref regulator voltage\n");
data->vref_mv = ret / 1000;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 25bb1c0490af..1337fb02ccf5 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -379,7 +379,7 @@ static int mcp4725_probe_dt(struct device *dev,
struct mcp4725_platform_data *pdata)
{
/* check if is the vref-supply defined */
- pdata->use_vref = device_property_read_bool(dev, "vref-supply");
+ pdata->use_vref = device_property_present(dev, "vref-supply");
pdata->vref_buffered =
device_property_read_bool(dev, "microchip,vref-buffered");
diff --git a/drivers/iio/dac/rohm-bd79703.c b/drivers/iio/dac/rohm-bd79703.c
new file mode 100644
index 000000000000..e998ab51052e
--- /dev/null
+++ b/drivers/iio/dac/rohm-bd79703.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BD79703 ROHM Digital to Analog converter
+ *
+ * Copyright (c) 2024, ROHM Semiconductor.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#define BD79703_MAX_REGISTER 0xf
+#define BD79703_DAC_BITS 8
+#define BD79703_REG_OUT_ALL GENMASK(2, 0)
+
+/*
+ * The BD79703 uses 12-bit SPI commands. The first four bits (high bits)
+ * define the channel(s) to operate on, and also the mode. The mode can
+ * either set a DAC word only, or set a DAC word and output it. The data
+ * sheet is not very specific on how a previously set DAC word can be taken
+ * into use. Thus, this driver only uses the 'set DAC and output it' mode.
+ *
+ * The BD79703 latches the last 12 bits when the chip-select is toggled, so
+ * we can use 16-bit transfers, which should be widely supported. To simplify
+ * this further, we treat the last 8 bits as a value and the first 8 bits as
+ * an address. This allows us to separate channels/mode by address and treat
+ * the 8-bit register value as the DAC word. The highest 4 bits of the
+ * address are discarded when the transfer is latched.
+ */
+static const struct regmap_config bd79703_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BD79703_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+struct bd79703_data {
+ struct regmap *regmap;
+ int vfs;
+};
+
+static int bd79703_read_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct bd79703_data *data = iio_priv(idev);
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ *val = data->vfs / 1000;
+ *val2 = BD79703_DAC_BITS;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int bd79703_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct bd79703_data *data = iio_priv(idev);
+
+ if (val < 0 || val >= 1 << BD79703_DAC_BITS)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, chan->channel + 1, val);
+}
+
+static const struct iio_info bd79703_info = {
+ .read_raw = bd79703_read_raw,
+ .write_raw = bd79703_write_raw,
+};
+
+#define BD79703_CHAN(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (_chan), \
+}
+
+static const struct iio_chan_spec bd79703_channels[] = {
+ BD79703_CHAN(0),
+ BD79703_CHAN(1),
+ BD79703_CHAN(2),
+ BD79703_CHAN(3),
+ BD79703_CHAN(4),
+ BD79703_CHAN(5),
+};
+
+static int bd79703_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct bd79703_data *data;
+ struct iio_dev *idev;
+ int ret;
+
+ idev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!idev)
+ return -ENOMEM;
+
+ data = iio_priv(idev);
+
+ data->regmap = devm_regmap_init_spi(spi, &bd79703_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable VCC\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vfs");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get Vfs\n");
+
+ data->vfs = ret;
+ idev->channels = bd79703_channels;
+ idev->num_channels = ARRAY_SIZE(bd79703_channels);
+ idev->modes = INDIO_DIRECT_MODE;
+ idev->info = &bd79703_info;
+ idev->name = "bd79703";
+
+ /* Initialize all to output zero */
+ ret = regmap_write(data->regmap, BD79703_REG_OUT_ALL, 0);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, idev);
+}
+
+static const struct spi_device_id bd79703_id[] = {
+ { "bd79703", },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bd79703_id);
+
+static const struct of_device_id bd79703_of_match[] = {
+ { .compatible = "rohm,bd79703", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd79703_of_match);
+
+static struct spi_driver bd79703_driver = {
+ .driver = {
+ .name = "bd79703",
+ .of_match_table = bd79703_of_match,
+ },
+ .probe = bd79703_probe,
+ .id_table = bd79703_id,
+};
+module_spi_driver(bd79703_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("ROHM BD79703 DAC driver");
+MODULE_LICENSE("GPL");
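Putting the comment at the top of the new driver together with
bd79703_write_raw(), a write to channel N lands in "register" N + 1 and
regmap frames it as two bytes on the wire; the device keeps only the last
12 bits. A worked example (the wire values are inferred from that comment,
not checked against the data sheet):

	/* Set channel 0 to mid-scale: chan->channel + 1 == 0x01. */
	regmap_write(data->regmap, 0x01, 0x80);

	/* The SPI frame is 0x0180. The top 4 bits are dropped when
	 * chip-select toggles, so the DAC latches the 12-bit command
	 * 0x180: channel/mode nibble 0x1, DAC word 0x80. */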
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 4ca3f1aaff99..288880346707 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -48,7 +48,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
int i = 0, j;
u16 *data;
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
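kmalloc() left the padding between enabled channels and the trailing
timestamp uninitialized, yet the full scan_bytes buffer is pushed to
userspace, so the slack could leak stale kernel memory; zero-allocation
closes the leak. The kmx61 hunk below applies the same idea to an on-stack
scan, as this sketch shows side by side:

	/* Heap scan: samples, padding and timestamp slot all start zeroed. */
	u16 *data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);

	/* Stack scan: the empty initializer zeroes the unwritten tail too. */
	s16 buffer[8] = { };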
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index 848baa6e3bbf..d85b7d3de866 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st)
struct spi_device *spi = st->spi;
unsigned int chip_id;
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SOFTRESET_N_MSK |
- ADMV8818_SOFTRESET_MSK,
- FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
return ret;
}
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SDOACTIVE_N_MSK |
- ADMV8818_SDOACTIVE_MSK,
- FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
return ret;
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index 600e9725da78..223fc181109c 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -75,7 +75,7 @@ struct adxrs290_state {
/* Ensure correct alignment of timestamp when present */
struct {
s16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
};
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index ba877d067afb..deb3c6459dde 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -99,7 +99,7 @@ struct bmg160_data {
/* Ensure naturally aligned timestamp */
struct {
s16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u32 dps_range;
int ev_enable_state;
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 0391c78c2f18..754c8a564ba4 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -730,14 +730,21 @@ static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
int ret;
mutex_lock(&data->lock);
+ ret = fxas21002c_pm_get(data);
+ if (ret < 0)
+ goto out_unlock;
+
ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB,
data->buffer, CHANNEL_SCAN_MAX * sizeof(s16));
if (ret < 0)
- goto out_unlock;
+ goto out_pm_put;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
data->timestamp);
+out_pm_put:
+ fxas21002c_pm_put(data);
+
out_unlock:
mutex_unlock(&data->lock);
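The trigger handler could previously read the FIFO registers while the
device was runtime-suspended. Assuming fxas21002c_pm_get()/_pm_put() are
thin wrappers over the runtime-PM core (they are defined elsewhere in this
driver, not in this hunk), the pattern being enforced is the usual one:

	ret = pm_runtime_resume_and_get(dev);	/* wake the device */
	if (ret < 0)
		return ret;

	/* ... hardware access is now safe ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);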
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 4cfa0d439560..a624400a239c 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -52,7 +52,7 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
*/
struct {
__be16 buf[ITG3200_SCAN_ELEMENTS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret = itg3200_read_all_channels(st->i2c, scan.buf);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index b6883e8b2a8b..d66224bed8e3 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -474,7 +474,7 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
int ret;
struct {
__be16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
s64 timestamp;
unsigned int datums_from_fifo = 0;
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 6b0aa3a3f025..2323974b805c 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -35,7 +35,7 @@ struct am2315_data {
/* Ensure timestamp is naturally aligned */
struct {
s16 chans[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 9b355380c9bf..a303f704b7ed 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -44,7 +44,7 @@ struct hdc100x_data {
/* Ensure natural alignment of timestamp */
struct {
__be16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index 721359e226cb..0215f11fc35e 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -40,7 +40,7 @@ struct hts221_hw {
/* Ensure natural alignment of timestamp */
struct {
__le16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 0a5d13d2240e..727e0a11eac1 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -878,11 +878,32 @@ static const struct iio_chan_spec adis16545_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(17),
};
+static const struct iio_chan_spec adis16489_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_PRESSURE_CHANNEL(),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Z),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Z),
+};
+
enum adis16480_variant {
ADIS16375,
ADIS16480,
ADIS16485,
+ ADIS16486,
+ ADIS16487,
ADIS16488,
+ ADIS16489,
ADIS16490,
ADIS16495_1,
ADIS16495_2,
@@ -1038,6 +1059,38 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0, 0),
},
+ [ADIS16486] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16486, &adis16480_timeouts, 0, 0),
+ },
+ [ADIS16487] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 5,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 50,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16487, &adis16485_timeouts, 0, 0),
+ },
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
@@ -1054,6 +1107,22 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0, 0),
},
+ [ADIS16489] = {
+ .channels = adis16489_channels,
+ .num_channels = ARRAY_SIZE(adis16489_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16489, &adis16480_timeouts, 0, 0),
+ },
[ADIS16490] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
@@ -1741,7 +1810,10 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16375", ADIS16375 },
{ "adis16480", ADIS16480 },
{ "adis16485", ADIS16485 },
+ { "adis16486", ADIS16486 },
+ { "adis16487", ADIS16487 },
{ "adis16488", ADIS16488 },
+ { "adis16489", ADIS16489 },
{ "adis16490", ADIS16490 },
{ "adis16495-1", ADIS16495_1 },
{ "adis16495-2", ADIS16495_2 },
@@ -1763,7 +1835,10 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16375" },
{ .compatible = "adi,adis16480" },
{ .compatible = "adi,adis16485" },
+ { .compatible = "adi,adis16486" },
+ { .compatible = "adi,adis16487" },
{ .compatible = "adi,adis16488" },
+ { .compatible = "adi,adis16489" },
{ .compatible = "adi,adis16490" },
{ .compatible = "adi,adis16495-1" },
{ .compatible = "adi,adis16495-2" },
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index f7d7f4442e65..7f386c5e58b4 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -174,7 +174,7 @@ struct bmi323_data {
__le16 fifo_buff[BMI323_FIFO_FULL_IN_WORDS] __aligned(IIO_DMA_MINALIGN);
struct {
__le16 channels[BMI323_CHAN_MAX];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
__le16 steps_count[BMI323_STEP_LEN];
};
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 0728d38260a1..597c402b98de 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -207,7 +207,7 @@ struct bno055_priv {
bool sw_reset;
struct {
__le16 chans[BNO055_SCAN_CH_COUNT];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} buf;
struct dentry *debugfs;
};
@@ -1193,7 +1193,7 @@ static ssize_t serialnumber_show(struct device *dev,
}
static ssize_t calibration_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct bno055_priv *priv = iio_priv(dev_to_iio_dev(kobj_to_dev(kobj)));
@@ -1348,16 +1348,16 @@ static struct attribute *bno055_attrs[] = {
NULL
};
-static BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
+static const BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
-static struct bin_attribute *bno055_bin_attrs[] = {
+static const struct bin_attribute *const bno055_bin_attrs[] = {
&bin_attr_calibration_data,
NULL
};
static const struct attribute_group bno055_attrs_group = {
.attrs = bno055_attrs,
- .bin_attrs = bno055_bin_attrs,
+ .bin_attrs_new = bno055_bin_attrs,
};
static const struct iio_info bno055_info = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 3a07e43e4cf1..18787a43477b 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -403,6 +403,7 @@ struct inv_icm42600_sensor_state {
typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *);
extern const struct regmap_config inv_icm42600_regmap_config;
+extern const struct regmap_config inv_icm42600_spi_regmap_config;
extern const struct dev_pm_ops inv_icm42600_pm_ops;
const struct iio_mount_matrix *
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 7968aa27f9fd..388520ec60b5 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -178,7 +178,7 @@ static const struct iio_chan_spec inv_icm42600_accel_channels[] = {
struct inv_icm42600_accel_buffer {
struct inv_icm42600_fifo_sensor_data accel;
int16_t temp;
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
};
#define INV_ICM42600_SCAN_MASK_ACCEL_3AXIS \
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 561d245c1d64..ef9875d3b79d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -87,6 +87,21 @@ const struct regmap_config inv_icm42600_regmap_config = {
};
EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, "IIO_ICM42600");
+/* define specific regmap for SPI not supporting burst write */
+const struct regmap_config inv_icm42600_spi_regmap_config = {
+ .name = "inv_icm42600",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x4FFF,
+ .ranges = inv_icm42600_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges),
+ .volatile_table = inv_icm42600_regmap_volatile_accesses,
+ .rd_noinc_table = inv_icm42600_regmap_rd_noinc_accesses,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_write = true,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, "IIO_ICM42600");
+
struct inv_icm42600_hw {
uint8_t whoami;
const char *name;
@@ -814,6 +829,8 @@ out_unlock:
static int inv_icm42600_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
int ret;
mutex_lock(&st->lock);
@@ -834,9 +851,12 @@ static int inv_icm42600_resume(struct device *dev)
goto out_unlock;
/* restore FIFO data streaming */
- if (st->fifo.on)
+ if (st->fifo.on) {
+ inv_sensors_timestamp_reset(&gyro_st->ts);
+ inv_sensors_timestamp_reset(&accel_st->ts);
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
+ }
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index c6bb68bf5e14..591ed78a55bb 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -78,7 +78,7 @@ static const struct iio_chan_spec inv_icm42600_gyro_channels[] = {
struct inv_icm42600_gyro_buffer {
struct inv_icm42600_fifo_sensor_data gyro;
int16_t temp;
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
};
#define INV_ICM42600_SCAN_MASK_GYRO_3AXIS \
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index c55d8e672183..2bd2c4c8e50c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi)
return -EINVAL;
chip = (uintptr_t)match;
- regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
+ /* use SPI specific regmap */
+ regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
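The SPI-specific config differs from the shared one only in
.use_single_write: with the flag set, regmap turns any multi-register
write into a sequence of single register writes, which is what a part
whose SPI interface lacks burst-write support needs. A sketch of the
effect (EXAMPLE_REG and the payload are made up):

	#define EXAMPLE_REG	0x10
	u8 buf[3] = { 0x01, 0x02, 0x03 };

	/* With .use_single_write = true, this bulk write ... */
	ret = regmap_bulk_write(map, EXAMPLE_REG, buf, sizeof(buf));

	/* ... goes on the wire as if we had written: */
	regmap_write(map, EXAMPLE_REG + 0, buf[0]);
	regmap_write(map, EXAMPLE_REG + 1, buf[1]);
	regmap_write(map, EXAMPLE_REG + 2, buf[2]);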
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 844b611b825a..5bcd5e797046 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -277,6 +277,14 @@ static const struct inv_mpu6050_hw hw_info[] = {
.temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
.startup_time = {INV_ICM20690_GYRO_STARTUP_TIME, INV_ICM20690_ACCEL_STARTUP_TIME},
},
+ { .whoami = INV_IAM20380_WHOAMI_VALUE,
+ .name = "IAM20380",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
+ },
{
.whoami = INV_IAM20680_WHOAMI_VALUE,
.name = "IAM20680",
@@ -1519,6 +1527,14 @@ static const struct iio_chan_spec inv_mpu6050_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
+static const struct iio_chan_spec inv_iam20380_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+};
+
static const struct iio_chan_spec inv_mpu6500_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
@@ -1623,6 +1639,10 @@ static const struct iio_chan_spec inv_mpu9250_channels[] = {
| BIT(INV_MPU9X50_SCAN_MAGN_Y) \
| BIT(INV_MPU9X50_SCAN_MAGN_Z))
+static const unsigned long inv_iam20380_scan_masks[] = {
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+};
+
static const unsigned long inv_mpu9x50_scan_masks[] = {
/* 3-axis accel */
INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
@@ -2026,6 +2046,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
break;
+ case INV_IAM20380:
+ indio_dev->channels = inv_iam20380_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_iam20380_channels);
+ indio_dev->available_scan_masks = inv_iam20380_scan_masks;
+ break;
case INV_ICM20600:
case INV_ICM20602:
indio_dev->channels = inv_mpu6500_channels;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 307a06f4df2e..91d77f94d204 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -34,6 +34,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
case INV_ICM20689:
case INV_ICM20600:
case INV_ICM20602:
+ case INV_IAM20380:
case INV_IAM20680:
/* no i2c auxiliary bus on the chip */
return false;
@@ -187,6 +188,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
+ {"iam20380", INV_IAM20380},
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
@@ -253,6 +255,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20690
},
{
+ .compatible = "invensense,iam20380",
+ .data = (void *)INV_IAM20380
+ },
+ {
.compatible = "invensense,iam20680",
.data = (void *)INV_IAM20680
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index a6862cf42639..211901f8b8eb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -84,6 +84,7 @@ enum inv_devices {
INV_ICM20600,
INV_ICM20602,
INV_ICM20690,
+ INV_IAM20380,
INV_IAM20680,
INV_IAM20680HP,
INV_IAM20680HT,
@@ -425,6 +426,7 @@ struct inv_mpu6050_state {
#define INV_ICM20600_WHOAMI_VALUE 0x11
#define INV_ICM20602_WHOAMI_VALUE 0x12
#define INV_ICM20690_WHOAMI_VALUE 0x20
+#define INV_IAM20380_WHOAMI_VALUE 0xB5
#define INV_IAM20680_WHOAMI_VALUE 0xA9
#define INV_IAM20680HP_WHOAMI_VALUE 0xF8
#define INV_IAM20680HT_WHOAMI_VALUE 0xFA
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index ab415874d699..20de6eb5cd35 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -79,6 +79,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
+ {"iam20380", INV_IAM20380},
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
@@ -141,6 +142,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20690
},
{
+ .compatible = "invensense,iam20380",
+ .data = (void *)INV_IAM20380
+ },
+ {
.compatible = "invensense,iam20680",
.data = (void *)INV_IAM20680
},
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 324c38764656..e19c5d3137c6 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1193,7 +1193,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
struct kmx61_data *data = kmx61_get_data(indio_dev);
int bit, ret, i = 0;
u8 base;
- s16 buffer[8];
+ s16 buffer[8] = { };
if (indio_dev == data->acc_indio_dev)
base = KMX61_ACC_XOUT_L;
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 89d687ec3099..3cabec3b152d 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -6,9 +6,6 @@ config IIO_ST_LSM6DSX
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
- select IIO_ST_LSM6DSX_I2C if (I2C)
- select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
- select IIO_ST_LSM6DSX_I3C if (I3C)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor.
@@ -42,16 +39,19 @@ config IIO_ST_LSM6DSX
will be called st_lsm6dsx.
config IIO_ST_LSM6DSX_I2C
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors I2C Interface"
+ depends on I2C && IIO_ST_LSM6DSX
+ default I2C && IIO_ST_LSM6DSX
select REGMAP_I2C
config IIO_ST_LSM6DSX_SPI
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors SPI Interface"
+ depends on SPI_MASTER && IIO_ST_LSM6DSX
+ default SPI_MASTER && IIO_ST_LSM6DSX
select REGMAP_SPI
config IIO_ST_LSM6DSX_I3C
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors I3C Interface"
+ depends on I3C && IIO_ST_LSM6DSX
+ default I3C && IIO_ST_LSM6DSX
select REGMAP_I3C
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
index 6952d901316f..f968f32890d1 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
@@ -9,7 +9,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/i3c/device.h>
-#include <linux/i3c/master.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -30,15 +29,16 @@ static int st_lsm6dsx_i3c_probe(struct i3c_device *i3cdev)
};
const struct i3c_device_id *id = i3c_device_match_id(i3cdev,
st_lsm6dsx_i3c_ids);
+ struct device *dev = i3cdev_to_dev(i3cdev);
struct regmap *regmap;
regmap = devm_regmap_init_i3c(i3cdev, &st_lsm6dsx_i3c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&i3cdev->dev, "Failed to register i3c regmap %ld\n", PTR_ERR(regmap));
+ dev_err(dev, "Failed to register i3c regmap %ld\n", PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- return st_lsm6dsx_probe(&i3cdev->dev, 0, (uintptr_t)id->data, regmap);
+ return st_lsm6dsx_probe(dev, 0, (uintptr_t)id->data, regmap);
}
static struct i3c_driver st_lsm6dsx_driver = {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2708f87df719..a80f7cc25a27 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1137,7 +1137,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
int ret;
indio_dev->active_scan_mask = config->scan_mask;
- indio_dev->scan_timestamp = config->scan_timestamp;
+ ACCESS_PRIVATE(indio_dev, scan_timestamp) = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
iio_dev_opaque->currentmode = config->mode;
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 3b5a99815062..d70ebe3bf774 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -915,6 +915,41 @@ int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_sel_for_scale_using_time, "IIO_GTS_HELPER");
+/**
+ * iio_gts_find_gain_time_sel_for_scale - Fetch gain and time selectors for scale
+ * @gts: Gain time scale descriptor
+ * @scale_int: Integral part of the scale (typically val1)
+ * @scale_nano: Fractional part of the scale (nano or ppb)
+ * @gain_sel: Pointer to value where gain selector is stored.
+ * @time_sel: Pointer to value where time selector is stored.
+ *
+ * Wrapper around iio_gts_find_gain_sel_for_scale_using_time() to fetch the
+ * gain and time selectors for a given scale.
+ *
+ * Return: 0 on success and -EINVAL on error.
+ */
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel)
+{
+ int i, ret;
+
+ for (i = 0; i < gts->num_itime; i++) {
+ *time_sel = gts->itime_table[i].sel;
+ ret = iio_gts_find_gain_sel_for_scale_using_time(gts, *time_sel,
+ scale_int,
+ scale_nano,
+ gain_sel);
+ if (ret)
+ continue;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_time_sel_for_scale, "IIO_GTS_HELPER");
+
static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
{
const struct iio_itime_sel_mul *itime;
@@ -1086,6 +1121,48 @@ int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_by_old_gain_time, "IIO_GTS_HELPER");
+/**
+ * iio_gts_find_new_gain_by_gain_time_min - compensate for time change
+ * @gts: Gain time scale descriptor
+ * @old_gain: Previously set gain
+ * @old_time: Selector corresponding to the previously set time
+ * @new_time: Selector corresponding to the new time to be set
+ * @new_gain: Pointer to value where new gain is to be written
+ * @in_range: Indicate if the @new_gain was in the range of
+ * supported gains.
+ *
+ * Wrapper around iio_gts_find_new_gain_by_old_gain_time() that tries to
+ * set an optimal value if no exact match was found, defaulting to the
+ * minimum gain to avoid saturations if the optimal value is not in the
+ * range of supported gains.
+ *
+ * Return: 0 on success and a negative value if no gain was found.
+ */
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range)
+{
+ int ret;
+
+ *in_range = true;
+ ret = iio_gts_find_new_gain_by_old_gain_time(gts, old_gain, old_time,
+ new_time, new_gain);
+ if (*new_gain < 0)
+ return -EINVAL;
+
+ if (ret) {
+ *new_gain = iio_find_closest_gain_low(gts, *new_gain, in_range);
+ if (*new_gain < 0) {
+ *new_gain = iio_gts_get_min_gain(gts);
+ if (*new_gain < 0)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_by_gain_time_min, "IIO_GTS_HELPER");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
MODULE_DESCRIPTION("IIO light sensor gain-time-scale helpers");
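A hypothetical caller for the two new helpers (example_state, its gts
member and the register writes are assumptions; the helpers' contracts are
as documented above):

	#include <linux/iio/iio-gts-helper.h>

	struct example_state {
		struct iio_gts gts;
	};

	static int example_set_scale(struct example_state *st, int val,
				     int val2)
	{
		int gain_sel, time_sel, ret;

		ret = iio_gts_find_gain_time_sel_for_scale(&st->gts, val,
							   val2, &gain_sel,
							   &time_sel);
		if (ret)
			return ret;

		/* ... program gain_sel and time_sel into the hardware ... */
		return 0;
	}

	static int example_set_time(struct example_state *st, int old_gain,
				    int old_sel, int new_sel)
	{
		bool in_range;
		int new_gain, ret;

		ret = iio_gts_find_new_gain_by_gain_time_min(&st->gts,
							     old_gain, old_sel,
							     new_sel, &new_gain,
							     &in_range);
		if (ret)
			return ret;

		/* ... apply new_gain; maybe warn when !in_range ... */
		return 0;
	}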
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 136b225b6bc8..c174ebb7d5e6 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
+#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -500,7 +501,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
return_ptr(chans);
error_free_chans:
- for (i = 0; i < nummaps; i++)
+ for (i = 0; i < mapind; i++)
iio_device_put(chans[i].indio_dev);
return ERR_PTR(ret);
}
@@ -989,6 +990,11 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
{
const struct iio_chan_spec_ext_info *ext_info;
+ if (!buf || offset_in_page(buf)) {
+ pr_err("iio: invalid ext_info read buffer\n");
+ return -EINVAL;
+ }
+
ext_info = iio_lookup_ext_info(chan, attr);
if (!ext_info)
return -EINVAL;
@@ -1014,6 +1020,11 @@ EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
+ if (!buf || offset_in_page(buf)) {
+ pr_err("iio: invalid label read buffer\n");
+ return -EINVAL;
+ }
+
return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 29ffa8491927..e34e551eef3e 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -329,22 +329,6 @@ config JSA1212
To compile this driver as a module, choose M here:
the module will be called jsa1212.
-config ROHM_BU27008
- tristate "ROHM BU27008 color (RGB+C/IR) sensor"
- depends on I2C
- select REGMAP_I2C
- select IIO_GTS_HELPER
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Enable support for the ROHM BU27008 color sensor.
- The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
- blue, clear and IR) with four configurable channels. Red and
- green being always available and two out of the rest three
- (blue, clear, IR) can be selected to be simultaneously measured.
- Typical application is adjusting LCD backlight of TVs,
- mobile phones and tablet PCs.
-
config ROHM_BU27034
tristate "ROHM BU27034 ambient light sensor"
depends on I2C
@@ -491,6 +475,19 @@ config OPT4001
If built as a dynamically linked module, it will be called
opt4001.
+config OPT4060
+ tristate "Texas Instruments OPT4060 RGBW Color Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say Y or M here, you get support for Texas Instruments
+ OPT4060 RGBW Color Sensor.
+
+ If built as a dynamically linked module, it will be called
+ opt4060.
+
config PA12203001
tristate "TXC PA12203001 light and proximity sensor"
depends on I2C
@@ -672,6 +669,7 @@ config VCNL4035
config VEML3235
tristate "VEML3235 ambient light sensor"
select REGMAP_I2C
+ select IIO_GTS_HELPER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML3235
@@ -683,6 +681,8 @@ config VEML3235
config VEML6030
tristate "VEML6030 and VEML6035 ambient light sensors"
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML6030
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index f14a37442712..11a4041b918a 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -42,8 +42,8 @@ obj-$(CONFIG_MAX44009) += max44009.o
obj-$(CONFIG_NOA1305) += noa1305.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_OPT4001) += opt4001.o
+obj-$(CONFIG_OPT4060) += opt4060.o
obj-$(CONFIG_PA12203001) += pa12203001.o
-obj-$(CONFIG_ROHM_BU27008) += rohm-bu27008.o
obj-$(CONFIG_ROHM_BU27034) += rohm-bu27034.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SI1133) += si1133.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index c1b43053fbc7..cf96e3dd8bc6 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -56,7 +56,7 @@ struct adjd_s311_data {
struct i2c_client *client;
struct {
s16 chans[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index 69a0d609cffc..5ed7e17f49e7 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = {
{
.part_id = 0xB1,
.max_scale_int = 16,
- .max_scale_nano = 3264320,
+ .max_scale_nano = 326432000,
}, {
.part_id = 0xB3,
.max_scale_int = 14,
- .max_scale_nano = 9712000,
+ .max_scale_nano = 97120000,
},
};
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index be0068081ebb..37fffce35dd1 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -177,6 +177,12 @@ struct as73211_data {
BIT(AS73211_SCAN_INDEX_TEMP) | \
AS73211_SCAN_MASK_COLOR)
+static const unsigned long as73211_scan_masks[] = {
+ AS73211_SCAN_MASK_COLOR,
+ AS73211_SCAN_MASK_ALL,
+ 0
+};
+
static const struct iio_chan_spec as73211_channels[] = {
{
.type = IIO_TEMP,
@@ -636,7 +642,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
struct as73211_data *data = iio_priv(indio_dev);
struct {
__le16 chan[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int data_result, ret;
@@ -672,9 +678,12 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
/* AS73211 starts reading at address 2 */
ret = i2c_master_recv(data->client,
- (char *)&scan.chan[1], 3 * sizeof(scan.chan[1]));
+ (char *)&scan.chan[0], 3 * sizeof(scan.chan[0]));
if (ret < 0)
goto done;
+
+ /* Avoid pushing uninitialized data */
+ scan.chan[3] = 0;
}
if (data_result) {
@@ -682,9 +691,15 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
* Saturate all channels (in case of overflows). Temperature channel
* is not affected by overflows.
*/
- scan.chan[1] = cpu_to_le16(U16_MAX);
- scan.chan[2] = cpu_to_le16(U16_MAX);
- scan.chan[3] = cpu_to_le16(U16_MAX);
+ if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) {
+ scan.chan[1] = cpu_to_le16(U16_MAX);
+ scan.chan[2] = cpu_to_le16(U16_MAX);
+ scan.chan[3] = cpu_to_le16(U16_MAX);
+ } else {
+ scan.chan[0] = cpu_to_le16(U16_MAX);
+ scan.chan[1] = cpu_to_le16(U16_MAX);
+ scan.chan[2] = cpu_to_le16(U16_MAX);
+ }
}
iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
@@ -758,6 +773,7 @@ static int as73211_probe(struct i2c_client *client)
indio_dev->channels = data->spec_dev->channels;
indio_dev->num_channels = data->spec_dev->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = as73211_scan_masks;
ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
if (ret < 0)
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 56e32689bb97..3b4056be54a0 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -739,13 +739,15 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
struct bh1745_data *data = iio_priv(indio_dev);
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u16 value;
int ret;
int i;
int j = 0;
+ memset(&scan, 0, sizeof(scan));
+
iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
&value, 2);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index b6288dd25bbf..5b00ad2a014e 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -89,6 +89,15 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
chip->als_info = &cm3232_als_info_default;
+ /* Disable and reset device */
+ chip->regs_cmd = CM3232_CMD_ALS_DISABLE | CM3232_CMD_ALS_RESET;
+ ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
+ chip->regs_cmd);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "Error writing reg_cmd\n");
+ return ret;
+ }
+
/* Identify device */
ret = i2c_smbus_read_word_data(client, CM3232_REG_ADDR_ID);
if (ret < 0) {
@@ -99,15 +108,6 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
if ((ret & 0xFF) != chip->als_info->hw_id)
return -ENODEV;
- /* Disable and reset device */
- chip->regs_cmd = CM3232_CMD_ALS_DISABLE | CM3232_CMD_ALS_RESET;
- ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
- chip->regs_cmd);
- if (ret < 0) {
- dev_err(&chip->client->dev, "Error writing reg_cmd\n");
- return ret;
- }
-
/* Register default value */
chip->regs_cmd = chip->als_info->regs_cmd_default;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index c83acbd78275..76b76d12b388 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -49,9 +49,10 @@ static const u32 prox_sensitivity_addresses[] = {
#define PROX_CHANNEL(_is_proximity, _channel) \
{\
.type = _is_proximity ? IIO_PROXIMITY : IIO_ATTENTION,\
- .info_mask_separate = _is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
- BIT(IIO_CHAN_INFO_PROCESSED),\
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |\
+ .info_mask_separate = \
+ (_is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
+ BIT(IIO_CHAN_INFO_PROCESSED)) |\
+ BIT(IIO_CHAN_INFO_OFFSET) |\
BIT(IIO_CHAN_INFO_SCALE) |\
BIT(IIO_CHAN_INFO_SAMP_FREQ) |\
BIT(IIO_CHAN_INFO_HYSTERESIS),\
@@ -94,6 +95,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
if (chan->scan_index >= prox_state->num_channels)
return -EINVAL;
address = prox_state->channel2usage[chan->scan_index];
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index b176bf4c884b..326dc39e7929 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -54,7 +54,7 @@ struct isl29125_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 604f5f900a2e..669da0840eba 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1280,7 +1280,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
struct ltr501_data *data = iio_priv(indio_dev);
struct {
u16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
__le16 als_buf[2];
u8 mask = 0;
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index b935976871a6..e8b767680133 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -78,7 +78,7 @@ struct max44000_data {
/* Ensure naturally aligned timestamp */
struct {
u16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/opt4060.c b/drivers/iio/light/opt4060.c
new file mode 100644
index 000000000000..ab55f8d2ea0c
--- /dev/null
+++ b/drivers/iio/light/opt4060.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Axis Communications AB
+ *
+ * Datasheet: https://www.ti.com/lit/gpn/opt4060
+ *
+ * Device driver for the Texas Instruments OPT4060 RGBW Color Sensor.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/math64.h>
+#include <linux/units.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* OPT4060 register set */
+#define OPT4060_RED_MSB 0x00
+#define OPT4060_RED_LSB 0x01
+#define OPT4060_GREEN_MSB 0x02
+#define OPT4060_GREEN_LSB 0x03
+#define OPT4060_BLUE_MSB 0x04
+#define OPT4060_BLUE_LSB 0x05
+#define OPT4060_CLEAR_MSB 0x06
+#define OPT4060_CLEAR_LSB 0x07
+#define OPT4060_THRESHOLD_LOW 0x08
+#define OPT4060_THRESHOLD_HIGH 0x09
+#define OPT4060_CTRL 0x0a
+#define OPT4060_INT_CTRL 0x0b
+#define OPT4060_RES_CTRL 0x0c
+#define OPT4060_DEVICE_ID 0x11
+
+/* OPT4060 register mask */
+#define OPT4060_EXPONENT_MASK GENMASK(15, 12)
+#define OPT4060_MSB_MASK GENMASK(11, 0)
+#define OPT4060_LSB_MASK GENMASK(15, 8)
+#define OPT4060_COUNTER_MASK GENMASK(7, 4)
+#define OPT4060_CRC_MASK GENMASK(3, 0)
+
+/* OPT4060 device id mask */
+#define OPT4060_DEVICE_ID_MASK GENMASK(11, 0)
+
+/* OPT4060 control register masks */
+#define OPT4060_CTRL_QWAKE_MASK BIT(15)
+#define OPT4060_CTRL_RANGE_MASK GENMASK(13, 10)
+#define OPT4060_CTRL_CONV_TIME_MASK GENMASK(9, 6)
+#define OPT4060_CTRL_OPER_MODE_MASK GENMASK(5, 4)
+#define OPT4060_CTRL_LATCH_MASK BIT(3)
+#define OPT4060_CTRL_INT_POL_MASK BIT(2)
+#define OPT4060_CTRL_FAULT_COUNT_MASK GENMASK(1, 0)
+
+/* OPT4060 interrupt control register masks */
+#define OPT4060_INT_CTRL_THRESH_SEL GENMASK(6, 5)
+#define OPT4060_INT_CTRL_OUTPUT BIT(4)
+#define OPT4060_INT_CTRL_INT_CFG GENMASK(3, 2)
+#define OPT4060_INT_CTRL_THRESHOLD 0x0
+#define OPT4060_INT_CTRL_NEXT_CH 0x1
+#define OPT4060_INT_CTRL_ALL_CH 0x3
+
+/* OPT4060 result control register masks */
+#define OPT4060_RES_CTRL_OVERLOAD BIT(3)
+#define OPT4060_RES_CTRL_CONV_READY BIT(2)
+#define OPT4060_RES_CTRL_FLAG_H BIT(1)
+#define OPT4060_RES_CTRL_FLAG_L BIT(0)
+
+/* OPT4060 constants */
+#define OPT4060_DEVICE_ID_VAL 0x821
+
+/* OPT4060 operating modes */
+#define OPT4060_CTRL_OPER_MODE_OFF 0x0
+#define OPT4060_CTRL_OPER_MODE_FORCED 0x1
+#define OPT4060_CTRL_OPER_MODE_ONE_SHOT 0x2
+#define OPT4060_CTRL_OPER_MODE_CONTINUOUS 0x3
+
+/* OPT4060 conversion control register definitions */
+#define OPT4060_CTRL_CONVERSION_0_6MS 0x0
+#define OPT4060_CTRL_CONVERSION_1MS 0x1
+#define OPT4060_CTRL_CONVERSION_1_8MS 0x2
+#define OPT4060_CTRL_CONVERSION_3_4MS 0x3
+#define OPT4060_CTRL_CONVERSION_6_5MS 0x4
+#define OPT4060_CTRL_CONVERSION_12_7MS 0x5
+#define OPT4060_CTRL_CONVERSION_25MS 0x6
+#define OPT4060_CTRL_CONVERSION_50MS 0x7
+#define OPT4060_CTRL_CONVERSION_100MS 0x8
+#define OPT4060_CTRL_CONVERSION_200MS 0x9
+#define OPT4060_CTRL_CONVERSION_400MS 0xa
+#define OPT4060_CTRL_CONVERSION_800MS 0xb
+
+/* OPT4060 fault count control register definitions */
+#define OPT4060_CTRL_FAULT_COUNT_1 0x0
+#define OPT4060_CTRL_FAULT_COUNT_2 0x1
+#define OPT4060_CTRL_FAULT_COUNT_4 0x2
+#define OPT4060_CTRL_FAULT_COUNT_8 0x3
+
+/* OPT4060 scale light level range definitions */
+#define OPT4060_CTRL_LIGHT_SCALE_AUTO 12
+
+/* OPT4060 default values */
+#define OPT4060_DEFAULT_CONVERSION_TIME OPT4060_CTRL_CONVERSION_50MS
+
+/**
+ * enum opt4060_chan_type - OPT4060 channel types
+ * @OPT4060_RED: Red channel.
+ * @OPT4060_GREEN: Green channel.
+ * @OPT4060_BLUE: Blue channel.
+ * @OPT4060_CLEAR: Clear (white) channel.
+ * @OPT4060_ILLUM: Calculated illuminance channel.
+ * @OPT4060_NUM_CHANS: Number of channel types.
+ */
+enum opt4060_chan_type {
+ OPT4060_RED,
+ OPT4060_GREEN,
+ OPT4060_BLUE,
+ OPT4060_CLEAR,
+ OPT4060_ILLUM,
+ OPT4060_NUM_CHANS
+};
+
+struct opt4060_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+ u8 int_time;
+ int irq;
+ /*
+ * Mutex for protecting sensor irq settings. Switching between interrupt
+ * on each sample and on thresholds needs to be synchronized.
+ */
+ struct mutex irq_setting_lock;
+ /*
+ * Mutex for protecting event enabling.
+ */
+ struct mutex event_enabling_lock;
+ struct completion completion;
+ bool thresh_event_lo_active;
+ bool thresh_event_hi_active;
+};
+
+struct opt4060_channel_factor {
+ u32 mul;
+ u32 div;
+};
+
+static const int opt4060_int_time_available[][2] = {
+ { 0, 600 },
+ { 0, 1000 },
+ { 0, 1800 },
+ { 0, 3400 },
+ { 0, 6500 },
+ { 0, 12700 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 400000 },
+ { 0, 800000 },
+};
+
+/*
+ * Conversion time is the integration time plus the time needed to set the
+ * register; it is used here as the integration time.
+ */
+static const int opt4060_int_time_reg[][2] = {
+ { 600, OPT4060_CTRL_CONVERSION_0_6MS },
+ { 1000, OPT4060_CTRL_CONVERSION_1MS },
+ { 1800, OPT4060_CTRL_CONVERSION_1_8MS },
+ { 3400, OPT4060_CTRL_CONVERSION_3_4MS },
+ { 6500, OPT4060_CTRL_CONVERSION_6_5MS },
+ { 12700, OPT4060_CTRL_CONVERSION_12_7MS },
+ { 25000, OPT4060_CTRL_CONVERSION_25MS },
+ { 50000, OPT4060_CTRL_CONVERSION_50MS },
+ { 100000, OPT4060_CTRL_CONVERSION_100MS },
+ { 200000, OPT4060_CTRL_CONVERSION_200MS },
+ { 400000, OPT4060_CTRL_CONVERSION_400MS },
+ { 800000, OPT4060_CTRL_CONVERSION_800MS },
+};
+
+static int opt4060_als_time_to_index(const u32 als_integration_time)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt4060_int_time_available); i++) {
+ if (als_integration_time == opt4060_int_time_available[i][1])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static u8 opt4060_calculate_crc(u8 exp, u32 mantissa, u8 count)
+{
+ u8 crc;
+
+ /*
+ * Calculates a 4-bit CRC from a 20-bit mantissa, 4-bit exponent and a 4-bit counter.
+ * crc[0] = XOR(mantissa[19:0], exp[3:0], count[3:0])
+ * crc[1] = XOR(mantissa[1,3,5,7,9,11,13,15,17,19], exp[1,3], count[1,3])
+ * crc[2] = XOR(mantissa[3,7,11,15,19], exp[3], count[3])
+ * crc[3] = XOR(mantissa[3,11,19])
+ */
+ crc = (hweight32(mantissa) + hweight32(exp) + hweight32(count)) % 2;
+ crc |= ((hweight32(mantissa & 0xAAAAA) + hweight32(exp & 0xA)
+ + hweight32(count & 0xA)) % 2) << 1;
+ crc |= ((hweight32(mantissa & 0x88888) + hweight32(exp & 0x8)
+ + hweight32(count & 0x8)) % 2) << 2;
+ crc |= (hweight32(mantissa & 0x80808) % 2) << 3;
+
+ return crc;
+}
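+
+/*
+ * Worked example (illustrative): for mantissa = 0x80808 (bits 3, 11 and 19
+ * set), exp = 0 and count = 0, each of the four parities above covers an odd
+ * number of set bits, so the computed CRC is 0xf. A result word whose CRC
+ * nibble disagrees with this recomputation is rejected with -EIO.
+ */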
+
+static int opt4060_set_int_state(struct opt4060_chip *chip, u32 state)
+{
+ int ret;
+ unsigned int regval;
+
+ guard(mutex)(&chip->irq_setting_lock);
+
+ regval = FIELD_PREP(OPT4060_INT_CTRL_INT_CFG, state);
+ ret = regmap_update_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_INT_CFG, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set interrupt config\n");
+ return ret;
+}
+
+static int opt4060_set_sampling_mode(struct opt4060_chip *chip,
+ bool continuous)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &reg);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read ctrl register\n");
+ return ret;
+ }
+ reg &= ~OPT4060_CTRL_OPER_MODE_MASK;
+ if (continuous)
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_CONTINUOUS);
+ else
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_ONE_SHOT);
+
+ /*
+ * Trigger a new conversion by writing to the CTRL register. It is not
+ * possible to use regmap_update_bits(), since that only writes when the
+ * data is modified.
+ */
+ ret = regmap_write(chip->regmap, OPT4060_CTRL, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to set ctrl register\n");
+ return ret;
+}
+
+static bool opt4060_event_active(struct opt4060_chip *chip)
+{
+ return chip->thresh_event_lo_active || chip->thresh_event_hi_active;
+}
+
+static int opt4060_set_state_common(struct opt4060_chip *chip,
+ bool continuous_sampling,
+ bool continuous_irq)
+{
+ int ret = 0;
+
+ /* It is important to set up the irq before sampling to avoid missing samples. */
+ if (continuous_irq)
+ ret = opt4060_set_int_state(chip, OPT4060_INT_CTRL_ALL_CH);
+ else
+ ret = opt4060_set_int_state(chip, OPT4060_INT_CTRL_THRESHOLD);
+ if (ret) {
+ dev_err(chip->dev, "Failed to set irq state.\n");
+ return ret;
+ }
+
+ if (continuous_sampling || opt4060_event_active(chip))
+ ret = opt4060_set_sampling_mode(chip, true);
+ else
+ ret = opt4060_set_sampling_mode(chip, false);
+ if (ret)
+ dev_err(chip->dev, "Failed to set sampling state.\n");
+ return ret;
+}
+
+/*
+ * Function for setting the driver state for sampling and irq. Either direct
+ * mode or buffer mode will be claimed during the transition to prevent races
+ * between sysfs reads, the buffer and events.
+ */
+static int opt4060_set_driver_state(struct iio_dev *indio_dev,
+ bool continuous_sampling,
+ bool continuous_irq)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+any_mode_retry:
+ if (iio_device_claim_buffer_mode(indio_dev)) {
+ /*
+ * This one is a *bit* hacky. If we cannot claim buffer mode,
+ * then try direct mode so that we make sure things cannot
+ * concurrently change. And we just keep trying until we get one
+ * of the modes...
+ */
+ if (iio_device_claim_direct_mode(indio_dev))
+ goto any_mode_retry;
+ /*
+ * This path means that we managed to claim direct mode. In
+ * this case the buffer isn't enabled and it's okay to leave
+ * continuous mode for sampling and/or irq.
+ */
+ ret = opt4060_set_state_common(chip, continuous_sampling,
+ continuous_irq);
+ iio_device_release_direct_mode(indio_dev);
+ } else {
+ /*
+ * This path means that we managed to claim buffer mode. In
+ * this case the buffer is enabled and irq and sampling must go
+ * to or remain continuous, but only if the trigger is from this
+ * device.
+ */
+ if (!iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
+ ret = opt4060_set_state_common(chip, true, true);
+ else
+ ret = opt4060_set_state_common(chip, continuous_sampling,
+ continuous_irq);
+ iio_device_release_buffer_mode(indio_dev);
+ }
+ return ret;
+}
+
+/*
+ * This function is called with framework mutex locked.
+ */
+static int opt4060_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ return opt4060_set_state_common(chip, state, state);
+}
+
+static int opt4060_read_raw_value(struct opt4060_chip *chip,
+ unsigned long address, u32 *raw)
+{
+ int ret;
+ u16 result[2];
+ u32 mantissa_raw;
+ u16 msb, lsb;
+ u8 exp, count, crc, calc_crc;
+
+ ret = regmap_bulk_read(chip->regmap, address, result, 2);
+ if (ret) {
+ dev_err(chip->dev, "Reading channel data failed\n");
+ return ret;
+ }
+ exp = FIELD_GET(OPT4060_EXPONENT_MASK, result[0]);
+ msb = FIELD_GET(OPT4060_MSB_MASK, result[0]);
+ count = FIELD_GET(OPT4060_COUNTER_MASK, result[1]);
+ crc = FIELD_GET(OPT4060_CRC_MASK, result[1]);
+ lsb = FIELD_GET(OPT4060_LSB_MASK, result[1]);
+ mantissa_raw = (msb << 8) + lsb;
+ calc_crc = opt4060_calculate_crc(exp, mantissa_raw, count);
+ if (calc_crc != crc)
+ return -EIO;
+ *raw = mantissa_raw << exp;
+ return 0;
+}
+
+static int opt4060_trigger_new_samples(struct iio_dev *indio_dev)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * The conversion time should be 500us startup time plus the integration
+ * time times the number of channels. An exact timeout isn't critical; it's
+ * better not to get spurious errors in the log. Set the timeout to double
+ * the theoretical time plus an extra 100ms margin.
+ */
+ unsigned int timeout_us = (500 + OPT4060_NUM_CHANS *
+ opt4060_int_time_reg[chip->int_time][0]) * 2 + 100000;
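+
+ /*
+ * Illustrative figure: with the default 50 ms conversion time this
+ * evaluates to (500 + 5 * 50000) * 2 + 100000 = 601000 us, roughly 0.6 s.
+ */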
+
+ /* Setting the state in one shot mode with irq on each sample. */
+ ret = opt4060_set_driver_state(indio_dev, false, true);
+ if (ret)
+ return ret;
+
+ if (chip->irq) {
+ guard(mutex)(&chip->irq_setting_lock);
+ reinit_completion(&chip->completion);
+ if (wait_for_completion_timeout(&chip->completion,
+ usecs_to_jiffies(timeout_us)) == 0) {
+ dev_err(chip->dev, "Completion timed out.\n");
+ return -ETIME;
+ }
+ } else {
+ unsigned int ready;
+
+ ret = regmap_read_poll_timeout(chip->regmap, OPT4060_RES_CTRL,
+ ready, (ready & OPT4060_RES_CTRL_CONV_READY),
+ 1000, timeout_us);
+ if (ret)
+ dev_err(chip->dev, "Conversion ready did not finish within timeout.\n");
+ }
+ /* Setting the state in one shot mode with irq on thresholds. */
+ return opt4060_set_driver_state(indio_dev, false, false);
+}
+
+static int opt4060_read_chan_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 adc_raw;
+ int ret;
+
+ ret = opt4060_trigger_new_samples(indio_dev);
+ if (ret) {
+ dev_err(chip->dev, "Failed to trigger new samples.\n");
+ return ret;
+ }
+
+ ret = opt4060_read_raw_value(chip, chan->address, &adc_raw);
+ if (ret) {
+ dev_err(chip->dev, "Reading raw channel data failed.\n");
+ return ret;
+ }
+ *val = adc_raw;
+ return IIO_VAL_INT;
+}
+
+/*
+ * Returns the scale values used for red, green and blue. Scales the raw value
+ * so that for a particular test light source, typically white, the measurement
+ * intensity is the same across different color channels.
+ */
+static int opt4060_get_chan_scale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ switch (chan->scan_index) {
+ case OPT4060_RED:
+ /* 2.4 */
+ *val = 2;
+ *val2 = 400000;
+ break;
+ case OPT4060_GREEN:
+ /* 1.0 */
+ *val = 1;
+ *val2 = 0;
+ break;
+ case OPT4060_BLUE:
+ /* 1.3 */
+ *val = 1;
+ *val2 = 300000;
+ break;
+ default:
+ dev_err(chip->dev, "Unexpected channel index.\n");
+ return -EINVAL;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+}
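+
+/*
+ * Note (illustrative): with IIO_VAL_INT_PLUS_MICRO the (val, val2) pair
+ * encodes val + val2 * 1e-6, so the (2, 400000) pair above reads back
+ * through sysfs as "2.400000".
+ */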
+
+static int opt4060_calc_illuminance(struct opt4060_chip *chip, int *val)
+{
+ u32 lux_raw;
+ int ret;
+
+ /* The green wide spectral channel is used for illuminance. */
+ ret = opt4060_read_raw_value(chip, OPT4060_GREEN_MSB, &lux_raw);
+ if (ret) {
+ dev_err(chip->dev, "Reading raw channel data failed\n");
+ return ret;
+ }
+
+ /* Illuminance is calculated as ADC_RAW * 2.15e-3, i.e. raw * 215 / 100000. */
+ *val = DIV_U64_ROUND_CLOSEST((u64)lux_raw * 215, 100000);
+ return ret;
+}
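+
+/*
+ * Illustrative check of the conversion above: a raw green count of 100000
+ * yields DIV_U64_ROUND_CLOSEST(100000 * 215, 100000) = 215 lux.
+ */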
+
+static int opt4060_read_illuminance(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ ret = opt4060_trigger_new_samples(indio_dev);
+ if (ret) {
+ dev_err(chip->dev, "Failed to trigger new samples.\n");
+ return ret;
+ }
+ ret = opt4060_calc_illuminance(chip, val);
+ if (ret) {
+ dev_err(chip->dev, "Failed to calculate illuminance.\n");
+ return ret;
+ }
+
+ return IIO_VAL_INT;
+}
+
+static int opt4060_set_int_time(struct opt4060_chip *chip)
+{
+ unsigned int regval;
+ int ret;
+
+ regval = FIELD_PREP(OPT4060_CTRL_CONV_TIME_MASK, chip->int_time);
+ ret = regmap_update_bits(chip->regmap, OPT4060_CTRL,
+ OPT4060_CTRL_CONV_TIME_MASK, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set integration time.\n");
+
+ return ret;
+}
+
+static int opt4060_power_down(struct opt4060_chip *chip)
+{
+ int ret;
+
+ ret = regmap_clear_bits(chip->regmap, OPT4060_CTRL, OPT4060_CTRL_OPER_MODE_MASK);
+ if (ret)
+ dev_err(chip->dev, "Failed to power down\n");
+
+ return ret;
+}
+
+static void opt4060_chip_off_action(void *chip)
+{
+ opt4060_power_down(chip);
+}
+
+#define _OPT4060_COLOR_CHANNEL(_color, _mask, _ev_spec, _num_ev_spec) \
+{ \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+ .info_mask_separate = _mask, \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .address = OPT4060_##_color##_MSB, \
+ .scan_index = OPT4060_##_color, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ .event_spec = _ev_spec, \
+ .num_event_specs = _num_ev_spec, \
+}
+
+#define OPT4060_COLOR_CHANNEL(_color, _mask) \
+ _OPT4060_COLOR_CHANNEL(_color, _mask, opt4060_event_spec, \
+ ARRAY_SIZE(opt4060_event_spec)) \
+
+#define OPT4060_COLOR_CHANNEL_NO_EVENTS(_color, _mask) \
+ _OPT4060_COLOR_CHANNEL(_color, _mask, NULL, 0) \
+
+#define OPT4060_LIGHT_CHANNEL(_channel) \
+{ \
+ .type = IIO_LIGHT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .scan_index = OPT4060_##_channel, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_event_spec opt4060_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
+static const struct iio_chan_spec opt4060_channels[] = {
+ OPT4060_COLOR_CHANNEL(RED, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(GREEN, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(BLUE, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(CLEAR, BIT(IIO_CHAN_INFO_RAW)),
+ OPT4060_LIGHT_CHANNEL(ILLUM),
+ IIO_CHAN_SOFT_TIMESTAMP(OPT4060_NUM_CHANS),
+};
+
+static const struct iio_chan_spec opt4060_channels_no_events[] = {
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(RED, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(GREEN, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(BLUE, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(CLEAR, BIT(IIO_CHAN_INFO_RAW)),
+ OPT4060_LIGHT_CHANNEL(ILLUM),
+ IIO_CHAN_SOFT_TIMESTAMP(OPT4060_NUM_CHANS),
+};
+
+static int opt4060_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return opt4060_read_chan_raw(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return opt4060_get_chan_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_PROCESSED:
+ return opt4060_read_illuminance(indio_dev, chan, val);
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = opt4060_int_time_reg[chip->int_time][0];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int int_time;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ int_time = opt4060_als_time_to_index(val2);
+ if (int_time < 0)
+ return int_time;
+ chip->int_time = int_time;
+ return opt4060_set_int_time(chip);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 opt4060_calc_th_reg(u32 adc_val)
+{
+ u32 th_val, th_exp, bits;
+
+ /*
+ * The threshold registers take 4 bits of exponent and 12 bits of data:
+ * ADC = TH_VAL << (8 + TH_EXP)
+ */
+ bits = fls(adc_val);
+
+ if (bits > 31)
+ th_exp = 11; /* Maximum exponent */
+ else if (bits > 20)
+ th_exp = bits - 20;
+ else
+ th_exp = 0;
+ th_val = (adc_val >> (8 + th_exp)) & 0xfff;
+
+ return (th_exp << 12) + th_val;
+}
+
+static u32 opt4060_calc_val_from_th_reg(u32 th_reg)
+{
+ /*
+ * The threshold registers take 4 bits of exponent and 12 bits of data:
+ * ADC = TH_VAL << (8 + TH_EXP)
+ */
+ u32 th_val, th_exp;
+
+ th_exp = (th_reg >> 12) & 0xf;
+ th_val = th_reg & 0xfff;
+
+ return th_val << (8 + th_exp);
+}
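+
+/*
+ * Round-trip example (illustrative): an ADC value of 0x1000000 has
+ * fls() == 25, so TH_EXP = 25 - 20 = 5 and TH_VAL = 0x1000000 >> 13 = 0x800,
+ * i.e. a register value of 0x5800. Decoding gives 0x800 << (8 + 5) =
+ * 0x1000000 back; any bits below the 12-bit mantissa window are lost to
+ * quantization.
+ */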
+
+static int opt4060_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(opt4060_int_time_available) * 2;
+ *vals = (const int *)opt4060_int_time_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t opt4060_read_ev_period(struct opt4060_chip *chip, int *val,
+ int *val2)
+{
+ int ret, pers, fault_count, int_time;
+ u64 uval;
+
+ int_time = opt4060_int_time_reg[chip->int_time][0];
+
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &fault_count);
+ if (ret < 0)
+ return ret;
+
+ fault_count = fault_count & OPT4060_CTRL_FAULT_COUNT_MASK;
+ switch (fault_count) {
+ case OPT4060_CTRL_FAULT_COUNT_2:
+ pers = 2;
+ break;
+ case OPT4060_CTRL_FAULT_COUNT_4:
+ pers = 4;
+ break;
+ case OPT4060_CTRL_FAULT_COUNT_8:
+ pers = 8;
+ break;
+
+ default:
+ pers = 1;
+ break;
+ }
+
+ uval = mul_u32_u32(int_time, pers);
+ *val = div_u64_rem(uval, MICRO, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static ssize_t opt4060_write_ev_period(struct opt4060_chip *chip, int val,
+ int val2)
+{
+ u64 uval, int_time;
+ unsigned int regval, fault_count_val;
+
+ uval = mul_u32_u32(val, MICRO) + val2;
+ int_time = opt4060_int_time_reg[chip->int_time][0];
+
+ /* Check if the period is closest to 1, 2, 4 or 8 times the integration time. */
+ if (uval <= int_time)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_1;
+ else if (uval <= int_time * 2)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_2;
+ else if (uval <= int_time * 4)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_4;
+ else
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_8;
+
+ regval = FIELD_PREP(OPT4060_CTRL_FAULT_COUNT_MASK, fault_count_val);
+ return regmap_update_bits(chip->regmap, OPT4060_CTRL,
+ OPT4060_CTRL_FAULT_COUNT_MASK, regval);
+}
+
+static int opt4060_get_channel_sel(struct opt4060_chip *chip, int *ch_sel)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(chip->regmap, OPT4060_INT_CTRL, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to get channel selection.\n");
+ return ret;
+ }
+ *ch_sel = FIELD_GET(OPT4060_INT_CTRL_THRESH_SEL, regval);
+ return ret;
+}
+
+static int opt4060_set_channel_sel(struct opt4060_chip *chip, int ch_sel)
+{
+ int ret;
+ u32 regval;
+
+ regval = FIELD_PREP(OPT4060_INT_CTRL_THRESH_SEL, ch_sel);
+ ret = regmap_update_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_THRESH_SEL, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set channel selection.\n");
+ return ret;
+}
+
+static int opt4060_get_thresholds(struct opt4060_chip *chip, u32 *th_lo, u32 *th_hi)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(chip->regmap, OPT4060_THRESHOLD_LOW, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read THRESHOLD_LOW.\n");
+ return ret;
+ }
+ *th_lo = opt4060_calc_val_from_th_reg(regval);
+
+ ret = regmap_read(chip->regmap, OPT4060_THRESHOLD_HIGH, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read THRESHOLD_LOW.\n");
+ return ret;
+ }
+ *th_hi = opt4060_calc_val_from_th_reg(regval);
+
+ return ret;
+}
+
+static int opt4060_set_thresholds(struct opt4060_chip *chip, u32 th_lo, u32 th_hi)
+{
+ int ret;
+
+ ret = regmap_write(chip->regmap, OPT4060_THRESHOLD_LOW, opt4060_calc_th_reg(th_lo));
+ if (ret) {
+ dev_err(chip->dev, "Failed to write THRESHOLD_LOW.\n");
+ return ret;
+ }
+
+ ret = regmap_write(chip->regmap, OPT4060_THRESHOLD_HIGH, opt4060_calc_th_reg(th_hi));
+ if (ret)
+ dev_err(chip->dev, "Failed to write THRESHOLD_HIGH.\n");
+
+ return ret;
+}
+
+static int opt4060_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 th_lo, th_hi;
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = opt4060_get_thresholds(chip, &th_lo, &th_hi);
+ if (ret)
+ return ret;
+ if (dir == IIO_EV_DIR_FALLING) {
+ *val = th_lo;
+ ret = IIO_VAL_INT;
+ } else if (dir == IIO_EV_DIR_RISING) {
+ *val = th_hi;
+ ret = IIO_VAL_INT;
+ }
+ return ret;
+ case IIO_EV_INFO_PERIOD:
+ return opt4060_read_ev_period(chip, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 th_lo, th_hi;
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = opt4060_get_thresholds(chip, &th_lo, &th_hi);
+ if (ret)
+ return ret;
+ if (dir == IIO_EV_DIR_FALLING)
+ th_lo = val;
+ else if (dir == IIO_EV_DIR_RISING)
+ th_hi = val;
+ return opt4060_set_thresholds(chip, th_lo, th_hi);
+ case IIO_EV_INFO_PERIOD:
+ return opt4060_write_ev_period(chip, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ch_sel, ch_idx = chan->scan_index;
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = opt4060_get_channel_sel(chip, &ch_sel);
+ if (ret)
+ return ret;
+
+ if (((dir == IIO_EV_DIR_FALLING) && chip->thresh_event_lo_active) ||
+ ((dir == IIO_EV_DIR_RISING) && chip->thresh_event_hi_active))
+ return ch_sel == ch_idx;
+
+ return ret;
+}
+
+static int opt4060_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ int ch_sel, ch_idx = chan->scan_index;
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&chip->event_enabling_lock);
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = opt4060_get_channel_sel(chip, &ch_sel);
+ if (ret)
+ return ret;
+
+ if (state) {
+ /* Only one channel can be active at the same time */
+ if ((chip->thresh_event_lo_active || chip->thresh_event_hi_active) &&
+ (ch_idx != ch_sel))
+ return -EBUSY;
+ if (dir == IIO_EV_DIR_FALLING)
+ chip->thresh_event_lo_active = true;
+ else if (dir == IIO_EV_DIR_RISING)
+ chip->thresh_event_hi_active = true;
+ ret = opt4060_set_channel_sel(chip, ch_idx);
+ if (ret)
+ return ret;
+ } else {
+ if (ch_idx == ch_sel) {
+ if (dir == IIO_EV_DIR_FALLING)
+ chip->thresh_event_lo_active = false;
+ else if (dir == IIO_EV_DIR_RISING)
+ chip->thresh_event_hi_active = false;
+ }
+ }
+
+ return opt4060_set_driver_state(indio_dev,
+ chip->thresh_event_hi_active |
+ chip->thresh_event_lo_active,
+ false);
+}
+
+static const struct iio_info opt4060_info = {
+ .read_raw = opt4060_read_raw,
+ .write_raw = opt4060_write_raw,
+ .write_raw_get_fmt = opt4060_write_raw_get_fmt,
+ .read_avail = opt4060_read_available,
+ .read_event_value = opt4060_read_event,
+ .write_event_value = opt4060_write_event,
+ .read_event_config = opt4060_read_event_config,
+ .write_event_config = opt4060_write_event_config,
+};
+
+static const struct iio_info opt4060_info_no_irq = {
+ .read_raw = opt4060_read_raw,
+ .write_raw = opt4060_write_raw,
+ .write_raw_get_fmt = opt4060_write_raw_get_fmt,
+ .read_avail = opt4060_read_available,
+};
+
+static int opt4060_load_defaults(struct opt4060_chip *chip)
+{
+ u16 reg;
+ int ret;
+
+ chip->int_time = OPT4060_DEFAULT_CONVERSION_TIME;
+
+ /* Set initial MIN/MAX thresholds */
+ ret = opt4060_set_thresholds(chip, 0, UINT_MAX);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting auto-range, latched window for thresholds, one-shot conversion
+ * and quick wake-up mode as default.
+ */
+ reg = FIELD_PREP(OPT4060_CTRL_RANGE_MASK,
+ OPT4060_CTRL_LIGHT_SCALE_AUTO);
+ reg |= FIELD_PREP(OPT4060_CTRL_CONV_TIME_MASK, chip->int_time);
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_ONE_SHOT);
+ reg |= OPT4060_CTRL_QWAKE_MASK | OPT4060_CTRL_LATCH_MASK;
+
+ ret = regmap_write(chip->regmap, OPT4060_CTRL, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to set configuration\n");
+
+ return ret;
+}
+
+static bool opt4060_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg <= OPT4060_CLEAR_LSB || reg == OPT4060_RES_CTRL;
+}
+
+static bool opt4060_writable_reg(struct device *dev, unsigned int reg)
+{
+ return reg >= OPT4060_THRESHOLD_LOW || reg >= OPT4060_INT_CTRL;
+}
+
+static bool opt4060_readonly_reg(struct device *dev, unsigned int reg)
+{
+ return reg == OPT4060_DEVICE_ID;
+}
+
+static bool opt4060_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* Volatile, writable and read-only registers are readable. */
+ return opt4060_volatile_reg(dev, reg) || opt4060_writable_reg(dev, reg) ||
+ opt4060_readonly_reg(dev, reg);
+}
+
+static const struct regmap_config opt4060_regmap_config = {
+ .name = "opt4060",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = OPT4060_DEVICE_ID,
+ .readable_reg = opt4060_readable_reg,
+ .writeable_reg = opt4060_writable_reg,
+ .volatile_reg = opt4060_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct iio_trigger_ops opt4060_trigger_ops = {
+ .set_trigger_state = opt4060_trigger_set_state,
+};
+
+static irqreturn_t opt4060_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct opt4060_chip *chip = iio_priv(idev);
+ struct {
+ u32 chan[OPT4060_NUM_CHANS];
+ aligned_s64 ts;
+ } raw;
+ int i = 0;
+ int chan, ret;
+
+ /* If the trigger is not from this driver, a new sample is needed. */
+ if (iio_trigger_validate_own_device(idev->trig, idev))
+ opt4060_trigger_new_samples(idev);
+
+ memset(&raw, 0, sizeof(raw));
+
+ iio_for_each_active_channel(idev, chan) {
+ if (chan == OPT4060_ILLUM)
+ ret = opt4060_calc_illuminance(chip, &raw.chan[i++]);
+ else
+ ret = opt4060_read_raw_value(chip,
+ idev->channels[chan].address,
+ &raw.chan[i++]);
+ if (ret) {
+ dev_err(chip->dev, "Reading channel data failed\n");
+ goto err_read;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
+err_read:
+ iio_trigger_notify_done(idev->trig);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t opt4060_irq_thread(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct opt4060_chip *chip = iio_priv(idev);
+ int ret, dummy;
+ unsigned int int_res;
+
+ ret = regmap_read(chip->regmap, OPT4060_RES_CTRL, &int_res);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read interrupt reasons.\n");
+ return IRQ_NONE;
+ }
+
+ /* Read OPT4060_CTRL to clear interrupt */
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &dummy);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to clear interrupt\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle events */
+ if (int_res & (OPT4060_RES_CTRL_FLAG_H | OPT4060_RES_CTRL_FLAG_L)) {
+ u64 code;
+ int chan = 0;
+
+ ret = opt4060_get_channel_sel(chip, &chan);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read threshold channel.\n");
+ return IRQ_NONE;
+ }
+
+ /* Check if the interrupt is from the lower threshold */
+ if (int_res & OPT4060_RES_CTRL_FLAG_L) {
+ code = IIO_MOD_EVENT_CODE(IIO_INTENSITY,
+ chan,
+ idev->channels[chan].channel2,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING);
+ iio_push_event(idev, code, iio_get_time_ns(idev));
+ }
+ /* Check if the interrupt is from the upper threshold */
+ if (int_res & OPT4060_RES_CTRL_FLAG_H) {
+ code = IIO_MOD_EVENT_CODE(IIO_INTENSITY,
+ chan,
+ idev->channels[chan].channel2,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING);
+ iio_push_event(idev, code, iio_get_time_ns(idev));
+ }
+ }
+
+ /* Handle conversion ready */
+ if (int_res & OPT4060_RES_CTRL_CONV_READY) {
+ /* Signal completion for potentially waiting reads */
+ complete(&chip->completion);
+
+ /* Handle data ready triggers */
+ if (iio_buffer_enabled(idev))
+ iio_trigger_poll_nested(chip->trig);
+ }
+ return IRQ_HANDLED;
+}
+
+static int opt4060_setup_buffer(struct opt4060_chip *chip, struct iio_dev *idev)
+{
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(chip->dev, idev,
+ &iio_pollfunc_store_time,
+ opt4060_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Buffer setup failed.\n");
+ return ret;
+}
+
+static int opt4060_setup_trigger(struct opt4060_chip *chip, struct iio_dev *idev)
+{
+ struct iio_trigger *data_trigger;
+ char *name;
+ int ret;
+
+ data_trigger = devm_iio_trigger_alloc(chip->dev, "%s-data-ready-dev%d",
+ idev->name, iio_device_id(idev));
+ if (!data_trigger)
+ return -ENOMEM;
+
+ /*
+ * The data trigger allows for sample capture on each new conversion
+ * ready interrupt.
+ */
+ chip->trig = data_trigger;
+ data_trigger->ops = &opt4060_trigger_ops;
+ iio_trigger_set_drvdata(data_trigger, idev);
+ ret = devm_iio_trigger_register(chip->dev, data_trigger);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Data ready trigger registration failed\n");
+
+ name = devm_kasprintf(chip->dev, GFP_KERNEL, "%s-opt4060",
+ dev_name(chip->dev));
+ if (!name)
+ return dev_err_probe(chip->dev, -ENOMEM, "Failed to alloc chip name\n");
+
+ ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL, opt4060_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, idev);
+ if (ret)
+ return dev_err_probe(chip->dev, ret, "Could not request IRQ\n");
+
+ init_completion(&chip->completion);
+
+ ret = devm_mutex_init(chip->dev, &chip->irq_setting_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(chip->dev, &chip->event_enabling_lock);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_OUTPUT,
+ OPT4060_INT_CTRL_OUTPUT);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Failed to set interrupt as output\n");
+
+ return 0;
+}
+
+static int opt4060_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct opt4060_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+ unsigned int regval, dev_id;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable vdd supply\n");
+
+ chip->regmap = devm_regmap_init_i2c(client, &opt4060_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
+ "regmap initialization failed\n");
+
+ chip->dev = dev;
+ chip->irq = client->irq;
+
+ ret = regmap_reinit_cache(chip->regmap, &opt4060_regmap_config);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to reinit regmap cache\n");
+
+ ret = regmap_read(chip->regmap, OPT4060_DEVICE_ID, &regval);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to read the device ID register\n");
+
+ dev_id = FIELD_GET(OPT4060_DEVICE_ID_MASK, regval);
+ if (dev_id != OPT4060_DEVICE_ID_VAL)
+ dev_info(dev, "Device ID: %#04x unknown\n", dev_id);
+
+ if (chip->irq) {
+ indio_dev->info = &opt4060_info;
+ indio_dev->channels = opt4060_channels;
+ indio_dev->num_channels = ARRAY_SIZE(opt4060_channels);
+ } else {
+ indio_dev->info = &opt4060_info_no_irq;
+ indio_dev->channels = opt4060_channels_no_events;
+ indio_dev->num_channels = ARRAY_SIZE(opt4060_channels_no_events);
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = "opt4060";
+
+ ret = opt4060_load_defaults(chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to set sensor defaults\n");
+
+ ret = devm_add_action_or_reset(dev, opt4060_chip_off_action, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to setup power off action\n");
+
+ ret = opt4060_setup_buffer(chip, indio_dev);
+ if (ret)
+ return ret;
+
+ if (chip->irq) {
+ ret = opt4060_setup_trigger(chip, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id opt4060_id[] = {
+ { "opt4060", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, opt4060_id);
+
+static const struct of_device_id opt4060_of_match[] = {
+ { .compatible = "ti,opt4060" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, opt4060_of_match);
+
+static struct i2c_driver opt4060_driver = {
+ .driver = {
+ .name = "opt4060",
+ .of_match_table = opt4060_of_match,
+ },
+ .probe = opt4060_probe,
+ .id_table = opt4060_id,
+};
+module_i2c_driver(opt4060_driver);
+
+MODULE_AUTHOR("Per-Daniel Olsson <perdaniel.olsson@axis.com>");
+MODULE_DESCRIPTION("Texas Instruments OPT4060 RGBW color sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
deleted file mode 100644
index fa35dd32700c..000000000000
--- a/drivers/iio/light/rohm-bu27008.c
+++ /dev/null
@@ -1,1635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ROHM Colour Sensor driver for
- * - BU27008 RGBC sensor
- * - BU27010 RGBC + Flickering sensor
- *
- * Copyright (c) 2023, ROHM Semiconductor.
- */
-
-#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/property.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/units.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/iio-gts-helper.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-/*
- * A word about register address and mask definitions.
- *
- * At a quick glance to the data-sheet register tables, the BU27010 has all the
- * registers that the BU27008 has. On top of that the BU27010 adds couple of new
- * ones.
- *
- * So, all definitions BU27008_REG_* are there also for BU27010 but none of the
- * BU27010_REG_* are present on BU27008. This makes sense as BU27010 just adds
- * some features (Flicker FIFO, more power control) on top of the BU27008.
- *
- * Unfortunately, some of the wheel has been re-invented. Even though the names
- * of the registers have stayed the same, pretty much all of the functionality
- * provided by the registers has changed place. Contents of all MODE_CONTROL
- * registers on BU27008 and BU27010 are different.
- *
- * Chip-specific mapping from register addresses/bits to functionality is done
- * in bu27_chip_data structures.
- */
-#define BU27008_REG_SYSTEM_CONTROL 0x40
-#define BU27008_MASK_SW_RESET BIT(7)
-#define BU27008_MASK_PART_ID GENMASK(5, 0)
-#define BU27008_ID 0x1a
-#define BU27008_REG_MODE_CONTROL1 0x41
-#define BU27008_MASK_MEAS_MODE GENMASK(2, 0)
-#define BU27008_MASK_CHAN_SEL GENMASK(3, 2)
-
-#define BU27008_REG_MODE_CONTROL2 0x42
-#define BU27008_MASK_RGBC_GAIN GENMASK(7, 3)
-#define BU27008_MASK_IR_GAIN_LO GENMASK(2, 0)
-#define BU27008_SHIFT_IR_GAIN 3
-
-#define BU27008_REG_MODE_CONTROL3 0x43
-#define BU27008_MASK_VALID BIT(7)
-#define BU27008_MASK_INT_EN BIT(1)
-#define BU27008_INT_EN BU27008_MASK_INT_EN
-#define BU27008_INT_DIS 0
-#define BU27008_MASK_MEAS_EN BIT(0)
-#define BU27008_MEAS_EN BIT(0)
-#define BU27008_MEAS_DIS 0
-
-#define BU27008_REG_DATA0_LO 0x50
-#define BU27008_REG_DATA1_LO 0x52
-#define BU27008_REG_DATA2_LO 0x54
-#define BU27008_REG_DATA3_LO 0x56
-#define BU27008_REG_DATA3_HI 0x57
-#define BU27008_REG_MANUFACTURER_ID 0x92
-#define BU27008_REG_MAX BU27008_REG_MANUFACTURER_ID
-
-/* BU27010 specific definitions */
-
-#define BU27010_MASK_SW_RESET BIT(7)
-#define BU27010_ID 0x1b
-#define BU27010_REG_POWER 0x3e
-#define BU27010_MASK_POWER BIT(0)
-
-#define BU27010_REG_RESET 0x3f
-#define BU27010_MASK_RESET BIT(0)
-#define BU27010_RESET_RELEASE BU27010_MASK_RESET
-
-#define BU27010_MASK_MEAS_EN BIT(1)
-
-#define BU27010_MASK_CHAN_SEL GENMASK(7, 6)
-#define BU27010_MASK_MEAS_MODE GENMASK(5, 4)
-#define BU27010_MASK_RGBC_GAIN GENMASK(3, 0)
-
-#define BU27010_MASK_DATA3_GAIN GENMASK(7, 6)
-#define BU27010_MASK_DATA2_GAIN GENMASK(5, 4)
-#define BU27010_MASK_DATA1_GAIN GENMASK(3, 2)
-#define BU27010_MASK_DATA0_GAIN GENMASK(1, 0)
-
-#define BU27010_MASK_FLC_MODE BIT(7)
-#define BU27010_MASK_FLC_GAIN GENMASK(4, 0)
-
-#define BU27010_REG_MODE_CONTROL4 0x44
-/* If flicker is ever to be supported the IRQ must be handled as a field */
-#define BU27010_IRQ_DIS_ALL GENMASK(1, 0)
-#define BU27010_DRDY_EN BIT(0)
-#define BU27010_MASK_INT_SEL GENMASK(1, 0)
-
-#define BU27010_REG_MODE_CONTROL5 0x45
-#define BU27010_MASK_RGB_VALID BIT(7)
-#define BU27010_MASK_FLC_VALID BIT(6)
-#define BU27010_MASK_WAIT_EN BIT(3)
-#define BU27010_MASK_FIFO_EN BIT(2)
-#define BU27010_MASK_RGB_EN BIT(1)
-#define BU27010_MASK_FLC_EN BIT(0)
-
-#define BU27010_REG_DATA_FLICKER_LO 0x56
-#define BU27010_MASK_DATA_FLICKER_HI GENMASK(2, 0)
-#define BU27010_REG_FLICKER_COUNT 0x5a
-#define BU27010_REG_FIFO_LEVEL_LO 0x5b
-#define BU27010_MASK_FIFO_LEVEL_HI BIT(0)
-#define BU27010_REG_FIFO_DATA_LO 0x5d
-#define BU27010_REG_FIFO_DATA_HI 0x5e
-#define BU27010_MASK_FIFO_DATA_HI GENMASK(2, 0)
-#define BU27010_REG_MANUFACTURER_ID 0x92
-#define BU27010_REG_MAX BU27010_REG_MANUFACTURER_ID
-
-/**
- * enum bu27008_chan_type - BU27008 channel types
- * @BU27008_RED: Red channel. Always via data0.
- * @BU27008_GREEN: Green channel. Always via data1.
- * @BU27008_BLUE: Blue channel. Via data2 (when used).
- * @BU27008_CLEAR: Clear channel. Via data2 or data3 (when used).
- * @BU27008_IR: IR channel. Via data3 (when used).
- * @BU27008_LUX: Illuminance channel, computed using RGB and IR.
- * @BU27008_NUM_CHANS: Number of channel types.
- */
-enum bu27008_chan_type {
- BU27008_RED,
- BU27008_GREEN,
- BU27008_BLUE,
- BU27008_CLEAR,
- BU27008_IR,
- BU27008_LUX,
- BU27008_NUM_CHANS
-};
-
-/**
- * enum bu27008_chan - BU27008 physical data channel
- * @BU27008_DATA0: Always red.
- * @BU27008_DATA1: Always green.
- * @BU27008_DATA2: Blue or clear.
- * @BU27008_DATA3: IR or clear.
- * @BU27008_NUM_HW_CHANS: Number of physical channels
- */
-enum bu27008_chan {
- BU27008_DATA0,
- BU27008_DATA1,
- BU27008_DATA2,
- BU27008_DATA3,
- BU27008_NUM_HW_CHANS
-};
-
-/* We can always measure red and green at same time */
-#define ALWAYS_SCANNABLE (BIT(BU27008_RED) | BIT(BU27008_GREEN))
-
-/* We use these data channel configs. Ensure scan_masks below follow them too */
-#define BU27008_BLUE2_CLEAR3 0x0 /* buffer is R, G, B, C */
-#define BU27008_CLEAR2_IR3 0x1 /* buffer is R, G, C, IR */
-#define BU27008_BLUE2_IR3 0x2 /* buffer is R, G, B, IR */
-
-static const unsigned long bu27008_scan_masks[] = {
- /* buffer is R, G, B, C */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_CLEAR),
- /* buffer is R, G, C, IR */
- ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR),
- /* buffer is R, G, B, IR */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR),
- /* buffer is R, G, B, IR, LUX */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR) | BIT(BU27008_LUX),
- 0
-};
-
-/*
- * Available scales with gain 1x - 1024x, timings 55, 100, 200, 400 mS
- * Time impacts to gain: 1x, 2x, 4x, 8x.
- *
- * => Max total gain is HWGAIN * gain by integration time (8 * 1024) = 8192
- *
- * Max amplification is (HWGAIN * MAX integration-time multiplier) 1024 * 8
- * = 8192. With NANO scale we get rid of accuracy loss when we start with the
- * scale 16.0 for HWGAIN1, INT-TIME 55 mS. This way the nano scale for MAX
- * total gain 8192 will be 1953125
- */
-#define BU27008_SCALE_1X 16
-
-/*
- * On BU27010 available scales with gain 1x - 4096x,
- * timings 55, 100, 200, 400 mS. Time impacts to gain: 1x, 2x, 4x, 8x.
- *
- * => Max total gain is HWGAIN * gain by integration time (8 * 4096)
- *
- * Using NANO precision for scale we must use scale 64x corresponding gain 1x
- * to avoid precision loss.
- */
-#define BU27010_SCALE_1X 64
-
-/* See the data sheet for the "Gain Setting" table */
-#define BU27008_GSEL_1X 0x00
-#define BU27008_GSEL_4X 0x08
-#define BU27008_GSEL_8X 0x09
-#define BU27008_GSEL_16X 0x0a
-#define BU27008_GSEL_32X 0x0b
-#define BU27008_GSEL_64X 0x0c
-#define BU27008_GSEL_256X 0x18
-#define BU27008_GSEL_512X 0x19
-#define BU27008_GSEL_1024X 0x1a
-
-static const struct iio_gain_sel_pair bu27008_gains[] = {
- GAIN_SCALE_GAIN(1, BU27008_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
- GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
- GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
- GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
- GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
- GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
-};
-
-static const struct iio_gain_sel_pair bu27008_gains_ir[] = {
- GAIN_SCALE_GAIN(2, BU27008_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
- GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
- GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
- GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
- GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
- GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
-};
-
-#define BU27010_GSEL_1X 0x00 /* 000000 */
-#define BU27010_GSEL_4X 0x08 /* 001000 */
-#define BU27010_GSEL_16X 0x09 /* 001001 */
-#define BU27010_GSEL_64X 0x0e /* 001110 */
-#define BU27010_GSEL_256X 0x1e /* 011110 */
-#define BU27010_GSEL_1024X 0x2e /* 101110 */
-#define BU27010_GSEL_4096X 0x3f /* 111111 */
-
-static const struct iio_gain_sel_pair bu27010_gains[] = {
- GAIN_SCALE_GAIN(1, BU27010_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
- GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
- GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
- GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
-};
-
-static const struct iio_gain_sel_pair bu27010_gains_ir[] = {
- GAIN_SCALE_GAIN(2, BU27010_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
- GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
- GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
- GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
-};
-
-#define BU27008_MEAS_MODE_100MS 0x00
-#define BU27008_MEAS_MODE_55MS 0x01
-#define BU27008_MEAS_MODE_200MS 0x02
-#define BU27008_MEAS_MODE_400MS 0x04
-
-#define BU27010_MEAS_MODE_100MS 0x00
-#define BU27010_MEAS_MODE_55MS 0x03
-#define BU27010_MEAS_MODE_200MS 0x01
-#define BU27010_MEAS_MODE_400MS 0x02
-
-#define BU27008_MEAS_TIME_MAX_MS 400
-
-static const struct iio_itime_sel_mul bu27008_itimes[] = {
- GAIN_SCALE_ITIME_US(400000, BU27008_MEAS_MODE_400MS, 8),
- GAIN_SCALE_ITIME_US(200000, BU27008_MEAS_MODE_200MS, 4),
- GAIN_SCALE_ITIME_US(100000, BU27008_MEAS_MODE_100MS, 2),
- GAIN_SCALE_ITIME_US(55000, BU27008_MEAS_MODE_55MS, 1),
-};
-
-static const struct iio_itime_sel_mul bu27010_itimes[] = {
- GAIN_SCALE_ITIME_US(400000, BU27010_MEAS_MODE_400MS, 8),
- GAIN_SCALE_ITIME_US(200000, BU27010_MEAS_MODE_200MS, 4),
- GAIN_SCALE_ITIME_US(100000, BU27010_MEAS_MODE_100MS, 2),
- GAIN_SCALE_ITIME_US(55000, BU27010_MEAS_MODE_55MS, 1),
-};
-
-/*
- * All the RGBC channels share the same gain.
- * The IR gain can be fine-tuned from the gain set for the RGBC by 2 bits,
- * but this would yield a quite complex gain setting. Especially since not
- * all bit combinations are supported. And in any case, setting the GAIN for
- * RGBC will always also change the IR-gain.
- *
- * On top of this, the selector '0', which corresponds to hw-gain 1X on RGBC,
- * corresponds to gain 2X on IR. The rest of the selectors correspond to the
- * same gains though. This, however, makes it impossible to use a shared gain
- * for all RGBC and IR settings even though they are all changed in one go.
- */
-#define BU27008_CHAN(color, data, separate_avail) \
-{ \
- .type = IIO_INTENSITY, \
- .modified = 1, \
- .channel2 = IIO_MOD_LIGHT_##color, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate_available = (separate_avail), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
- .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
- .address = BU27008_REG_##data##_LO, \
- .scan_index = BU27008_##color, \
- .scan_type = { \
- .sign = 'u', \
- .realbits = 16, \
- .storagebits = 16, \
- .endianness = IIO_LE, \
- }, \
-}
-
-/* For raw reads we always configure DATA3 for CLEAR */
-static const struct iio_chan_spec bu27008_channels[] = {
- BU27008_CHAN(RED, DATA0, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(GREEN, DATA1, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(BLUE, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(CLEAR, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
- /*
- * We don't allow setting scale for IR (because of shared gain bits).
- * Hence we don't advertise available ones either.
- */
- BU27008_CHAN(IR, DATA3, 0),
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .channel = BU27008_LUX,
- .scan_index = BU27008_LUX,
- .scan_type = {
- .sign = 'u',
- .realbits = 64,
- .storagebits = 64,
- .endianness = IIO_CPU,
- },
- },
- IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
-};
-
-struct bu27008_data;
-
-struct bu27_chip_data {
- const char *name;
- int (*chip_init)(struct bu27008_data *data);
- int (*get_gain_sel)(struct bu27008_data *data, int *sel);
- int (*write_gain_sel)(struct bu27008_data *data, int sel);
- const struct regmap_config *regmap_cfg;
- const struct iio_gain_sel_pair *gains;
- const struct iio_gain_sel_pair *gains_ir;
- const struct iio_itime_sel_mul *itimes;
- int num_gains;
- int num_gains_ir;
- int num_itimes;
- int scale1x;
-
- int drdy_en_reg;
- int drdy_en_mask;
- int meas_en_reg;
- int meas_en_mask;
- int valid_reg;
- int chan_sel_reg;
- int chan_sel_mask;
- int int_time_mask;
- u8 part_id;
-};
-
-struct bu27008_data {
- const struct bu27_chip_data *cd;
- struct regmap *regmap;
- struct iio_trigger *trig;
- struct device *dev;
- struct iio_gts gts;
- struct iio_gts gts_ir;
- int irq;
-
- /*
- * Prevent changing gain/time config when scale is read/written.
- * Similarly, protect the integration_time read/change sequence.
- * Prevent changing gain/time when data is read.
- */
- struct mutex mutex;
-};
-
-static const struct regmap_range bu27008_volatile_ranges[] = {
- {
- .range_min = BU27008_REG_SYSTEM_CONTROL, /* SWRESET */
- .range_max = BU27008_REG_SYSTEM_CONTROL,
- }, {
- .range_min = BU27008_REG_MODE_CONTROL3, /* VALID */
- .range_max = BU27008_REG_MODE_CONTROL3,
- }, {
- .range_min = BU27008_REG_DATA0_LO, /* DATA */
- .range_max = BU27008_REG_DATA3_HI,
- },
-};
-
-static const struct regmap_range bu27010_volatile_ranges[] = {
- {
- .range_min = BU27010_REG_RESET, /* RSTB */
- .range_max = BU27008_REG_SYSTEM_CONTROL, /* RESET */
- }, {
- .range_min = BU27010_REG_MODE_CONTROL5, /* VALID bits */
- .range_max = BU27010_REG_MODE_CONTROL5,
- }, {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27010_REG_FIFO_DATA_HI,
- },
-};
-
-static const struct regmap_access_table bu27008_volatile_regs = {
- .yes_ranges = &bu27008_volatile_ranges[0],
- .n_yes_ranges = ARRAY_SIZE(bu27008_volatile_ranges),
-};
-
-static const struct regmap_access_table bu27010_volatile_regs = {
- .yes_ranges = &bu27010_volatile_ranges[0],
- .n_yes_ranges = ARRAY_SIZE(bu27010_volatile_ranges),
-};
-
-static const struct regmap_range bu27008_read_only_ranges[] = {
- {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27008_REG_DATA3_HI,
- }, {
- .range_min = BU27008_REG_MANUFACTURER_ID,
- .range_max = BU27008_REG_MANUFACTURER_ID,
- },
-};
-
-static const struct regmap_range bu27010_read_only_ranges[] = {
- {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27010_REG_FIFO_DATA_HI,
- }, {
- .range_min = BU27010_REG_MANUFACTURER_ID,
- .range_max = BU27010_REG_MANUFACTURER_ID,
- }
-};
-
-static const struct regmap_access_table bu27008_ro_regs = {
- .no_ranges = &bu27008_read_only_ranges[0],
- .n_no_ranges = ARRAY_SIZE(bu27008_read_only_ranges),
-};
-
-static const struct regmap_access_table bu27010_ro_regs = {
- .no_ranges = &bu27010_read_only_ranges[0],
- .n_no_ranges = ARRAY_SIZE(bu27010_read_only_ranges),
-};
-
-static const struct regmap_config bu27008_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = BU27008_REG_MAX,
- .cache_type = REGCACHE_RBTREE,
- .volatile_table = &bu27008_volatile_regs,
- .wr_table = &bu27008_ro_regs,
- /*
- * All register writes are serialized by the mutex which protects the
- * scale setting/getting. This is needed because scale is combined by
- * gain and integration time settings and we need to ensure those are
- * not read / written when scale is being computed.
- *
- * As a result of this serializing, we don't need regmap locking. Note,
- * this is not true if we add any configurations which are not
- * serialized by the mutex and which may need for example a protected
- * read-modify-write cycle (eg. regmap_update_bits()). Please, revise
- * this when adding features to the driver.
- */
- .disable_locking = true,
-};
-
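To illustrate the hazard the comment above warns about (a sketch, not driver code): with regmap locking disabled, a read-modify-write helper is only safe when every caller serializes on the same external lock.

/*
 * regmap_update_bits() performs a read, a modify and a write. With
 * .disable_locking = true nothing in regmap prevents two such sequences
 * from interleaving, so every access must run under data->mutex.
 */
static int example_update_bits_locked(struct bu27008_data *data,
				      unsigned int reg, unsigned int mask,
				      unsigned int val)
{
	int ret;

	mutex_lock(&data->mutex);	/* serializes the read-modify-write */
	ret = regmap_update_bits(data->regmap, reg, mask, val);
	mutex_unlock(&data->mutex);

	return ret;
}
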
-static const struct regmap_config bu27010_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = BU27010_REG_MAX,
- .cache_type = REGCACHE_RBTREE,
- .volatile_table = &bu27010_volatile_regs,
- .wr_table = &bu27010_ro_regs,
- .disable_locking = true,
-};
-
-static int bu27008_write_gain_sel(struct bu27008_data *data, int sel)
-{
- int regval;
-
- regval = FIELD_PREP(BU27008_MASK_RGBC_GAIN, sel);
-
-	/*
-	 * We always set the LOW bits of the IR-gain too because otherwise we
-	 * would risk producing an invalid GAIN register value.
-	 *
-	 * We could allow setting separate gains for RGBC and IR when the
-	 * values were such that the HW could support both gain settings,
-	 * e.g. when the shared bits were the same for both gain values.
-	 *
-	 * This, however, has a negligible benefit compared to the increased
-	 * software complexity when we would need to go through the gains
-	 * for both channels separately when the integration time changes.
-	 * This would end up with nasty logic for computing gain values for
-	 * both channels - and rejecting them if the shared bits changed.
-	 *
-	 * We would then have to build the logic by guessing what a user
-	 * prefers. RGBC or IR gains correctly set while the other jumps to an
-	 * odd value? Maybe look up a value where both gains are somehow
-	 * optimized <what this somehow is, is ATM unknown to us>. Or maybe
-	 * the user would expect us to reject changes when optimal gains can't
-	 * be set for both channels with the given integration time. At best
-	 * that would result in a solution that works well for a very specific
-	 * subset of configurations but causes unexpected corner-cases.
-	 *
-	 * So, we keep it simple. Always set the same selector for IR and
-	 * RGBC. We disallow setting IR (as I expect that most users are
-	 * interested in RGBC). This way we can show the user that the scales
-	 * for the RGBC and IR channels are different (1X vs 2X with sel 0)
-	 * while still keeping the operation deterministic.
-	 */
- regval |= FIELD_PREP(BU27008_MASK_IR_GAIN_LO, sel);
-
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL2,
- BU27008_MASK_RGBC_GAIN, regval);
-}
-
-static int bu27010_write_gain_sel(struct bu27008_data *data, int sel)
-{
- unsigned int regval;
- int ret, chan_selector;
-
-	/*
-	 * The gain 'selector' is composed of two registers. The selector is a
-	 * 6-bit value, the 4 high bits being the RGBC gain field in the
-	 * MODE_CONTROL1 register and the two low bits being the channel
-	 * specific gain in MODE_CONTROL2.
-	 *
-	 * Let's take the 4 high bits of the whole 6-bit selector, and prepare
-	 * the MODE_CONTROL1 value (RGBC gain part).
-	 */
- regval = FIELD_PREP(BU27010_MASK_RGBC_GAIN, (sel >> 2));
-
- ret = regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- BU27010_MASK_RGBC_GAIN, regval);
- if (ret)
- return ret;
-
-	/*
-	 * The two low bits of the selector must be written for all 4
-	 * channels in the MODE_CONTROL2 register. Copy these two bits for
-	 * all channels.
-	 */
- chan_selector = sel & GENMASK(1, 0);
-
- regval = FIELD_PREP(BU27010_MASK_DATA0_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA1_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA2_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA3_GAIN, chan_selector);
-
- return regmap_write(data->regmap, BU27008_REG_MODE_CONTROL2, regval);
-}
-
-static int bu27008_get_gain_sel(struct bu27008_data *data, int *sel)
-{
- int ret;
-
-	/*
-	 * If we always "lock" the gain selectors for all channels to prevent
-	 * unsupported configs, then it does not matter which channel is used;
-	 * we can just return the selector from any of them.
-	 *
-	 * This, however, is not true if we decide to support only 4X and 16X
-	 * and then individual gains for channels. Currently this is not the
-	 * case.
-	 *
-	 * If we some day decide to support individual gains, then we need to
-	 * have channel information here.
-	 */
-
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
- if (ret)
- return ret;
-
- *sel = FIELD_GET(BU27008_MASK_RGBC_GAIN, *sel);
-
- return 0;
-}
-
-static int bu27010_get_gain_sel(struct bu27008_data *data, int *sel)
-{
- int ret, tmp;
-
-	/*
-	 * We always "lock" the gain selectors for all channels to prevent
-	 * unsupported configs. It does not matter which channel is used;
-	 * we can just return the selector from any of them.
-	 *
-	 * Read the channel0 gain.
-	 */
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
- if (ret)
- return ret;
-
- *sel = FIELD_GET(BU27010_MASK_DATA0_GAIN, *sel);
-
- /* Read the shared gain */
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &tmp);
- if (ret)
- return ret;
-
- /*
- * The gain selector is made as a combination of common RGBC gain and
- * the channel specific gain. The channel specific gain forms the low
- * bits of selector and RGBC gain is appended right after it.
- *
- * Compose the selector from channel0 gain and shared RGBC gain.
- */
- *sel |= FIELD_GET(BU27010_MASK_RGBC_GAIN, tmp) << fls(BU27010_MASK_DATA0_GAIN);
-
- return ret;
-}
-
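The selector layout handled by the two functions above can be summarized with a small sketch (the compose helper is illustrative; the driver open-codes this with FIELD_PREP()/FIELD_GET()):

/*
 * BU27010 gain selector, 6 bits total:
 *
 *   bits 5..2: RGBC gain field (lives in MODE_CONTROL1)
 *   bits 1..0: per-channel gain field (lives in MODE_CONTROL2)
 *
 * E.g. BU27010_GSEL_1024X = 0x2e = 0b101110 splits into RGBC bits
 * 0b1011 and channel bits 0b10.
 */
static inline unsigned int example_bu27010_sel(unsigned int rgbc_bits,
					       unsigned int chan_bits)
{
	return (rgbc_bits << 2) | (chan_bits & GENMASK(1, 0));
}
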
-static int bu27008_chip_init(struct bu27008_data *data)
-{
- int ret;
-
- ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
- BU27008_MASK_SW_RESET, BU27008_MASK_SW_RESET);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
-
-	/*
-	 * The data-sheet does not tell how long performing the IC reset takes.
-	 * However, the data-sheet says the minimum time it takes the IC to be
-	 * able to take inputs after power is applied is 100 uS. I'd assume
-	 * > 1 mS is enough.
-	 */
- msleep(1);
-
- ret = regmap_reinit_cache(data->regmap, data->cd->regmap_cfg);
- if (ret)
- dev_err(data->dev, "Failed to reinit reg cache\n");
-
- return ret;
-}
-
-static int bu27010_chip_init(struct bu27008_data *data)
-{
- int ret;
-
- ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
- BU27010_MASK_SW_RESET, BU27010_MASK_SW_RESET);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
-
- msleep(1);
-
-	/* Power ON */
- ret = regmap_write_bits(data->regmap, BU27010_REG_POWER,
- BU27010_MASK_POWER, BU27010_MASK_POWER);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor power-on failed\n");
-
- msleep(1);
-
- /* Release blocks from reset */
- ret = regmap_write_bits(data->regmap, BU27010_REG_RESET,
- BU27010_MASK_RESET, BU27010_RESET_RELEASE);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor powering failed\n");
-
- msleep(1);
-
-	/*
-	 * The IRQ enabling on BU27010 is done in a peculiar way. The IRQ
-	 * enabling is not a bit mask where individual IRQs could be enabled
-	 * but a field whose values are:
-	 * 00 => IRQs disabled
-	 * 01 => Data-ready (RGBC/IR)
-	 * 10 => Data-ready (flicker)
-	 * 11 => Flicker FIFO
-	 *
-	 * So, only one IRQ can be enabled at a time and enabling for example
-	 * the flicker FIFO would automagically disable the data-ready IRQ.
-	 *
-	 * Currently the driver does not support the flicker. Hence, we can
-	 * just treat the RGBC data-ready as a single bit which can be enabled
-	 * / disabled. This works for as long as the second bit in the field
-	 * stays zero. Here we ensure it gets zeroed.
-	 */
- return regmap_clear_bits(data->regmap, BU27010_REG_MODE_CONTROL4,
- BU27010_IRQ_DIS_ALL);
-}
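For illustration only, the 2-bit IRQ source field described above could be modelled as an enum (these names are hypothetical, not register definitions from the datasheet):

/* Hypothetical model of the BU27010 IRQ source field values */
enum example_bu27010_irq_src {
	EXAMPLE_IRQ_DISABLED	 = 0x0,	/* 00 => IRQs disabled */
	EXAMPLE_IRQ_DRDY_RGBC	 = 0x1,	/* 01 => Data-ready (RGBC/IR) */
	EXAMPLE_IRQ_DRDY_FLICKER = 0x2,	/* 10 => Data-ready (flicker) */
	EXAMPLE_IRQ_FLICKER_FIFO = 0x3,	/* 11 => Flicker FIFO */
};
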
-
-static const struct bu27_chip_data bu27010_chip = {
- .name = "bu27010",
- .chip_init = bu27010_chip_init,
- .get_gain_sel = bu27010_get_gain_sel,
- .write_gain_sel = bu27010_write_gain_sel,
- .regmap_cfg = &bu27010_regmap,
- .gains = &bu27010_gains[0],
- .gains_ir = &bu27010_gains_ir[0],
- .itimes = &bu27010_itimes[0],
- .num_gains = ARRAY_SIZE(bu27010_gains),
- .num_gains_ir = ARRAY_SIZE(bu27010_gains_ir),
- .num_itimes = ARRAY_SIZE(bu27010_itimes),
- .scale1x = BU27010_SCALE_1X,
- .drdy_en_reg = BU27010_REG_MODE_CONTROL4,
- .drdy_en_mask = BU27010_DRDY_EN,
- .meas_en_reg = BU27010_REG_MODE_CONTROL5,
- .meas_en_mask = BU27010_MASK_MEAS_EN,
- .valid_reg = BU27010_REG_MODE_CONTROL5,
- .chan_sel_reg = BU27008_REG_MODE_CONTROL1,
- .chan_sel_mask = BU27010_MASK_CHAN_SEL,
- .int_time_mask = BU27010_MASK_MEAS_MODE,
- .part_id = BU27010_ID,
-};
-
-static const struct bu27_chip_data bu27008_chip = {
- .name = "bu27008",
- .chip_init = bu27008_chip_init,
- .get_gain_sel = bu27008_get_gain_sel,
- .write_gain_sel = bu27008_write_gain_sel,
- .regmap_cfg = &bu27008_regmap,
- .gains = &bu27008_gains[0],
- .gains_ir = &bu27008_gains_ir[0],
- .itimes = &bu27008_itimes[0],
- .num_gains = ARRAY_SIZE(bu27008_gains),
- .num_gains_ir = ARRAY_SIZE(bu27008_gains_ir),
- .num_itimes = ARRAY_SIZE(bu27008_itimes),
- .scale1x = BU27008_SCALE_1X,
- .drdy_en_reg = BU27008_REG_MODE_CONTROL3,
- .drdy_en_mask = BU27008_MASK_INT_EN,
- .valid_reg = BU27008_REG_MODE_CONTROL3,
- .meas_en_reg = BU27008_REG_MODE_CONTROL3,
- .meas_en_mask = BU27008_MASK_MEAS_EN,
- .chan_sel_reg = BU27008_REG_MODE_CONTROL3,
- .chan_sel_mask = BU27008_MASK_CHAN_SEL,
- .int_time_mask = BU27008_MASK_MEAS_MODE,
- .part_id = BU27008_ID,
-};
-
-#define BU27008_MAX_VALID_RESULT_WAIT_US 50000
-#define BU27008_VALID_RESULT_WAIT_QUANTA_US 1000
-
-static int bu27008_chan_read_data(struct bu27008_data *data, int reg, int *val)
-{
- int ret, valid;
- __le16 tmp;
-
- ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
- valid, (valid & BU27008_MASK_VALID),
- BU27008_VALID_RESULT_WAIT_QUANTA_US,
- BU27008_MAX_VALID_RESULT_WAIT_US);
- if (ret)
- return ret;
-
- ret = regmap_bulk_read(data->regmap, reg, &tmp, sizeof(tmp));
- if (ret)
- dev_err(data->dev, "Reading channel data failed\n");
-
- *val = le16_to_cpu(tmp);
-
- return ret;
-}
-
-static int bu27008_get_gain(struct bu27008_data *data, struct iio_gts *gts, int *gain)
-{
- int ret, sel;
-
- ret = data->cd->get_gain_sel(data, &sel);
- if (ret)
- return ret;
-
- ret = iio_gts_find_gain_by_sel(gts, sel);
- if (ret < 0) {
- dev_err(data->dev, "unknown gain value 0x%x\n", sel);
- return ret;
- }
-
- *gain = ret;
-
- return 0;
-}
-
-static int bu27008_set_gain(struct bu27008_data *data, int gain)
-{
- int ret;
-
- ret = iio_gts_find_sel_by_gain(&data->gts, gain);
- if (ret < 0)
- return ret;
-
- return data->cd->write_gain_sel(data, ret);
-}
-
-static int bu27008_get_int_time_sel(struct bu27008_data *data, int *sel)
-{
- int ret, val;
-
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &val);
- if (ret)
- return ret;
-
- val &= data->cd->int_time_mask;
- val >>= ffs(data->cd->int_time_mask) - 1;
-
- *sel = val;
-
- return 0;
-}
-
-static int bu27008_set_int_time_sel(struct bu27008_data *data, int sel)
-{
- sel <<= ffs(data->cd->int_time_mask) - 1;
-
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- data->cd->int_time_mask, sel);
-}
-
-static int bu27008_get_int_time_us(struct bu27008_data *data)
-{
- int ret, sel;
-
- ret = bu27008_get_int_time_sel(data, &sel);
- if (ret)
- return ret;
-
- return iio_gts_find_int_time_by_sel(&data->gts, sel);
-}
-
-static int _bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
- int *val2)
-{
- struct iio_gts *gts;
- int gain, ret;
-
- if (ir)
- gts = &data->gts_ir;
- else
- gts = &data->gts;
-
- ret = bu27008_get_gain(data, gts, &gain);
- if (ret)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- return iio_gts_get_scale(gts, gain, ret, val, val2);
-}
-
-static int bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
- int *val2)
-{
- int ret;
-
- mutex_lock(&data->mutex);
- ret = _bu27008_get_scale(data, ir, val, val2);
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_set_int_time(struct bu27008_data *data, int time)
-{
- int ret;
-
- ret = iio_gts_find_sel_by_int_time(&data->gts, time);
- if (ret < 0)
- return ret;
-
- return bu27008_set_int_time_sel(data, ret);
-}
-
-/* Try to change the time so that the scale is maintained */
-static int bu27008_try_set_int_time(struct bu27008_data *data, int int_time_new)
-{
- int ret, old_time_sel, new_time_sel, old_gain, new_gain;
-
- mutex_lock(&data->mutex);
-
- ret = bu27008_get_int_time_sel(data, &old_time_sel);
- if (ret < 0)
- goto unlock_out;
-
- if (!iio_gts_valid_time(&data->gts, int_time_new)) {
- dev_dbg(data->dev, "Unsupported integration time %u\n",
- int_time_new);
-
- ret = -EINVAL;
- goto unlock_out;
- }
-
- /* If we already use requested time, then we're done */
- new_time_sel = iio_gts_find_sel_by_int_time(&data->gts, int_time_new);
- if (new_time_sel == old_time_sel)
- goto unlock_out;
-
- ret = bu27008_get_gain(data, &data->gts, &old_gain);
- if (ret)
- goto unlock_out;
-
- ret = iio_gts_find_new_gain_sel_by_old_gain_time(&data->gts, old_gain,
- old_time_sel, new_time_sel, &new_gain);
- if (ret) {
- int scale1, scale2;
- bool ok;
-
- _bu27008_get_scale(data, false, &scale1, &scale2);
- dev_dbg(data->dev,
- "Can't support time %u with current scale %u %u\n",
- int_time_new, scale1, scale2);
-
- if (new_gain < 0)
- goto unlock_out;
-
- /*
- * If caller requests for integration time change and we
- * can't support the scale - then the caller should be
- * prepared to 'pick up the pieces and deal with the
- * fact that the scale changed'.
- */
- ret = iio_find_closest_gain_low(&data->gts, new_gain, &ok);
- if (!ok)
- dev_dbg(data->dev, "optimal gain out of range\n");
-
- if (ret < 0) {
- dev_dbg(data->dev,
- "Total gain increase. Risk of saturation");
- ret = iio_gts_get_min_gain(&data->gts);
- if (ret < 0)
- goto unlock_out;
- }
- new_gain = ret;
- dev_dbg(data->dev, "scale changed, new gain %u\n", new_gain);
- }
-
- ret = bu27008_set_gain(data, new_gain);
- if (ret)
- goto unlock_out;
-
- ret = bu27008_set_int_time(data, int_time_new);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_meas_set(struct bu27008_data *data, bool enable)
-{
- if (enable)
- return regmap_set_bits(data->regmap, data->cd->meas_en_reg,
- data->cd->meas_en_mask);
- return regmap_clear_bits(data->regmap, data->cd->meas_en_reg,
- data->cd->meas_en_mask);
-}
-
-static int bu27008_chan_cfg(struct bu27008_data *data,
- struct iio_chan_spec const *chan)
-{
- int chan_sel;
-
- if (chan->scan_index == BU27008_BLUE)
- chan_sel = BU27008_BLUE2_CLEAR3;
- else
- chan_sel = BU27008_CLEAR2_IR3;
-
-	/*
-	 * Prepare the bitfield for channel selection. FIELD_PREP() works only
-	 * when the mask is constant. In our case the mask is assigned based
-	 * on the chip type. Hence the open-coded FIELD_PREP here. We don't
-	 * bother zeroing the irrelevant bits though - update_bits takes care
-	 * of that.
-	 */
- chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
-
-	return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
-				  data->cd->chan_sel_mask, chan_sel);
-}
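The runtime-mask shift above recurs in several places in this driver; a generic form might look like the sketch below (illustrative only — the kernel's FIELD_PREP() cannot be used here because it requires a compile-time constant mask):

/*
 * Open-coded FIELD_PREP for a mask only known at runtime: shift the value
 * to the position of the mask's lowest set bit, then clamp to the mask.
 */
static inline unsigned int example_field_prep(unsigned int mask,
					      unsigned int val)
{
	return (val << (ffs(mask) - 1)) & mask;
}
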
-
-static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
- struct iio_chan_spec const *chan, int *val, int *val2)
-{
- int ret, int_time;
-
- ret = bu27008_chan_cfg(data, chan);
- if (ret)
- return ret;
-
- ret = bu27008_meas_set(data, true);
- if (ret)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- int_time = BU27008_MEAS_TIME_MAX_MS;
- else
- int_time = ret / USEC_PER_MSEC;
-
- msleep(int_time);
-
- ret = bu27008_chan_read_data(data, chan->address, val);
- if (!ret)
- ret = IIO_VAL_INT;
-
- if (bu27008_meas_set(data, false))
- dev_warn(data->dev, "measurement disabling failed\n");
-
- return ret;
-}
-
-#define BU27008_LUX_DATA_RED 0
-#define BU27008_LUX_DATA_GREEN 1
-#define BU27008_LUX_DATA_BLUE 2
-#define BU27008_LUX_DATA_IR 3
-#define LUX_DATA_SIZE (BU27008_NUM_HW_CHANS * sizeof(__le16))
-
-static int bu27008_read_lux_chans(struct bu27008_data *data, unsigned int time,
- __le16 *chan_data)
-{
- int ret, chan_sel, tmpret, valid;
-
- chan_sel = BU27008_BLUE2_IR3 << (ffs(data->cd->chan_sel_mask) - 1);
-
- ret = regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
- data->cd->chan_sel_mask, chan_sel);
- if (ret)
- return ret;
-
- ret = bu27008_meas_set(data, true);
- if (ret)
- return ret;
-
- msleep(time / USEC_PER_MSEC);
-
- ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
- valid, (valid & BU27008_MASK_VALID),
- BU27008_VALID_RESULT_WAIT_QUANTA_US,
- BU27008_MAX_VALID_RESULT_WAIT_US);
- if (ret)
- goto out;
-
- ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, chan_data,
- LUX_DATA_SIZE);
-out:
- tmpret = bu27008_meas_set(data, false);
- if (tmpret)
- dev_warn(data->dev, "Stopping measurement failed\n");
-
- return ret;
-}
-
-/*
- * The following equation for computing lux out of register values was given
- * by ROHM HW colleagues:
- *
- * Red   = RedData   * 1024 / Gain * 20 / meas_mode
- * Green = GreenData * 1024 / Gain * 20 / meas_mode
- * Blue  = BlueData  * 1024 / Gain * 20 / meas_mode
- * IR    = IrData    * 1024 / Gain * 20 / meas_mode
- *
- * where meas_mode is the integration time in mS / 10
- *
- * IRratio = (IR > 0.18 * Green) ? 0 : 1
- *
- * Lx = max(c1*Red + c2*Green + c3*Blue, 0)
- *
- * for
- * IRratio 0: c1 = -0.00002237, c2 = 0.0003219,   c3 = -0.000120371
- * IRratio 1: c1 = -0.00001074, c2 = 0.000305415, c3 = -0.000129367
- */
-
-/*
- * The max chan data is 0xffff. When we multiply it by 1024 * 20, we'll get
- * 0x4FFFB000 which still fits in 32-bit integer. This won't overflow.
- */
-#define NORM_CHAN_DATA_FOR_LX_CALC(chan, gain, time) (le16_to_cpu(chan) * \
- 1024 * 20 / (gain) / (time))
-static u64 bu27008_calc_nlux(struct bu27008_data *data, __le16 *lux_data,
- unsigned int gain, unsigned int gain_ir, unsigned int time)
-{
- unsigned int red, green, blue, ir;
- s64 c1, c2, c3, nlux;
-
- time /= 10000;
- ir = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_IR], gain_ir, time);
- red = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_RED], gain, time);
- green = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_GREEN], gain, time);
- blue = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_BLUE], gain, time);
-
- if ((u64)ir * 100LLU > (u64)green * 18LLU) {
- c1 = -22370;
- c2 = 321900;
- c3 = -120371;
- } else {
- c1 = -10740;
- c2 = 305415;
- c3 = -129367;
- }
- nlux = c1 * red + c2 * green + c3 * blue;
-
- return max_t(s64, 0, nlux);
-}
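A worked example of the fixed-point conversion above, with made-up register values:

/*
 * Example (assumed values, for illustration only): raw counts
 * red=1000, green=2000, blue=500, ir=100 with gain=16, gain_ir=16 and
 * a 100 mS integration time (time=10 after the division by 10000).
 * The normalization factor is 1024 * 20 / (16 * 10) = 128, giving
 * red=128000, green=256000, blue=64000, ir=12800. ir * 100 (1280000)
 * is not greater than green * 18 (4608000), so the IRratio-1
 * coefficients apply:
 *
 *   nlux = -10740 * 128000 + 305415 * 256000 - 129367 * 64000
 *        = 68532032000 nano-lux ~= 68.5 lx
 */
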
-
-static int bu27008_get_time_n_gains(struct bu27008_data *data,
- unsigned int *gain, unsigned int *gain_ir, unsigned int *time)
-{
- int ret;
-
- ret = bu27008_get_gain(data, &data->gts, gain);
- if (ret < 0)
- return ret;
-
- ret = bu27008_get_gain(data, &data->gts_ir, gain_ir);
- if (ret < 0)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- /* Max integration time is 400000. Fits in signed int. */
- *time = ret;
-
- return 0;
-}
-
-struct bu27008_buf {
- __le16 chan[BU27008_NUM_HW_CHANS];
- u64 lux __aligned(8);
- s64 ts __aligned(8);
-};
-
-static int bu27008_buffer_fill_lux(struct bu27008_data *data,
- struct bu27008_buf *raw)
-{
- unsigned int gain, gain_ir, time;
- int ret;
-
- ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
- if (ret)
- return ret;
-
- raw->lux = bu27008_calc_nlux(data, raw->chan, gain, gain_ir, time);
-
- return 0;
-}
-
-static int bu27008_read_lux(struct bu27008_data *data, struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int *val, int *val2)
-{
- __le16 lux_data[BU27008_NUM_HW_CHANS];
- unsigned int gain, gain_ir, time;
- u64 nlux;
- int ret;
-
- ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
- if (ret)
- return ret;
-
- ret = bu27008_read_lux_chans(data, time, lux_data);
- if (ret)
- return ret;
-
- nlux = bu27008_calc_nlux(data, lux_data, gain, gain_ir, time);
- *val = (int)nlux;
- *val2 = nlux >> 32LLU;
-
- return IIO_VAL_INT_64;
-}
-
-static int bu27008_read_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int busy, ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- busy = iio_device_claim_direct_mode(idev);
- if (busy)
- return -EBUSY;
-
- mutex_lock(&data->mutex);
- if (chan->type == IIO_LIGHT)
- ret = bu27008_read_lux(data, idev, chan, val, val2);
- else
- ret = bu27008_read_one(data, idev, chan, val, val2);
- mutex_unlock(&data->mutex);
-
- iio_device_release_direct_mode(idev);
-
- return ret;
-
- case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_LIGHT) {
- *val = 0;
- *val2 = 1;
- return IIO_VAL_INT_PLUS_NANO;
- }
- ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR,
- val, val2);
- if (ret)
- return ret;
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case IIO_CHAN_INFO_INT_TIME:
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- *val = 0;
- *val2 = ret;
-
- return IIO_VAL_INT_PLUS_MICRO;
-
- default:
- return -EINVAL;
- }
-}
-
-/* Called if the new scale could not be supported with existing int-time */
-static int bu27008_try_find_new_time_gain(struct bu27008_data *data, int val,
- int val2, int *gain_sel)
-{
- int i, ret, new_time_sel;
-
- for (i = 0; i < data->gts.num_itime; i++) {
- new_time_sel = data->gts.itime_table[i].sel;
- ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
- new_time_sel, val, val2, gain_sel);
- if (!ret)
- break;
- }
- if (i == data->gts.num_itime) {
- dev_err(data->dev, "Can't support scale %u %u\n", val, val2);
-
- return -EINVAL;
- }
-
- return bu27008_set_int_time_sel(data, new_time_sel);
-}
-
-static int bu27008_set_scale(struct bu27008_data *data,
- struct iio_chan_spec const *chan,
- int val, int val2)
-{
- int ret, gain_sel, time_sel;
-
- if (chan->scan_index == BU27008_IR)
- return -EINVAL;
-
- mutex_lock(&data->mutex);
-
- ret = bu27008_get_int_time_sel(data, &time_sel);
- if (ret < 0)
- goto unlock_out;
-
- ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
- val, val2, &gain_sel);
- if (ret) {
- ret = bu27008_try_find_new_time_gain(data, val, val2, &gain_sel);
- if (ret)
- goto unlock_out;
-	}
- ret = data->cd->write_gain_sel(data, gain_sel);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
-	switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_CHAN_INFO_INT_TIME:
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
-}
-
-static int bu27008_write_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int ret;
-
- /*
- * Do not allow changing scale when measurement is ongoing as doing so
- * could make values in the buffer inconsistent.
- */
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- ret = bu27008_set_scale(data, chan, val, val2);
- break;
- case IIO_CHAN_INFO_INT_TIME:
- if (val) {
- ret = -EINVAL;
- break;
- }
- ret = bu27008_try_set_int_time(data, val2);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- iio_device_release_direct_mode(idev);
-
- return ret;
-}
-
-static int bu27008_read_avail(struct iio_dev *idev,
- struct iio_chan_spec const *chan, const int **vals,
- int *type, int *length, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- switch (mask) {
- case IIO_CHAN_INFO_INT_TIME:
- return iio_gts_avail_times(&data->gts, vals, type, length);
- case IIO_CHAN_INFO_SCALE:
- if (chan->channel2 == IIO_MOD_LIGHT_IR)
- return iio_gts_all_avail_scales(&data->gts_ir, vals,
- type, length);
- return iio_gts_all_avail_scales(&data->gts, vals, type, length);
- default:
- return -EINVAL;
- }
-}
-
-static int bu27008_update_scan_mode(struct iio_dev *idev,
- const unsigned long *scan_mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int chan_sel;
-
- /* Configure channel selection */
- if (test_bit(BU27008_BLUE, idev->active_scan_mask)) {
- if (test_bit(BU27008_CLEAR, idev->active_scan_mask))
- chan_sel = BU27008_BLUE2_CLEAR3;
- else
- chan_sel = BU27008_BLUE2_IR3;
- } else {
- chan_sel = BU27008_CLEAR2_IR3;
- }
-
- chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
-
- return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
- data->cd->chan_sel_mask, chan_sel);
-}
-
-static const struct iio_info bu27008_info = {
- .read_raw = &bu27008_read_raw,
- .write_raw = &bu27008_write_raw,
- .write_raw_get_fmt = &bu27008_write_raw_get_fmt,
- .read_avail = &bu27008_read_avail,
- .update_scan_mode = bu27008_update_scan_mode,
- .validate_trigger = iio_validate_own_trigger,
-};
-
-static int bu27008_trigger_set_state(struct iio_trigger *trig, bool state)
-{
- struct bu27008_data *data = iio_trigger_get_drvdata(trig);
- int ret;
-
- if (state)
- ret = regmap_set_bits(data->regmap, data->cd->drdy_en_reg,
- data->cd->drdy_en_mask);
- else
- ret = regmap_clear_bits(data->regmap, data->cd->drdy_en_reg,
- data->cd->drdy_en_mask);
- if (ret)
- dev_err(data->dev, "Failed to set trigger state\n");
-
- return ret;
-}
-
-static void bu27008_trigger_reenable(struct iio_trigger *trig)
-{
- struct bu27008_data *data = iio_trigger_get_drvdata(trig);
-
- enable_irq(data->irq);
-}
-
-static const struct iio_trigger_ops bu27008_trigger_ops = {
- .set_trigger_state = bu27008_trigger_set_state,
- .reenable = bu27008_trigger_reenable,
-};
-
-static irqreturn_t bu27008_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *idev = pf->indio_dev;
- struct bu27008_data *data = iio_priv(idev);
- struct bu27008_buf raw;
- int ret, dummy;
-
- memset(&raw, 0, sizeof(raw));
-
- /*
- * After some measurements, it seems reading the
- * BU27008_REG_MODE_CONTROL3 debounces the IRQ line
- */
- ret = regmap_read(data->regmap, data->cd->valid_reg, &dummy);
- if (ret < 0)
- goto err_read;
-
- ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, &raw.chan,
- sizeof(raw.chan));
- if (ret < 0)
- goto err_read;
-
- if (test_bit(BU27008_LUX, idev->active_scan_mask)) {
- ret = bu27008_buffer_fill_lux(data, &raw);
- if (ret)
- goto err_read;
- }
-
- iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
-err_read:
- iio_trigger_notify_done(idev->trig);
-
- return IRQ_HANDLED;
-}
-
-static int bu27008_buffer_preenable(struct iio_dev *idev)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- return bu27008_meas_set(data, true);
-}
-
-static int bu27008_buffer_postdisable(struct iio_dev *idev)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- return bu27008_meas_set(data, false);
-}
-
-static const struct iio_buffer_setup_ops bu27008_buffer_ops = {
- .preenable = bu27008_buffer_preenable,
- .postdisable = bu27008_buffer_postdisable,
-};
-
-static irqreturn_t bu27008_data_rdy_poll(int irq, void *private)
-{
- /*
- * The BU27008 keeps IRQ asserted until we read the VALID bit from
- * a register. We need to keep the IRQ disabled until then.
- */
- disable_irq_nosync(irq);
- iio_trigger_poll(private);
-
- return IRQ_HANDLED;
-}
-
-static int bu27008_setup_trigger(struct bu27008_data *data, struct iio_dev *idev)
-{
- struct iio_trigger *itrig;
- char *name;
- int ret;
-
- ret = devm_iio_triggered_buffer_setup(data->dev, idev,
- &iio_pollfunc_store_time,
- bu27008_trigger_handler,
- &bu27008_buffer_ops);
- if (ret)
- return dev_err_probe(data->dev, ret,
- "iio_triggered_buffer_setup_ext FAIL\n");
-
- itrig = devm_iio_trigger_alloc(data->dev, "%sdata-rdy-dev%d",
- idev->name, iio_device_id(idev));
- if (!itrig)
- return -ENOMEM;
-
- data->trig = itrig;
-
- itrig->ops = &bu27008_trigger_ops;
- iio_trigger_set_drvdata(itrig, data);
-
-	name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-bu27008",
-			      dev_name(data->dev));
-	if (!name)
-		return -ENOMEM;
-
- ret = devm_request_irq(data->dev, data->irq,
- &bu27008_data_rdy_poll,
- 0, name, itrig);
- if (ret)
- return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
-
- ret = devm_iio_trigger_register(data->dev, itrig);
- if (ret)
- return dev_err_probe(data->dev, ret,
- "Trigger registration failed\n");
-
- /* set default trigger */
- idev->trig = iio_trigger_get(itrig);
-
- return 0;
-}
-
-static int bu27008_probe(struct i2c_client *i2c)
-{
- struct device *dev = &i2c->dev;
- struct bu27008_data *data;
- struct regmap *regmap;
- unsigned int part_id, reg;
- struct iio_dev *idev;
- int ret;
-
- idev = devm_iio_device_alloc(dev, sizeof(*data));
- if (!idev)
- return -ENOMEM;
-
- ret = devm_regulator_get_enable(dev, "vdd");
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get regulator\n");
-
- data = iio_priv(idev);
-
- data->cd = device_get_match_data(&i2c->dev);
- if (!data->cd)
- return -ENODEV;
-
- regmap = devm_regmap_init_i2c(i2c, data->cd->regmap_cfg);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "Failed to initialize Regmap\n");
-
- ret = regmap_read(regmap, BU27008_REG_SYSTEM_CONTROL, &reg);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to access sensor\n");
-
- part_id = FIELD_GET(BU27008_MASK_PART_ID, reg);
-
- if (part_id != data->cd->part_id)
- dev_warn(dev, "unknown device 0x%x\n", part_id);
-
- ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains,
- data->cd->num_gains, data->cd->itimes,
- data->cd->num_itimes, &data->gts);
- if (ret)
- return ret;
-
- ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains_ir,
- data->cd->num_gains_ir, data->cd->itimes,
- data->cd->num_itimes, &data->gts_ir);
- if (ret)
- return ret;
-
- mutex_init(&data->mutex);
- data->regmap = regmap;
- data->dev = dev;
- data->irq = i2c->irq;
-
- idev->channels = bu27008_channels;
- idev->num_channels = ARRAY_SIZE(bu27008_channels);
- idev->name = data->cd->name;
- idev->info = &bu27008_info;
- idev->modes = INDIO_DIRECT_MODE;
- idev->available_scan_masks = bu27008_scan_masks;
-
- ret = data->cd->chip_init(data);
- if (ret)
- return ret;
-
- if (i2c->irq) {
- ret = bu27008_setup_trigger(data, idev);
- if (ret)
- return ret;
- } else {
- dev_info(dev, "No IRQ, buffered mode disabled\n");
- }
-
- ret = devm_iio_device_register(dev, idev);
- if (ret)
- return dev_err_probe(dev, ret,
- "Unable to register iio device\n");
-
- return 0;
-}
-
-static const struct of_device_id bu27008_of_match[] = {
- { .compatible = "rohm,bu27008", .data = &bu27008_chip },
- { .compatible = "rohm,bu27010", .data = &bu27010_chip },
- { }
-};
-MODULE_DEVICE_TABLE(of, bu27008_of_match);
-
-static struct i2c_driver bu27008_i2c_driver = {
- .driver = {
- .name = "bu27008",
- .of_match_table = bu27008_of_match,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
- .probe = bu27008_probe,
-};
-module_i2c_driver(bu27008_i2c_driver);
-
-MODULE_DESCRIPTION("ROHM BU27008 and BU27010 colour sensor driver");
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("IIO_GTS_HELPER");
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 4f591c2278f2..cc25596cb248 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -205,7 +206,7 @@ struct bu27034_data {
struct {
u32 mlux;
__le16 channels[BU27034_NUM_HW_DATA_CHANS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
@@ -395,30 +396,26 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
int numg = ARRAY_SIZE(gains);
int ret, int_time_old, i;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bu27034_get_int_time(data);
if (ret < 0)
- goto unlock_out;
+ return ret;
int_time_old = ret;
if (!iio_gts_valid_time(&data->gts, time_us)) {
dev_err(data->dev, "Unsupported integration time %u\n",
time_us);
- ret = -EINVAL;
-
- goto unlock_out;
+ return -EINVAL;
}
- if (time_us == int_time_old) {
- ret = 0;
- goto unlock_out;
- }
+ if (time_us == int_time_old)
+ return 0;
for (i = 0; i < numg; i++) {
ret = bu27034_get_gain(data, gains[i].chan, &gains[i].old_gain);
if (ret)
- goto unlock_out;
+			return ret;
ret = iio_gts_find_new_gain_by_old_gain_time(&data->gts,
gains[i].old_gain,
@@ -434,7 +431,7 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
gains[i].chan, time_us, scale1, scale2);
if (gains[i].new_gain < 0)
- goto unlock_out;
+ return ret;
/*
* If caller requests for integration time change and we
@@ -455,7 +452,7 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
"Total gain increase. Risk of saturation");
ret = iio_gts_get_min_gain(&data->gts);
if (ret < 0)
- goto unlock_out;
+ return ret;
}
dev_dbg(data->dev, "chan %u scale changed\n",
gains[i].chan);
@@ -468,15 +465,10 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
for (i = 0; i < numg; i++) {
ret = bu27034_set_gain(data, gains[i].chan, gains[i].new_gain);
if (ret)
- goto unlock_out;
+ return ret;
}
- ret = bu27034_set_int_time(data, time_us);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_set_int_time(data, time_us);
}
static int bu27034_set_scale(struct bu27034_data *data, int chan,
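The guard(mutex)() conversions above use the scope-based cleanup helpers from <linux/cleanup.h>: the lock is released automatically when the guard goes out of scope, which is what lets every goto unlock_out collapse into a plain return. A minimal sketch of the idiom (illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

/* The mutex is dropped automatically on every return path */
static int example_locked_op(struct mutex *lock, bool fail)
{
	guard(mutex)(lock);

	if (fail)
		return -EINVAL;	/* unlocked here... */

	return 0;		/* ...and here */
}
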
@@ -492,10 +484,10 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
return -EINVAL;
}
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, BU27034_REG_MODE_CONTROL1, &time_sel);
if (ret)
- goto unlock_out;
+ return ret;
ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
val, val2, &gain_sel);
@@ -518,7 +510,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
ret = bu27034_get_gain(data, gain.chan, &gain.old_gain);
if (ret)
- goto unlock_out;
+ return ret;
/*
* Iterate through all the times to see if we find one which
@@ -551,26 +543,20 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
if (!found) {
dev_dbg(data->dev,
"Can't set scale maintaining other channel\n");
- ret = -EINVAL;
-
- goto unlock_out;
+ return -EINVAL;
}
ret = bu27034_set_gain(data, gain.chan, gain.new_gain);
if (ret)
- goto unlock_out;
+ return ret;
ret = regmap_update_bits(data->regmap, BU27034_REG_MODE_CONTROL1,
BU27034_MASK_MEAS_MODE, new_time_sel);
if (ret)
- goto unlock_out;
+ return ret;
}
- ret = bu27034_write_gain_sel(data, chan, gain_sel);
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_write_gain_sel(data, chan, gain_sel);
}
/*
@@ -1221,42 +1207,33 @@ static int bu27034_buffer_enable(struct iio_dev *idev)
struct task_struct *task;
int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bu27034_meas_set(data, true);
if (ret)
- goto unlock_out;
+ return ret;
task = kthread_run(bu27034_buffer_thread, idev,
"bu27034-buffering-%u",
iio_device_id(idev));
- if (IS_ERR(task)) {
- ret = PTR_ERR(task);
- goto unlock_out;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);
data->task = task;
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return 0;
}
static int bu27034_buffer_disable(struct iio_dev *idev)
{
struct bu27034_data *data = iio_priv(idev);
- int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->task) {
kthread_stop(data->task);
data->task = NULL;
}
- ret = bu27034_meas_set(data, false);
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_meas_set(data, false);
}
static const struct iio_buffer_setup_ops bu27034_buffer_ops = {
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 56f5fbbf79ac..2ba917c5c138 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -203,7 +203,7 @@ struct rpr0521_data {
struct {
__le16 channels[3];
u8 garbage;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
index 283086887caf..1f93e3dc45c2 100644
--- a/drivers/iio/light/st_uvis25.h
+++ b/drivers/iio/light/st_uvis25.h
@@ -30,7 +30,7 @@ struct st_uvis25_hw {
/* Ensure timestamp is naturally aligned */
struct {
u8 chan;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 4fecdf10aeb1..884e43e4cda4 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -56,7 +56,7 @@ struct tcs3414_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 4186aac04902..2bd36a344ea5 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -67,7 +67,7 @@ struct tcs3472_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 337a1332c2c6..67c94be02018 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -105,7 +105,7 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4035_data *data = iio_priv(indio_dev);
/* Ensure naturally aligned timestamp */
- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
+ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8) = { };
int ret;
ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
diff --git a/drivers/iio/light/veml3235.c b/drivers/iio/light/veml3235.c
index 66361c3012a3..77c9ae17ed47 100644
--- a/drivers/iio/light/veml3235.c
+++ b/drivers/iio/light/veml3235.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
+#include <linux/iio/iio-gts-helper.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -35,17 +36,33 @@ struct veml3235_data {
struct device *dev;
struct regmap *regmap;
struct veml3235_rf rf;
+ struct iio_gts gts;
};
-static const int veml3235_it_times[][2] = {
- { 0, 50000 },
- { 0, 100000 },
- { 0, 200000 },
- { 0, 400000 },
- { 0, 800000 },
+static const struct iio_itime_sel_mul veml3235_it_sel[] = {
+ GAIN_SCALE_ITIME_US(50000, 0, 1),
+ GAIN_SCALE_ITIME_US(100000, 1, 2),
+ GAIN_SCALE_ITIME_US(200000, 2, 4),
+ GAIN_SCALE_ITIME_US(400000, 3, 8),
+ GAIN_SCALE_ITIME_US(800000, 4, 16),
};
-static const int veml3235_scale_vals[] = { 1, 2, 4, 8 };
+/*
+ * The MSB (DG) doubles the value of the rest of the field, which leads to
+ * two possible combinations to obtain gain = 2 and gain = 4. The gain
+ * handling can be simplified by restricting DG = 1 to the only gain that
+ * really requires it, gain = 8. Note that "X10" is a reserved value.
+ */
+#define VEML3235_SEL_GAIN_X1 0
+#define VEML3235_SEL_GAIN_X2 1
+#define VEML3235_SEL_GAIN_X4 3
+#define VEML3235_SEL_GAIN_X8 7
+static const struct iio_gain_sel_pair veml3235_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML3235_SEL_GAIN_X1),
+ GAIN_SCALE_GAIN(2, VEML3235_SEL_GAIN_X2),
+ GAIN_SCALE_GAIN(4, VEML3235_SEL_GAIN_X4),
+ GAIN_SCALE_GAIN(8, VEML3235_SEL_GAIN_X8),
+};
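A decode sketch for the field layout the comment above describes (the helper is illustrative; the driver itself goes through the GTS tables, and this mirrors the switch in the veml3235_get_gain() code removed below):

/* Bits 1:0 select x1/x2/x4; bit 2 (DG) doubles the selected gain */
static int example_veml3235_sel_to_gain(unsigned int sel)
{
	int gain;

	switch (sel & 0x3) {
	case 0: gain = 1; break;
	case 1: gain = 2; break;
	case 3: gain = 4; break;
	default: return -EINVAL;	/* reserved encoding */
	}

	if (sel & BIT(2))
		gain *= 2;

	return gain;
}
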
static int veml3235_power_on(struct veml3235_data *data)
{
@@ -101,42 +118,58 @@ static const struct iio_chan_spec veml3235_channels[] = {
},
};
+static const struct regmap_range veml3235_readable_ranges[] = {
+ regmap_reg_range(VEML3235_REG_CONF, VEML3235_REG_ID),
+};
+
+static const struct regmap_access_table veml3235_readable_table = {
+ .yes_ranges = veml3235_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_readable_ranges),
+};
+
+static const struct regmap_range veml3235_writable_ranges[] = {
+ regmap_reg_range(VEML3235_REG_CONF, VEML3235_REG_CONF),
+};
+
+static const struct regmap_access_table veml3235_writable_table = {
+ .yes_ranges = veml3235_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_writable_ranges),
+};
+
+static const struct regmap_range veml3235_volatile_ranges[] = {
+ regmap_reg_range(VEML3235_REG_WH_DATA, VEML3235_REG_ALS_DATA),
+};
+
+static const struct regmap_access_table veml3235_volatile_table = {
+ .yes_ranges = veml3235_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_volatile_ranges),
+};
+
static const struct regmap_config veml3235_regmap_config = {
.name = "veml3235_regmap",
.reg_bits = 8,
.val_bits = 16,
.max_register = VEML3235_REG_ID,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &veml3235_readable_table,
+ .wr_table = &veml3235_writable_table,
+ .volatile_table = &veml3235_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
static int veml3235_get_it(struct veml3235_data *data, int *val, int *val2)
{
- int ret, reg;
+ int ret, it_idx;
- ret = regmap_field_read(data->rf.it, &reg);
+ ret = regmap_field_read(data->rf.it, &it_idx);
if (ret)
return ret;
- switch (reg) {
- case 0:
- *val2 = 50000;
- break;
- case 1:
- *val2 = 100000;
- break;
- case 2:
- *val2 = 200000;
- break;
- case 3:
- *val2 = 400000;
- break;
- case 4:
- *val2 = 800000;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (ret < 0)
+ return ret;
+ *val2 = ret;
*val = 0;
return IIO_VAL_INT_PLUS_MICRO;
@@ -145,78 +178,78 @@ static int veml3235_get_it(struct veml3235_data *data, int *val, int *val2)
static int veml3235_set_it(struct iio_dev *indio_dev, int val, int val2)
{
struct veml3235_data *data = iio_priv(indio_dev);
- int ret, new_it;
+ int ret, gain_idx, it_idx, new_gain, prev_gain, prev_it;
+ bool in_range;
- if (val)
+ if (val || !iio_gts_valid_time(&data->gts, val2))
return -EINVAL;
- switch (val2) {
- case 50000:
- new_it = 0x00;
- break;
- case 100000:
- new_it = 0x01;
- break;
- case 200000:
- new_it = 0x02;
- break;
- case 400000:
- new_it = 0x03;
- break;
- case 800000:
- new_it = 0x04;
- break;
- default:
- return -EINVAL;
- }
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- ret = regmap_field_write(data->rf.it, new_it);
- if (ret) {
- dev_err(data->dev,
- "failed to update integration time: %d\n", ret);
+ ret = regmap_field_read(data->rf.gain, &gain_idx);
+ if (ret)
return ret;
- }
- return 0;
+ prev_it = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (prev_it < 0)
+ return prev_it;
+
+ if (prev_it == val2)
+ return 0;
+
+ prev_gain = iio_gts_find_gain_by_sel(&data->gts, gain_idx);
+ if (prev_gain < 0)
+ return prev_gain;
+
+ ret = iio_gts_find_new_gain_by_gain_time_min(&data->gts, prev_gain, prev_it,
+ val2, &new_gain, &in_range);
+ if (ret)
+ return ret;
+
+ if (!in_range)
+ dev_dbg(data->dev, "Optimal gain out of range\n");
+
+ ret = iio_gts_find_sel_by_int_time(&data->gts, val2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_field_write(data->rf.it, ret);
+ if (ret)
+ return ret;
+
+ ret = iio_gts_find_sel_by_gain(&data->gts, new_gain);
+ if (ret < 0)
+ return ret;
+
+ return regmap_field_write(data->rf.gain, ret);
}
-static int veml3235_set_gain(struct iio_dev *indio_dev, int val, int val2)
+static int veml3235_set_scale(struct iio_dev *indio_dev, int val, int val2)
{
struct veml3235_data *data = iio_priv(indio_dev);
- int ret, new_gain;
+ int ret, it_idx, gain_sel, time_sel;
- if (val2 != 0)
- return -EINVAL;
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- switch (val) {
- case 1:
- new_gain = 0x00;
- break;
- case 2:
- new_gain = 0x01;
- break;
- case 4:
- new_gain = 0x03;
- break;
- case 8:
- new_gain = 0x07;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_gain_time_sel_for_scale(&data->gts, val, val2,
+ &gain_sel, &time_sel);
+ if (ret)
+ return ret;
- ret = regmap_field_write(data->rf.gain, new_gain);
- if (ret) {
- dev_err(data->dev, "failed to set gain: %d\n", ret);
+ ret = regmap_field_write(data->rf.it, time_sel);
+ if (ret)
return ret;
- }
- return 0;
+ return regmap_field_write(data->rf.gain, gain_sel);
}
-static int veml3235_get_gain(struct veml3235_data *data, int *val)
+static int veml3235_get_scale(struct veml3235_data *data, int *val, int *val2)
{
- int ret, reg;
+ int gain, it, reg, ret;
ret = regmap_field_read(data->rf.gain, &reg);
if (ret) {
@@ -224,25 +257,25 @@ static int veml3235_get_gain(struct veml3235_data *data, int *val)
return ret;
}
- switch (reg & 0x03) {
- case 0:
- *val = 1;
- break;
- case 1:
- *val = 2;
- break;
- case 3:
- *val = 4;
- break;
- default:
- return -EINVAL;
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret) {
+ dev_err(data->dev, "failed to read integration time %d\n", ret);
+ return ret;
}
- /* Double gain */
- if (reg & 0x04)
- *val *= 2;
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ ret = iio_gts_get_scale(&data->gts, gain, it, val, val2);
+ if (ret)
+ return ret;
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_NANO;
}
static int veml3235_read_raw(struct iio_dev *indio_dev,
@@ -276,7 +309,7 @@ static int veml3235_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_INT_TIME:
return veml3235_get_it(data, val, val2);
case IIO_CHAN_INFO_SCALE:
- return veml3235_get_gain(data, val);
+ return veml3235_get_scale(data, val, val2);
default:
return -EINVAL;
}
@@ -287,17 +320,27 @@ static int veml3235_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct veml3235_data *data = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *vals = (int *)&veml3235_it_times;
- *length = 2 * ARRAY_SIZE(veml3235_it_times);
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_avail_times(&data->gts, vals, type, length);
+ case IIO_CHAN_INFO_SCALE:
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml3235_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
case IIO_CHAN_INFO_SCALE:
- *vals = (int *)&veml3235_scale_vals;
- *length = ARRAY_SIZE(veml3235_scale_vals);
- *type = IIO_VAL_INT;
- return IIO_AVAIL_LIST;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -311,7 +354,7 @@ static int veml3235_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_INT_TIME:
return veml3235_set_it(indio_dev, val, val2);
case IIO_CHAN_INFO_SCALE:
- return veml3235_set_gain(indio_dev, val, val2);
+ return veml3235_set_scale(indio_dev, val, val2);
}
return -EINVAL;
@@ -321,7 +364,7 @@ static void veml3235_read_id(struct veml3235_data *data)
{
int ret, reg;
- ret = regmap_field_read(data->rf.id, &reg);
+ ret = regmap_field_read(data->rf.id, &reg);
if (ret) {
dev_info(data->dev, "failed to read ID\n");
return;
@@ -371,6 +414,13 @@ static int veml3235_hw_init(struct iio_dev *indio_dev)
struct device *dev = data->dev;
int ret;
+ ret = devm_iio_init_iio_gts(data->dev, 0, 272640000,
+ veml3235_gain_sel, ARRAY_SIZE(veml3235_gain_sel),
+ veml3235_it_sel, ARRAY_SIZE(veml3235_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "failed to init iio gts\n");
+
/* Set gain to 1 and integration time to 100 ms */
ret = regmap_field_write(data->rf.gain, 0x00);
if (ret)
@@ -389,9 +439,10 @@ static int veml3235_hw_init(struct iio_dev *indio_dev)
}
static const struct iio_info veml3235_info = {
- .read_raw = veml3235_read_raw,
- .read_avail = veml3235_read_avail,
+ .read_raw = veml3235_read_raw,
+ .read_avail = veml3235_read_avail,
.write_raw = veml3235_write_raw,
+ .write_raw_get_fmt = veml3235_write_raw_get_fmt,
};
static int veml3235_probe(struct i2c_client *client)
@@ -493,3 +544,4 @@ module_i2c_driver(veml3235_driver);
MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
MODULE_DESCRIPTION("VEML3235 Ambient Light Sensor");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_GTS_HELPER");
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index ccb43dfd5cf7..9b71825eea9b 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -28,6 +28,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
/* Device registers */
#define VEML6030_REG_ALS_CONF 0x00
@@ -37,6 +39,7 @@
#define VEML6030_REG_ALS_DATA 0x04
#define VEML6030_REG_WH_DATA 0x05
#define VEML6030_REG_ALS_INT 0x06
+#define VEML6030_REG_DATA(ch) (VEML6030_REG_ALS_DATA + (ch))
/* Bit masks for specific functionality */
#define VEML6030_ALS_IT GENMASK(9, 6)
@@ -56,6 +59,12 @@
#define VEML6035_INT_CHAN BIT(3)
#define VEML6035_CHAN_EN BIT(2)
+enum veml6030_scan {
+ VEML6030_SCAN_ALS,
+ VEML6030_SCAN_WH,
+ VEML6030_SCAN_TIMESTAMP,
+};
+
struct veml603x_chip {
const char *name;
const int(*scale_vals)[][2];
@@ -242,6 +251,13 @@ static const struct iio_chan_spec veml6030_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.event_spec = veml6030_event_spec,
.num_event_specs = ARRAY_SIZE(veml6030_event_spec),
+ .scan_index = VEML6030_SCAN_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_INTENSITY,
@@ -253,7 +269,15 @@ static const struct iio_chan_spec veml6030_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_WH,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec veml7700_channels[] = {
@@ -266,6 +290,13 @@ static const struct iio_chan_spec veml7700_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_INTENSITY,
@@ -277,7 +308,15 @@ static const struct iio_chan_spec veml7700_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_WH,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
static const struct regmap_config veml6030_regmap_config = {
@@ -889,6 +928,37 @@ static irqreturn_t veml6030_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static irqreturn_t veml6030_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio = pf->indio_dev;
+ struct veml6030_data *data = iio_priv(iio);
+ unsigned int reg;
+ int ch, ret, i = 0;
+ struct {
+ u16 chans[2];
+ aligned_s64 timestamp;
+ } scan;
+
+ memset(&scan, 0, sizeof(scan));
+
+ iio_for_each_active_channel(iio, ch) {
+ ret = regmap_read(data->regmap, VEML6030_REG_DATA(ch),
+ &reg);
+ if (ret)
+ goto done;
+
+ scan.chans[i++] = reg;
+ }
+
+ iio_push_to_buffers_with_timestamp(iio, &scan, pf->timestamp);
+
+done:
+ iio_trigger_notify_done(iio->trig);
+
+ return IRQ_HANDLED;
+}
+
static int veml6030_set_info(struct iio_dev *indio_dev)
{
struct veml6030_data *data = iio_priv(indio_dev);
@@ -1077,6 +1147,12 @@ static int veml6030_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ veml6030_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+				     "Failed to register triggered buffer\n");
+
return devm_iio_device_register(&client->dev, indio_dev);
}
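A note on consuming the stream this veml6030 hunk produces: records read from /dev/iio:deviceX follow the scan layout above, two u16 samples padded out to the naturally aligned 64-bit timestamp. A minimal userspace sketch, assuming both data channels plus the timestamp are enabled in the scan mask; the device node name and record layout here are illustrative, since the layout depends on which channels are enabled:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct scan_record {
		uint16_t chans[2];	/* ALS, then white */
		uint8_t pad[4];		/* up to the 8-byte aligned timestamp */
		int64_t timestamp;	/* nanoseconds */
	};

	int main(void)
	{
		struct scan_record rec;
		int fd = open("/dev/iio:device0", O_RDONLY);

		if (fd < 0)
			return 1;
		while (read(fd, &rec, sizeof(rec)) == sizeof(rec))
			printf("als=%u white=%u ts=%lld\n", rec.chans[0],
			       rec.chans[1], (long long)rec.timestamp);
		close(fd);
		return 0;
	}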
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index acd291f3e792..a70bf8a3c73b 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -360,7 +360,7 @@ static irqreturn_t af8133j_trigger_handler(int irq, void *p)
s64 timestamp = iio_get_time_ns(indio_dev);
struct {
__le16 values[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} sample;
int ret;
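The aligned_s64 conversions in this and the following magnetometer hunks are mechanical: aligned_s64 (from <linux/types.h>) is an s64 carrying an 8-byte alignment attribute, so the struct layout is unchanged while the intent becomes self-documenting. A sketch of the equivalence, with struct shapes taken from the hunks and the asserts only illustrating that nothing moves:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	/* aligned_s64 is roughly "typedef s64 __aligned(8) aligned_s64;" */
	struct sample_old { __le16 channels[3]; s64 ts __aligned(8); };
	struct sample_new { __le16 channels[3]; aligned_s64 ts; };

	static_assert(sizeof(struct sample_old) == sizeof(struct sample_new));
	static_assert(offsetof(struct sample_old, ts) ==
		      offsetof(struct sample_new, ts));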
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 8306a18706ac..08975c60e325 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -197,7 +197,7 @@ struct ak8974 {
/* Ensure timestamp is naturally aligned */
struct {
__le16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 18077fb463a9..ef1363126cc2 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -426,7 +426,7 @@ struct ak8975_data {
/* Ensure natural alignment of timestamp */
struct {
s16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 7f545740178e..88bb673e40d8 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -140,7 +140,7 @@ struct bmc150_magn_data {
/* Ensure timestamp is naturally aligned */
struct {
s32 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
struct iio_trigger *dready_trig;
bool dready_trigger_on;
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 60fbb5431c88..ffd669b1ee7c 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -44,7 +44,7 @@ struct hmc5843_data {
struct iio_mount_matrix orientation;
struct {
__be16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 5295dc0100e4..2fe8e97f2cf8 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -60,7 +60,7 @@ struct mag3110_data {
struct {
__be16 channels[3];
u8 temperature;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index c55a38650c0d..28012b20c64f 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -236,7 +236,7 @@ struct yas5xx {
*/
struct {
s32 channels[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index 2953403bef53..c309d991490c 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -7,6 +7,7 @@
* Author: Peter Rosin <peda@axentia.se>
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
@@ -237,49 +238,18 @@ static ssize_t mux_write_ext_info(struct iio_dev *indio_dev, uintptr_t private,
return ret;
}
-static int mux_configure_channel(struct device *dev, struct mux *mux,
- u32 state, const char *label, int idx)
+static int mux_configure_chan_ext_info(struct device *dev, struct mux *mux,
+ int idx, int num_ext_info)
{
struct mux_child *child = &mux->child[idx];
- struct iio_chan_spec *chan = &mux->chan[idx];
struct iio_chan_spec const *pchan = mux->parent->channel;
- char *page = NULL;
- int num_ext_info;
int i;
int ret;
- chan->indexed = 1;
- chan->output = pchan->output;
- chan->datasheet_name = label;
- chan->ext_info = mux->ext_info;
-
- ret = iio_get_channel_type(mux->parent, &chan->type);
- if (ret < 0) {
- dev_err(dev, "failed to get parent channel type\n");
- return ret;
- }
-
- if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
- chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
- if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
- chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
-
- if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
- chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
-
- if (state >= mux_control_states(mux->control)) {
- dev_err(dev, "too many channels\n");
- return -EINVAL;
- }
-
- chan->channel = state;
+ char *page __free(kfree) = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
- num_ext_info = iio_get_channel_ext_info_count(mux->parent);
- if (num_ext_info) {
- page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- }
child->ext_info_cache = devm_kcalloc(dev,
num_ext_info,
sizeof(*child->ext_info_cache),
@@ -318,8 +288,46 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache[i].size = ret;
}
- if (page)
- devm_kfree(dev, page);
+ return 0;
+}
+
+static int mux_configure_channel(struct device *dev, struct mux *mux, u32 state,
+ const char *label, int idx)
+{
+ struct iio_chan_spec *chan = &mux->chan[idx];
+ struct iio_chan_spec const *pchan = mux->parent->channel;
+ int num_ext_info;
+ int ret;
+
+ chan->indexed = 1;
+ chan->output = pchan->output;
+ chan->datasheet_name = label;
+ chan->ext_info = mux->ext_info;
+
+ ret = iio_get_channel_type(mux->parent, &chan->type);
+ if (ret < 0) {
+ dev_err(dev, "failed to get parent channel type\n");
+ return ret;
+ }
+
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
+
+ if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
+
+ if (state >= mux_control_states(mux->control)) {
+ dev_err(dev, "too many channels\n");
+ return -EINVAL;
+ }
+
+ chan->channel = state;
+
+ num_ext_info = iio_get_channel_ext_info_count(mux->parent);
+ if (num_ext_info)
+ return mux_configure_chan_ext_info(dev, mux, idx, num_ext_info);
return 0;
}
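The iio-mux refactor above swaps a device-managed scratch page for a scope-bound one: __free(kfree) from <linux/cleanup.h> arranges for kfree() to run when the variable leaves scope, on every return path, which is what lets the explicit devm_kfree() tail disappear. A minimal sketch of the pattern (the function and names are illustrative):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int parse_blob(const u8 *src, size_t len)
	{
		u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		memcpy(buf, src, len);
		if (buf[0] != 0x42)
			return -EINVAL;	/* buf freed automatically here */

		return 0;		/* ...and here */
	}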
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 5376605b69b4..d44ab65c94cb 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1002,7 +1002,7 @@ static int bmp280_preinit(struct bmp280_data *data)
* after a reset, the device runs the complete power-on sequence, so
* we need to wait for the defined start-up time.
*/
- fsleep(data->start_up_time);
+ fsleep(data->start_up_time_us);
ret = regmap_read(data->regmap, BMP280_REG_STATUS, &reg);
if (ret)
@@ -1161,7 +1161,7 @@ const struct bmp280_chip_info bmp280_chip_info = {
.chip_id = bmp280_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp280_chip_ids),
.regmap_config = &bmp280_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -1347,7 +1347,7 @@ const struct bmp280_chip_info bme280_chip_info = {
.chip_id = bme280_chip_ids,
.num_chip_id = ARRAY_SIZE(bme280_chip_ids),
.regmap_config = &bme280_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bme280_channels,
.num_channels = ARRAY_SIZE(bme280_channels),
.avail_scan_masks = bme280_avail_scan_masks,
@@ -1414,7 +1414,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
return ret;
}
/* Wait 2 ms for the command to be processed */
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
/* Check for command processing error */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &reg);
if (ret) {
@@ -1806,7 +1806,7 @@ static int bmp380_chip_config(struct bmp280_data *data)
* formula in datasheet section 3.9.2 with an offset of ~+15%
* as is also seen in table 3.9.1.
*/
- msleep(150);
+ fsleep(150 * USEC_PER_MSEC);
/* Check config error flag */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
@@ -1957,7 +1957,7 @@ const struct bmp280_chip_info bmp380_chip_info = {
.num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
.regmap_config = &bmp380_regmap_config,
.spi_read_extra_byte = true,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp380_channels,
.num_channels = ARRAY_SIZE(bmp380_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -2006,7 +2006,8 @@ static int bmp580_soft_reset(struct bmp280_data *data)
dev_err(data->dev, "failed to send reset command to device\n");
return ret;
}
- usleep_range(2000, 2500);
+ /* From datasheet's table 4: electrical characteristics */
+ fsleep(2000);
/* Dummy read of chip_id */
ret = regmap_read(data->regmap, BMP580_REG_CHIP_ID, &reg);
@@ -2208,7 +2209,7 @@ static int bmp580_nvmem_read_impl(void *priv, unsigned int offset, void *val,
goto exit;
}
/* Wait standby transition time */
- usleep_range(2500, 3000);
+ fsleep(2500);
while (bytes >= sizeof(*dst)) {
addr = bmp580_nvmem_addrs[offset / sizeof(*dst)];
@@ -2274,7 +2275,7 @@ static int bmp580_nvmem_write_impl(void *priv, unsigned int offset, void *val,
goto exit;
}
/* Wait standby transition time */
- usleep_range(2500, 3000);
+ fsleep(2500);
while (bytes >= sizeof(*buf)) {
addr = bmp580_nvmem_addrs[offset / sizeof(*buf)];
@@ -2458,7 +2459,7 @@ static int bmp580_chip_config(struct bmp280_data *data)
return ret;
}
/* From datasheet's table 4: electrical characteristics */
- usleep_range(2500, 3000);
+ fsleep(2500);
/* Set default DSP mode settings */
reg_val = FIELD_PREP(BMP580_DSP_COMP_MASK, BMP580_DSP_PRESS_TEMP_COMP_EN) |
@@ -2649,7 +2650,7 @@ const struct bmp280_chip_info bmp580_chip_info = {
.chip_id = bmp580_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
.regmap_config = &bmp580_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp580_channels,
.num_channels = ARRAY_SIZE(bmp580_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -2720,7 +2721,7 @@ static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas)
delay_us =
conversion_time_max[data->oversampling_press];
- usleep_range(delay_us, delay_us + 1000);
+ fsleep(delay_us);
}
ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
@@ -2988,7 +2989,7 @@ const struct bmp280_chip_info bmp180_chip_info = {
.chip_id = bmp180_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
.regmap_config = &bmp180_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -3066,7 +3067,7 @@ const struct bmp280_chip_info bmp085_chip_info = {
.chip_id = bmp180_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
.regmap_config = &bmp180_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -3175,7 +3176,7 @@ int bmp280_common_probe(struct device *dev,
data->oversampling_temp = chip_info->oversampling_temp_default;
data->iir_filter_coeff = chip_info->iir_filter_coeff_default;
data->sampling_freq = chip_info->sampling_freq_default;
- data->start_up_time = chip_info->start_up_time;
+ data->start_up_time_us = chip_info->start_up_time_us;
/* Bring up regulators */
regulator_bulk_set_supply_names(data->supplies,
@@ -3201,7 +3202,7 @@ int bmp280_common_probe(struct device *dev,
return ret;
/* Wait to make sure we started up properly */
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
/* Bring chip out of reset if there is an assigned GPIO line */
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
@@ -3287,7 +3288,7 @@ int bmp280_common_probe(struct device *dev,
* Set autosuspend to two orders of magnitude larger than the
* start-up time.
*/
- pm_runtime_set_autosuspend_delay(dev, data->start_up_time / 10);
+ pm_runtime_set_autosuspend_delay(dev, data->start_up_time_us / 10);
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
@@ -3306,7 +3307,7 @@ static int bmp280_runtime_suspend(struct device *dev)
data->chip_info->set_mode(data, BMP280_SLEEP);
- fsleep(data->start_up_time);
+ fsleep(data->start_up_time_us);
return regulator_bulk_disable(BMP280_NUM_SUPPLIES, data->supplies);
}
@@ -3320,7 +3321,7 @@ static int bmp280_runtime_resume(struct device *dev)
if (ret)
return ret;
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
ret = data->chip_info->chip_config(data);
if (ret)
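The usleep_range()/msleep() calls throughout this file collapse into fsleep() because fsleep() already picks the appropriate delay backend for the requested duration. Roughly what it does internally (a sketch of the logic, not the exact header text):

	#include <linux/delay.h>
	#include <linux/math.h>

	static inline void fsleep_like(unsigned long usecs)
	{
		if (usecs <= 10)		/* busy-wait for tiny delays */
			udelay(usecs);
		else if (usecs <= 20000)	/* hrtimer-backed range sleep */
			usleep_range(usecs, 2 * usecs);
		else				/* jiffy-granular sleep */
			msleep(DIV_ROUND_UP(usecs, 1000));
	}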
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2df1175b6b85..5b2ee1d0ee46 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -434,7 +434,7 @@ struct bmp280_data {
struct bmp380_calib bmp380;
} calib;
struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
- unsigned int start_up_time; /* in microseconds */
+ unsigned int start_up_time_us;
/* log of base 2 of oversampling rate */
u8 oversampling_press;
@@ -470,8 +470,8 @@ struct bmp280_data {
/* Sensor data buffer */
u8 buf[BME280_BURST_READ_BYTES];
/* Calibration data buffers */
- __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
- __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
+ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / sizeof(__le16)];
+ __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / sizeof(__be16)];
u8 bme280_humid_cal_buf[BME280_CONTIGUOUS_CALIB_REGS];
u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
/* Miscellaneous, endianness-aware data buffers */
@@ -490,7 +490,7 @@ struct bmp280_chip_info {
const struct iio_chan_spec *channels;
int num_channels;
- unsigned int start_up_time;
+ unsigned int start_up_time_us;
const unsigned long *avail_scan_masks;
const int *oversampling_temp_avail;
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
index 9b40f46f575f..5db46784f4c6 100644
--- a/drivers/iio/pressure/hsc030pa.h
+++ b/drivers/iio/pressure/hsc030pa.h
@@ -58,7 +58,7 @@ struct hsc_data {
s32 p_offset_dec;
struct {
__be16 chan[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 056c8271c49d..00c077b2a2a4 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -213,7 +213,7 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
/* Ensure buffer elements are naturally aligned */
struct {
s32 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/pressure/rohm-bm1390.c b/drivers/iio/pressure/rohm-bm1390.c
index f24d9f927681..9c1197f0e742 100644
--- a/drivers/iio/pressure/rohm-bm1390.c
+++ b/drivers/iio/pressure/rohm-bm1390.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -138,7 +139,7 @@ enum {
struct bm1390_data_buf {
u32 pressure;
__be16 temp;
- s64 ts __aligned(8);
+ aligned_s64 ts;
};
/* BM1390 has FIFO for 4 pressure samples */
@@ -263,14 +264,14 @@ static int bm1390_read_data(struct bm1390_data *data,
{
int ret, warn;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
/*
* We use 'continuous mode' even for raw reads because, according to
* the datasheet, one-shot mode can't be used with the IIR filter.
*/
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
if (ret)
- goto unlock_out;
+ return ret;
switch (chan->type) {
case IIO_PRESSURE:
@@ -287,10 +288,8 @@ static int bm1390_read_data(struct bm1390_data *data,
warn = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (warn)
dev_warn(data->dev, "Failed to stop measurement (%d)\n", warn);
-unlock_out:
- mutex_unlock(&data->mutex);
- return ret;
+ return 0;
}
static int bm1390_read_raw(struct iio_dev *idev,
@@ -543,38 +542,33 @@ static int bm1390_fifo_enable(struct iio_dev *idev)
if (data->irq <= 0)
return -EINVAL;
- mutex_lock(&data->mutex);
- if (data->trigger_enabled) {
- ret = -EBUSY;
- goto unlock_out;
- }
+ guard(mutex)(&data->mutex);
+
+ if (data->trigger_enabled)
+ return -EBUSY;
/* Update watermark to HW */
ret = bm1390_fifo_set_wmi(data);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable WMI_IRQ */
ret = regmap_set_bits(data->regmap, BM1390_REG_MODE_CTRL,
BM1390_MASK_WMI_EN);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable FIFO */
ret = regmap_set_bits(data->regmap, BM1390_REG_FIFO_CTRL,
BM1390_MASK_FIFO_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state = BM1390_STATE_FIFO;
data->old_timestamp = iio_get_time_ns(idev);
- ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
}
static int bm1390_fifo_disable(struct iio_dev *idev)
@@ -584,27 +578,22 @@ static int bm1390_fifo_disable(struct iio_dev *idev)
msleep(1);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (ret)
- goto unlock_out;
+ return ret;
/* Disable FIFO */
ret = regmap_clear_bits(data->regmap, BM1390_REG_FIFO_CTRL,
BM1390_MASK_FIFO_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state = BM1390_STATE_SAMPLE;
/* Disable WMI_IRQ */
- ret = regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ return regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
BM1390_MASK_WMI_EN);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
}
static int bm1390_buffer_postenable(struct iio_dev *idev)
@@ -688,25 +677,24 @@ static irqreturn_t bm1390_irq_thread_handler(int irq, void *private)
{
struct iio_dev *idev = private;
struct bm1390_data *data = iio_priv(idev);
- int ret = IRQ_NONE;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled) {
iio_trigger_poll_nested(data->trig);
- ret = IRQ_HANDLED;
- } else if (data->state == BM1390_STATE_FIFO) {
+ return IRQ_HANDLED;
+ }
+
+ if (data->state == BM1390_STATE_FIFO) {
int ok;
ok = __bm1390_fifo_flush(idev, BM1390_FIFO_LENGTH,
data->timestamp);
if (ok > 0)
- ret = IRQ_HANDLED;
+ return IRQ_HANDLED;
}
- mutex_unlock(&data->mutex);
-
- return ret;
+ return IRQ_NONE;
}
static int bm1390_set_drdy_irq(struct bm1390_data *data, bool en)
@@ -722,17 +710,16 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
bool state)
{
struct bm1390_data *data = iio_trigger_get_drvdata(trig);
- int ret = 0;
+ int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled == state)
- goto unlock_out;
+ return 0;
if (data->state == BM1390_STATE_FIFO) {
dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
- ret = -EBUSY;
- goto unlock_out;
+ return -EBUSY;
}
data->trigger_enabled = state;
@@ -740,13 +727,13 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
if (state) {
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
if (ret)
- goto unlock_out;
+ return ret;
} else {
int dummy;
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (ret)
- goto unlock_out;
+ return ret;
/*
* We need to read the status register in order to ACK the
@@ -758,12 +745,7 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
dev_warn(data->dev, "status read failed\n");
}
- ret = bm1390_set_drdy_irq(data, state);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bm1390_set_drdy_irq(data, state);
}
static const struct iio_trigger_ops bm1390_trigger_ops = {
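The bm1390 conversion above trades unlock_out labels for guard(mutex) from <linux/cleanup.h>: the lock is taken once and dropped automatically whenever the enclosing scope exits, so every error path can return directly. A minimal sketch of the pattern (names illustrative):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static int update_state(struct mutex *lock, int *state, int val)
	{
		guard(mutex)(lock);

		if (*state == val)
			return 0;	/* lock dropped here */
		if (val < 0)
			return -EINVAL;	/* ...and here */

		*state = val;
		return 0;		/* ...and here too */
	}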
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 2adea84f5b4d..9db1c94dfc18 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -586,6 +586,8 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
} sample;
int err;
+ memset(&sample, 0, sizeof(sample));
+
if (test_bit(0, indio_dev->active_scan_mask)) {
/* Get current pressure from hardware FIFO. */
err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure);
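The memset added to zpa2326_fill_sample_buffer() matters because the whole struct reaches userspace even when only some channels are enabled: unwritten members and the implicit padding before the timestamp would otherwise carry stale kernel stack bytes. A sketch of the idea, with an illustrative layout shaped like the zpa2326 sample:

	#include <linux/string.h>
	#include <linux/types.h>

	static void fill_sample_safely(void *out, u32 pressure, s64 ts)
	{
		struct {
			u32 pressure;
			u16 temperature;	/* may stay unwritten */
			/* 2 bytes of implicit padding before the timestamp */
			aligned_s64 timestamp;
		} sample;

		/* Zero first so padding and disabled channels never leak
		 * stale stack contents into the userspace buffer.
		 */
		memset(&sample, 0, sizeof(sample));
		sample.pressure = pressure;
		sample.timestamp = ts;
		memcpy(out, &sample, sizeof(sample));
	}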
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 96fa97451cbf..9d3caf2bef18 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -63,7 +63,7 @@ struct as3935_state {
/* Ensure timestamp is naturally aligned */
struct {
u8 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/proximity/aw96103.c b/drivers/iio/proximity/aw96103.c
index cdd254da9e50..3472a2c36e44 100644
--- a/drivers/iio/proximity/aw96103.c
+++ b/drivers/iio/proximity/aw96103.c
@@ -433,7 +433,7 @@ static int aw96103_write_event_config(struct iio_dev *indio_dev,
state ? BIT(chan->channel) : 0);
}
-static struct iio_info iio_info = {
+static const struct iio_info iio_info = {
.read_raw = aw96103_read_raw,
.read_event_value = aw96103_read_event_val,
.write_event_value = aw96103_write_event_val,
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 4021feb7a7ac..5aa8e5a22f32 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -14,6 +14,7 @@
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
@@ -100,6 +101,17 @@
#define HX9023S_INTERRUPT_MASK GENMASK(9, 0)
#define HX9023S_PROX_DEBOUNCE_MASK GENMASK(3, 0)
+#define FW_VER_OFFSET 2
+#define FW_REG_CNT_OFFSET 3
+#define FW_DATA_OFFSET 16
+
+struct hx9023s_bin {
+ u16 reg_count;
+ u16 fw_size;
+ u8 fw_ver;
+ u8 data[] __counted_by(fw_size);
+};
+
struct hx9023s_ch_data {
s16 raw; /* Raw Data */
s16 lp; /* Low Pass Filter Data */
@@ -134,7 +146,7 @@ struct hx9023s_data {
struct {
__le16 channels[HX9023S_CH_NUM];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
/*
@@ -998,6 +1010,78 @@ static int hx9023s_id_check(struct iio_dev *indio_dev)
return 0;
}
+static int hx9023s_bin_load(struct hx9023s_data *data, struct hx9023s_bin *bin)
+{
+ u8 *cfg_start = bin->data + FW_DATA_OFFSET;
+ u8 addr, val;
+ u16 i;
+ int ret;
+
+ for (i = 0; i < bin->reg_count; i++) {
+ addr = cfg_start[i * 2];
+ val = cfg_start[i * 2 + 1];
+ ret = regmap_write(data->regmap, addr, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data)
+{
+ struct hx9023s_bin *bin __free(kfree) =
+ kzalloc(fw->size + sizeof(*bin), GFP_KERNEL);
+ if (!bin)
+ return -ENOMEM;
+
+	/* Set the __counted_by() counter before writing into bin->data */
+	bin->fw_size = fw->size;
+	memcpy(bin->data, fw->data, fw->size);
+
+	bin->fw_ver = bin->data[FW_VER_OFFSET];
+	bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
+
+ release_firmware(fw);
+
+ return hx9023s_bin_load(data, bin);
+}
+
+static void hx9023s_cfg_update(const struct firmware *fw, void *context)
+{
+ struct hx9023s_data *data = context;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ if (!fw || !fw->data) {
+ dev_warn(dev, "No firmware\n");
+ goto no_fw;
+ }
+
+ ret = hx9023s_send_cfg(fw, data);
+ if (ret) {
+ dev_warn(dev, "Firmware update failed: %d\n", ret);
+ goto no_fw;
+ }
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ dev_err(dev, "regcache sync failed\n");
+
+ return;
+
+no_fw:
+ ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
+ ARRAY_SIZE(hx9023s_reg_init_list));
+ if (ret) {
+ dev_err(dev, "Error loading default configuration\n");
+ return;
+ }
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ dev_err(dev, "regcache sync failed\n");
+}
+
static int hx9023s_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1036,18 +1120,14 @@ static int hx9023s_probe(struct i2c_client *client)
indio_dev->modes = INDIO_DIRECT_MODE;
i2c_set_clientdata(client, indio_dev);
- ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
- ARRAY_SIZE(hx9023s_reg_init_list));
- if (ret)
- return dev_err_probe(dev, ret, "device init failed\n");
-
ret = hx9023s_ch_cfg(data);
if (ret)
return dev_err_probe(dev, ret, "channel config failed\n");
- ret = regcache_sync(data->regmap);
+ ret = request_firmware_nowait(THIS_MODULE, true, "hx9023s.bin", dev,
+ GFP_KERNEL, data, hx9023s_cfg_update);
if (ret)
- return dev_err_probe(dev, ret, "regcache sync failed\n");
+ return dev_err_probe(dev, ret, "reg config failed\n");
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq,
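The probe change above moves register initialization behind request_firmware_nowait(), which returns immediately and invokes the callback from a worker once the firmware lookup completes, possibly with a NULL image. A minimal sketch of that asynchronous pattern (the "demo.bin" name and helper names are illustrative, not from this driver):

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/module.h>

	static void demo_fw_cb(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {
			dev_warn(dev, "firmware not found, using defaults\n");
			return;
		}
		/* ... parse fw->data / fw->size ... */
		release_firmware(fw);
	}

	static int demo_load_fw(struct device *dev)
	{
		/* uevent=true lets the usual udev firmware path help out */
		return request_firmware_nowait(THIS_MODULE, true, "demo.bin",
					       dev, GFP_KERNEL, dev, demo_fw_cb);
	}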
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 614e65cb9d42..cfc75d001f20 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -45,7 +45,7 @@ struct mb1232_data {
/* Ensure correct alignment of data to push to IIO buffer */
struct {
s16 distance;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 5c959730aecd..f3d054b06b4c 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -47,7 +47,7 @@ struct lidar_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index a75ea5042876..86cab113ef3d 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -66,7 +66,7 @@ struct srf08_data {
/* Ensure timestamp is naturally aligned */
struct {
s16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
/* Sensor-Type */
diff --git a/drivers/iio/proximity/sx_common.h b/drivers/iio/proximity/sx_common.h
index fb14e6f06a6d..259b5c695233 100644
--- a/drivers/iio/proximity/sx_common.h
+++ b/drivers/iio/proximity/sx_common.h
@@ -125,7 +125,7 @@ struct sx_common_data {
/* Ensure correct alignment of timestamp when present. */
struct {
__be16 channels[SX_COMMON_MAX_NUM_CHANNELS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
unsigned int suspend_ctrl;
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index a414eef12e5e..b681129a99b6 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -164,7 +164,7 @@ struct ad2s1210_state {
struct {
__be16 chan[2];
/* Ensure timestamp is naturally aligned. */
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
/** SPI transmit buffer. */
u8 rx[2];
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 0c844137d7aa..1998047a1f24 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -248,10 +248,12 @@ static irqreturn_t tmp006_trigger_handler(int irq, void *p)
struct tmp006_data *data = iio_priv(indio_dev);
struct {
s16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
s32 ret;
+ memset(&scan, 0, sizeof(scan));
+
ret = i2c_smbus_read_word_data(data->client, TMP006_VOBJECT);
if (ret < 0)
goto err;
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 33cca49c8058..7a181cac3cc9 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -5,7 +5,7 @@
# Keep in alphabetical order
config IIO_GTS_KUNIT_TEST
- tristate "Test IIO formatting functions" if !KUNIT_ALL_TESTS
+ tristate "Test IIO gain-time-scale helpers" if !KUNIT_ALL_TESTS
depends on KUNIT
select IIO_GTS_HELPER
select TEST_KUNIT_DEVICE_HELPERS
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index cbf13337ed1f..bbc6a2e1c2c1 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -652,6 +652,8 @@ static void iio_rescale_test_scale(struct kunit *test)
int rel_ppm;
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
@@ -681,6 +683,8 @@ static void iio_rescale_test_offset(struct kunit *test)
int values[2];
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff_off);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
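The assertions added above guard the kunit-allocated buffers before they are dereferenced, turning an allocation failure into a clean, reported test failure instead of a crash. The same pattern in isolation:

	#include <kunit/test.h>
	#include <linux/slab.h>

	static void demo_alloc_test(struct kunit *test)
	{
		char *buf = kunit_kzalloc(test, 16, GFP_KERNEL);

		/* Fail (and stop) the test cleanly if allocation failed */
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

		buf[0] = 'x';
		KUNIT_EXPECT_EQ(test, buf[0], 'x');
	}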
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 716c795d08fb..82c72baccb62 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -145,8 +145,8 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- trig_info->timer.function = iio_hrtimer_trig_handler;
+ hrtimer_setup(&trig_info->timer, iio_hrtimer_trig_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
trig_info->sampling_frequency[0] = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency[0];
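hrtimer_setup() merges the old two-step initialization (hrtimer_init() followed by assigning ->function) into a single call, so a timer can never be armed without a handler in place. The shape of the conversion in isolation (names illustrative):

	#include <linux/hrtimer.h>

	static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
	{
		/* ... poll the software trigger ... */
		return HRTIMER_NORESTART;
	}

	static void demo_timer_init(struct hrtimer *t)
	{
		hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_HARD);
	}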
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index bb60b2d7b2ec..e41cb741253b 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -38,6 +38,9 @@ static const void *triggers_table[][MAX_TRIGGERS] = {
{ TIM15_TRGO,},
{ TIM16_OC1,},
{ TIM17_OC1,},
+ { }, /* timer 18 */
+ { }, /* timer 19 */
+ { TIM20_TRGO, TIM20_TRGO2, TIM20_OC1, TIM20_OC2, TIM20_OC3, },
};
/* List the triggers accepted by each timer */
@@ -119,7 +122,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
unsigned int frequency)
{
unsigned long long prd, div;
- int prescaler = 0;
+ int prescaler = 0, ret;
u32 ccer;
/* Period and prescaler values depend on the clock rate */
@@ -150,10 +153,12 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
if (ccer & TIM_CCER_CCXE)
return -EBUSY;
- mutex_lock(&priv->lock);
+ guard(mutex)(&priv->lock);
if (!priv->enabled) {
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_write(priv->regmap, TIM_PSC, prescaler);
@@ -173,7 +178,6 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
/* Enable controller */
regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
- mutex_unlock(&priv->lock);
return 0;
}
@@ -307,7 +311,7 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
struct iio_trigger *trig = to_iio_trigger(dev);
u32 mask, shift, master_mode_max;
- int i;
+ int i, ret;
if (stm32_timer_is_trgo2_name(trig->name)) {
mask = TIM_CR2_MMS2;
@@ -322,15 +326,16 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
for (i = 0; i <= master_mode_max; i++) {
if (!strncmp(master_mode_table[i], buf,
strlen(master_mode_table[i]))) {
- mutex_lock(&priv->lock);
+ guard(mutex)(&priv->lock);
if (!priv->enabled) {
/* Clock should be enabled first */
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
- mutex_unlock(&priv->lock);
return len;
}
}
@@ -482,6 +487,7 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -491,12 +497,14 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
/* fixed scale */
return -EINVAL;
- case IIO_CHAN_INFO_ENABLE:
- mutex_lock(&priv->lock);
+ case IIO_CHAN_INFO_ENABLE: {
+ guard(mutex)(&priv->lock);
if (val) {
if (!priv->enabled) {
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
} else {
@@ -506,11 +514,12 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
clk_disable(priv->clk);
}
}
- mutex_unlock(&priv->lock);
+
return 0;
}
-
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
}
static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
@@ -602,6 +611,7 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
+ int ret;
if (sms < 0)
return sms;
@@ -609,12 +619,15 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
* Triggered mode sets the CEN bit automatically in hardware, so enable
* the counter clock first so it can be used; this keeps it in sync with CEN.
*/
- mutex_lock(&priv->lock);
- if (sms == 6 && !priv->enabled) {
- clk_enable(priv->clk);
- priv->enabled = true;
+ scoped_guard(mutex, &priv->lock) {
+ if (sms == 6 && !priv->enabled) {
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ priv->enabled = true;
+ }
}
- mutex_unlock(&priv->lock);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@@ -781,7 +794,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
return -EINVAL;
/* Create an IIO device only if we have triggers to be validated */
- if (*cfg->valids_table[index])
+ if (cfg->valids_table && *cfg->valids_table[index])
priv = stm32_setup_counter_device(dev);
else
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -794,7 +807,8 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
priv->triggers = triggers_table[index];
- priv->valids = cfg->valids_table[index];
+ if (cfg->valids_table && *cfg->valids_table[index])
+ priv->valids = cfg->valids_table[index];
stm32_timer_detect_trgo2(priv);
mutex_init(&priv->lock);
@@ -886,6 +900,16 @@ static const struct stm32_timer_trigger_cfg stm32h7_timer_trg_cfg = {
.num_valids_table = ARRAY_SIZE(stm32h7_valids_table),
};
+static const struct stm32_timer_trigger_cfg stm32mp25_timer_trg_cfg = {
+ /*
+	 * valids_table is not used: the counter framework supersedes the
+	 * deprecated IIO counter interface (IIO_COUNT), so it is not supported
+	 * here. num_valids_table is only kept to register the IIO HW triggers;
+	 * valids_table should eventually move to the stm32-timer-cnt driver.
+ */
+ .num_valids_table = ARRAY_SIZE(triggers_table),
+};
+
static const struct of_device_id stm32_trig_of_match[] = {
{
.compatible = "st,stm32-timer-trigger",
@@ -893,6 +917,9 @@ static const struct of_device_id stm32_trig_of_match[] = {
}, {
.compatible = "st,stm32h7-timer-trigger",
.data = (void *)&stm32h7_timer_trg_cfg,
+ }, {
+ .compatible = "st,stm32mp25-timer-trigger",
+ .data = (void *)&stm32mp25_timer_trg_cfg,
},
{ /* end node */ },
};
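Where guard() would pin the lock until the end of the function, the scoped_guard() used in stm32_set_enable_mode() limits the critical section to the attached block, so the TIM_SMCR update still happens unlocked, exactly as before. The pattern in isolation (names illustrative):

	#include <linux/cleanup.h>
	#include <linux/clk.h>
	#include <linux/mutex.h>

	static int demo_enable(struct mutex *lock, bool *enabled, struct clk *clk)
	{
		scoped_guard(mutex, lock) {
			if (!*enabled) {
				int ret = clk_enable(clk);

				if (ret)
					return ret;	/* unlocks on return */
				*enabled = true;
			}
		}

		/* lock already dropped here */
		return 0;
	}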
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b7c078b7f7cf..f8413f8a9f26 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1127,41 +1127,6 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
-int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
- u16 pkey, u16 *index)
-{
- struct ib_pkey_cache *cache;
- unsigned long flags;
- int i;
- int ret = -ENOENT;
-
- if (!rdma_is_port_valid(device, port_num))
- return -EINVAL;
-
- read_lock_irqsave(&device->cache_lock, flags);
-
- cache = device->port_data[port_num].cache.pkey;
- if (!cache) {
- ret = -EINVAL;
- goto err;
- }
-
- *index = -1;
-
- for (i = 0; i < cache->table_len; ++i)
- if (cache->table[i] == pkey) {
- *index = i;
- ret = 0;
- break;
- }
-
-err:
- read_unlock_irqrestore(&device->cache_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_find_exact_cached_pkey);
-
int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ca9b956c034d..0ded91f056f3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -209,23 +209,6 @@ static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
printk("%s(NULL ib_device): %pV", level, vaf);
}
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
- const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- __ibdev_printk(level, ibdev, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ibdev_printk);
-
#define define_ibdev_printk_level(func, level) \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{ \
@@ -2296,6 +2279,33 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
EXPORT_SYMBOL(ib_device_get_netdev);
/**
+ * ib_query_netdev_port - Query the port number of a net_device
+ * associated with an ibdev
+ * @ibdev: IB device
+ * @ndev: Network device
+ * @port: IB port the net_device is connected to
+ */
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port)
+{
+ struct net_device *ib_ndev;
+ u32 port_num;
+
+ rdma_for_each_port(ibdev, port_num) {
+ ib_ndev = ib_device_get_netdev(ibdev, port_num);
+ if (ndev == ib_ndev) {
+ *port = port_num;
+ dev_put(ib_ndev);
+ return 0;
+ }
+ dev_put(ib_ndev);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ib_query_netdev_port);
+
+/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
* @ndev: netdev to locate
* @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
@@ -2761,6 +2771,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
+ SET_DEVICE_OP(dev_ops, report_port_event);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
@@ -2854,11 +2865,62 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
},
};
+void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
+{
+ enum ib_port_state curr_state;
+ struct ib_event ibevent = {};
+ u32 port;
+
+ if (ib_query_netdev_port(ibdev, ndev, &port))
+ return;
+
+ curr_state = ib_get_curr_port_state(ndev);
+
+ write_lock_irq(&ibdev->cache_lock);
+ if (ibdev->port_data[port].cache.last_port_state == curr_state) {
+ write_unlock_irq(&ibdev->cache_lock);
+ return;
+ }
+ ibdev->port_data[port].cache.last_port_state = curr_state;
+ write_unlock_irq(&ibdev->cache_lock);
+
+ ibevent.event = (curr_state == IB_PORT_DOWN) ?
+ IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
+ ibevent.device = ibdev;
+ ibevent.element.port_num = port;
+ ib_dispatch_event(&ibevent);
+}
+EXPORT_SYMBOL(ib_dispatch_port_state_event);
+
+static void handle_port_event(struct net_device *ndev, unsigned long event)
+{
+ struct ib_device *ibdev;
+
+ /* Currently, link events in bonding scenarios are still
+ * reported by drivers that support bonding.
+ */
+ if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
+ return;
+
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return;
+
+ if (ibdev->ops.report_port_event) {
+ ibdev->ops.report_port_event(ibdev, ndev, event);
+ goto put_ibdev;
+ }
+
+ ib_dispatch_port_state_event(ibdev, ndev);
+
+put_ibdev:
+ ib_device_put(ibdev);
+}
+
static int ib_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct net_device *ib_ndev;
struct ib_device *ibdev;
u32 port;
@@ -2868,15 +2930,21 @@ static int ib_netdevice_event(struct notifier_block *this,
if (!ibdev)
return NOTIFY_DONE;
- rdma_for_each_port(ibdev, port) {
- ib_ndev = ib_device_get_netdev(ibdev, port);
- if (ndev == ib_ndev)
- rdma_nl_notify_event(ibdev, port,
- RDMA_NETDEV_RENAME_EVENT);
- dev_put(ib_ndev);
+ if (ib_query_netdev_port(ibdev, ndev, &port)) {
+ ib_device_put(ibdev);
+ break;
}
+
+ rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
ib_device_put(ibdev);
break;
+
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_DOWN:
+ handle_port_event(ndev, event);
+ break;
+
default:
break;
}
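With ib_query_netdev_port() exported, notifier-style code no longer needs to open-code the rdma_for_each_port() walk being removed above. A sketch of a caller, assuming the surrounding notifier plumbing exists and with error handling elided:

	#include <linux/printk.h>
	#include <rdma/ib_verbs.h>

	static void demo_handle_ndev_event(struct ib_device *ibdev,
					   struct net_device *ndev)
	{
		u32 port;

		if (ib_query_netdev_port(ibdev, ndev, &port))
			return;		/* ndev is not one of ibdev's ports */

		pr_info("%s: event on IB port %u\n", ibdev->name, port);
	}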
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 64d9c492de64..8d3dfef9ebaa 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -462,86 +462,3 @@ int ib_ud_header_pack(struct ib_ud_header *header,
return len;
}
EXPORT_SYMBOL(ib_ud_header_pack);
-
-/**
- * ib_ud_header_unpack - Unpack UD header struct from wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * ib_ud_header_pack() unpacks the UD header structure @header from wire
- * format in the buffer @buf.
- */
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header)
-{
- ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
- buf, &header->lrh);
- buf += IB_LRH_BYTES;
-
- if (header->lrh.link_version != 0) {
- pr_warn("Invalid LRH.link_version %u\n",
- header->lrh.link_version);
- return -EINVAL;
- }
-
- switch (header->lrh.link_next_header) {
- case IB_LNH_IBA_LOCAL:
- header->grh_present = 0;
- break;
-
- case IB_LNH_IBA_GLOBAL:
- header->grh_present = 1;
- ib_unpack(grh_table, ARRAY_SIZE(grh_table),
- buf, &header->grh);
- buf += IB_GRH_BYTES;
-
- if (header->grh.ip_version != 6) {
- pr_warn("Invalid GRH.ip_version %u\n",
- header->grh.ip_version);
- return -EINVAL;
- }
- if (header->grh.next_header != 0x1b) {
- pr_warn("Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
- return -EINVAL;
- }
- break;
-
- default:
- pr_warn("Invalid LRH.link_next_header %u\n",
- header->lrh.link_next_header);
- return -EINVAL;
- }
-
- ib_unpack(bth_table, ARRAY_SIZE(bth_table),
- buf, &header->bth);
- buf += IB_BTH_BYTES;
-
- switch (header->bth.opcode) {
- case IB_OPCODE_UD_SEND_ONLY:
- header->immediate_present = 0;
- break;
- case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
- header->immediate_present = 1;
- break;
- default:
- pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
- return -EINVAL;
- }
-
- if (header->bth.transport_header_version != 0) {
- pr_warn("Invalid BTH.transport_header_version %u\n",
- header->bth.transport_header_version);
- return -EINVAL;
- }
-
- ib_unpack(deth_table, ARRAY_SIZE(deth_table),
- buf, &header->deth);
- buf += IB_DETH_BYTES;
-
- if (header->immediate_present)
- memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_ud_header_unpack);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 11a080646916..e803f609ec87 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -171,45 +171,3 @@ void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
__ib_copy_path_rec_to_user(dst, src);
}
EXPORT_SYMBOL(ib_copy_path_rec_to_user);
-
-void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
- struct ib_user_path_rec *src)
-{
- u32 slid, dlid;
-
- memset(dst, 0, sizeof(*dst));
- if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
- (ib_is_opa_gid((union ib_gid *)src->dgid))) {
- dst->rec_type = SA_PATH_REC_TYPE_OPA;
- slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
- dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
- } else {
- dst->rec_type = SA_PATH_REC_TYPE_IB;
- slid = ntohs(src->slid);
- dlid = ntohs(src->dlid);
- }
- memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
- memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
-
- sa_path_set_dlid(dst, dlid);
- sa_path_set_slid(dst, slid);
- sa_path_set_raw_traffic(dst, src->raw_traffic);
- dst->flow_label = src->flow_label;
- dst->hop_limit = src->hop_limit;
- dst->traffic_class = src->traffic_class;
- dst->reversible = src->reversible;
- dst->numb_path = src->numb_path;
- dst->pkey = src->pkey;
- dst->sl = src->sl;
- dst->mtu_selector = src->mtu_selector;
- dst->mtu = src->mtu;
- dst->rate_selector = src->rate_selector;
- dst->rate = src->rate;
- dst->packet_life_time = src->packet_life_time;
- dst->preference = src->preference;
- dst->packet_life_time_selector = src->packet_life_time_selector;
-
- /* TODO: No need to set this */
- sa_path_set_dmac_zero(dst);
-}
-EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1211f4317a9f..aba96ca9bce5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 2975b11b79bf..502a79136d4d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -53,12 +53,6 @@
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
-#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
-#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
@@ -187,7 +181,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
struct auxiliary_device *adev;
- struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
@@ -204,7 +197,7 @@ struct bnxt_re_dev {
struct bnxt_re_nq_record *nqr;
/* Device Resources */
- struct bnxt_qplib_dev_attr dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
@@ -229,6 +222,9 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
struct dentry *dbg_root;
struct dentry *qp_debugfs;
+ unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 1e63f8091748..f039aefcaf67 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -37,18 +37,9 @@
*
*/
-#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/delay.h>
-#include <rdma/ib_addr.h>
-
-#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -357,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
- !rdev->is_virtfn) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e3d26bd6de05..6f5db32082dd 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -52,8 +52,6 @@
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
-#include "bnxt_ulp.h"
-
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -161,7 +159,7 @@ static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
struct bnxt_qplib_mrw *qplib_mr)
{
- if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+ if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
}
@@ -186,7 +184,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
@@ -275,7 +273,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -333,8 +331,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
- rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
- rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+ rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
+ rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
@@ -585,7 +583,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
@@ -1057,7 +1055,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
@@ -1277,7 +1275,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
rq = &qplqp->rq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (init_attr->srq) {
struct bnxt_re_srq *srq;
@@ -1314,7 +1312,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1340,7 +1338,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
entries = init_attr->cap.max_send_wr;
@@ -1393,7 +1391,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
@@ -1442,7 +1440,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
@@ -1612,7 +1610,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
ib_pd = ib_qp->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
@@ -1840,7 +1838,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
ib_pd = ib_srq->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
@@ -1872,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
nq = &rdev->nqr->nq[0];
if (udata) {
@@ -2044,7 +2044,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
unsigned int flags;
@@ -3091,7 +3091,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata = &attrs->driver_udata;
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_qplib_chip_ctx *cctx;
int cqe = attr->cqe;
int rc, entries;
@@ -3226,7 +3226,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!ibcq->uobject) {
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
return -EOPNOTSUPP;
@@ -4199,7 +4199,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
@@ -4291,7 +4291,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_re_ucontext *uctx =
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
struct bnxt_re_uctx_req ureq = {};
@@ -4467,9 +4467,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
- return -EFAULT;
- ret = vm_insert_page(vma, vma->vm_start,
- virt_to_page((void *)bnxt_entry->mem_offset));
+ ret = -EFAULT;
+ else
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
break;
default:
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index c143f273b759..4659a2f73364 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -79,17 +79,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);
-static void bnxt_re_stop_irq(void *handle);
-static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr);
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -153,6 +148,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
if (!rdev->chip_ctx)
return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
chip_ctx = rdev->chip_ctx;
rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
@@ -166,7 +165,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- int rc;
+ int rc = -ENOMEM;
en_dev = rdev->en_dev;
@@ -182,7 +181,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
- rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->qplib_res.dattr = rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
@@ -190,16 +192,20 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
- if (rc) {
- kfree(rdev->chip_ctx);
- rdev->chip_ctx = NULL;
- return rc;
- }
+ if (rc)
+ goto free_dev_attr;
if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
"platform doesn't support global atomics.");
return 0;
+free_dev_attr:
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
}
/* SR-IOV helper functions */
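
bnxt_re_setup_chip_ctx() now unwinds failures through labelled exits instead of duplicating kfree() calls at each error site. A minimal sketch of the kernel goto-ladder idiom, with hypothetical names (toy_map_bar is an assumed helper):

static int toy_setup(struct toy_dev *td)
{
	int rc = -ENOMEM;

	td->ctx = kzalloc(sizeof(*td->ctx), GFP_KERNEL);
	if (!td->ctx)
		return rc;

	td->attr = kzalloc(sizeof(*td->attr), GFP_KERNEL);
	if (!td->attr)
		goto free_ctx;

	rc = toy_map_bar(td);
	if (rc)
		goto free_attr;

	return 0;

free_attr:	/* unwind strictly in reverse allocation order */
	kfree(td->attr);
	td->attr = NULL;
free_ctx:
	kfree(td->ctx);
	td->ctx = NULL;
	return rc;
}
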
@@ -221,7 +227,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
struct bnxt_qplib_ctx *ctx;
int i;
- attr = &rdev->dev_attr;
+ attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
@@ -235,7 +241,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
+ rdev->dev_attr->tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
@@ -302,17 +308,128 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
&rdev->qplib_ctx);
}
-static void bnxt_re_shutdown(struct auxiliary_device *adev)
+struct bnxt_re_dcb_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
+
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
{
- struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
+}
+
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ destroy_workqueue(rdev->dcb_wq);
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_re_dcb_work *dcb_work;
struct bnxt_re_dev *rdev;
+ u32 data1, data2;
+ u16 event_id;
rdev = en_info->rdev;
- ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+ if (!rdev)
+ return;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
}
-static void bnxt_re_stop_irq(void *handle)
+static void bnxt_re_stop_irq(void *handle, bool reset)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_qplib_rcfw *rcfw;
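
The async notifier above runs in a context that must not sleep, so the DCB handler allocates its work item with GFP_ATOMIC and defers the slow firmware query to a workqueue, where sleeping is allowed. A condensed sketch of the deferral pattern, with hypothetical names:

struct toy_work {
	struct work_struct work;
	struct toy_dev *td;
};

static void toy_wq_task(struct work_struct *work)
{
	struct toy_work *tw = container_of(work, struct toy_work, work);

	toy_slow_firmware_query(tw->td);	/* may sleep here */
	kfree(tw);				/* the work item owns itself */
}

static void toy_notify(struct toy_dev *td)	/* atomic context */
{
	struct toy_work *tw = kzalloc(sizeof(*tw), GFP_ATOMIC);

	if (!tw)
		return;
	tw->td = td;
	INIT_WORK(&tw->work, toy_wq_task);
	queue_work(td->wq, &tw->work);
}
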
@@ -321,8 +438,18 @@ static void bnxt_re_stop_irq(void *handle)
int indx;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
rcfw = &rdev->rcfw;
+ if (reset) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_DEVICE_FATAL);
+ }
+
for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
nq = &rdev->nqr->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
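
When the L2 driver signals a reset, the hunk above marks the command queue detached and wakes every waiter before the IRQs disappear, so blocked firmware commands fail fast instead of timing out. The fail-fast shape reduced to its essentials (hypothetical names):

if (reset) {
	set_bit(TOY_ERR_DEVICE_DETACHED, &td->cmdq_flags);
	wake_up_all(&td->cmdq_waitq);	/* sleepers recheck the flag */
	toy_dispatch_fatal_event(td);	/* tell consumers the port is gone */
}
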
@@ -341,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
int indx, rc;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
msix_ent = rdev->nqr->msix_entries;
rcfw = &rdev->rcfw;
if (!ent) {
@@ -378,6 +507,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -839,17 +969,6 @@ static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
}
/* Device */
-
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
-{
- struct ib_device *ibdev =
- ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
- if (!ibdev)
- return NULL;
-
- return container_of(ibdev, struct bnxt_re_dev, ibdev);
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1240,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
return NULL;
}
/* Default values */
- rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
@@ -1627,12 +1745,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
- rdev->netdev, &rdev->dev_attr);
+ rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
if (rc)
goto fail;
@@ -1807,6 +1924,26 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1886,6 +2023,9 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_rem_pdev(rdev);
+ bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1990,8 +2130,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx,
- BNXT_RE_MAX_QPC_COUNT);
+ &rdev->qplib_ctx);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate RCFW Channel: %#x\n", rc);
@@ -2032,7 +2171,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rdev->pacing.dbr_pacing = false;
}
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto disable_rcfw;
@@ -2081,6 +2220,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
@@ -2099,6 +2243,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_add_pdev(rdev);
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_net_register_async_event(rdev);
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
@@ -2117,6 +2264,30 @@ fail:
return rc;
}
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+{
+ struct bnxt_qplib_cc_param cc_param = {};
+
+ /* Do not enable congestion control on VFs */
+ if (rdev->is_virtfn)
+ return;
+
+ /* Currently enabling only for GenP5 adapters */
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (enable) {
+ cc_param.enable = 1;
+ cc_param.tos_ecn = 1;
+ }
+
+ cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
+ ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
+}
+
static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
struct bnxt_re_en_dev_info *en_info,
struct auxiliary_device *adev)
@@ -2163,20 +2334,10 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
goto re_dev_uninit;
}
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
- goto re_dev_unreg;
- }
bnxt_re_setup_cc(rdev, true);
return 0;
-re_dev_unreg:
- ib_unregister_device(&rdev->ibdev);
re_dev_uninit:
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -2186,93 +2347,11 @@ exit:
return rc;
}
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
-{
- struct bnxt_qplib_cc_param cc_param = {};
-
- /* Do not enable congestion control on VFs */
- if (rdev->is_virtfn)
- return;
-
- /* Currently enabling only for GenP5 adapters */
- if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- return;
-
- if (enable) {
- cc_param.enable = 1;
- cc_param.tos_ecn = 1;
- }
-
- cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
- CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
-
- if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
- ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
-}
-
-/*
- * "Notifier chain callback can be invoked for the same chain from
- * different CPUs at the same time".
- *
- * For cases when the netdev is already present, our call to the
- * register_netdevice_notifier() will actually get the rtnl_lock()
- * before sending NETDEV_REGISTER and (if up) NETDEV_UP
- * events.
- *
- * But for cases when the netdev is not already present, the notifier
- * chain is subjected to be invoked from different CPUs simultaneously.
- *
- * This is protected by the netdev_mutex.
- */
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
- struct bnxt_re_dev *rdev;
-
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
-
- if (real_dev != netdev)
- goto exit;
-
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev)
- return NOTIFY_DONE;
-
-
- switch (event) {
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- netif_carrier_ok(real_dev) ?
- IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR);
- break;
- default:
- break;
- }
- ib_device_put(&rdev->ibdev);
-exit:
- return NOTIFY_DONE;
-}
-
#define BNXT_ADEV_NAME "bnxt_en"
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
struct auxiliary_device *aux_dev)
{
- if (rdev->nb.notifier_call) {
- unregister_netdevice_notifier(&rdev->nb);
- rdev->nb.notifier_call = NULL;
- } else {
- /* If notifier is null, we should have already done a
- * clean up before coming here.
- */
- return;
- }
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, op_type);
@@ -2316,13 +2395,9 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
if (rc)
- goto err;
- mutex_unlock(&bnxt_re_mutex);
- return 0;
+ kfree(en_info);
-err:
mutex_unlock(&bnxt_re_mutex);
- kfree(en_info);
return rc;
}
@@ -2356,6 +2431,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
__func__, en_dev->en_state);
bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -2375,6 +2451,16 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
return 0;
}
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+}
+
static const struct auxiliary_device_id bnxt_re_id_table[] = {
{ .name = BNXT_ADEV_NAME ".rdma", },
{},
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 5336f74297f8..457eecb99f96 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1217,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 0660101b5310..0d9487c889ff 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -343,6 +343,7 @@ struct bnxt_qplib_qp {
u32 msn;
u32 msn_tbl_sz;
bool is_host_msn_tbl;
+ u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 17e62f22683b..d23074383428 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -915,7 +915,6 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
@@ -924,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz)
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
@@ -969,12 +967,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- /* Allocate one extra to hold the QP1 entries */
- rcfw->qp_tbl_size = qp_tbl_sz + 1;
- rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
- if (!rcfw->qp_tbl)
- goto fail;
spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 88814cb3aa74..ff873c5f1b25 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -262,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz);
+ struct bnxt_qplib_ctx *ctx);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
@@ -285,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
- return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
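
The one-line change to map_qp_id_to_tbl_indx() is an operator-precedence fix: `%` binds tighter than `-`, so the old expression evaluated as `(qid % qp_tbl_size) - 2` and, with the u32 return type, could wrap to a huge index. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int qid = 5, tbl_size = 5;

	/* old parse: (5 % 5) - 2 wraps to 4294967294 as unsigned */
	printf("%u\n", qid % tbl_size - 2);
	/* fixed: 5 % (5 - 2) == 2, always within the table */
	printf("%u\n", qid % (tbl_size - 2));
	return 0;
}
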
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 96ceec1e8199..6cd05207ffed 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -871,19 +871,27 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
+ kfree(res->rcfw->qp_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr)
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_dev_attr *dev_attr;
int rc;
- res->pdev = pdev;
res->netdev = netdev;
+ dev_attr = res->dattr;
+
+ /* Allocate one extra to hold the QP1 entries */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
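
Moving the qp_tbl allocation into bnxt_qplib_alloc_res(), which runs after the device attributes have been queried, lets the table be sized from the firmware-reported max_qp instead of a fixed constant, with max_t() keeping a compile-time floor. The sizing pattern in isolation (a sketch with hypothetical names, not the full function):

/* floor of TOY_MIN_QPS + 1 (the extra QP1 slot), else the queried cap */
tbl_size = max_t(u32, TOY_MIN_QPS + 1, dev_attr->max_qp);
tbl = kcalloc(tbl_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
	return -ENOMEM;
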
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index cbfc49a1a56d..6a13927674b4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -49,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
@@ -424,9 +431,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr);
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
@@ -549,6 +554,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+	/* ext stats are supported if the cap flag is set AND the function is a PF OR a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
@@ -594,4 +607,9 @@ static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
}
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
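
_is_max_srq_ext_supported() uses the `!!` idiom to collapse a masked flag word to a strict 0/1 before it is returned as bool. A tiny user-space illustration (hypothetical flag value):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SRQ_EXT 0x40u

static bool has_srq_ext(unsigned int flags)
{
	return !!(flags & FLAG_SRQ_EXT);	/* 0x40 -> 1, 0 -> 0 */
}

int main(void)
{
	printf("%d %d\n", has_srq_ext(0x40), has_srq_ext(0x3f)); /* 1 0 */
	return 0;
}
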
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9df3e3271577..f231e886ad9d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
fw_ver[3] = resp.fw_rsvd;
}
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
@@ -176,6 +176,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
@@ -1022,3 +1025,116 @@ free_mem:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
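
bnxt_qplib_query_cc_param() above follows the driver's usual firmware-query shape: pick the response-buffer size (the larger TLV layout on gen P5/P7), round it up to command-queue units, run the query through a DMA-coherent buffer, then parse and free. The buffer lifecycle alone, as a sketch with hypothetical helpers (toy_send_query, toy_parse_response):

size_t size = ALIGN(resp_size, TOY_CMDQE_UNITS);
dma_addr_t dma;
void *sb;
int rc;

sb = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
if (!sb)
	return -ENOMEM;

rc = toy_send_query(dev, dma, size);	/* firmware writes into sb */
if (!rc)
	toy_parse_response(sb);

dma_free_coherent(dev, size, sb, dma);	/* freed on both paths */
return rc;
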
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index e6beeb514b7d..e626b05038a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -296,6 +296,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
@@ -325,8 +326,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
@@ -355,6 +355,8 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 0ee60fdc18b3..7eceb3e9f4ce 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,11 +2215,12 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
- __le16 reserved16;
+ __le16 max_srq_ext;
__le64 reserved64;
};
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80970a1738f8..034b85c42255 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* The math here assumes sizeof cpl_pass_accept_req >= sizeof
* cpl_rx_pkt.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header)) - pktshift,
+ GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
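
The cxgb4 hunk switches the skb length computation to size_add() from linux/overflow.h: on overflow it saturates to SIZE_MAX, so alloc_skb() fails cleanly rather than allocating a short buffer from a wrapped sum. A sketch of the saturating-arithmetic pattern (toy_alloc_pkt is a hypothetical wrapper):

#include <linux/overflow.h>
#include <linux/skbuff.h>

static struct sk_buff *toy_alloc_pkt(size_t payload, size_t hdr_room)
{
	/* size_add() returns SIZE_MAX on overflow, and an allocation
	 * of SIZE_MAX simply fails instead of silently truncating.
	 */
	return alloc_skb(size_add(payload, hdr_room), GFP_ATOMIC);
}
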
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5c4522b426..955f061a55e9 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int count;
int rq_flushed = 0, sq_flushed;
unsigned long flag;
+ struct ib_event ev;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
if (schp != rchp)
spin_lock(&schp->lock);
spin_lock(&qhp->lock);
+ if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
+ qhp->ibqp.event_handler) {
+ ev.device = qhp->ibqp.device;
+ ev.element.qp = &qhp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
+ }
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index d7fc9d5eeefd..838182d0409c 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -57,15 +57,15 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
- unsigned int num_irq_vectors;
- int admin_msix_vector_idx;
+ u32 num_irq_vectors;
+ u32 admin_msix_vector_idx;
struct efa_irq admin_irq;
struct efa_stats stats;
/* Array of completion EQs */
struct efa_eq *eqs;
- unsigned int neqs;
+ u32 neqs;
/* Only stores CQs with interrupts enabled */
struct xarray cqs_xa;
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index 77282234ce68..4d9ca97e4296 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -65,7 +65,7 @@ struct efa_com_admin_queue {
u16 depth;
struct efa_com_admin_cq cq;
struct efa_com_admin_sq sq;
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
unsigned long state;
@@ -89,7 +89,7 @@ struct efa_com_aenq {
struct efa_aenq_handlers *aenq_handlers;
dma_addr_t dma_addr;
u32 cc; /* consumer counter */
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
u16 depth;
u8 phase;
};
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index ad225823e6f2..4f03c0ec819f 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -141,8 +141,7 @@ static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
return 0;
}
-static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
- int vector)
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, u32 vector)
{
u32 cpu;
@@ -305,7 +304,7 @@ static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
efa_free_irq(dev, &eq->irq);
}
-static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u32 msix_vec)
{
int err;
@@ -328,21 +327,17 @@ err_free_comp_irq:
static int efa_create_eqs(struct efa_dev *dev)
{
- unsigned int neqs = dev->dev_attr.max_eq;
- int err;
- int i;
-
- neqs = min_t(unsigned int, neqs,
- dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+ u32 neqs = dev->dev_attr.max_eq;
+ int err, i;
+ neqs = min_t(u32, neqs, dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
return -ENOMEM;
for (i = 0; i < neqs; i++) {
- err = efa_create_eq(dev, &dev->eqs[i],
- i + EFA_COMP_EQS_VEC_BASE);
+ err = efa_create_eq(dev, &dev->eqs[i], i + EFA_COMP_EQS_VEC_BASE);
if (err)
goto err_destroy_eqs;
}
@@ -470,7 +465,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
- efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -643,12 +637,14 @@ err_disable_device:
return ERR_PTR(err);
}
-static void efa_remove_device(struct pci_dev *pdev)
+static void efa_remove_device(struct pci_dev *pdev,
+ enum efa_regs_reset_reason_types reset_reason)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
+ efa_com_dev_reset(edev, reset_reason);
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
@@ -676,7 +672,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_remove_device:
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
return err;
}
@@ -685,7 +681,7 @@ static void efa_remove(struct pci_dev *pdev)
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}
static void efa_shutdown(struct pci_dev *pdev)
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
index 169038e3ceb1..267fc1f3c42a 100644
--- a/drivers/infiniband/hw/erdma/Kconfig
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -5,7 +5,7 @@ config INFINIBAND_ERDMA
depends on INFINIBAND_ADDR_TRANS
depends on INFINIBAND_USER_ACCESS
help
- This is a RDMA/iWarp driver for Alibaba Elastic RDMA Adapter(ERDMA),
+	  This is an RDMA driver for the Alibaba Elastic RDMA Adapter (ERDMA),
which supports RDMA features in Alibaba cloud environment.
To compile this driver as module, choose M here. The module will be
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 3c166359448d..2a023b99f992 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -16,7 +16,7 @@
#include "erdma_hw.h"
#define DRV_MODULE_NAME "erdma"
-#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
struct erdma_eq {
void *qbuf;
@@ -101,8 +101,6 @@ struct erdma_cmdq {
struct erdma_comp_wait *wait_pool;
spinlock_t lock;
- bool use_event;
-
struct erdma_cmdq_sq sq;
struct erdma_cmdq_cq cq;
struct erdma_eq eq;
@@ -148,6 +146,8 @@ struct erdma_devattr {
u32 max_mr;
u32 max_pd;
u32 max_mw;
+ u32 max_gid;
+ u32 max_ah;
u32 local_dma_key;
};
@@ -177,7 +177,8 @@ struct erdma_resource_cb {
enum {
ERDMA_RES_TYPE_PD = 0,
ERDMA_RES_TYPE_STAG_IDX = 1,
- ERDMA_RES_CNT = 2,
+ ERDMA_RES_TYPE_AH = 2,
+ ERDMA_RES_CNT = 3,
};
struct erdma_dev {
@@ -192,8 +193,6 @@ struct erdma_dev {
u8 __iomem *func_bar;
struct erdma_devattr attrs;
- /* physical port state (only one port per device) */
- enum ib_port_state state;
u32 mtu;
/* cmdq and aeq use the same msix vector */
@@ -215,6 +214,7 @@ struct erdma_dev {
struct dma_pool *db_pool;
struct dma_pool *resp_pool;
+ enum erdma_proto_type proto;
};
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
@@ -265,7 +265,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1);
+ u64 *resp0, u64 *resp1, bool sleepable);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
int erdma_ceqs_init(struct erdma_dev *dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 771059a8eb7d..1b23c698ec25 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -567,7 +567,8 @@ reject_conn:
static int erdma_proc_mpareply(struct erdma_cep *cep)
{
- struct erdma_qp_attrs qp_attrs;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_mod_qp_params_iwarp params;
struct erdma_qp *qp = cep->qp;
struct mpa_rr *rep;
int ret;
@@ -597,26 +598,29 @@ static int erdma_proc_mpareply(struct erdma_cep *cep)
return -EINVAL;
}
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.irq_size = cep->ird;
- qp_attrs.orq_size = cep->ord;
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ memset(&params, 0, sizeof(params));
+ params.state = ERDMA_QPS_IWARP_RTS;
+ params.irq_size = cep->ird;
+ params.orq_size = cep->ord;
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto out_err;
}
- qp->attrs.qp_type = ERDMA_QP_ACTIVE;
- if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_LLP_HANDLE |
+ ERDMA_QPA_IWARP_MPA | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_ORD;
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_MPA);
+ params.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ params.cc = COMPROMISE_CC;
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params, to_modify_attrs);
up_write(&qp->state_lock);
@@ -722,7 +726,7 @@ static int erdma_newconn_connected(struct erdma_cep *cep)
__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
@@ -1126,10 +1130,11 @@ error_put_qp:
int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
- struct erdma_dev *dev = to_edev(id->device);
struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_mod_qp_params_iwarp mod_qp_params;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_dev *dev = to_edev(id->device);
struct erdma_qp *qp;
- struct erdma_qp_attrs qp_attrs;
int ret;
erdma_cep_set_inuse(cep);
@@ -1156,7 +1161,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
erdma_qp_get(qp);
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1181,11 +1186,11 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->cm_id = id;
id->add_ref(id);
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.orq_size = params->ord;
- qp_attrs.irq_size = params->ird;
+ memset(&mod_qp_params, 0, sizeof(mod_qp_params));
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ mod_qp_params.irq_size = params->ird;
+ mod_qp_params.orq_size = params->ord;
+ mod_qp_params.state = ERDMA_QPS_IWARP_RTS;
/* Associate QP with CEP */
erdma_cep_get(cep);
@@ -1194,19 +1199,21 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->state = ERDMA_EPSTATE_RDMA_MODE;
- qp->attrs.qp_type = ERDMA_QP_PASSIVE;
- qp->attrs.pd_len = params->private_data_len;
+ mod_qp_params.qp_type = ERDMA_QP_PASSIVE;
+ mod_qp_params.pd_len = params->private_data_len;
- if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_ORD |
+ ERDMA_QPA_IWARP_LLP_HANDLE | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_MPA;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits)) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ mod_qp_params.cc = COMPROMISE_CC;
+ }
/* move to rts */
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_ORD |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_IRD |
- ERDMA_QP_ATTR_MPA);
+ ret = erdma_modify_qp_state_iwarp(qp, &mod_qp_params, to_modify_attrs);
+
up_write(&qp->state_lock);
if (ret)
@@ -1214,7 +1221,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->mpa.ext_data.bits = 0;
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
ret = erdma_send_mpareqrep(cep, params->private_data,
params->private_data_len);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index a3d8922d1ad1..b867aefe83b2 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -182,7 +182,6 @@ int erdma_cmdq_init(struct erdma_dev *dev)
int err;
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
- cmdq->use_event = false;
sema_init(&cmdq->credits, cmdq->max_outstandings);
@@ -223,8 +222,6 @@ err_destroy_sq:
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
- /* after device init successfully, change cmdq to event mode. */
- dev->cmdq.use_event = true;
arm_cmdq_cq(&dev->cmdq);
}
@@ -312,8 +309,7 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
/* Copy 16B comp data after cqe hdr to outer */
be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
- if (cmdq->use_event)
- complete(&comp_wait->wait_event);
+ complete(&comp_wait->wait_event);
return 0;
}
@@ -332,9 +328,6 @@ static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
if (erdma_poll_single_cmd_completion(cmdq))
break;
- if (comp_num && cmdq->use_event)
- arm_cmdq_cq(cmdq);
-
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}
@@ -342,8 +335,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
int got_event = 0;
- if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
- !cmdq->use_event)
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return;
while (get_next_valid_eqe(&cmdq->eq)) {
@@ -354,6 +346,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
if (got_event) {
cmdq->cq.cmdsn++;
erdma_polling_cmd_completions(cmdq);
+ arm_cmdq_cq(cmdq);
}
notify_eq(&cmdq->eq);
@@ -372,7 +365,7 @@ static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
if (time_is_before_jiffies(comp_timeout))
return -ETIME;
- msleep(20);
+ udelay(20);
}
return 0;
@@ -403,7 +396,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
}
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1)
+ u64 *resp0, u64 *resp1, bool sleepable)
{
struct erdma_comp_wait *comp_wait;
int ret;
@@ -411,7 +404,12 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return -ENODEV;
- down(&cmdq->credits);
+ if (!sleepable) {
+ while (down_trylock(&cmdq->credits))
+ ;
+ } else {
+ down(&cmdq->credits);
+ }
comp_wait = get_comp_wait(cmdq);
if (IS_ERR(comp_wait)) {
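
erdma_post_cmd_wait() now takes a `sleepable` flag because down() may only be called from process context; non-sleepable callers instead spin on down_trylock(), which returns nonzero while the semaphore cannot be taken. The acquisition split in isolation (a sketch with a hypothetical name; the driver's loop omits the cpu_relax()):

static void toy_get_credit(struct semaphore *sem, bool sleepable)
{
	if (sleepable) {
		down(sem);			/* may block */
	} else {
		while (down_trylock(sem))	/* != 0: not acquired */
			cpu_relax();		/* busy-wait politely */
	}
}
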
@@ -425,7 +423,7 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
push_cmdq_sqe(cmdq, req, req_size, comp_wait);
spin_unlock(&cmdq->sq.lock);
- if (cmdq->use_event)
+ if (sleepable)
ret = erdma_wait_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
else
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 70f89f0162aa..1f456327e63c 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -105,6 +105,22 @@ static const struct {
{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};
+static void erdma_process_ud_cqe(struct erdma_cqe *cqe, struct ib_wc *wc)
+{
+ u32 ud_info;
+
+ wc->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ ud_info = be32_to_cpu(cqe->ud.info);
+ wc->network_hdr_type = FIELD_GET(ERDMA_CQE_NTYPE_MASK, ud_info);
+ if (wc->network_hdr_type == ERDMA_NETWORK_TYPE_IPV4)
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ wc->src_qp = FIELD_GET(ERDMA_CQE_SQPN_MASK, ud_info);
+ wc->sl = FIELD_GET(ERDMA_CQE_SL_MASK, ud_info);
+ wc->pkey_index = 0;
+}
+
#define ERDMA_POLLCQ_NO_QP 1
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
@@ -168,6 +184,10 @@ static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
+ if (erdma_device_rocev2(dev) &&
+ (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_GSI))
+ erdma_process_ud_cqe(cqe, wc);
+
if (syndrome >= ERDMA_NUM_WC_STATUS)
syndrome = ERDMA_WC_GENERAL_ERR;
@@ -201,3 +221,48 @@ int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return npolled;
}
+
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_cqe *cqe, *dst_cqe;
+ u32 prev_cq_ci, cur_cq_ci;
+ u32 ncqe = 0, nqp_cqe = 0;
+ unsigned long flags;
+ u8 owner;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ prev_cq_ci = cq->kern_cq.ci;
+
+ while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) {
+ ++cq->kern_cq.ci;
+ ++ncqe;
+ }
+
+ while (ncqe > 0) {
+ cur_cq_ci = prev_cq_ci + ncqe - 1;
+ cqe = get_queue_entry(cq->kern_cq.qbuf, cur_cq_ci, cq->depth,
+ CQE_SHIFT);
+
+ if (be32_to_cpu(cqe->qpn) == qpn) {
+ ++nqp_cqe;
+ } else if (nqp_cqe) {
+ dst_cqe = get_queue_entry(cq->kern_cq.qbuf,
+ cur_cq_ci + nqp_cqe,
+ cq->depth, CQE_SHIFT);
+ owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(dst_cqe->hdr));
+ cqe->hdr = cpu_to_be32(
+ (be32_to_cpu(cqe->hdr) &
+ ~ERDMA_CQE_HDR_OWNER_MASK) |
+ FIELD_PREP(ERDMA_CQE_HDR_OWNER_MASK, owner));
+ memcpy(dst_cqe, cqe, sizeof(*cqe));
+ }
+
+ --ncqe;
+ }
+
+ cq->kern_cq.ci = prev_cq_ci + nqp_cqe;
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+}
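
erdma_remove_cqes_of_qp() above compacts the CQ ring in place: a forward pass consumes every valid CQE, then a backward pass counts CQEs belonging to the dying QP and slides each survivor up by that count (re-stamping the owner bit for its new slot), and finally the consumer index is advanced past the removed entries. The backward-compaction core, stripped of ring wrap and owner bits, as a user-space sketch:

#include <stdio.h>

/* Slide entries not equal to `qpn` toward the end of buf[0..n),
 * preserving order; survivors end up in buf[removed..n).
 */
static unsigned int compact(unsigned int *buf, unsigned int n,
			    unsigned int qpn)
{
	unsigned int i = n, removed = 0;

	while (i-- > 0) {
		if (buf[i] == qpn)
			removed++;			/* the hole grows */
		else if (removed)
			buf[i + removed] = buf[i];	/* slide up */
	}
	return removed;
}

int main(void)
{
	unsigned int q[] = { 1, 7, 7, 2, 7, 3 };
	unsigned int removed = compact(q, 6, 7);

	for (unsigned int i = removed; i < 6; i++)
		printf("%u ", q[i]);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}
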
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 9a72fec6d5cc..6486234a2360 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -236,7 +236,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -278,7 +279,8 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 05978f3b1475..ea4db53901a4 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/if_ether.h>
/* PCIe device related definition. */
#define ERDMA_PCI_WIDTH 64
@@ -21,8 +22,21 @@
#define ERDMA_NUM_MSIX_VEC 32U
#define ERDMA_MSIX_VECTOR_CMDQ 0
+/* RoCEv2 related */
+#define ERDMA_ROCEV2_GID_SIZE 16
+#define ERDMA_MAX_PKEYS 1
+#define ERDMA_DEFAULT_PKEY 0xFFFF
+
+/* erdma device protocol type */
+enum erdma_proto_type {
+ ERDMA_PROTO_IWARP = 0,
+ ERDMA_PROTO_ROCEV2 = 1,
+ ERDMA_PROTO_COUNT = 2,
+};
+
/* PCIe Bar0 Registers. */
#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_PROTO_REG 0xC
#define ERDMA_REGS_DEV_CTRL_REG 0x10
#define ERDMA_REGS_DEV_ST_REG 0x14
#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
@@ -136,7 +150,11 @@ enum CMDQ_RDMA_OPCODE {
CMDQ_OPCODE_DESTROY_CQ = 5,
CMDQ_OPCODE_REFLUSH = 6,
CMDQ_OPCODE_REG_MR = 8,
- CMDQ_OPCODE_DEREG_MR = 9
+ CMDQ_OPCODE_DEREG_MR = 9,
+ CMDQ_OPCODE_SET_GID = 14,
+ CMDQ_OPCODE_CREATE_AH = 15,
+ CMDQ_OPCODE_DESTROY_AH = 16,
+ CMDQ_OPCODE_QUERY_QP = 17,
};
enum CMDQ_COMMON_OPCODE {
@@ -284,6 +302,36 @@ struct erdma_cmdq_dereg_mr_req {
u32 cfg;
};
+/* create_av cfg0 */
+#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
+#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
+
+struct erdma_av_cfg {
+ u32 cfg0;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 sl;
+ u8 rsvd;
+ u16 udp_sport;
+ u16 sgid_index;
+ u8 dmac[ETH_ALEN];
+ u8 padding[2];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+};
+
+struct erdma_cmdq_create_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+ struct erdma_av_cfg av_cfg;
+};
+
+struct erdma_cmdq_destroy_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+};
+
/* modify qp cfg */
#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
@@ -301,6 +349,36 @@ struct erdma_cmdq_modify_qp_req {
u32 recv_nxt;
};
+/* modify qp cfg1 for roce device */
+#define ERDMA_CMD_MODIFY_QP_DQPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_mod_qp_req_rocev2 {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 attr_mask;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ struct erdma_av_cfg av_cfg;
+};
+
+/* query qp response mask */
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK GENMASK_ULL(23, 0)
+#define ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK GENMASK_ULL(47, 24)
+#define ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK GENMASK_ULL(55, 48)
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK GENMASK_ULL(56, 56)
+
+struct erdma_cmdq_query_qp_req_rocev2 {
+ u64 hdr;
+ u32 qpn;
+};
+
+enum erdma_qp_type {
+ ERDMA_QPT_RC = 0,
+ ERDMA_QPT_UD = 1,
+};
+
/* create qp cfg0 */
#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
@@ -309,6 +387,9 @@ struct erdma_cmdq_modify_qp_req {
#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+/* create qp cfg2 */
+#define ERDMA_CMD_CREATE_QP_TYPE_MASK GENMASK(3, 0)
+
/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
@@ -342,6 +423,7 @@ struct erdma_cmdq_create_qp_req {
u64 rq_mtt_entry[3];
u32 db_cfg;
+ u32 cfg2;
};
struct erdma_cmdq_destroy_qp_req {
@@ -394,10 +476,33 @@ struct erdma_cmdq_query_stats_resp {
u64 rx_pps_meter_drop_packets_cnt;
};
+enum erdma_network_type {
+ ERDMA_NETWORK_TYPE_IPV4 = 0,
+ ERDMA_NETWORK_TYPE_IPV6 = 1,
+};
+
+enum erdma_set_gid_op {
+ ERDMA_SET_GID_OP_ADD = 0,
+ ERDMA_SET_GID_OP_DEL = 1,
+};
+
+/* set gid cfg */
+#define ERDMA_CMD_SET_GID_SGID_IDX_MASK GENMASK(15, 0)
+#define ERDMA_CMD_SET_GID_NTYPE_MASK BIT(16)
+#define ERDMA_CMD_SET_GID_OP_MASK BIT(31)
+
+struct erdma_cmdq_set_gid_req {
+ u64 hdr;
+ u32 cfg;
+ u8 gid[ERDMA_ROCEV2_GID_SIZE];
+};
+
/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_GID_MASK GENMASK_ULL(51, 48)
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
/* cap qword 1 definition */
@@ -426,6 +531,10 @@ enum {
#define ERDMA_CQE_QTYPE_RQ 1
#define ERDMA_CQE_QTYPE_CMDQ 2
+#define ERDMA_CQE_NTYPE_MASK BIT(31)
+#define ERDMA_CQE_SL_MASK GENMASK(27, 20)
+#define ERDMA_CQE_SQPN_MASK GENMASK(19, 0)
+
struct erdma_cqe {
__be32 hdr;
__be32 qe_idx;
@@ -435,7 +544,16 @@ struct erdma_cqe {
__be32 inv_rkey;
};
__be32 size;
- __be32 rsvd[3];
+ union {
+ struct {
+ __be32 rsvd[3];
+ } rc;
+
+ struct {
+ __be32 rsvd[2];
+ __be32 info;
+ } ud;
+ };
};
struct erdma_sge {
@@ -487,7 +605,7 @@ struct erdma_write_sqe {
struct erdma_sge sgl[];
};
-struct erdma_send_sqe {
+struct erdma_send_sqe_rc {
__le64 hdr;
union {
__be32 imm_data;
@@ -498,6 +616,17 @@ struct erdma_send_sqe {
struct erdma_sge sgl[];
};
+struct erdma_send_sqe_ud {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+ __le32 qkey;
+ __le32 dst_qpn;
+ __le32 ahn;
+ __le32 rsvd;
+ struct erdma_sge sgl[];
+};
+
struct erdma_readreq_sqe {
__le64 hdr;
__le32 invalid_stag;
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 62f497a71004..f35b30235018 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -26,14 +26,6 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
goto done;
switch (event) {
- case NETDEV_UP:
- dev->state = IB_PORT_ACTIVE;
- erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
- break;
- case NETDEV_DOWN:
- dev->state = IB_PORT_DOWN;
- erdma_port_event(dev, IB_EVENT_PORT_ERR);
- break;
case NETDEV_CHANGEMTU:
if (dev->mtu != netdev->mtu) {
erdma_set_mtu(dev, netdev->mtu);
@@ -172,6 +164,8 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
+ dev->proto = erdma_reg_read32(dev, ERDMA_REGS_DEV_PROTO_REG);
+
dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
0);
@@ -390,7 +384,7 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
CMDQ_OPCODE_QUERY_DEVICE);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (err)
return err;
@@ -398,6 +392,8 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
+ dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
@@ -415,12 +411,13 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+ dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_QUERY_FW_INFO);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (!err)
dev->attrs.fw_version =
FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
@@ -441,7 +438,8 @@ static int erdma_device_config(struct erdma_dev *dev)
req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_res_cb_init(struct erdma_dev *dev)
@@ -474,6 +472,29 @@ static void erdma_res_cb_free(struct erdma_dev *dev)
bitmap_free(dev->res_cb[i].bitmap);
}
+static const struct ib_device_ops erdma_device_ops_rocev2 = {
+ .get_link_layer = erdma_get_link_layer,
+ .add_gid = erdma_add_gid,
+ .del_gid = erdma_del_gid,
+ .query_pkey = erdma_query_pkey,
+ .create_ah = erdma_create_ah,
+ .destroy_ah = erdma_destroy_ah,
+ .query_ah = erdma_query_ah,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, erdma_ah, ibah),
+};
+
+static const struct ib_device_ops erdma_device_ops_iwarp = {
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+};
+
static const struct ib_device_ops erdma_device_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_ERDMA,
@@ -494,18 +515,9 @@ static const struct ib_device_ops erdma_device_ops = {
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
- .iw_accept = erdma_accept,
- .iw_add_ref = erdma_qp_get_ref,
- .iw_connect = erdma_connect,
- .iw_create_listen = erdma_create_listen,
- .iw_destroy_listen = erdma_destroy_listen,
- .iw_get_qp = erdma_get_ibqp,
- .iw_reject = erdma_reject,
- .iw_rem_ref = erdma_qp_put_ref,
.map_mr_sg = erdma_map_mr_sg,
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
- .modify_qp = erdma_modify_qp,
.post_recv = erdma_post_recv,
.post_send = erdma_post_send,
.poll_cq = erdma_poll_cq,
@@ -515,6 +527,7 @@ static const struct ib_device_ops erdma_device_ops = {
.query_qp = erdma_query_qp,
.req_notify_cq = erdma_req_notify_cq,
.reg_user_mr = erdma_reg_user_mr,
+ .modify_qp = erdma_modify_qp,
INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
@@ -537,7 +550,14 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;
- ibdev->node_type = RDMA_NODE_RNIC;
+ if (erdma_device_iwarp(dev)) {
+ ibdev->node_type = RDMA_NODE_RNIC;
+ ib_set_device_ops(ibdev, &erdma_device_ops_iwarp);
+ } else {
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ib_set_device_ops(ibdev, &erdma_device_ops_rocev2);
+ }
+
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
/*
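
The registration change above keys the verbs personality off the device protocol: the common ops table is installed unconditionally, then either the iWARP connection-management ops or the RoCEv2 AH/GID ops are layered on top via ib_set_device_ops(). A hypothetical miniature of that merge-non-NULL-slots pattern (all names invented for illustration):

#include <stdio.h>

struct dev_ops {
	void (*post_send)(void);
	void (*iw_connect)(void);   /* iWARP-only */
	void (*create_ah)(void);    /* RoCEv2-only */
};

static void generic_post_send(void) { puts("post_send"); }
static void iwarp_connect(void)     { puts("iw_connect"); }
static void roce_create_ah(void)    { puts("create_ah"); }

/* Copy only the non-NULL slots, loosely mirroring how the core merges
 * several ops tables into one ib_device. */
static void set_ops(struct dev_ops *dst, const struct dev_ops *src)
{
	if (src->post_send)  dst->post_send  = src->post_send;
	if (src->iw_connect) dst->iw_connect = src->iw_connect;
	if (src->create_ah)  dst->create_ah  = src->create_ah;
}

int main(void)
{
	const struct dev_ops base  = { .post_send = generic_post_send };
	const struct dev_ops iwarp = { .iw_connect = iwarp_connect };
	const struct dev_ops roce  = { .create_ah = roce_create_ah };
	int is_iwarp = 0;           /* would come from the DEV_PROTO register */
	struct dev_ops ops = { 0 };

	set_ops(&ops, &base);
	set_ops(&ops, is_iwarp ? &iwarp : &roce);
	ops.post_send();
	if (ops.create_ah)
		ops.create_ah();
	return 0;
}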
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 4d1f9114cd97..25f6c49aec77 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -11,20 +11,20 @@
void erdma_qp_llp_close(struct erdma_qp *qp)
{
- struct erdma_qp_attrs qp_attrs;
+ struct erdma_mod_qp_params_iwarp params;
down_write(&qp->state_lock);
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_RTS:
- case ERDMA_QP_STATE_RTR:
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_TERMINATE:
- qp_attrs.state = ERDMA_QP_STATE_CLOSING;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_RTS:
+ case ERDMA_QPS_IWARP_RTR:
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_TERMINATE:
+ params.state = ERDMA_QPS_IWARP_CLOSING;
+ erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
break;
- case ERDMA_QP_STATE_CLOSING:
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ case ERDMA_QPS_IWARP_CLOSING:
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
break;
default:
break;
@@ -48,9 +48,10 @@ struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
return NULL;
}
-static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
int ret;
struct erdma_dev *dev = qp->dev;
@@ -59,12 +60,15 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_cep *cep = qp->cep;
struct sockaddr_storage local_addr, remote_addr;
- if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ if (!(mask & ERDMA_QPA_IWARP_LLP_HANDLE))
return -EINVAL;
- if (!(mask & ERDMA_QP_ATTR_MPA))
+ if (!(mask & ERDMA_QPA_IWARP_MPA))
return -EINVAL;
+ if (!(mask & ERDMA_QPA_IWARP_CC))
+ params->cc = qp->attrs.cc;
+
ret = getname_local(cep->sock, &local_addr);
if (ret < 0)
return ret;
@@ -73,18 +77,16 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
if (ret < 0)
return ret;
- qp->attrs.state = ERDMA_QP_STATE_RTS;
-
tp = tcp_sk(qp->cep->sock->sk);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
- FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, params->cc) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.cookie = be32_to_cpu(cep->mpa.ext_data.cookie);
req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
req.dport = to_sockaddr_in(remote_addr).sin_port;
@@ -92,33 +94,57 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt = tp->snd_nxt;
/* rsvd tcp seq for mpa-rsp in server. */
- if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
- req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ if (params->qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + params->pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ if (mask & ERDMA_QPA_IWARP_IRD)
+ qp->attrs.irq_size = params->irq_size;
+
+ if (mask & ERDMA_QPA_IWARP_ORD)
+ qp->attrs.orq_size = params->orq_size;
+
+ if (mask & ERDMA_QPA_IWARP_CC)
+ qp->attrs.cc = params->cc;
+
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
+
+ return 0;
}
-static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
struct erdma_dev *dev = qp->dev;
struct erdma_cmdq_modify_qp_req req;
-
- qp->attrs.state = attrs->state;
+ int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ qp->attrs.iwarp.state = params->state;
+
+ return 0;
}
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask)
{
bool need_reflush = false;
int drop_conn, ret = 0;
@@ -126,31 +152,31 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
if (!mask)
return 0;
- if (!(mask & ERDMA_QP_ATTR_STATE))
+ if (!(mask & ERDMA_QPA_IWARP_STATE))
return 0;
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_RTR:
- if (attrs->state == ERDMA_QP_STATE_RTS) {
- ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_RTR:
+ if (params->state == ERDMA_QPS_IWARP_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, params, mask);
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
need_reflush = true;
if (qp->cep) {
erdma_cep_put(qp->cep);
qp->cep = NULL;
}
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
}
break;
- case ERDMA_QP_STATE_RTS:
+ case ERDMA_QPS_IWARP_RTS:
drop_conn = 0;
- if (attrs->state == ERDMA_QP_STATE_CLOSING ||
- attrs->state == ERDMA_QP_STATE_TERMINATE ||
- attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ if (params->state == ERDMA_QPS_IWARP_CLOSING ||
+ params->state == ERDMA_QPS_IWARP_TERMINATE ||
+ params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
drop_conn = 1;
need_reflush = true;
}
@@ -159,17 +185,17 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
erdma_qp_cm_drop(qp);
break;
- case ERDMA_QP_STATE_TERMINATE:
- if (attrs->state == ERDMA_QP_STATE_ERROR)
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ case ERDMA_QPS_IWARP_TERMINATE:
+ if (params->state == ERDMA_QPS_IWARP_ERROR)
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
break;
- case ERDMA_QP_STATE_CLOSING:
- if (attrs->state == ERDMA_QP_STATE_IDLE) {
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
- } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ case ERDMA_QPS_IWARP_CLOSING:
+ if (params->state == ERDMA_QPS_IWARP_IDLE) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ } else if (params->state != ERDMA_QPS_IWARP_CLOSING) {
return -ECONNABORTED;
}
break;
@@ -186,6 +212,98 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
return ret;
}
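
The switch above is the whole iWARP QP state machine, and the legal transitions are compact enough to express as a lookup table. A hedged sketch of the same legality check (states renumbered locally; the tolerated CLOSING-to-CLOSING no-op is omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

enum qps { IDLE, RTR, RTS, CLOSING, TERMINATE, ERR, NSTATES };

/* allowed[cur][next], mirroring erdma_modify_qp_state_iwarp():
 * IDLE/RTR may go to RTS or ERROR; RTS may stop (CLOSING, TERMINATE or
 * ERROR); TERMINATE may only degrade to ERROR; CLOSING may settle to
 * IDLE or ERROR. */
static const bool allowed[NSTATES][NSTATES] = {
	[IDLE]      = { [RTS] = true, [ERR] = true },
	[RTR]       = { [RTS] = true, [ERR] = true },
	[RTS]       = { [CLOSING] = true, [TERMINATE] = true, [ERR] = true },
	[TERMINATE] = { [ERR] = true },
	[CLOSING]   = { [IDLE] = true, [ERR] = true },
};

int main(void)
{
	printf("RTS -> CLOSING: %d\n", allowed[RTS][CLOSING]);     /* 1 */
	printf("TERMINATE -> RTS: %d\n", allowed[TERMINATE][RTS]); /* 0 */
	return 0;
}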
+static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ enum erdma_qpa_mask_rocev2 attr_mask)
+{
+ struct erdma_cmdq_mod_qp_req_rocev2 req;
+
+ memset(&req, 0, sizeof(req));
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK,
+ params->state);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_DQPN_MASK,
+ params->dst_qpn);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ req.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ erdma_set_av_cfg(&req.av_cfg, &params->av);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_SQ_PSN)
+ req.sq_psn = params->sq_psn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_RQ_PSN)
+ req.rq_psn = params->rq_psn;
+
+ req.attr_mask = attr_mask;
+
+ return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
+ NULL, true);
+}
+
+static void erdma_reset_qp(struct erdma_qp *qp)
+{
+ qp->kern_qp.sq_pi = 0;
+ qp->kern_qp.sq_ci = 0;
+ qp->kern_qp.rq_pi = 0;
+ qp->kern_qp.rq_ci = 0;
+ memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
+ memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
+ memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
+ memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
+ erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
+ if (qp->rcq != qp->scq)
+ erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
+}
+
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask)
+{
+ struct erdma_dev *dev = to_edev(qp->ibqp.device);
+ int ret;
+
+ ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
+ if (ret)
+ return ret;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ qp->attrs.rocev2.state = params->state;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ qp->attrs.rocev2.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ qp->attrs.rocev2.dst_qpn = params->dst_qpn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ memcpy(&qp->attrs.rocev2.av, &params->av,
+ sizeof(struct erdma_av));
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_RESET)
+ erdma_reset_qp(qp);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_ERROR) {
+ qp->flags |= ERDMA_QP_IN_FLUSHING;
+ mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+ }
+
+ return 0;
+}
+
static void erdma_qp_safe_free(struct kref *ref)
{
struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
@@ -282,17 +400,57 @@ static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
return 0;
}
+static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ } else if (wr->opcode == IB_WR_SEND_WITH_INV) {
+ op = ERDMA_OP_SEND_WITH_INV;
+ sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
+ }
+
+ *hw_op = op;
+}
+
+static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ const struct ib_ud_wr *uwr = ud_wr(wr);
+ struct erdma_ah *ah = to_eah(uwr->ah);
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ }
+
+ *hw_op = op;
+
+ sqe->ahn = cpu_to_le32(ah->ahn);
+ sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
+ /* Not allowed to send control qkey */
+ if (uwr->remote_qkey & 0x80000000)
+ sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
+ else
+ sqe->qkey = cpu_to_le32(uwr->remote_qkey);
+}
+
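
The Q_Key selection in init_send_sqe_ud() follows the InfiniBand convention that a work request whose Q_Key has the most-significant bit set (a "controlled" Q_Key) must not be placed on the wire; the QP's own Q_Key is substituted instead. The rule in isolation:

#include <stdint.h>
#include <stdio.h>

/* If bit 31 of the WR's Q_Key is set, substitute the QP's Q_Key. */
static uint32_t resolve_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
	return (wr_qkey & 0x80000000u) ? qp_qkey : wr_qkey;
}

int main(void)
{
	printf("0x%x\n", resolve_qkey(0x00001234u, 0xdeadbeefu)); /* 0x1234 */
	printf("0x%x\n", resolve_qkey(0x80001234u, 0xdeadbeefu)); /* qp qkey */
	return 0;
}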
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
const struct ib_send_wr *send_wr)
{
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_send_sqe_rc *rc_send_sqe;
+ struct erdma_send_sqe_ud *ud_send_sqe;
struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
- struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
struct erdma_sge *sge;
__le32 *length_field;
@@ -301,6 +459,10 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 attrs;
int ret;
+ if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
+ send_wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
SQEBB_SHIFT);
@@ -374,21 +536,20 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV:
- send_sqe = (struct erdma_send_sqe *)entry;
- hw_op = ERDMA_OP_SEND;
- if (op == IB_WR_SEND_WITH_IMM) {
- hw_op = ERDMA_OP_SEND_WITH_IMM;
- send_sqe->imm_data = send_wr->ex.imm_data;
- } else if (op == IB_WR_SEND_WITH_INV) {
- hw_op = ERDMA_OP_SEND_WITH_INV;
- send_sqe->invalid_stag =
- cpu_to_le32(send_wr->ex.invalidate_rkey);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rc_send_sqe = (struct erdma_send_sqe_rc *)entry;
+ init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
+ length_field = &rc_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_rc);
+ } else {
+ ud_send_sqe = (struct erdma_send_sqe_ud *)entry;
+ init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
+ length_field = &ud_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_ud);
}
- wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
- length_field = &send_sqe->length;
- wqe_size = sizeof(struct erdma_send_sqe);
- sgl_offset = wqe_size;
+ sgl_offset = wqe_size;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
break;
case IB_WR_REG_MR:
wqe_hdr |=
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 51d619edb6c5..af36a8d2df22 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -55,6 +55,13 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
ilog2(qp->attrs.rq_size)) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_RC);
+ else
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_UD);
+
if (rdma_is_kernel_res(&qp->ibqp.res)) {
u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
@@ -119,10 +126,10 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
}
}
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
- &resp1);
- if (!err)
- qp->attrs.cookie =
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
+ true);
+ if (!err && erdma_device_iwarp(dev))
+ qp->attrs.iwarp.cookie =
FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
return err;
@@ -178,7 +185,8 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
@@ -240,7 +248,8 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
}
}
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -336,6 +345,11 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ if (erdma_device_rocev2(dev)) {
+ attr->max_pkeys = ERDMA_MAX_PKEYS;
+ attr->max_ah = dev->attrs.max_ah;
+ }
+
if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
attr->atomic_cap = IB_ATOMIC_GLOB;
@@ -367,7 +381,14 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
memset(attr, 0, sizeof(*attr));
- attr->gid_tbl_len = 1;
+ if (erdma_device_iwarp(dev)) {
+ attr->gid_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = dev->attrs.max_gid;
+ attr->ip_gids = true;
+ attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
+
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->max_msg_sz = -1;
@@ -377,14 +398,10 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
- if (netif_running(ndev) && netif_carrier_ok(ndev))
- dev->state = IB_PORT_ACTIVE;
- else
- dev->state = IB_PORT_DOWN;
- attr->state = dev->state;
+ attr->state = ib_get_curr_port_state(ndev);
out:
- if (dev->state == IB_PORT_ACTIVE)
+ if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
@@ -395,8 +412,18 @@ out:
int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
struct ib_port_immutable *port_immutable)
{
- port_immutable->gid_tbl_len = 1;
- port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ if (erdma_device_iwarp(dev)) {
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ port_immutable->gid_tbl_len = 1;
+ } else {
+ port_immutable->core_cap_flags =
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ port_immutable->gid_tbl_len = dev->attrs.max_gid;
+ port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
return 0;
}
@@ -438,7 +465,8 @@ static void erdma_flush_worker(struct work_struct *work)
req.qpn = QP_ID(qp);
req.sq_pi = qp->kern_qp.sq_pi;
req.rq_pi = qp->kern_qp.rq_pi;
- erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_qp_validate_cap(struct erdma_dev *dev,
@@ -459,7 +487,11 @@ static int erdma_qp_validate_cap(struct erdma_dev *dev,
static int erdma_qp_validate_attr(struct erdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_RC)
+ if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
return -EOPNOTSUPP;
if (attrs->srq)
@@ -937,7 +969,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
udata, struct erdma_ucontext, ibucontext);
struct erdma_ureq_create_qp ureq;
struct erdma_uresp_create_qp uresp;
- int ret;
+ void *old_entry;
+ int ret = 0;
ret = erdma_qp_validate_cap(dev, attrs);
if (ret)
@@ -956,9 +989,16 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
kref_init(&qp->ref);
init_completion(&qp->safe_free);
- ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
- XA_LIMIT(1, dev->attrs.max_qp - 1),
- &dev->next_alloc_qpn, GFP_KERNEL);
+ if (qp->ibqp.qp_type == IB_QPT_GSI) {
+ old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
+ if (xa_is_err(old_entry))
+ ret = xa_err(old_entry);
+ } else {
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ }
+
if (ret < 0) {
ret = -ENOMEM;
goto err_out;
@@ -995,7 +1035,12 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ if (erdma_device_iwarp(qp->dev))
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ else
+ qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
+
INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
ret = create_qp_cmd(uctx, qp);
@@ -1219,7 +1264,8 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
return ret;
@@ -1244,7 +1290,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1269,13 +1316,20 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibqp->device);
struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
udata, struct erdma_ucontext, ibucontext);
- struct erdma_qp_attrs qp_attrs;
- int err;
struct erdma_cmdq_destroy_qp_req req;
+ union erdma_mod_qp_params params;
+ int err;
down_write(&qp->state_lock);
- qp_attrs.state = ERDMA_QP_STATE_ERROR;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ if (erdma_device_iwarp(dev)) {
+ params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ ERDMA_QPA_IWARP_STATE);
+ } else {
+ params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
+ erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ ERDMA_QPA_ROCEV2_STATE);
+ }
up_write(&qp->state_lock);
cancel_delayed_work_sync(&qp->reflush_dwork);
@@ -1284,7 +1338,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1382,7 +1437,8 @@ static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
+ true);
if (ret)
return ret;
@@ -1417,7 +1473,8 @@ static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d", ret);
@@ -1506,69 +1563,248 @@ void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
atomic_dec(&dev->num_ctx);
}
-static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
- [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
- [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
- [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
- [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
+ struct erdma_av *av, u16 sport)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ av->port = rdma_ah_get_port_num(ah_attr);
+ av->sgid_index = grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
+ av->traffic_class = grh->traffic_class;
+ av->sl = rdma_ah_get_sl(ah_attr);
+
+ av->flow_label = grh->flow_label;
+ av->udp_sport = sport;
+
+ ether_addr_copy(av->dmac, ah_attr->roce.dmac);
+ memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
+
+ if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
+ av->ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ av->ntype = ERDMA_NETWORK_TYPE_IPV6;
+}
+
+static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
+{
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_sl(ah_attr, av->sl);
+ rdma_ah_set_port_num(ah_attr, av->port);
+ rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
+
+ rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
+ av->hop_limit, av->traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, av->dgid);
+}
+
+static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
+ [ERDMA_PROTO_IWARP] = {
+ [IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
+ [IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
+ [IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
+ [IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
+ [IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
+ },
};
-int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata)
+static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
+ [ERDMA_PROTO_IWARP] = {
+ [ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
+ [ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
+ [ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
+ [ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
+ [ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
+ [ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
+ },
+};
+
+static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
{
- struct erdma_qp_attrs new_attrs;
- enum erdma_qp_attr_mask erdma_attr_mask = 0;
- struct erdma_qp *qp = to_eqp(ibqp);
- int ret = 0;
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
+static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
+{
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
+}
- memset(&new_attrs, 0, sizeof(new_attrs));
+static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & IB_QP_STATE) {
- new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
+}
- erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ enum ib_qp_state cur_state, nxt_state;
+ struct erdma_dev *dev = qp->dev;
+ int ret = -EINVAL;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(&dev->ibdev, attr->port_num))
+ goto out;
+
+ if (erdma_device_rocev2(dev)) {
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state :
+ rocev2_to_ib_qps(qp->attrs.rocev2.state);
+
+ nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
+ cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
+ attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_AV) &&
+ erdma_check_gid_attr(
+ rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
+ goto out;
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= ERDMA_MAX_PKEYS)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void erdma_init_mod_qp_params_rocev2(
+ struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
+ int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
+{
+ enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
+ enum erdma_qps_rocev2 cur_state, nxt_state;
+ u16 udp_sport;
+
+ if (ib_attr_mask & IB_QP_CUR_STATE)
+ cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
+ else
+ cur_state = qp->attrs.rocev2.state;
+
+ if (ib_attr_mask & IB_QP_STATE)
+ nxt_state = ib_to_rocev2_qps(attr->qp_state);
+ else
+ nxt_state = cur_state;
+
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
+ params->state = nxt_state;
+
+ if (ib_attr_mask & IB_QP_QKEY) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
+ params->qkey = attr->qkey;
+ }
+
+ if (ib_attr_mask & IB_QP_SQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
+ params->sq_psn = attr->sq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_RQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
+ params->rq_psn = attr->rq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_DEST_QPN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
+ params->dst_qpn = attr->dest_qp_num;
}
+ if (ib_attr_mask & IB_QP_AV) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
+ udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ QP_ID(qp), params->dst_qpn);
+ erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
+ }
+
+ *erdma_attr_mask = to_modify_attrs;
+}
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ union erdma_mod_qp_params params;
+ int ret = 0, erdma_attr_mask = 0;
+
down_write(&qp->state_lock);
- ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+ ret = erdma_check_qp_attrs(qp, attr, attr_mask);
+ if (ret)
+ goto out;
- up_write(&qp->state_lock);
+ if (erdma_device_iwarp(qp->dev)) {
+ if (attr_mask & IB_QP_STATE) {
+ erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
+ params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ erdma_attr_mask);
+ } else {
+ erdma_init_mod_qp_params_rocev2(
+ qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
+
+ ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ erdma_attr_mask);
+ }
+out:
+ up_write(&qp->state_lock);
return ret;
}
static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
{
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- return IB_QPS_INIT;
- case ERDMA_QP_STATE_RTR:
- return IB_QPS_RTR;
- case ERDMA_QP_STATE_RTS:
- return IB_QPS_RTS;
- case ERDMA_QP_STATE_CLOSING:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_TERMINATE:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_ERROR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
+ if (erdma_device_iwarp(qp->dev))
+ return iwarp_to_ib_qps(qp->attrs.iwarp.state);
+ else
+ return rocev2_to_ib_qps(qp->attrs.rocev2.state);
}
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
+ struct erdma_cmdq_query_qp_req_rocev2 req;
struct erdma_dev *dev;
struct erdma_qp *qp;
+ u64 resp0, resp1;
+ int ret;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1595,8 +1831,37 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_attr->qp_state = query_qp_state(qp);
- qp_attr->cur_qp_state = query_qp_state(qp);
+ if (erdma_device_rocev2(dev)) {
+ /* Query hardware to get some attributes */
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_QP);
+ req.qpn = QP_ID(qp);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
+ &resp1, true);
+ if (ret)
+ return ret;
+
+ qp_attr->sq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
+ qp_attr->rq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
+ qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->sq_draining = FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
+
+ qp_attr->pkey_index = 0;
+ qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ erdma_av_to_attr(&qp->attrs.rocev2.av,
+ &qp_attr->ah_attr);
+ } else {
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ }
return 0;
}
@@ -1736,7 +2001,7 @@ void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
CMDQ_OPCODE_CONF_MTU);
req.mtu = mtu;
- erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
@@ -1806,7 +2071,8 @@ static int erdma_query_hw_stats(struct erdma_dev *dev,
req.target_addr = dma_addr;
req.target_length = ERDMA_HW_RESP_SIZE;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
goto out;
@@ -1839,3 +2105,159 @@ int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
+
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
+ const union ib_gid *gid)
+{
+ struct erdma_cmdq_set_gid_req req;
+ u8 ntype;
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
+ FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
+
+ if (op == ERDMA_SET_GID_OP_ADD) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)gid))
+ ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ ntype = ERDMA_NETWORK_TYPE_IPV6;
+
+ req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
+
+ memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
+ }
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_SET_GID);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct erdma_dev *dev = to_edev(attr->device);
+ int ret;
+
+ ret = erdma_check_gid_attr(attr);
+ if (ret)
+ return ret;
+
+ return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
+ &attr->gid);
+}
+
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
+ attr->index, NULL);
+}
+
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index >= ERDMA_MAX_PKEYS)
+ return -EINVAL;
+
+ *pkey = ERDMA_DEFAULT_PKEY;
+ return 0;
+}
+
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
+{
+ av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
+ FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
+
+ av_cfg->traffic_class = av->traffic_class;
+ av_cfg->hop_limit = av->hop_limit;
+ av_cfg->sl = av->sl;
+
+ av_cfg->udp_sport = av->udp_sport;
+ av_cfg->sgid_index = av->sgid_index;
+
+ ether_addr_copy(av_cfg->dmac, av->dmac);
+ memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
+}
+
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(init_attr->ah_attr);
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_create_ah_req req;
+ u32 udp_sport;
+ int ret;
+
+ ret = erdma_check_gid_attr(grh->sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
+ if (ret < 0)
+ return ret;
+
+ ah->ahn = ret;
+
+ if (grh->flow_label)
+ udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
+ else
+ udp_sport =
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
+
+ erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+ erdma_set_av_cfg(&req.av_cfg, &ah->av);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+ if (ret) {
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+ return ret;
+ }
+
+ return 0;
+}
+
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_destroy_ah_req req;
+ int ret;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ flags & RDMA_DESTROY_AH_SLEEPABLE);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+
+ return 0;
+}
+
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct erdma_ah *ah = to_eah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ erdma_av_to_attr(&ah->av, ah_attr);
+
+ return 0;
+}
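
erdma_create_ah() above picks the RoCEv2 UDP source port from the flow label when one is present, and otherwise derives a stable port from the AH number within the valid encap range starting at IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000). A sketch of that selection; flow_label_to_sport() here is a hypothetical stand-in, not the kernel's rdma_flow_label_to_udp_sport():

#include <stdint.h>
#include <stdio.h>

#define UDP_SPORT_MIN 0xC000u  /* IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (49152) */

/* Fold the 20-bit flow label into the valid RoCEv2 source-port range so a
 * given flow keeps one source port and ECMP hashing stays stable. */
static uint16_t flow_label_to_sport(uint32_t fl)
{
	uint32_t fl_low = fl & 0x3fffu, fl_high = (fl >> 14) & 0x3fu;

	fl_low ^= fl_high << 8;
	return (uint16_t)(fl_low | UDP_SPORT_MIN);
}

static uint16_t pick_sport(uint32_t flow_label, uint32_t ahn)
{
	if (flow_label)
		return flow_label_to_sport(flow_label);
	/* No flow label: derive a stable port from the AH number instead. */
	return (uint16_t)(UDP_SPORT_MIN + (ahn & 0x3FFF));
}

int main(void)
{
	printf("0x%x\n", pick_sport(0, 7));       /* 0xC007 */
	printf("0x%x\n", pick_sport(0x12345, 7)); /* flow-label derived */
	return 0;
}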
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c998acd39a78..f9408ccc8bad 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -136,6 +136,25 @@ struct erdma_user_dbrecords_page {
int refcnt;
};
+struct erdma_av {
+ u8 port;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 sl;
+ u8 sgid_index;
+ u16 udp_sport;
+ u32 flow_label;
+ u8 dmac[ETH_ALEN];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+ enum erdma_network_type ntype;
+};
+
+struct erdma_ah {
+ struct ib_ah ibah;
+ struct erdma_av av;
+ u32 ahn;
+};
+
struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
@@ -176,33 +195,91 @@ struct erdma_kqp {
u8 sig_all;
};
-enum erdma_qp_state {
- ERDMA_QP_STATE_IDLE = 0,
- ERDMA_QP_STATE_RTR = 1,
- ERDMA_QP_STATE_RTS = 2,
- ERDMA_QP_STATE_CLOSING = 3,
- ERDMA_QP_STATE_TERMINATE = 4,
- ERDMA_QP_STATE_ERROR = 5,
- ERDMA_QP_STATE_UNDEF = 7,
- ERDMA_QP_STATE_COUNT = 8
+enum erdma_qps_iwarp {
+ ERDMA_QPS_IWARP_IDLE = 0,
+ ERDMA_QPS_IWARP_RTR = 1,
+ ERDMA_QPS_IWARP_RTS = 2,
+ ERDMA_QPS_IWARP_CLOSING = 3,
+ ERDMA_QPS_IWARP_TERMINATE = 4,
+ ERDMA_QPS_IWARP_ERROR = 5,
+ ERDMA_QPS_IWARP_UNDEF = 6,
+ ERDMA_QPS_IWARP_COUNT = 7,
+};
+
+enum erdma_qpa_mask_iwarp {
+ ERDMA_QPA_IWARP_STATE = (1 << 0),
+ ERDMA_QPA_IWARP_LLP_HANDLE = (1 << 2),
+ ERDMA_QPA_IWARP_ORD = (1 << 3),
+ ERDMA_QPA_IWARP_IRD = (1 << 4),
+ ERDMA_QPA_IWARP_SQ_SIZE = (1 << 5),
+ ERDMA_QPA_IWARP_RQ_SIZE = (1 << 6),
+ ERDMA_QPA_IWARP_MPA = (1 << 7),
+ ERDMA_QPA_IWARP_CC = (1 << 8),
};
-enum erdma_qp_attr_mask {
- ERDMA_QP_ATTR_STATE = (1 << 0),
- ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
- ERDMA_QP_ATTR_ORD = (1 << 3),
- ERDMA_QP_ATTR_IRD = (1 << 4),
- ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
- ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
- ERDMA_QP_ATTR_MPA = (1 << 7)
+enum erdma_qps_rocev2 {
+ ERDMA_QPS_ROCEV2_RESET = 0,
+ ERDMA_QPS_ROCEV2_INIT = 1,
+ ERDMA_QPS_ROCEV2_RTR = 2,
+ ERDMA_QPS_ROCEV2_RTS = 3,
+ ERDMA_QPS_ROCEV2_SQD = 4,
+ ERDMA_QPS_ROCEV2_SQE = 5,
+ ERDMA_QPS_ROCEV2_ERROR = 6,
+ ERDMA_QPS_ROCEV2_COUNT = 7,
+};
+
+enum erdma_qpa_mask_rocev2 {
+ ERDMA_QPA_ROCEV2_STATE = (1 << 0),
+ ERDMA_QPA_ROCEV2_QKEY = (1 << 1),
+ ERDMA_QPA_ROCEV2_AV = (1 << 2),
+ ERDMA_QPA_ROCEV2_SQ_PSN = (1 << 3),
+ ERDMA_QPA_ROCEV2_RQ_PSN = (1 << 4),
+ ERDMA_QPA_ROCEV2_DST_QPN = (1 << 5),
};
enum erdma_qp_flags {
ERDMA_QP_IN_FLUSHING = (1 << 0),
};
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+
+struct erdma_mod_qp_params_iwarp {
+ enum erdma_qps_iwarp state;
+ enum erdma_cc_alg cc;
+ u8 qp_type;
+ u8 pd_len;
+ u32 irq_size;
+ u32 orq_size;
+};
+
+struct erdma_qp_attrs_iwarp {
+ enum erdma_qps_iwarp state;
+ u32 cookie;
+};
+
+struct erdma_mod_qp_params_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 sq_psn;
+ u32 rq_psn;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
+union erdma_mod_qp_params {
+ struct erdma_mod_qp_params_iwarp iwarp;
+ struct erdma_mod_qp_params_rocev2 rocev2;
+};
+
+struct erdma_qp_attrs_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
struct erdma_qp_attrs {
- enum erdma_qp_state state;
enum erdma_cc_alg cc; /* Congestion control algorithm */
u32 sq_size;
u32 rq_size;
@@ -210,11 +287,10 @@ struct erdma_qp_attrs {
u32 irq_size;
u32 max_send_sge;
u32 max_recv_sge;
- u32 cookie;
-#define ERDMA_QP_ACTIVE 0
-#define ERDMA_QP_PASSIVE 1
- u8 qp_type;
- u8 pd_len;
+ union {
+ struct erdma_qp_attrs_iwarp iwarp;
+ struct erdma_qp_attrs_rocev2 rocev2;
+ };
};
struct erdma_qp {
@@ -286,11 +362,25 @@ static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask);
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask);
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
+static inline bool erdma_device_iwarp(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_IWARP;
+}
+
+static inline bool erdma_device_rocev2(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_ROCEV2;
+}
+
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
return container_of(ibctx, struct erdma_ucontext, ibucontext);
@@ -316,6 +406,21 @@ static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
return container_of(ibcq, struct erdma_cq, ibcq);
}
+static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct erdma_ah, ibah);
+}
+
+static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
+{
+ u8 ntype = rdma_gid_attr_network_type(attr);
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6)
+ return -EINVAL;
+
+ return 0;
+}
+
static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
@@ -360,6 +465,7 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
@@ -370,5 +476,15 @@ struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port, int index);
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av);
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
#endif
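
erdma_qp_attrs now embeds a per-protocol union, and the header adds erdma_device_iwarp()/erdma_device_rocev2() so callers always pick the right arm. A standalone sketch of that tagged-union access pattern (types heavily simplified):

#include <stdio.h>

enum proto { PROTO_IWARP, PROTO_ROCEV2 };

/* Mirrors the shape of the erdma_qp_attrs union: only the arm matching
 * the device protocol is meaningful, so accesses funnel through
 * predicate-checked helpers instead of touching the union directly. */
struct qp_attrs {
	enum proto proto;
	union {
		struct { int state; unsigned cookie; } iwarp;
		struct { int state; unsigned qkey; } rocev2;
	};
};

static int qp_state(const struct qp_attrs *a)
{
	return a->proto == PROTO_IWARP ? a->iwarp.state : a->rocev2.state;
}

int main(void)
{
	struct qp_attrs a = { .proto = PROTO_ROCEV2, .rocev2 = { .state = 3 } };
	printf("%d\n", qp_state(&a));
	return 0;
}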
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index eb38f81aeeb1..cb630551cf1a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -2339,20 +2339,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
-/*
- * this is used for formatting hw error messages...
- */
-struct hfi1_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-/* in intr.c... */
-void hfi1_format_hwerrors(u64 hwerrs,
- const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
-
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cbac4a442d9e..d6fbd9c2b8b4 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -635,12 +635,11 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cca_timer_lock);
for (i = 0; i < OPA_MAX_SLS; i++) {
- hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
ppd->cca_timer[i].ppd = ppd;
ppd->cca_timer[i].sl = i;
ppd->cca_timer[i].ccti = 0;
- ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
+ hrtimer_setup(&ppd->cca_timer[i].hrtimer, cca_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
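
The hfi1 hunk above is a mechanical conversion from hrtimer_init() plus a separate function-pointer assignment to hrtimer_setup(), which takes the callback as an argument, so the timer is never observable in a half-initialized state. A userspace analogue of the combined-setup idea (hypothetical types, not the kernel API):

#include <stdio.h>

struct timer {
	void (*fn)(struct timer *);
	int clock, mode;
};

/* Initialize the timer and install its callback in one call, leaving no
 * window in which the timer could be armed without a handler. */
static void timer_setup_one(struct timer *t, void (*fn)(struct timer *),
			    int clock, int mode)
{
	t->fn = fn;
	t->clock = clock;
	t->mode = mode;
}

static void tick(struct timer *t) { (void)t; puts("tick"); }

int main(void)
{
	struct timer t;

	timer_setup_one(&t, tick, 0, 0);
	t.fn(&t);
	return 0;
}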
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 3737f632d62a..d8dd1a599631 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -47,37 +47,6 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
hfi1_event_pkey_change(ppd->dd, ppd->port);
}
-/**
- * format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
-
static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 49805a24bb0a..7259f4f55700 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -92,7 +92,7 @@ struct iowait_work {
*
* The lock field is used by waiters to record
* the seqlock_t that guards the list head.
- * Waiters explicity know that, but the destroy
+ * Waiters explicitly know that, but the destroy
* code that unwaits QPs does not.
*/
struct iowait {
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index d62ba5fdd80c..d94216c7d576 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -27,8 +27,8 @@ static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -57,7 +57,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -65,7 +65,7 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -93,9 +93,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_cc_bin_attributes[] = {
+static const struct bin_attribute *const port_cc_bin_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL
@@ -134,7 +134,7 @@ static struct attribute *port_cc_attributes[] = {
static const struct attribute_group port_cc_group = {
.name = "CCMgtA",
.attrs = port_cc_attributes,
- .bin_attrs = port_cc_bin_attributes,
+ .bin_attrs_new = port_cc_bin_attributes,
};
/* Start sc2vl */
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index ab3fbba70789..44cdb706fe27 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,21 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
- depends on NET_VENDOR_HISILICON
- depends on ARM64 || (COMPILE_TEST && 64BIT)
- depends on (HNS_DSAF && HNS_ENET) || HNS3
- help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
-
- To compile HIP08 driver as module, choose M here.
-
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
- depends on INFINIBAND_HNS && PCI && HNS3
- depends on INFINIBAND_HNS=m || HNS3=y
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HNS3
help
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v2.
+ To compile this driver, choose M here. This module will be called
+ hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index be1e1cdbcfa8..7917af8e6380 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,12 +5,9 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
- hns_roce_debugfs.o
+ hns_roce_debugfs.o hns_roce_hw_v2.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 950c133d4220..6ee911f6885b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ida_destroy(&hr_dev->xrcd_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ida_destroy(&hr_dev->srq_table.srq_ida.ida);
+ xa_destroy(&hr_dev->srq_table.xa);
+ }
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 4106423a1b39..3a5c93c9fb3e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ xa_destroy(&hr_dev->cq_table.array);
mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 605562122ecc..ca0798224e56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
return ret;
}
+/* This is the number of bottom-level BT pages of a 100G MR on a 4K-page
+ * system, assuming the BT page size is not expanded by cal_best_bt_pg_sz().
+ */
+#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
+
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
@@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
+ int loop;
int unit;
int ret;
int i;
@@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
continue;
end = r->offset + r->count;
- for (ofs = r->offset; ofs < end; ofs += unit) {
+ for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+
ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
hem_list->mid_bt[i],
&hem_list->btm_bt);
@@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
+ int loop = 1;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+ loop++;
+
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
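
Both loops above now call cond_resched() once every RESCHED_LOOP_CNT_THRESHOLD_ON_4K iterations, so building or searching the base-address table of a very large MR cannot monopolize the CPU. A userspace analogue of the periodic-yield pattern:

#include <sched.h>
#include <stdio.h>

#define RESCHED_EVERY 12800  /* mirrors RESCHED_LOOP_CNT_THRESHOLD_ON_4K */

int main(void)
{
	long work = 0;

	for (long i = 1; i <= 100000; i++) {
		work += i;               /* stand-in for one BT allocation */
		if (!(i % RESCHED_EVERY))
			sched_yield();   /* cond_resched() in the kernel */
	}
	printf("%ld\n", work);
	return 0;
}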
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0144e7210d05..160e8927d364 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
return tx_timeout;
}
-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
} while (++timeout < tx_timeout);
}
-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmq_desc *desc, int num)
+static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc,
+ int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret;
int i;
- spin_lock_bh(&csq->lock);
-
tail = csq->head;
for (i = 0; i < num; i++) {
@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
- hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
+ hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
/* check the result of hardware write back */
- desc[i] = csq->desc[tail++];
+ desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;
-
- desc_ret = le16_to_cpu(desc[i].retval);
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue;
- dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
- desc->opcode, desc_ret);
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
ret = -EAGAIN;
}
- spin_unlock_bh(&csq->lock);
-
if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
return ret;
}
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 opcode = le16_to_cpu(desc->opcode);
+ u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
+ u32 rsv_tail;
+ int ret;
+ int i;
+
+ while (try_cnt) {
+ try_cnt--;
+
+ spin_lock_bh(&csq->lock);
+ rsv_tail = csq->head;
+ ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
+ if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
+ try_cnt) {
+ spin_unlock_bh(&csq->lock);
+ mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
+ continue;
+ }
+
+ for (i = 0; i < num; i++) {
+ desc[i] = csq->desc[rsv_tail++];
+ if (rsv_tail == csq->desc_num)
+ rsv_tail = 0;
+ }
+ spin_unlock_bh(&csq->lock);
+ break;
+ }
+
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = %d.\n",
+ opcode, ret);
+
+ return ret;
+}
+
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
@@ -7185,9 +7217,22 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
+ bool linkup)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct net_device *netdev = handle->rinfo.netdev;
+
+ if (linkup || !hr_dev)
+ return;
+
+ ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
+}
+
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .link_status_change = hns_roce_hw_v2_link_status_change,
.reset_notify = hns_roce_hw_v2_reset_notify,
};
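
The new wrapper separates the single-shot send from the retry policy. A minimal user-space sketch of that policy, assuming a stub send_once() in place of __hns_roce_cmq_send_one():

#include <errno.h>
#include <unistd.h>

#define TRY_CNT		8	/* cf. HNS_ROCE_OPC_POST_MB_TRY_CNT */
#define RETRY_GAP_MS	5	/* cf. HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC */

/* Stand-in for one __hns_roce_cmq_send_one() style attempt. */
static int send_once(void)
{
	return -ETIME;		/* pretend the queue never drains */
}

/* Retry only the timeout case, a bounded number of times with a fixed
 * gap; any other result, success or failure, is returned immediately. */
static int send_with_retry(void)
{
	int try_cnt = TRY_CNT;
	int ret;

	while (try_cnt--) {
		ret = send_once();
		if (ret == -ETIME && try_cnt) {
			usleep(RETRY_GAP_MS * 1000);
			continue;
		}
		break;
	}
	return ret;
}

int main(void)
{
	return send_with_retry() == -ETIME ? 1 : 0;
}

With 8 attempts and a 5 ms gap, the retry path adds at most 35 ms of delay on top of the per-attempt tx timeout.
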
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cbdbc9edbce6..91a5665465ff 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
};
#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
+#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
+#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
struct hns_roce_cmdq_tx_timeout_map {
u16 opcode;
u32 tx_timeout;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index ae24c81c9812..cf89a8db4f64 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_RC_RNR_NAK_GEN;
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
- props->max_sge_rd = 1;
+ props->max_sge_rd = hr_dev->caps.max_sq_sg;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
props->max_mr = hr_dev->caps.num_mtpts;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9e2e76c59406..8901c142c1b6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
+ bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
+ bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
- if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ if (has_sdb) {
ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
- if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ if (has_rdb) {
ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
return 0;
err_sdb:
- if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ if (has_sdb)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibucontext);
hr_qp->config = uctx->config;
ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
- if (ret)
+ if (ret) {
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+ return ret;
+ }
ret = set_congest_param(hr_dev, hr_qp, ucmd);
- if (ret)
- return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ default_congest_type(hr_dev, hr_qp);
ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
if (ret)
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
-
- default_congest_type(hr_dev, hr_qp);
}
return ret;
@@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
min(udata->outlen, sizeof(resp)));
if (ret) {
ibdev_err(ibdev, "copy qp resp failed!\n");
- goto err_store;
+ goto err_flow_ctrl;
}
}
@@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
xa_destroy(&hr_dev->qp_table.dip_xa);
+ xa_destroy(&hr_dev->qp_table_xa);
mutex_destroy(&hr_dev->qp_table.bank_mutex);
mutex_destroy(&hr_dev->qp_table.scc_mutex);
}
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index e1e3d3ae72b7..ddf02a462efa 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -59,10 +59,6 @@ int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index d7c8ea948bcd..c0c9441885d3 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -85,10 +85,6 @@ int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
int irdma_process_bh(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 0422787592d8..0e594122baa7 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -320,9 +320,6 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
case NETDEV_DOWN:
iwdev->iw_status = 0;
fallthrough;
- case NETDEV_UP:
- irdma_port_ibevent(iwdev);
- break;
default:
break;
}
@@ -972,74 +969,6 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
}
/**
- * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
- * @dev: function device struct
- * @val_mem: buffer for fpm
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
- * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
- * @dev: hardware control device structure
- * @val_mem: buffer with fpm values
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_cqp_cq_create_cmd - create a cq for the cqp
* @dev: device pointer
* @cq: pointer to created cq
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 67c2d43135a8..457cea6d9909 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
req.num_resources = 1;
- req.alignment = 1;
+ req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
/* Have GDMA start searching from 0 */
req.allocated_resources = 0;
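
The new alignment value follows from unit arithmetic: doorbell resources are counted in MANA_PAGE_SIZE units, so PAGE_SIZE / MANA_PAGE_SIZE units of alignment put the doorbell on a kernel-page boundary. A small sketch of the arithmetic, assuming MANA_PAGE_SIZE is 4 KiB:

#include <stdio.h>

#define MANA_PAGE_SIZE	4096UL	/* device allocates doorbells in 4 KiB units */

int main(void)
{
	unsigned long page_sizes[] = { 4096, 16384, 65536 };

	/* The requested alignment grows with the kernel page size. */
	for (int i = 0; i < 3; i++)
		printf("PAGE_SIZE=%lu -> alignment=%lu units\n",
		       page_sizes[i],
		       page_sizes[i] / MANA_PAGE_SIZE);	/* 1, 4, 16 */
	return 0;
}
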
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index aa9ea6ba26e5..c592374f4a58 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
return PTR_ERR(*umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (err)
goto err_buf;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 529db874d67c..dd35e03402ab 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
- struct gid_entry *gids;
+ struct gid_entry *gids = NULL;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
spin_unlock_bh(&iboe->lock);
- if (!ret && hw_update) {
+ if (gids)
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
- kfree(gids);
- }
+
+ kfree(gids);
return ret;
}
@@ -2341,37 +2341,38 @@ static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
- if (event == NETDEV_UP || event == NETDEV_DOWN) {
- enum ib_port_state port_state;
- struct ib_event ibev = { };
+ spin_unlock_bh(&iboe->lock);
- if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1,
- &port_state))
- goto iboe_out;
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
+ mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+}
- if (event == NETDEV_UP &&
- (port_state != IB_PORT_ACTIVE ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN))
- goto iboe_out;
- if (event == NETDEV_DOWN &&
- (port_state != IB_PORT_DOWN ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE))
- goto iboe_out;
- iboe->last_port_state[dev->dev_port] = port_state;
+static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
+ unsigned long event)
+{
+ struct mlx4_ib_dev *mlx4_ibdev =
+ container_of(ibdev, struct mlx4_ib_dev, ib_dev);
+ struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
- ibev.device = &ibdev->ib_dev;
- ibev.element.port_num = dev->dev_port + 1;
- ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR;
- ib_dispatch_event(&ibev);
- }
+ if (!net_eq(dev_net(ndev), &init_net))
+ return;
+
+ ASSERT_RTNL();
+
+ if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
+ return;
+
+ spin_lock_bh(&iboe->lock);
+
+ iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
+
+ if (event == NETDEV_UP || event == NETDEV_DOWN)
+ ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
-iboe_out:
spin_unlock_bh(&iboe->lock);
- if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
- event == NETDEV_UP || event == NETDEV_CHANGE)
- mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+ if (event == NETDEV_UP || event == NETDEV_CHANGE)
+ mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -2569,6 +2570,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
+ .report_port_event = mlx4_ib_port_event,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index b52bceff7d97..f53b1846594c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -667,6 +667,9 @@ struct mlx4_uverbs_ex_query_device {
__u32 reserved;
};
+/* 4k - 4G */
+#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))
+
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -936,8 +939,19 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
return 0;
}
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts);
+static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+ u64 start,
+ int *num_of_mtts)
+{
+ unsigned long pg_sz;
+
+ pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
+ if (!pg_sz)
+ return -EOPNOTSUPP;
+
+ *num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
+ return order_base_2(pg_sz);
+}
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);
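
The new inline helper leans on ib_umem_find_best_pgsz() to pick the largest supported power-of-two block size and on order_base_2() to turn it into a shift. A user-space sketch of that selection logic, with a hand-rolled log2 standing in for order_base_2():

#include <stdint.h>
#include <stdio.h>

/* log2 for power-of-two sizes, playing the role of order_base_2(). */
static unsigned int log2u(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Pick the largest supported block size (bit n set means 2^n bytes is
 * supported) that does not exceed what the buffer layout allows; this
 * mirrors the role ib_umem_find_best_pgsz() plays in the helper above. */
static uint64_t best_pgsz(uint64_t supported, uint64_t limit)
{
	uint64_t best = 0;

	for (unsigned int n = 0; n < 64; n++) {
		uint64_t sz = 1ULL << n;

		if ((supported & sz) && sz <= limit)
			best = sz;
	}
	return best;	/* 0 means no supported size fits */
}

int main(void)
{
	uint64_t mask = ((1ULL << 32) - 1) & ~((1ULL << 12) - 1); /* bits 12..31 */
	uint64_t pg = best_pgsz(mask, 2 << 20);	/* layout allows up to 2 MiB */

	printf("pg_sz=%llu shift=%u\n", (unsigned long long)pg, log2u(pg));
	return 0;	/* prints pg_sz=2097152 shift=21 */
}
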
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a40bf58bcdd3..e77645a673fb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,286 +87,20 @@ err_free:
return ERR_PTR(err);
}
-enum {
- MLX4_MAX_MTT_SHIFT = 31
-};
-
-static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
- struct mlx4_mtt *mtt,
- u64 mtt_size, u64 mtt_shift, u64 len,
- u64 cur_start_addr, u64 *pages,
- int *start_index, int *npages)
-{
- u64 cur_end_addr = cur_start_addr + len;
- u64 cur_end_addr_aligned = 0;
- u64 mtt_entries;
- int err = 0;
- int k;
-
- len += (cur_start_addr & (mtt_size - 1ULL));
- cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
- len += (cur_end_addr_aligned - cur_end_addr);
- if (len & (mtt_size - 1ULL)) {
- pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
- len, mtt_size);
- return -EINVAL;
- }
-
- mtt_entries = (len >> mtt_shift);
-
- /*
- * Align the MTT start address to the mtt_size.
- * Required to handle cases when the MR starts in the middle of an MTT
- * record. Was not required in old code since the physical addresses
- * provided by the dma subsystem were page aligned, which was also the
- * MTT size.
- */
- cur_start_addr = round_down(cur_start_addr, mtt_size);
- /* A new block is started ... */
- for (k = 0; k < mtt_entries; ++k) {
- pages[*npages] = cur_start_addr + (mtt_size * k);
- (*npages)++;
- /*
- * Be friendly to mlx4_write_mtt() and pass it chunks of
- * appropriate size.
- */
- if (*npages == PAGE_SIZE / sizeof(u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, *start_index,
- *npages, pages);
- if (err)
- return err;
-
- (*start_index) += *npages;
- *npages = 0;
- }
- }
-
- return 0;
-}
-
-static inline u64 alignment_of(u64 ptr)
-{
- return ilog2(ptr & (~(ptr - 1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
- u64 current_block_end,
- u64 block_shift)
-{
- /* Check whether the alignment of the new block is aligned as well as
- * the previous block.
- * Block address must start with zeros till size of entity_size.
- */
- if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the previous block-reduce the
- * mtt size accordingly. Here we take the last right bit which
- * is 1.
- */
- block_shift = alignment_of(next_block_start);
-
- /*
- * Check whether the alignment of the end of previous block - is it
- * aligned as well as the start of the block
- */
- if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the start of the block -
- * reduce the mtt size accordingly.
- */
- block_shift = alignment_of(current_block_end);
-
- return block_shift;
-}
-
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
- u64 *pages;
- u64 len = 0;
- int err = 0;
- u64 mtt_size;
- u64 cur_start_addr = 0;
- u64 mtt_shift;
- int start_index = 0;
- int npages = 0;
- struct scatterlist *sg;
- int i;
-
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- mtt_shift = mtt->page_shift;
- mtt_size = 1ULL << mtt_shift;
+ struct ib_block_iter biter;
+ int err, i = 0;
+ u64 addr;
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- if (cur_start_addr + len == sg_dma_address(sg)) {
- /* still the same block */
- len += sg_dma_len(sg);
- continue;
- }
- /*
- * A new block is started ...
- * If len is malaligned, write an extra mtt entry to cover the
- * misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr,
- pages, &start_index,
- &npages);
- if (err)
- goto out;
-
- cur_start_addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
- }
-
- /* Handle the last block */
- if (len > 0) {
- /*
- * If len is malaligned, write an extra mtt entry to cover
- * the misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr, pages,
- &start_index, &npages);
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
+ addr = rdma_block_iter_dma_address(&biter);
+ err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
if (err)
- goto out;
- }
-
- if (npages)
- err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
-
-out:
- free_page((unsigned long) pages);
- return err;
-}
-
-/*
- * Calculate optimal mtt size based on contiguous pages.
- * Function will return also the number of pages that are not aligned to the
- * calculated mtt_size to be added to total number of pages. For that we should
- * check the first chunk length & last chunk length and if not aligned to
- * mtt_size we should increment the non_aligned_pages number. All chunks in the
- * middle already handled as part of mtt shift calculation for both their start
- * & end addresses.
- */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts)
-{
- u64 block_shift = MLX4_MAX_MTT_SHIFT;
- u64 min_shift = PAGE_SHIFT;
- u64 last_block_aligned_end = 0;
- u64 current_block_start = 0;
- u64 first_block_start = 0;
- u64 current_block_len = 0;
- u64 last_block_end = 0;
- struct scatterlist *sg;
- u64 current_block_end;
- u64 misalignment_bits;
- u64 next_block_start;
- u64 total_len = 0;
- int i;
-
- *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- /*
- * Initialization - save the first chunk start as the
- * current_block_start - block means contiguous pages.
- */
- if (current_block_len == 0 && current_block_start == 0) {
- current_block_start = sg_dma_address(sg);
- first_block_start = current_block_start;
- /*
- * Find the bits that are different between the physical
- * address and the virtual address for the start of the
- * MR.
- * umem_get aligned the start_va to a page boundary.
- * Therefore, we need to align the start va to the same
- * boundary.
- * misalignment_bits is needed to handle the case of a
- * single memory region. In this case, the rest of the
- * logic will not reduce the block size. If we use a
- * block size which is bigger than the alignment of the
- * misalignment bits, we might use the virtual page
- * number instead of the physical page number, resulting
- * in access to the wrong data.
- */
- misalignment_bits =
- (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
- current_block_start;
- block_shift = min(alignment_of(misalignment_bits),
- block_shift);
- }
-
- /*
- * Go over the scatter entries and check if they continue the
- * previous scatter entry.
- */
- next_block_start = sg_dma_address(sg);
- current_block_end = current_block_start + current_block_len;
- /* If we have a split (non-contig.) between two blocks */
- if (current_block_end != next_block_start) {
- block_shift = mlx4_ib_umem_calc_block_mtt
- (next_block_start,
- current_block_end,
- block_shift);
-
- /*
- * If we reached the minimum shift for 4k page we stop
- * the loop.
- */
- if (block_shift <= min_shift)
- goto end;
-
- /*
- * If not saved yet we are in first block - we save the
- * length of first block to calculate the
- * non_aligned_pages number at the end.
- */
- total_len += current_block_len;
-
- /* Start a new block */
- current_block_start = next_block_start;
- current_block_len = sg_dma_len(sg);
- continue;
- }
- /* The scatter entry is another part of the current block,
- * increase the block size.
- * An entry in the scatter can be larger than 4k (page) as of
- * dma mapping which merge some blocks together.
- */
- current_block_len += sg_dma_len(sg);
+ return err;
}
-
- /* Account for the last block in the total len */
- total_len += current_block_len;
- /* Add to the first block the misalignment that it suffers from. */
- total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
- last_block_end = current_block_start + current_block_len;
- last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
- total_len += (last_block_aligned_end - last_block_end);
-
- if (total_len & ((1ULL << block_shift) - 1ULL))
- pr_warn("misaligned total length detected (%llu, %llu)!",
- total_len, block_shift);
-
- *num_of_mtts = total_len >> block_shift;
-end:
- if (block_shift < min_shift) {
- /*
- * If shift is less than the min we set a warning and return the
- * min shift.
- */
- pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
-
- block_shift = min_shift;
- }
- return block_shift;
+ return 0;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
@@ -424,6 +158,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_umem;
+ }
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
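
The rewrite of mlx4_ib_umem_write_mtt() drops the hand-rolled page-list batching in favor of walking fixed-size DMA blocks and writing one MTT entry per block. A sketch of that walk over a made-up extent list; the extent values are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t addr;	/* DMA address, assumed block-size aligned */
	uint64_t len;	/* multiple of the block size */
};

/* Visit every block_size-sized block of each extent, the way
 * rdma_umem_for_each_dma_block() walks an ib_umem. */
static void for_each_block(const struct extent *e, int n, uint64_t block_size)
{
	int i = 0;

	for (int k = 0; k < n; k++)
		for (uint64_t off = 0; off < e[k].len; off += block_size)
			printf("entry %d -> 0x%llx\n", i++,
			       (unsigned long long)(e[k].addr + off));
}

int main(void)
{
	struct extent sg[] = { { 0x10000, 0x4000 }, { 0x80000, 0x2000 } };

	for_each_block(sg, 2, 0x1000);	/* six 4 KiB blocks in total */
	return 0;
}

One entry per call is less batched than the old page-list scheme, but it removes the start/end alignment bookkeeping that the deleted helpers existed to get right.
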
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9d08aa99f3cb..50fd407103c7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -925,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
@@ -1108,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 505bc47fd575..531a57f9ee7e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
return sport;
}
-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
struct rdma_ah_init_attr *init_attr)
{
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
+ int rate_val;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -67,7 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+ rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
+ if (rate_val < 0)
+ return rate_val;
+ ah->av.stat_rate_sl = rate_val << 4;
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
@@ -88,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
+
+ return 0;
}
int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
@@ -120,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
return err;
}
- create_ib_ah(dev, ah, init_attr);
- return 0;
+ return create_ib_ah(dev, ah, init_attr);
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
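
Once the rate is validated, the packing itself is a plain bitfield merge. A standalone sketch of the stat_rate_sl layout assumed by the code above, with the static rate in bits 7:4 and the service level in bits 3:0:

#include <stdint.h>
#include <stdio.h>

/* stat_rate_sl: static rate in bits 7:4, SL in bits 3:0. */
static uint8_t pack_stat_rate_sl(int rate_val, uint8_t sl)
{
	return (uint8_t)(rate_val << 4) | (sl & 0xf);
}

int main(void)
{
	/* rate_val must already be validated as non-negative. */
	printf("0x%02x\n", pack_stat_rate_sl(3, 5));	/* prints 0x35 */
	return 0;
}
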
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 4f6c1968a2ee..81cfa74147a1 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
int err;
if (!counter->id) {
@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_ib_counter_dealloc(counter);
- counter->id = 0;
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
return err;
}
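
The new flag confines the error path to resources this call created, so a counter handed in by the caller survives a failed bind. A minimal sketch of that ownership rule, with stubbed alloc/bind/dealloc calls:

#include <stdbool.h>

/* Stand-ins for the counter alloc/bind/dealloc calls in the patch. */
static int next_id = 1;
static int alloc_counter(int *id) { *id = next_id++; return 0; }
static void dealloc_counter(int id) { (void)id; }
static int bind_counter(int id) { return id ? 0 : -1; }

/* On bind failure, tear the counter down only if this call created it,
 * mirroring the "new" flag added to mlx5_ib_counter_bind_qp(). */
static int bind_or_create(int *id)
{
	bool new = false;
	int err;

	if (!*id) {
		err = alloc_counter(id);
		if (err)
			return err;
		new = true;
	}

	err = bind_counter(*id);
	if (err && new) {
		dealloc_counter(*id);
		*id = 0;
	}
	return err;
}

int main(void)
{
	int id = 0;

	return bind_or_create(&id);	/* id stays valid for reuse */
}
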
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 520034acf73a..162814ae8cb4 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -943,7 +943,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
}
dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst.counter_id = mlx5_fc_id(opfc->fc);
+ dst.counter = opfc->fc;
flow_act.action =
MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -1113,8 +1113,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest_arr[dest_num].counter_id =
- mlx5_fc_id(mcounters->hw_cntrs_hndl);
+ dest_arr[dest_num].counter =
+ mcounters->hw_cntrs_hndl;
dest_num++;
}
@@ -1603,7 +1603,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
static struct mlx5_ib_flow_handler *raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
- u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
+ struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1652,8 +1652,12 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
}
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (WARN_ON(!counter)) {
+ err = -EINVAL;
+ goto unlock;
+ }
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst[dst_num].counter_id = counter_id;
+ dst[dst_num].counter = counter;
dst_num++;
}
@@ -1878,7 +1882,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return 0;
}
-static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
+static bool
+is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
@@ -1888,6 +1893,7 @@ static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
if (offset && offset >= devx_obj->flow_counter_bulk_size)
return false;
+ *fc_bulk_size = devx_obj->flow_counter_bulk_size;
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
@@ -1904,13 +1910,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
{
struct mlx5_flow_context flow_context = {.flow_tag =
MLX5_FS_DEFAULT_FLOW_TAG};
- u32 *offset_attr, offset = 0, counter_id = 0;
int dest_id, dest_type = -1, inlen, len, ret, i;
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
struct ib_uobject **arr_flow_actions;
struct ib_uflow_resources *uflow_res;
struct mlx5_flow_act flow_act = {};
+ struct mlx5_fc *counter = NULL;
struct ib_qp *qp = NULL;
void *devx_obj, *cmd_in;
struct ib_uobject *uobj;
@@ -1937,6 +1943,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
+ u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0;
devx_obj = arr_flow_actions[0]->object;
if (uverbs_attr_is_valid(attrs,
@@ -1956,8 +1963,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
offset = *offset_attr;
}
- if (!is_flow_counter(devx_obj, offset, &counter_id))
+ if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
return -EINVAL;
+ counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -1968,8 +1978,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
- if (!uflow_res)
- return -ENOMEM;
+ if (!uflow_res) {
+ ret = -ENOMEM;
+ goto destroy_counter;
+ }
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
@@ -1996,7 +2008,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_handler =
raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
- counter_id, cmd_in, inlen, dest_id, dest_type);
+ counter, cmd_in, inlen, dest_id, dest_type);
if (IS_ERR(flow_handler)) {
ret = PTR_ERR(flow_handler);
goto err_out;
@@ -2007,6 +2019,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
return 0;
err_out:
ib_uverbs_flow_resources_free(uflow_res);
+destroy_counter:
+ if (counter)
+ mlx5_fc_local_destroy(counter);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f5b59d02f4d3..81849eb671a1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -242,6 +242,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
case NETDEV_DOWN: {
struct net_device *upper = NULL;
+ if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
+ !mlx5_core_mp_enabled(mdev))
+ return NOTIFY_DONE;
+
if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
struct net_device *lag_ndev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a01b592aa716..974a45c92fbb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -669,6 +669,12 @@ struct mlx5_ib_mkey {
#define mlx5_update_odp_stats(mr, counter_name, value) \
atomic64_add(value, &((mr)->odp_stats.counter_name))
+#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
+ do { \
+ mlx5_update_odp_stats(mr, counter_name, value); \
+ atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
+ } while (0)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
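
The macro derives the companion counter's name with ## token pasting, so updating faults also bumps faults_handled. A plain-C sketch of the same trick, with ordinary longs standing in for atomic64_t:

#include <stdio.h>

struct odp_stats {
	long faults;
	long faults_handled;
};

/* Token pasting (##) derives the "_handled" counter from the base
 * counter name, as mlx5_update_odp_stats_with_handled() does. */
#define update_stats_with_handled(s, name, value)	\
	do {						\
		(s)->name += (value);			\
		(s)->name##_handled += 1;		\
	} while (0)

int main(void)
{
	struct odp_stats st = { 0, 0 };

	update_stats_with_handled(&st, faults, 4);
	printf("faults=%ld handled=%ld\n", st.faults, st.faults_handled);
	return 0;	/* prints faults=4 handled=1 */
}
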
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 45d9dc9c6c8f..753faa9ad06a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
- if (!umem_dmabuf->sgt)
+ if (!umem_dmabuf->sgt || !mr)
return;
mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
@@ -1935,7 +1935,8 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && !mr->data_direct && mr->descs) {
+ if (!mr->umem && !mr->data_direct &&
+ mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2021,6 +2022,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ int ret = 0;
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
@@ -2032,7 +2043,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
- return 0;
+ goto out;
}
if (ent) {
@@ -2041,7 +2052,21 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
- return destroy_mkey(dev, mr);
+ ret = destroy_mkey(dev, mr);
+out:
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
+ return ret;
}
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4b37446758fd..e77c9280c07e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -228,13 +228,28 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
+ /*
+ * If userspace is racing to free the parent implicit ODP MR then we can
+ * lose the race with parent destruction. In this case
+ * mlx5_ib_free_odp_mr() will free everything in the implicit_children
+ * xarray so NOP is fine. This child MR cannot be destroyed here because
+ * we are under its umem_mutex.
+ */
if (!refcount_inc_not_zero(&imr->mmkey.usecount))
return;
- xa_erase(&imr->implicit_children, idx);
+ xa_lock(&imr->implicit_children);
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ mr) {
+ xa_unlock(&imr->implicit_children);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
+ return;
+ }
+
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
- xa_erase(&mr_to_mdev(mr)->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key));
+ __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
+ xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
@@ -268,6 +283,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
if (!umem_odp->npages)
goto out;
mr = umem_odp->private;
+ if (!mr)
+ goto out;
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
@@ -313,7 +330,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
- mlx5_update_odp_stats(mr, invalidations, invalidations);
+ mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
/*
* We are now sure that the device will not access the
@@ -500,18 +517,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
refcount_inc(&ret->mmkey.usecount);
goto out_lock;
}
- xa_unlock(&imr->implicit_children);
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
- ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL);
+ ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
- xa_erase(&imr->implicit_children, idx);
- goto out_mr;
+ __xa_erase(&imr->implicit_children, idx);
+ goto out_lock;
}
mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
}
+ xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -944,8 +961,7 @@ out:
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
- * Returns number of OS pages retrieved on success. The caller may continue to
- * the next data segment.
+ * Returns zero on success. The caller may continue to the next data segment.
* Can return the following error codes:
* -EAGAIN to designate a temporary error. The caller will abort handling the
* page fault and resolve it.
@@ -958,7 +974,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
- int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
+ int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
struct pf_frame *head = NULL, *frame;
struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -993,13 +1009,20 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
+ (io_virt & PAGE_MASK)) >>
+ PAGE_SHIFT;
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
+
+ if (ret < pages_in_range) {
+ ret = -EFAULT;
+ goto end;
+ }
- npages += ret;
ret = 0;
break;
@@ -1090,7 +1113,7 @@ end:
kfree(out);
*bytes_committed = 0;
- return ret ? ret : npages;
+ return ret;
}
/*
@@ -1109,8 +1132,7 @@ end:
* the committed bytes).
* @receive_queue: receive WQE end of sg list
*
- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
- * negative error code.
+ * Returns zero for success or a negative error code.
*/
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
@@ -1118,7 +1140,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, bool receive_queue)
{
- int ret = 0, npages = 0;
+ int ret = 0;
u64 io_virt;
__be32 key;
u32 byte_count;
@@ -1175,10 +1197,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
bytes_mapped);
if (ret < 0)
break;
- npages += ret;
}
- return ret < 0 ? ret : npages;
+ return ret;
}
/*
@@ -1414,12 +1435,6 @@ resolve_page_fault:
free_page((unsigned long)wqe_start);
}
-static int pages_in_range(u64 address, u32 length)
-{
- return (ALIGN(address + length, PAGE_SIZE) -
- (address & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
@@ -1458,7 +1473,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
- } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ } else if (ret < 0) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
@@ -1529,7 +1544,7 @@ static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
goto err;
}
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
mlx5r_deref_odp_mkey(mmkey);
if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
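
pages_in_range() is removed as a helper, but its computation survives inline in pagefault_single_data_segment(). A standalone sketch with a worked example: a 0x100-byte range that starts 0x80 below a page boundary touches two pages.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~((uint64_t)PAGE_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Number of pages a [addr, addr+len) range touches. */
static unsigned long pages_in_range(uint64_t addr, uint32_t len)
{
	return (ALIGN(addr + len, PAGE_SIZE) - (addr & PAGE_MASK))
		>> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", pages_in_range(0x1f80, 0x100));	/* prints 2 */
	return 0;
}
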
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a43eba9d3572..88724d15705d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 0;
}
-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
u32 stat_rate_support;
- if (rate == IB_RATE_PORT_CURRENT)
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
@@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
sizeof(grh->dgid.raw));
}
- err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+ err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
MLX5_SET(ads, path, stat_rate, err);
@@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
}
if (qp_attr_mask & IB_QP_PORT)
- qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ qp_attr->port_num = mqp->port;
if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index b6ee7c3ee1ca..2530e7730635 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
int mlx5_ib_qp_event_init(void);
void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index affcf8fe943c..67841922c7b8 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -96,9 +96,18 @@ static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
atomic64_read(&mr->odp_stats.faults)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_faults_handled",
+ atomic64_read(&mr->odp_stats.faults_handled)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
msg, "page_invalidations",
atomic64_read(&mr->odp_stats.invalidations)))
goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations_handled",
+ atomic64_read(&mr->odp_stats.invalidations_handled)))
+ goto err_table;
+
if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
atomic64_read(&mr->odp_stats.prefetch)))
goto err_table;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 887fd6fa3ba9..793f3c5c4d01 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
-{
- struct umr_common *umrc = &dev->umrc;
- struct ib_qp_attr attr;
- int err;
-
- attr.qp_state = IB_QPS_RESET;
- err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
- if (err) {
- mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
- goto err;
- }
-
- err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
- if (err)
- goto err;
-
- umrc->state = MLX5_UMR_STATE_ACTIVE;
- return 0;
-
-err:
- umrc->state = MLX5_UMR_STATE_ERR;
- return err;
-}
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
@@ -302,6 +278,61 @@ out:
return err;
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_context *umr_context,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ mutex_lock(&umrc->lock);
+ /* Preventing any further WRs to be sent now */
+ if (umrc->state != MLX5_UMR_STATE_RECOVER) {
+ mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
+ umrc->state);
+ umrc->state = MLX5_UMR_STATE_RECOVER;
+ }
+ mutex_unlock(&umrc->lock);
+
+ /* Send a final/barrier WR (the failed one) and wait for its completion.
+ * This ensures that all the previous WRs got a completion before
+ * we set the QP state to RESET.
+ */
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
+ with_data);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
+ goto err;
+ }
+
+ /* Since the QP is in an error state, it will only receive
+ * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier,
+ * we don't care about its status.
+ */
+ wait_for_completion(&umr_context->done);
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
+ goto err;
+ }
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_umr_context *context =
@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5_ib_warn(dev,
"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
umr_context.status, mkey);
- mutex_lock(&umrc->lock);
- err = mlx5r_umr_recover(dev);
- mutex_unlock(&umrc->lock);
+ err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
if (err)
mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
err);
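
The reworked recovery keeps a strict ordering: fence off new work, drain what is already queued with a barrier WR, and only then cycle the QP through RESET back to RTS. A sequential sketch of that ordering with stubbed QP operations; the real code also serializes the state change under umrc->lock:

enum umr_state { ACTIVE, RECOVER, ERR };

static enum umr_state state = ACTIVE;

/* Stand-ins for posting the barrier WR and driving the QP state. */
static int post_barrier_wr(void)    { return 0; }
static void wait_barrier_done(void) { }
static int qp_reset(void)           { return 0; }
static int qp_rst2rts(void)         { return 0; }

static int recover(void)
{
	state = RECOVER;		/* no new WRs accepted past here */

	if (post_barrier_wr())
		goto err;
	wait_barrier_done();		/* all prior WRs now completed */

	if (qp_reset() || qp_rst2rts())
		goto err;

	state = ACTIVE;
	return 0;
err:
	state = ERR;
	return -1;
}

int main(void)
{
	return recover();
}
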
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ba2cd68b53e6..805e37dc7621 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -214,8 +214,8 @@ static const struct attribute_group port_linkcontrol_group = {
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -241,7 +241,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -249,8 +249,8 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -274,9 +274,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_ccmgta_attributes[] = {
+static const struct bin_attribute *const port_ccmgta_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL,
@@ -295,7 +295,7 @@ static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
static const struct attribute_group port_ccmgta_attribute_group = {
.name = "CCMgtA",
.is_bin_visible = qib_ccmgta_is_bin_visible,
- .bin_attrs = port_ccmgta_attributes,
+ .bin_attrs_new = port_ccmgta_attributes,
};
/* Start sl2vl */
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
index 7fe9502ce8d3..86a82a4da0aa 100644
--- a/drivers/infiniband/hw/usnic/usnic_abi.h
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp {
u64 bar_bus_addr;
u32 bar_len;
/*
- * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
+ * WQ, RQ, CQ are explicitly specified because exposing a generic resources interface
* expands the scope of ABI to many files.
*/
u32 wq_cnt;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 13b654ddd3cc..4ddcd5860e0f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -151,34 +151,6 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
break;
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- if (!us_ibdev->ufdev->link_up &&
- netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- ib_event.event = IB_EVENT_PORT_ACTIVE;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else if (us_ibdev->ufdev->link_up &&
- !netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
- ib_event.event = IB_EVENT_PORT_ERR;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else {
- usnic_dbg("Ignoring %s on %s\n",
- netdev_cmd_to_name(event),
- dev_name(&us_ibdev->ib_dev.dev));
- }
- break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
@@ -218,6 +190,50 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
mutex_unlock(&us_ibdev->usdev_lock);
}
+static void usnic_ib_handle_port_event(struct ib_device *ibdev,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
@@ -358,6 +374,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
+ .report_port_event = usnic_ib_handle_port_event,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 768aad364c89..1664d1d7d969 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -143,6 +143,46 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_report_event_handle(struct ib_device *ibdev,
+ struct net_device *ndev,
+ unsigned long event)
+{
+ struct pvrdma_dev *dev = container_of(ibdev, struct pvrdma_dev, ib_dev);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+ PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+ mb();
+
+ if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+ dev_err(&dev->pdev->dev,
+ "failed to activate device during link up\n");
+ else
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct ib_device_ops pvrdma_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_VMW_PVRDMA,
@@ -181,6 +221,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+ .report_port_event = pvrdma_report_event_handle,
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
@@ -362,18 +403,6 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
}
-static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
- enum ib_event_type event)
-{
- struct ib_event ib_event;
-
- memset(&ib_event, 0, sizeof(ib_event));
- ib_event.device = &dev->ib_dev;
- ib_event.element.port_num = port;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
-}
-
static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
@@ -666,21 +695,8 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
switch (event) {
case NETDEV_REBOOT:
- case NETDEV_DOWN:
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
- case NETDEV_UP:
- pvrdma_write_reg(dev, PVRDMA_REG_CTL,
- PVRDMA_DEVICE_CTL_UNQUIESCE);
-
- mb();
-
- if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
- dev_err(&dev->pdev->dev,
- "failed to activate device during link up\n");
- else
- pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
- break;
case NETDEV_UNREGISTER:
ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e6203e26cc06..614009fb9632 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1107,9 +1107,8 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
}
/* initialize timers needed for rc qp */
timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
- hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- qp->s_rnr_timer.function = rvt_rc_rnr_retry;
+ hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Driver needs to set up its private QP structure and do any
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 1ba4a0c8726a..e27478fe9456 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
}
/* initialize rxe device parameters */
-static void rxe_init_device_param(struct rxe_dev *rxe)
+static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
{
- struct net_device *ndev;
-
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
-
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
ndev->dev_addr);
- dev_put(ndev);
-
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port)
/* initialize port state, note IB convention that HCA ports are always
* numbered from 1
*/
-static void rxe_init_ports(struct rxe_dev *rxe)
+static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
{
struct rxe_port *port = &rxe->port;
- struct net_device *ndev;
rxe_init_port_param(port);
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
ndev->dev_addr);
- dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe)
}
/* initialize rxe device state */
-static void rxe_init(struct rxe_dev *rxe)
+static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
{
/* init default device parameters */
- rxe_init_device_param(rxe);
+ rxe_init_device_param(rxe, ndev);
- rxe_init_ports(rxe);
+ rxe_init_ports(rxe, ndev);
rxe_init_pools(rxe);
/* init pending mmap list */
@@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
- rxe_init(rxe);
+ rxe_init(rxe, ndev);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name, ndev);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8cc64ceeb356..132a87e52d5c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -571,11 +571,6 @@ static void rxe_port_event(struct rxe_dev *rxe,
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_ACTIVE;
-
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
dev_info(&rxe->ib_dev.dev, "set active\n");
}
@@ -583,11 +578,6 @@ void rxe_port_up(struct rxe_dev *rxe)
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_DOWN;
-
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
dev_info(&rxe->ib_dev.dev, "set down\n");
@@ -601,7 +591,7 @@ void rxe_set_port_state(struct rxe_dev *rxe)
if (!ndev)
return;
- if (netif_running(ndev) && netif_carrier_ok(ndev))
+ if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
rxe_port_up(rxe);
else
rxe_port_down(rxe);
@@ -623,18 +613,14 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_UNREGISTER:
ib_unregister_device_queued(&rxe->ib_dev);
break;
- case NETDEV_UP:
- rxe_port_up(rxe);
- break;
- case NETDEV_DOWN:
- rxe_port_down(rxe);
- break;
case NETDEV_CHANGEMTU:
rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
- rxe_set_port_state(rxe);
+ if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
break;
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index d2f57ead78ad..003f681e5dc0 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -129,7 +129,7 @@ enum rxe_device_param {
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
- RXE_PORT_MAX_MSG_SZ = 0x800000,
+ RXE_PORT_MAX_MSG_SZ = (1UL << 31),
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
RXE_PORT_LID = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 67567d62195e..d9cb682fd71f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
{
struct rxe_pool *pool = elem->pool;
struct xarray *xa = &pool->xa;
- static int timeout = RXE_POOL_TIMEOUT;
int ret, err = 0;
void *xa_ret;
@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
* return to rdma-core
*/
if (sleepable) {
- if (!completion_done(&elem->complete) && timeout) {
+ if (!completion_done(&elem->complete)) {
ret = wait_for_completion_timeout(&elem->complete,
- timeout);
+ msecs_to_jiffies(50000));
/* Shouldn't happen. There are still references to
* the object but, rather than deadlock, free the
* object or pass back to rdma-core.
*/
if (WARN_ON(!ret))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
} else {
- unsigned long until = jiffies + timeout;
+ unsigned long until = jiffies + RXE_POOL_TIMEOUT;
/* AH objects are unique in that the destroy_ah verb
* can be called in atomic context. This delay
@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
mdelay(1);
if (WARN_ON(!completion_done(&elem->complete)))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
if (pool->cleanup)
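Editor's note (not part of the patch): wait_for_completion_timeout() counts its timeout in jiffies and returns 0 only when the wait timed out. The old code passed the raw RXE_POOL_TIMEOUT constant straight through, so the hunk above switches the sleeping path to an explicit msecs_to_jiffies(50000) budget and reports the should-never-happen expiry as -ETIMEDOUT instead of -EINVAL. Reduced to a sketch (elem and err as in __rxe_cleanup() above):

	unsigned long left;

	left = wait_for_completion_timeout(&elem->complete,
					   msecs_to_jiffies(50000));
	if (!left)
		err = -ETIMEDOUT;	/* 0 from the API means timed out */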
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 8a5fc20fd186..6152a0fdfc8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -62,6 +62,7 @@ static int rxe_query_port(struct ib_device *ibdev,
ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
&attr->active_width);
+ attr->state = ib_get_curr_port_state(ndev);
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(ndev) & IFF_UP)
@@ -696,7 +697,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
for (i = 0; i < ibwr->num_sge; i++)
length += ibwr->sg_list[i].length;
- if (length > (1UL << 31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
rxe_err_qp(qp, "message length too long\n");
break;
}
@@ -980,8 +981,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- /* IBA max message size is 2^31 */
- if (length >= (1UL<<31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
err = -EINVAL;
rxe_dbg("message length too long\n");
goto err_out;
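Editor's note (not part of the patch): both the send and receive post paths now compare the summed SGE lengths against the shared RXE_PORT_MAX_MSG_SZ limit, which the rxe_param.h hunk above raises to 1UL << 31 to match the comment it replaces ("IBA max message size is 2^31"). A sketch of the common check:

	u64 length = 0;
	int i;

	for (i = 0; i < ibwr->num_sge; i++)
		length += ibwr->sg_list[i].length;

	if (length > RXE_PORT_MAX_MSG_SZ)
		return -EINVAL;		/* message longer than IBA allows */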
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 14d3103aee6f..b17752bd1ecc 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -379,14 +379,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
sdev = to_siw_dev(base_dev);
switch (event) {
- case NETDEV_UP:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
- break;
-
- case NETDEV_DOWN:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
- break;
-
case NETDEV_REGISTER:
/*
* Device registration now handled only by
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 7ca0297d68a4..5ac8bd450d24 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -189,10 +189,9 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
- attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ attr->state = ib_get_curr_port_state(ndev);
+ attr->phys_state = attr->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
- attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
/*
* All zero
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 4e17d546d4cc..bf38ac6f87c4 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
list_del(&dev->entry);
mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
ib_dealloc_pd(dev->ib_pd);
kfree(dev);
}
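Editor's note (not part of the patch): dev_free() now gives the pool user a symmetric teardown hook, invoked after the device leaves the pool list but before its PD is deallocated. A sketched shape of such an ops table (field names other than ->deinit are hypothetical):

	struct demo_dev;

	struct demo_pool_ops {
		int (*init)(struct demo_dev *dev);	/* optional setup */
		void (*deinit)(struct demo_dev *dev);	/* undo init */
	};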
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2916e77f589b..1378651735f6 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2844,7 +2844,8 @@ static int srp_target_alloc(struct scsi_target *starget)
return 0;
}
-static int srp_slave_configure(struct scsi_device *sdev)
+static int srp_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct srp_target_port *target = host_to_target(shost);
@@ -3067,7 +3068,7 @@ static const struct scsi_host_template srp_template = {
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
- .slave_configure = srp_slave_configure,
+ .sdev_configure = srp_sdev_configure,
.info = srp_target_info,
.init_cmd_priv = srp_init_cmd_priv,
.exit_cmd_priv = srp_exit_cmd_priv,
@@ -3978,7 +3979,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
return host;
put_host:
- device_del(&host->dev);
put_device(&host->dev);
return NULL;
}
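Editor's note (not part of the patch): two independent fixes here. First, the SCSI host template callback is renamed from .slave_configure to .sdev_configure and now receives the queue_limits to adjust, rather than having the driver poke the request queue directly. A sketch of the new shape (demo_* names are illustrative):

	static int demo_sdev_configure(struct scsi_device *sdev,
				       struct queue_limits *lim)
	{
		/* e.g. clamp lim->max_hw_sectors here if the target needs it */
		return 0;
	}

	static const struct scsi_host_template demo_template = {
		.sdev_configure	= demo_sdev_configure,
	};

Second, the srp_add_port() error path drops its device_del(): if registration never completed, put_device() alone must release the reference, and calling device_del() as well would unwind state that was never set up (editor's reading of the hunk).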
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 3bdbd34314b3..88ecdf5218ee 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -152,20 +152,6 @@ config INPUT_EVDEV
To compile this driver as a module, choose M here: the
module will be called evdev.
-config INPUT_EVBUG
- tristate "Event debugging"
- help
- Say Y here if you have a problem with the input subsystem and
- want all events (keypresses, mouse movements), to be output to
- the system log. While this is useful for debugging, it's also
- a security threat - your keypresses include your passwords, of
- course.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called evbug.
-
config INPUT_KUNIT_TEST
tristate "KUnit tests for Input" if !KUNIT_ALL_TESTS
depends on INPUT && KUNIT
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index c78753274921..930b64d2115e 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_INPUT_LEDS) += input-leds.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
obj-$(CONFIG_INPUT_EVDEV) += evdev.o
-obj-$(CONFIG_INPUT_EVBUG) += evbug.o
obj-$(CONFIG_INPUT_KEYBOARD) += keyboard/
obj-$(CONFIG_INPUT_MOUSE) += mouse/
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
deleted file mode 100644
index e47bdf92088a..000000000000
--- a/drivers/input/evbug.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 1999-2001 Vojtech Pavlik
- */
-
-/*
- * Input driver event debug module - dumps all events into syslog
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/init.h>
-#include <linux/device.h>
-
-MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
-MODULE_DESCRIPTION("Input driver event debug module");
-MODULE_LICENSE("GPL");
-
-static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
-{
- printk(KERN_DEBUG pr_fmt("Event. Dev: %s, Type: %d, Code: %d, Value: %d\n"),
- dev_name(&handle->dev->dev), type, code, value);
-}
-
-static int evbug_connect(struct input_handler *handler, struct input_dev *dev,
- const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "evbug";
-
- error = input_register_handle(handle);
- if (error)
- goto err_free_handle;
-
- error = input_open_device(handle);
- if (error)
- goto err_unregister_handle;
-
- printk(KERN_DEBUG pr_fmt("Connected device: %s (%s at %s)\n"),
- dev_name(&dev->dev),
- dev->name ?: "unknown",
- dev->phys ?: "unknown");
-
- return 0;
-
- err_unregister_handle:
- input_unregister_handle(handle);
- err_free_handle:
- kfree(handle);
- return error;
-}
-
-static void evbug_disconnect(struct input_handle *handle)
-{
- printk(KERN_DEBUG pr_fmt("Disconnected device: %s\n"),
- dev_name(&handle->dev->dev));
-
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
-}
-
-static const struct input_device_id evbug_ids[] = {
- { .driver_info = 1 }, /* Matches all devices */
- { }, /* Terminating zero entry */
-};
-
-MODULE_DEVICE_TABLE(input, evbug_ids);
-
-static struct input_handler evbug_handler = {
- .event = evbug_event,
- .connect = evbug_connect,
- .disconnect = evbug_disconnect,
- .name = "evbug",
- .id_table = evbug_ids,
-};
-
-static int __init evbug_init(void)
-{
- return input_register_handler(&evbug_handler);
-}
-
-static void __exit evbug_exit(void)
-{
- input_unregister_handler(&evbug_handler);
-}
-
-module_init(evbug_init);
-module_exit(evbug_exit);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 609a5f01761b..b527308cb52e 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -93,7 +93,7 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
- int ret = 0;
+ int error;
int id;
if (!test_bit(EV_FF, dev->evbit))
@@ -114,22 +114,20 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
}
if (!test_bit(effect->type, ff->ffbit)) {
- ret = compat_effect(ff, effect);
- if (ret)
- return ret;
+ error = compat_effect(ff, effect);
+ if (error)
+ return error;
}
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
if (effect->id == -1) {
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
- if (id >= ff->max_effects) {
- ret = -ENOSPC;
- goto out;
- }
+ if (id >= ff->max_effects)
+ return -ENOSPC;
effect->id = id;
old = NULL;
@@ -137,30 +135,26 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
} else {
id = effect->id;
- ret = check_effect_access(ff, id, file);
- if (ret)
- goto out;
+ error = check_effect_access(ff, id, file);
+ if (error)
+ return error;
old = &ff->effects[id];
- if (!check_effects_compatible(effect, old)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!check_effects_compatible(effect, old))
+ return -EINVAL;
}
- ret = ff->upload(dev, effect, old);
- if (ret)
- goto out;
+ error = ff->upload(dev, effect, old);
+ if (error)
+ return error;
- spin_lock_irq(&dev->event_lock);
- ff->effects[id] = *effect;
- ff->effect_owners[id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->effects[id] = *effect;
+ ff->effect_owners[id] = file;
+ }
- out:
- mutex_unlock(&ff->mutex);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
@@ -178,17 +172,16 @@ static int erase_effect(struct input_dev *dev, int effect_id,
if (error)
return error;
- spin_lock_irq(&dev->event_lock);
- ff->playback(dev, effect_id, 0);
- ff->effect_owners[effect_id] = NULL;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->playback(dev, effect_id, 0);
+ ff->effect_owners[effect_id] = NULL;
+ }
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
- spin_lock_irq(&dev->event_lock);
- ff->effect_owners[effect_id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ ff->effect_owners[effect_id] = file;
return error;
}
@@ -210,16 +203,12 @@ static int erase_effect(struct input_dev *dev, int effect_id,
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
- int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
- mutex_lock(&ff->mutex);
- ret = erase_effect(dev, effect_id, file);
- mutex_unlock(&ff->mutex);
-
- return ret;
+ guard(mutex)(&ff->mutex);
+ return erase_effect(dev, effect_id, file);
}
EXPORT_SYMBOL_GPL(input_ff_erase);
@@ -239,13 +228,11 @@ int input_ff_flush(struct input_dev *dev, struct file *file)
dev_dbg(&dev->dev, "flushing now\n");
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
- mutex_unlock(&ff->mutex);
-
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_flush);
@@ -303,8 +290,6 @@ EXPORT_SYMBOL_GPL(input_ff_event);
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
- struct ff_device *ff;
- size_t ff_dev_size;
int i;
if (!max_effects) {
@@ -317,25 +302,19 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
return -EINVAL;
}
- ff_dev_size = struct_size(ff, effect_owners, max_effects);
- if (ff_dev_size == SIZE_MAX) /* overflow */
- return -EINVAL;
-
- ff = kzalloc(ff_dev_size, GFP_KERNEL);
+ struct ff_device *ff __free(kfree) =
+ kzalloc(struct_size(ff, effect_owners, max_effects),
+ GFP_KERNEL);
if (!ff)
return -ENOMEM;
- ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
- GFP_KERNEL);
- if (!ff->effects) {
- kfree(ff);
+ ff->effects = kcalloc(max_effects, sizeof(*ff->effects), GFP_KERNEL);
+ if (!ff->effects)
return -ENOMEM;
- }
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
- dev->ff = ff;
dev->flush = input_ff_flush;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
@@ -348,6 +327,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
+ dev->ff = no_free_ptr(ff);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
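Editor's note (not part of the patch): the conversions in this file lean on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(&m) holds the mutex until the enclosing scope exits, scoped_guard(spinlock_irq, &l) { ... } bounds a critical section, and a pointer declared with __free(kfree) is freed on any early return unless ownership is handed off with no_free_ptr(). A minimal, self-contained sketch (demo_* names are illustrative, not kernel code):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct demo_obj { int v; };

	static int demo_create(struct mutex *m, struct demo_obj **out)
	{
		struct demo_obj *obj __free(kfree) =
			kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return -ENOMEM;		/* nothing allocated to free */

		guard(mutex)(m);		/* dropped at every return below */

		if (obj->v < 0)			/* illustrative failure check */
			return -EINVAL;		/* obj kfree()d automatically */

		*out = no_free_ptr(obj);	/* success: suppress auto-free */
		return 0;
	}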
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index c321cdabd214..e9120ba6bae0 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -401,13 +401,11 @@ static void ml_effect_timer(struct timer_list *t)
{
struct ml_device *ml = from_timer(ml, t, timer);
struct input_dev *dev = ml->dev;
- unsigned long flags;
pr_debug("timer: updating effects\n");
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
ml_play_effects(ml);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/*
@@ -465,7 +463,7 @@ static int ml_ff_upload(struct input_dev *dev,
struct ml_device *ml = dev->ff->private;
struct ml_effect_state *state = &ml->states[effect->id];
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
__clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -477,8 +475,6 @@ static int ml_ff_upload(struct input_dev *dev,
ml_schedule_timer(ml);
}
- spin_unlock_irq(&dev->event_lock);
-
return 0;
}
@@ -507,12 +503,11 @@ static void ml_ff_destroy(struct ff_device *ff)
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
- struct ml_device *ml;
struct ff_device *ff;
int error;
int i;
- ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL);
+ struct ml_device *ml __free(kfree) = kzalloc(sizeof(*ml), GFP_KERNEL);
if (!ml)
return -ENOMEM;
@@ -525,13 +520,10 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
set_bit(FF_GAIN, dev->ffbit);
error = input_ff_create(dev, FF_MEMLESS_EFFECTS);
- if (error) {
- kfree(ml);
+ if (error)
return error;
- }
ff = dev->ff;
- ff->private = ml;
ff->upload = ml_ff_upload;
ff->playback = ml_ff_playback;
ff->set_gain = ml_ff_set_gain;
@@ -548,6 +540,8 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
ml->states[i].effect = &ff->effects[i];
+ ff->private = no_free_ptr(ml);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create_memless);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 6b04a674f832..337006dd9dcf 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -39,20 +39,20 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int flags)
{
- struct input_mt *mt = dev->mt;
- int i;
-
if (!num_slots)
return 0;
- if (mt)
- return mt->num_slots != num_slots ? -EINVAL : 0;
+
+ if (dev->mt)
+ return dev->mt->num_slots != num_slots ? -EINVAL : 0;
+
/* Arbitrary limit for avoiding too large memory allocation. */
if (num_slots > 1024)
return -EINVAL;
- mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ struct input_mt *mt __free(kfree) =
+ kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
if (!mt)
- goto err_mem;
+ return -ENOMEM;
mt->num_slots = num_slots;
mt->flags = flags;
@@ -86,21 +86,18 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int n2 = num_slots * num_slots;
mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL);
if (!mt->red)
- goto err_mem;
+ return -ENOMEM;
}
/* Mark slots as 'inactive' */
- for (i = 0; i < num_slots; i++)
+ for (unsigned int i = 0; i < num_slots; i++)
input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1);
/* Mark slots as 'unused' */
mt->frame = 1;
- dev->mt = mt;
+ dev->mt = no_free_ptr(mt);
return 0;
-err_mem:
- kfree(mt);
- return -ENOMEM;
}
EXPORT_SYMBOL(input_mt_init_slots);
@@ -285,14 +282,10 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
mt->frame++;
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
@@ -339,11 +332,8 @@ void input_mt_sync_frame(struct input_dev *dev)
return;
if (mt->flags & INPUT_MT_DROP_UNUSED) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 688e3cb1c2a0..9c57713a6151 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -162,7 +162,7 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
if (interval > poller->poll_interval_max)
return -EINVAL;
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
poller->poll_interval = interval;
@@ -172,8 +172,6 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
input_dev_poller_queue_work(poller);
}
- mutex_unlock(&input->mutex);
-
return count;
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7f0477e04ad2..c9e3ac64bcd0 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -115,23 +115,23 @@ static void input_pass_values(struct input_dev *dev,
lockdep_assert_held(&dev->event_lock);
- rcu_read_lock();
+ scoped_guard(rcu) {
+ handle = rcu_dereference(dev->grab);
+ if (handle) {
+ count = handle->handle_events(handle, vals, count);
+ break;
+ }
- handle = rcu_dereference(dev->grab);
- if (handle) {
- count = handle->handle_events(handle, vals, count);
- } else {
- list_for_each_entry_rcu(handle, &dev->h_list, d_node)
+ list_for_each_entry_rcu(handle, &dev->h_list, d_node) {
if (handle->open) {
count = handle->handle_events(handle, vals,
count);
if (!count)
break;
}
+ }
}
- rcu_read_unlock();
-
/* trigger auto repeat for key events */
if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
for (v = vals; v != vals + count; v++) {
@@ -390,13 +390,9 @@ void input_handle_event(struct input_dev *dev,
void input_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
- unsigned long flags;
-
if (is_event_supported(type, dev->evbit, EV_MAX)) {
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_handle_event(dev, type, code, value);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_event);
@@ -417,18 +413,15 @@ void input_inject_event(struct input_handle *handle,
{
struct input_dev *dev = handle->dev;
struct input_handle *grab;
- unsigned long flags;
if (is_event_supported(type, dev->evbit, EV_MAX)) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
+ guard(rcu)();
- rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
input_handle_event(dev, type, code, value);
- rcu_read_unlock();
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_inject_event);
@@ -526,22 +519,15 @@ EXPORT_SYMBOL(input_copy_abs);
int input_grab_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->grab)
+ return -EBUSY;
- if (dev->grab) {
- retval = -EBUSY;
- goto out;
+ rcu_assign_pointer(dev->grab, handle);
}
- rcu_assign_pointer(dev->grab, handle);
-
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_grab_device);
@@ -576,9 +562,8 @@ void input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);
@@ -592,67 +577,57 @@ EXPORT_SYMBOL(input_release_device);
int input_open_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
-
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->going_away) {
- retval = -ENODEV;
- goto out;
- }
+ int error;
- handle->open++;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->going_away)
+ return -ENODEV;
- if (handle->handler->passive_observer)
- goto out;
+ handle->open++;
- if (dev->users++ || dev->inhibited) {
- /*
- * Device is already opened and/or inhibited,
- * so we can exit immediately and report success.
- */
- goto out;
- }
+ if (handle->handler->passive_observer)
+ return 0;
- if (dev->open) {
- retval = dev->open(dev);
- if (retval) {
- dev->users--;
- handle->open--;
+ if (dev->users++ || dev->inhibited) {
/*
- * Make sure we are not delivering any more events
- * through this handle
+ * Device is already opened and/or inhibited,
+ * so we can exit immediately and report success.
*/
- synchronize_rcu();
- goto out;
+ return 0;
}
- }
- if (dev->poller)
- input_dev_poller_start(dev->poller);
+ if (dev->open) {
+ error = dev->open(dev);
+ if (error) {
+ dev->users--;
+ handle->open--;
+ /*
+ * Make sure we are not delivering any more
+ * events through this handle.
+ */
+ synchronize_rcu();
+ return error;
+ }
+ }
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ if (dev->poller)
+ input_dev_poller_start(dev->poller);
+ }
+
+ return 0;
}
EXPORT_SYMBOL(input_open_device);
int input_flush_device(struct input_handle *handle, struct file *file)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->flush)
- retval = dev->flush(dev, file);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->flush)
+ return dev->flush(dev, file);
+ }
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_flush_device);
@@ -667,7 +642,7 @@ void input_close_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
@@ -688,8 +663,6 @@ void input_close_device(struct input_handle *handle)
*/
synchronize_rcu();
}
-
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);
@@ -726,11 +699,10 @@ static void input_disconnect_device(struct input_dev *dev)
* not to protect access to dev->going_away but rather to ensure
* that there are no threads in the middle of input_open_device()
*/
- mutex_lock(&dev->mutex);
- dev->going_away = true;
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ dev->going_away = true;
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
/*
* Simulate keyup events for all pressed keys so that handlers
@@ -743,8 +715,6 @@ static void input_disconnect_device(struct input_dev *dev)
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
-
- spin_unlock_irq(&dev->event_lock);
}
/**
@@ -901,14 +871,9 @@ static int input_default_setkeycode(struct input_dev *dev,
*/
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
- unsigned long flags;
- int retval;
+ guard(spinlock_irqsave)(&dev->event_lock);
- spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, ke);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return dev->getkeycode(dev, ke);
}
EXPORT_SYMBOL(input_get_keycode);
@@ -923,18 +888,17 @@ EXPORT_SYMBOL(input_get_keycode);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke)
{
- unsigned long flags;
unsigned int old_keycode;
- int retval;
+ int error;
if (ke->keycode > KEY_MAX)
return -EINVAL;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
- retval = dev->setkeycode(dev, ke, &old_keycode);
- if (retval)
- goto out;
+ error = dev->setkeycode(dev, ke, &old_keycode);
+ if (error)
+ return error;
/* Make sure KEY_RESERVED did not get enabled. */
__clear_bit(KEY_RESERVED, dev->keybit);
@@ -962,10 +926,7 @@ int input_set_keycode(struct input_dev *dev,
EV_SYN, SYN_REPORT, 1);
}
- out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_set_keycode);
@@ -1799,26 +1760,21 @@ static void input_dev_toggle(struct input_dev *dev, bool activate)
*/
void input_reset_device(struct input_dev *dev)
{
- unsigned long flags;
-
- mutex_lock(&dev->mutex);
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(mutex)(&dev->mutex);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_dev_toggle(dev, true);
if (input_dev_release_keys(dev))
input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
static int input_inhibit_device(struct input_dev *dev)
{
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->close)
@@ -1827,54 +1783,50 @@ static int input_inhibit_device(struct input_dev *dev)
input_dev_poller_stop(dev->poller);
}
- spin_lock_irq(&dev->event_lock);
- input_mt_release_slots(dev);
- input_dev_release_keys(dev);
- input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
- input_dev_toggle(dev, false);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ input_mt_release_slots(dev);
+ input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_dev_toggle(dev, false);
+ }
dev->inhibited = true;
-out:
- mutex_unlock(&dev->mutex);
return 0;
}
static int input_uninhibit_device(struct input_dev *dev)
{
- int ret = 0;
+ int error;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (!dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->open) {
- ret = dev->open(dev);
- if (ret)
- goto out;
+ error = dev->open(dev);
+ if (error)
+ return error;
}
if (dev->poller)
input_dev_poller_start(dev->poller);
}
dev->inhibited = false;
- spin_lock_irq(&dev->event_lock);
- input_dev_toggle(dev, true);
- spin_unlock_irq(&dev->event_lock);
-out:
- mutex_unlock(&dev->mutex);
- return ret;
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ input_dev_toggle(dev, true);
+
+ return 0;
}
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1886,8 +1838,6 @@ static int input_dev_suspend(struct device *dev)
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1895,13 +1845,11 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Restore state of LEDs and sounds, if any were active. */
input_dev_toggle(input_dev, true);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1909,7 +1857,7 @@ static int input_dev_freeze(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1918,8 +1866,6 @@ static int input_dev_freeze(struct device *dev)
if (input_dev_release_keys(input_dev))
input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1927,13 +1873,11 @@ static int input_dev_poweroff(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -2274,18 +2218,16 @@ static void __input_unregister_device(struct input_dev *dev)
input_disconnect_device(dev);
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
+ scoped_guard(mutex, &input_mutex) {
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
device_del(&dev->dev);
}
@@ -2308,9 +2250,8 @@ static void devm_input_device_unregister(struct device *dev, void *res)
static void input_repeat_key(struct timer_list *t)
{
struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
if (!dev->inhibited &&
test_bit(dev->repeat_key, dev->key) &&
@@ -2324,8 +2265,6 @@ static void input_repeat_key(struct timer_list *t)
mod_timer(&dev->timer, jiffies +
msecs_to_jiffies(dev->rep[REP_PERIOD]));
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
@@ -2370,10 +2309,10 @@ static int input_device_tune_vals(struct input_dev *dev)
if (!vals)
return -ENOMEM;
- spin_lock_irq(&dev->event_lock);
- dev->max_vals = max_vals;
- swap(dev->vals, vals);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ dev->max_vals = max_vals;
+ swap(dev->vals, vals);
+ }
/* Because of swap() above, this frees the old vals memory */
kfree(vals);
@@ -2465,18 +2404,15 @@ int input_register_device(struct input_dev *dev)
path ? path : "N/A");
kfree(path);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- goto err_device_del;
+ error = -EINTR;
+ scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) {
+ list_add_tail(&dev->node, &input_dev_list);
- list_add_tail(&dev->node, &input_dev_list);
+ list_for_each_entry(handler, &input_handler_list, node)
+ input_attach_handler(dev, handler);
- list_for_each_entry(handler, &input_handler_list, node)
- input_attach_handler(dev, handler);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
if (dev->devres_managed) {
dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
@@ -2556,20 +2492,17 @@ int input_register_handler(struct input_handler *handler)
if (error)
return error;
- INIT_LIST_HEAD(&handler->h_list);
+ scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) {
+ INIT_LIST_HEAD(&handler->h_list);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- return error;
-
- list_add_tail(&handler->node, &input_handler_list);
+ list_add_tail(&handler->node, &input_handler_list);
- list_for_each_entry(dev, &input_dev_list, node)
- input_attach_handler(dev, handler);
+ list_for_each_entry(dev, &input_dev_list, node)
+ input_attach_handler(dev, handler);
- input_wakeup_procfs_readers();
+ input_wakeup_procfs_readers();
+ }
- mutex_unlock(&input_mutex);
return 0;
}
EXPORT_SYMBOL(input_register_handler);
@@ -2585,7 +2518,7 @@ void input_unregister_handler(struct input_handler *handler)
{
struct input_handle *handle, *next;
- mutex_lock(&input_mutex);
+ guard(mutex)(&input_mutex);
list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
handler->disconnect(handle);
@@ -2594,8 +2527,6 @@ void input_unregister_handler(struct input_handler *handler)
list_del_init(&handler->node);
input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);
@@ -2615,19 +2546,17 @@ int input_handler_for_each_handle(struct input_handler *handler, void *data,
int (*fn)(struct input_handle *, void *))
{
struct input_handle *handle;
- int retval = 0;
+ int retval;
- rcu_read_lock();
+ guard(rcu)();
list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
retval = fn(handle, data);
if (retval)
- break;
+ return retval;
}
- rcu_read_unlock();
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
@@ -2715,27 +2644,22 @@ int input_register_handle(struct input_handle *handle)
{
struct input_handler *handler = handle->handler;
struct input_dev *dev = handle->dev;
- int error;
input_handle_setup_event_handler(handle);
/*
* We take dev->mutex here to prevent race with
* input_release_device().
*/
- error = mutex_lock_interruptible(&dev->mutex);
- if (error)
- return error;
-
- /*
- * Filters go to the head of the list, normal handlers
- * to the tail.
- */
- if (handler->filter)
- list_add_rcu(&handle->d_node, &dev->h_list);
- else
- list_add_tail_rcu(&handle->d_node, &dev->h_list);
-
- mutex_unlock(&dev->mutex);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ /*
+ * Filters go to the head of the list, normal handlers
+ * to the tail.
+ */
+ if (handler->filter)
+ list_add_rcu(&handle->d_node, &dev->h_list);
+ else
+ list_add_tail_rcu(&handle->d_node, &dev->h_list);
+ }
/*
* Since we are supposed to be called from ->connect()
@@ -2771,9 +2695,8 @@ void input_unregister_handle(struct input_handle *handle)
/*
* Take dev->mutex to prevent race with input_release_device().
*/
- mutex_lock(&dev->mutex);
- list_del_rcu(&handle->d_node);
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ list_del_rcu(&handle->d_node);
synchronize_rcu();
}
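Editor's note (not part of the patch): input_grab_device(), input_open_device() and friends above use scoped_cond_guard(mutex_intr, <on-fail>, &m): the lock is taken with mutex_lock_interruptible(), the second argument executes if a signal interrupts the attempt, and any return from inside the braces drops the lock. A minimal sketch (demo_* names are illustrative):

	static int demo_grab(struct mutex *m, bool *busy)
	{
		scoped_cond_guard(mutex_intr, return -EINTR, m) {
			if (*busy)
				return -EBUSY;	/* mutex released here too */
			*busy = true;
		}

		return 0;
	}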
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index f6e92db4d789..3a5873e5fcb3 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -14,6 +14,7 @@
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
+#include <linux/string_choices.h>
#define DRIVER_DESC "Microsoft SideWinder joystick family driver"
@@ -677,7 +678,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
case 48: /* Ambiguous */
if (j == 14) { /* ID length 14*3 -> FFP */
sw->type = SW_ID_FFP;
- sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on");
+ sprintf(comment, " [AC %s]", str_off_on(sw_get_bits(idbuf,38,1,3)));
} else
sw->type = SW_ID_PP;
break;
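Editor's note (not part of the patch): <linux/string_choices.h> provides small helpers such as str_on_off()/str_up_down() plus reversed wrappers like str_off_on() and str_down_up(), replacing open-coded ternaries; the dlink-dir685-touchkeys.c and lm8323.c hunks below apply the same conversion. A sketch (the demo_ function is illustrative):

	#include <linux/printk.h>
	#include <linux/string_choices.h>

	static void demo_report(bool ac_disabled, bool key_pressed)
	{
		pr_debug("AC %s\n", str_off_on(ac_disabled));	/* "off" if true */
		pr_debug("key %s\n", str_down_up(key_pressed));	/* "down" if true */
	}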
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ff9bc87f2f70..c33e6f33265b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -140,6 +140,7 @@ static const struct xpad_device {
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
@@ -150,6 +151,7 @@ static const struct xpad_device {
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
@@ -176,6 +178,7 @@ static const struct xpad_device {
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
{ 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE },
{ 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
{ 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
@@ -237,6 +240,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -275,12 +279,15 @@ static const struct xpad_device {
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -305,6 +312,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -341,6 +349,7 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
@@ -364,6 +373,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
@@ -372,19 +382,32 @@ static const struct xpad_device {
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
- { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -483,6 +506,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */
@@ -514,7 +538,9 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
+ XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */
@@ -522,16 +548,20 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
- XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
- XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
- XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
- XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */
- XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
+ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -684,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ec94fcfa4cde..adf0f311996c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85,
+ 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85,
0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
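Editor's note (not part of the patch): the new value fills a previously dead slot in the set-2 translation table. In include/uapi/linux/input-event-codes.h the neighbouring table values 183..185 are KEY_F13..KEY_F15, and 193 is KEY_F23:

	#define KEY_F13			183
	#define KEY_F23			193

so the scancode in question now reports F23 (the hunk itself does not say which hardware emits it).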
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
index 993cdbda509e..4184dd2eaeeb 100644
--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
struct dir685_touchkeys {
@@ -48,7 +49,7 @@ static irqreturn_t dir685_tk_irq_thread(int irq, void *data)
changed = tk->cur_key ^ key;
for_each_set_bit(i, &changed, num_bits) {
dev_dbg(tk->dev, "key %d is %s\n", i,
- test_bit(i, &key) ? "down" : "up");
+ str_down_up(test_bit(i, &key)));
input_report_key(tk->input, tk->codes[i], test_bit(i, &key));
}
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index e26bf2956344..e19442c6f80f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -21,6 +21,7 @@
#include <linux/platform_data/lm8323.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Commands to send to the chip. */
#define LM8323_CMD_READ_ID 0x80 /* Read chip ID. */
@@ -269,7 +270,7 @@ static void process_keys(struct lm8323_chip *lm)
unsigned short keycode = lm->keymap[key];
dev_vdbg(&lm->client->dev, "key 0x%02x %s\n",
- key, isdown ? "down" : "up");
+ key, str_down_up(isdown));
if (lm->kp_enabled) {
input_event(lm->idev, EV_MSC, MSC_SCAN, key);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 6a852c76331b..13d135257e06 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -917,6 +917,18 @@ config INPUT_HISI_POWERKEY
To compile this driver as a module, choose M here: the
module will be called hisi_powerkey.
+config INPUT_QNAP_MCU
+ tristate "Input Support for QNAP MCU controllers"
+ depends on MFD_QNAP_MCU
+ help
+ This option enables support for input elements available on
+ embedded controllers used in QNAP NAS devices.
+
+ This includes a polled power-button as well as a beeper.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qnap-mcu-input.
+
config INPUT_RAVE_SP_PWRBUTTON
tristate "RAVE SP Power button Driver"
depends on RAVE_SP_CORE
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4f7f736831ba..6d91804d0a6f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o
+obj-$(CONFIG_INPUT_QNAP_MCU) += qnap-mcu-input.o
obj-$(CONFIG_INPUT_RAVE_SP_PWRBUTTON) += rave-sp-pwrbutton.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c
index f6e5fc807b4d..ab2e0a401904 100644
--- a/drivers/input/misc/ideapad_slidebar.c
+++ b/drivers/input/misc/ideapad_slidebar.c
@@ -121,7 +121,7 @@ static void slidebar_mode_set(u8 mode)
}
static bool slidebar_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended = false;
@@ -219,7 +219,7 @@ static int __init ideapad_probe(struct platform_device* pdev)
input_set_capability(slidebar_input_dev, EV_ABS, ABS_X);
input_set_abs_params(slidebar_input_dev, ABS_X, 0, 0xff, 0, 0);
- err = i8042_install_filter(slidebar_i8042_filter);
+ err = i8042_install_filter(slidebar_i8042_filter, NULL);
if (err) {
dev_err(&pdev->dev,
"Failed to install i8042 filter: %d\n", err);
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 22022d11470d..80b917944b51 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -100,11 +100,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
@@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = {
struct iqs7222_reg_grp_desc {
u16 base;
+ u16 val_len;
int num_row;
int num_col;
};
@@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
}
static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
- u16 reg, void *val, u16 num_val)
+ u16 reg, void *val, u16 val_len)
{
u8 reg_buf[sizeof(__be16)];
int ret, i;
@@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
{
.addr = client->addr,
.flags = I2C_M_RD,
- .len = num_val * sizeof(__le16),
+ .len = val_len,
.buf = (u8 *)val,
},
};
@@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
__le16 val_buf;
int error;
- error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
+ error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
if (error)
return error;
@@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
}
static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
- u16 reg, const void *val, u16 num_val)
+ u16 reg, const void *val, u16 val_len)
{
int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
- int val_len = num_val * sizeof(__le16);
int msg_len = reg_len + val_len;
int ret, i;
struct i2c_client *client = iqs7222->client;
@@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
{
__le16 val_buf = cpu_to_le16(val);
- return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
+ return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
}
static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
@@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
/*
* Acknowledge reset before writing any registers in case the device
- * suffers a spurious reset during initialization. Because this step
- * may change the reserved fields of the second filter beta register,
- * its cache must be updated.
- *
- * Writing the second filter beta register, in turn, may clobber the
- * system status register. As such, the filter beta register pair is
- * written first to protect against this hazard.
+ * suffers a spurious reset during initialization.
*/
if (dir == WRITE) {
- u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
- u16 filt_setup;
-
error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
iqs7222->sys_setup[0] |
IQS7222_SYS_SETUP_ACK_RESET);
if (error)
return error;
-
- error = iqs7222_read_word(iqs7222, reg, &filt_setup);
- if (error)
- return error;
-
- iqs7222->filt_setup[1] &= GENMASK(7, 0);
- iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
}
/*
@@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int num_col = dev_desc->reg_grps[i].num_col;
u16 reg = dev_desc->reg_grps[i].base;
__le16 *val_buf;
+ u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);
u16 *val;
if (!num_col)
@@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
switch (dir) {
case READ:
error = iqs7222_read_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
for (k = 0; k < num_col; k++)
val[k] = le16_to_cpu(val_buf[k]);
break;
@@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
for (k = 0; k < num_col; k++)
val_buf[k] = cpu_to_le16(val[k]);
error = iqs7222_write_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
break;
default:
@@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
int error, i;
error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
- ARRAY_SIZE(dev_id));
+ sizeof(dev_id));
if (error)
return error;
@@ -2915,7 +2909,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
__le16 status[IQS7222_MAX_COLS_STAT];
error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
- num_stat);
+ num_stat * sizeof(*status));
if (error)
return error;
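
For reference, the burst helpers above now take a transfer size in bytes rather than a count of 16-bit words, which is what lets the three-byte filter group be moved in a single transaction. A minimal sketch of the length selection, outside the patch (demo_* names are hypothetical, not the driver's):

#include <linux/types.h>

struct demo_reg_grp {
	u16 base;
	u16 val_len;	/* transfer size in bytes; 0 = derive from num_col */
	int num_col;	/* number of 16-bit columns */
};

static u16 demo_xfer_len(const struct demo_reg_grp *grp)
{
	/* GNU "a ?: b" shorthand, as in iqs7222_dev_init() above */
	return grp->val_len ? : grp->num_col * sizeof(__le16);
}
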
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 0e646f1b257b..cdb9be737e48 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77693.h>
@@ -94,7 +95,7 @@ static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
on << MAINCTRL1_BIASEN_SHIFT);
if (error) {
dev_err(haptic->dev, "failed to %s bias: %d\n",
- on ? "enable" : "disable", error);
+ str_enable_disable(on), error);
return error;
}
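
Several hunks in this series swap open-coded ternaries for the <linux/string_choices.h> helpers. A minimal sketch of the idiom (demo_log() is hypothetical):

#include <linux/device.h>
#include <linux/string_choices.h>

static void demo_log(struct device *dev, bool on, bool down)
{
	/* each helper returns a constant string for a boolean */
	dev_info(dev, "power %s, key %s\n",
		 str_on_off(on), str_down_up(down));
}
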
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 08412239b8e6..0c661140fb88 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -38,6 +38,8 @@
#define MMA8450_CTRL_REG1 0x38
#define MMA8450_CTRL_REG2 0x39
+#define MMA8450_ID 0xc6
+#define MMA8450_WHO_AM_I 0x0f
static int mma8450_read(struct i2c_client *c, unsigned int off)
{
@@ -148,8 +150,20 @@ static void mma8450_close(struct input_dev *input)
*/
static int mma8450_probe(struct i2c_client *c)
{
+ struct i2c_adapter *adapter = c->adapter;
struct input_dev *input;
- int err;
+ int err, client_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return dev_err_probe(&c->dev, -EINVAL,
+ "I2C adapter doesn't support SMBUS BYTE");
+
+ client_id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);
+ if (client_id != MMA8450_ID)
+ return dev_err_probe(&c->dev, -EINVAL,
+ "unexpected chip ID 0x%x (vs 0x%x)\n",
+ client_id, MMA8450_ID);
input = devm_input_allocate_device(&c->dev);
if (!input)
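
Note on the WHO_AM_I check above: i2c_smbus_read_byte_data() returns a negative errno on failure, so a single comparison against the 8-bit chip ID also rejects failed reads. A standalone sketch (demo_* names are hypothetical):

#include <linux/i2c.h>

#define DEMO_WHO_AM_I	0x0f
#define DEMO_CHIP_ID	0xc6

static int demo_check_id(struct i2c_client *client)
{
	int id = i2c_smbus_read_byte_data(client, DEMO_WHO_AM_I);

	/* a negative errno can never equal the 8-bit ID */
	return id == DEMO_CHIP_ID ? 0 : -ENODEV;
}
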
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index eb4173f9c820..7ba8d166d68c 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -187,6 +187,12 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
return 0;
}
+static void bbnsm_pwrkey_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -223,6 +229,7 @@ static struct platform_driver bbnsm_pwrkey_driver = {
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
+ .remove = bbnsm_pwrkey_remove,
};
module_platform_driver(bbnsm_pwrkey_driver);
diff --git a/drivers/input/misc/qnap-mcu-input.c b/drivers/input/misc/qnap-mcu-input.c
new file mode 100644
index 000000000000..76e62f0816c1
--- /dev/null
+++ b/drivers/input/misc/qnap-mcu-input.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for input events on QNAP-MCUs
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/input.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <uapi/linux/input-event-codes.h>
+
+/*
+ * The power-key needs to be pressed for a while to create an event,
+ * so there is no use for overly frequent polling.
+ */
+#define POLL_INTERVAL 500
+
+struct qnap_mcu_input_dev {
+ struct input_dev *input;
+ struct qnap_mcu *mcu;
+ struct device *dev;
+
+ struct work_struct beep_work;
+ int beep_type;
+};
+
+static void qnap_mcu_input_poll(struct input_dev *input)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+ static const u8 cmd[] = { '@', 'C', 'V' };
+ u8 reply[4];
+ int state, ret;
+
+ /* poll the power button */
+ ret = qnap_mcu_exec(idev->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return;
+
+ /* First bytes must mirror the sent command */
+ if (memcmp(cmd, reply, sizeof(cmd))) {
+ dev_err(idev->dev, "malformed data received\n");
+ return;
+ }
+
+ state = reply[3] - 0x30;
+ input_event(input, EV_KEY, KEY_POWER, state);
+ input_sync(input);
+}
+
+static void qnap_mcu_input_beeper_work(struct work_struct *work)
+{
+ struct qnap_mcu_input_dev *idev =
+ container_of(work, struct qnap_mcu_input_dev, beep_work);
+ const u8 cmd[] = { '@', 'C', (idev->beep_type == SND_TONE) ? '3' : '2' };
+
+ qnap_mcu_exec_with_ack(idev->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_input_event(struct input_dev *input, unsigned int type,
+ unsigned int code, int value)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+
+ if (type != EV_SND || (code != SND_BELL && code != SND_TONE))
+ return -EOPNOTSUPP;
+
+ if (value < 0)
+ return -EINVAL;
+
+ /* beep runtime is determined by the MCU */
+ if (value == 0)
+ return 0;
+
+ /* Schedule work to actually turn the beeper on */
+ idev->beep_type = code;
+ schedule_work(&idev->beep_work);
+
+ return 0;
+}
+
+static void qnap_mcu_input_close(struct input_dev *input)
+{
+ struct qnap_mcu_input_dev *idev = input_get_drvdata(input);
+
+ cancel_work_sync(&idev->beep_work);
+}
+
+static int qnap_mcu_input_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ struct qnap_mcu_input_dev *idev;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int ret;
+
+ idev = devm_kzalloc(dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return dev_err_probe(dev, -ENOMEM, "no memory for input device\n");
+
+ idev->input = input;
+ idev->dev = dev;
+ idev->mcu = mcu;
+
+ input_set_drvdata(input, idev);
+
+ input->name = "qnap-mcu";
+ input->phys = "qnap-mcu-input/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+ input->event = qnap_mcu_input_event;
+ input->close = qnap_mcu_input_close;
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_SND, SND_BELL);
+ input_set_capability(input, EV_SND, SND_TONE);
+
+ INIT_WORK(&idev->beep_work, qnap_mcu_input_beeper_work);
+
+ ret = input_setup_polling(input, qnap_mcu_input_poll);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to set up polling\n");
+
+ input_set_poll_interval(input, POLL_INTERVAL);
+
+ ret = input_register_device(input);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to register input device\n");
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_input_driver = {
+ .probe = qnap_mcu_input_probe,
+ .driver = {
+ .name = "qnap-mcu-input",
+ },
+};
+module_platform_driver(qnap_mcu_input_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-input");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU input driver");
+MODULE_LICENSE("GPL");
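
The '@CV' reply parsed in qnap_mcu_input_poll() echoes the three command bytes and appends one ASCII digit, so the key state falls out of a subtraction. A userspace sketch with illustrative buffer contents:

#include <stdio.h>

int main(void)
{
	const unsigned char reply[4] = { '@', 'C', 'V', '1' };
	int state = reply[3] - '0';	/* 0x30 == '0' */

	printf("power key %s\n", state ? "pressed" : "released");
	return 0;
}
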
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 3666ba6d1f30..9711f5c7c78a 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#define MAX_MAGNITUDE_SHIFT 16
@@ -44,7 +45,7 @@ static int regulator_haptic_toggle(struct regulator_haptic *haptic, bool on)
if (error) {
dev_err(haptic->dev,
"failed to switch regulator %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a841883660fb..fee1796da3d0 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/string_choices.h>
#include <linux/input.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
@@ -199,7 +200,7 @@ static int elan_set_power(struct elan_tp_data *data, bool on)
} while (--repeat > 0);
dev_err(&data->client->dev, "failed to set power %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2735f86c23cc..aba57abe6978 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -665,23 +665,50 @@ static void synaptics_pt_stop(struct serio *serio)
priv->pt_port = NULL;
}
+static int synaptics_pt_open(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = true;
+
+ return 0;
+}
+
+static void synaptics_pt_close(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = false;
+}
+
static int synaptics_is_pt_packet(u8 *buf)
{
return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
}
-static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet)
+static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet)
{
- struct psmouse *child = psmouse_from_serio(ptport);
+ struct serio *ptport;
- if (child && child->state == PSMOUSE_ACTIVATED) {
- serio_interrupt(ptport, packet[1], 0);
- serio_interrupt(ptport, packet[4], 0);
- serio_interrupt(ptport, packet[5], 0);
- if (child->pktsize == 4)
- serio_interrupt(ptport, packet[2], 0);
- } else {
- serio_interrupt(ptport, packet[1], 0);
+ ptport = priv->pt_port;
+ if (!ptport)
+ return;
+
+ serio_interrupt(ptport, packet[1], 0);
+
+ if (priv->pt_port_open) {
+ struct psmouse *child = psmouse_from_serio(ptport);
+
+ if (child->state == PSMOUSE_ACTIVATED) {
+ serio_interrupt(ptport, packet[4], 0);
+ serio_interrupt(ptport, packet[5], 0);
+ if (child->pktsize == 4)
+ serio_interrupt(ptport, packet[2], 0);
+ }
}
}
@@ -720,6 +747,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
+ serio->open = synaptics_pt_open;
+ serio->close = synaptics_pt_close;
serio->parent = psmouse->ps2dev.serio;
psmouse->pt_activate = synaptics_pt_activate;
@@ -1216,11 +1245,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) &&
synaptics_is_pt_packet(psmouse->packet)) {
- if (priv->pt_port)
- synaptics_pass_pt_packet(priv->pt_port,
- psmouse->packet);
- } else
+ synaptics_pass_pt_packet(priv, psmouse->packet);
+ } else {
synaptics_process_packet(psmouse);
+ }
return PSMOUSE_FULL_PACKET;
}
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 899aee598632..3853165b6b3a 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -188,6 +188,7 @@ struct synaptics_data {
bool disable_gesture; /* disable gestures */
struct serio *pt_port; /* Pass-through serio port */
+ bool pt_port_open;
/*
* Last received Advanced Gesture Mode (AGM) packet. An AGM packet
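
The open/close callbacks added above flip pt_port_open under the parent port's RX lock, so synaptics_pass_pt_packet(), which runs from the interrupt path, never sees a half-attached child. A minimal sketch of the scoped-guard pattern (demo_* names are hypothetical):

#include <linux/serio.h>

struct demo_priv {
	bool pt_port_open;
};

static void demo_set_open(struct serio *parent, struct demo_priv *priv,
			  bool open)
{
	/* RX on @parent stays paused for the rest of this scope */
	guard(serio_pause_rx)(parent);
	priv->pt_port_open = open;
}
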
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 127cfdc8668a..6ed9fc34948c 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Mivvy M310 */
@@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
- * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
- * none of them have an external PS/2 port so this can safely be set for
- * all of them.
+ * after suspend fixable with the forcenorestore quirk.
* Clevo barebones come with board_vendor and/or system_vendor set to
* either the very generic string "Notebook" and/or a different value
* for each individual reseller. The only somewhat universal way to
@@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
- /*
- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
- * the keyboard very laggy for ~5 seconds after boot and
- * sometimes also after resume.
- * However both are required for the keyboard to not fail
- * completely sometimes after boot or resume.
- */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/*
* At least one modern Clevo barebone has the touchpad connected both
@@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "P640RE"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB51RF"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB71RD"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PC70DR"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
@@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 509330a27880..cab5a4c5baf5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -179,8 +179,8 @@ static struct platform_device *i8042_platform_device;
static struct notifier_block i8042_kbd_bind_notifier_block;
static bool i8042_handle_data(int irq);
-static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+static i8042_filter_t i8042_platform_filter;
+static void *i8042_platform_filter_context;
void i8042_lock_chip(void)
{
@@ -194,8 +194,7 @@ void i8042_unlock_chip(void)
}
EXPORT_SYMBOL(i8042_unlock_chip);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+int i8042_install_filter(i8042_filter_t filter, void *context)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -203,12 +202,12 @@ int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EBUSY;
i8042_platform_filter = filter;
+ i8042_platform_filter_context = context;
return 0;
}
EXPORT_SYMBOL(i8042_install_filter);
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *port))
+int i8042_remove_filter(i8042_filter_t filter)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -216,6 +215,7 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EINVAL;
i8042_platform_filter = NULL;
+ i8042_platform_filter_context = NULL;
return 0;
}
EXPORT_SYMBOL(i8042_remove_filter);
@@ -480,7 +480,10 @@ static bool i8042_filter(unsigned char data, unsigned char str,
}
}
- if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
+ if (!i8042_platform_filter)
+ return false;
+
+ if (i8042_platform_filter(data, str, serio, i8042_platform_filter_context)) {
dbg("Filtered out by platform filter\n");
return true;
}
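
With the filter callback reworked to carry a context pointer (the i8042_filter_t typedef is assumed to be introduced in the matching include/linux/i8042.h change), consumers no longer need file-scope state to find their data. A minimal sketch of a consumer (demo_* names are hypothetical):

#include <linux/i8042.h>
#include <linux/serio.h>

struct demo_state {
	unsigned int events;
};

static struct demo_state demo_state;

static bool demo_filter(unsigned char data, unsigned char str,
			struct serio *port, void *context)
{
	struct demo_state *state = context;

	state->events++;
	return false;	/* never swallow the byte in this sketch */
}

static int demo_install(void)
{
	return i8042_install_filter(demo_filter, &demo_state);
}

static void demo_remove(void)
{
	i8042_remove_filter(demo_filter);
}
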
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 066dc04003fa..67264c5b49cb 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1021,7 +1021,7 @@ static int ads7846_setup_pendown(struct spi_device *spi,
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
} else {
- ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN);
+ ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN);
if (IS_ERR(ts->gpio_pendown)) {
dev_err(&spi->dev, "failed to request pendown GPIO\n");
return PTR_ERR(ts->gpio_pendown);
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index f4e950920e84..eb3cc2befcdf 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -23,6 +23,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
#include <linux/input/mt.h>
@@ -102,7 +103,7 @@ static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d",
- down ? "down" : "up", id, x, y, z);
+ str_down_up(down), id, x, y, z);
if (down) {
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index 3fc03cf0ca23..7f8cfdd106fa 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -165,7 +165,7 @@ struct goodix_berlin_core {
struct device *dev;
struct regmap *regmap;
struct regulator *avdd;
- struct regulator *iovdd;
+ struct regulator *vddio;
struct gpio_desc *reset_gpio;
struct touchscreen_properties props;
struct goodix_berlin_fw_version fw_version;
@@ -248,22 +248,22 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
{
int error;
- error = regulator_enable(cd->iovdd);
+ error = regulator_enable(cd->vddio);
if (error) {
- dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
+ dev_err(cd->dev, "Failed to enable vddio: %d\n", error);
return error;
}
- /* Vendor waits 3ms for IOVDD to settle */
+ /* Vendor waits 3ms for VDDIO to settle */
usleep_range(3000, 3100);
error = regulator_enable(cd->avdd);
if (error) {
dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
- goto err_iovdd_disable;
+ goto err_vddio_disable;
}
- /* Vendor waits 15ms for IOVDD to settle */
+ /* Vendor waits 15ms for AVDD to settle */
usleep_range(15000, 15100);
gpiod_set_value_cansleep(cd->reset_gpio, 0);
@@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
err_dev_reset:
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
-err_iovdd_disable:
- regulator_disable(cd->iovdd);
+err_vddio_disable:
+ regulator_disable(cd->vddio);
return error;
}
@@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
{
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
- regulator_disable(cd->iovdd);
+ regulator_disable(cd->vddio);
}
static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
@@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
return dev_err_probe(dev, PTR_ERR(cd->avdd),
"Failed to request avdd regulator\n");
- cd->iovdd = devm_regulator_get(dev, "iovdd");
- if (IS_ERR(cd->iovdd))
- return dev_err_probe(dev, PTR_ERR(cd->iovdd),
- "Failed to request iovdd regulator\n");
+ cd->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(cd->vddio))
+ return dev_err_probe(dev, PTR_ERR(cd->vddio),
+ "Failed to request vddio regulator\n");
error = goodix_berlin_power_on(cd);
if (error) {
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index abeae9102323..3c8bbe284b73 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -22,6 +22,7 @@
#define IST3032C_WHOAMI 0x32c
#define IST3038C_WHOAMI 0x38c
+#define IST3038H_WHOAMI 0x38d
#define IST3038B_REG_CHIPID 0x30
#define IST3038B_WHOAMI 0x30380b
@@ -428,11 +429,19 @@ static const struct imagis_properties imagis_3038c_data = {
.protocol_b = true,
};
+static const struct imagis_properties imagis_3038h_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3038H_WHOAMI,
+};
+
static const struct of_device_id imagis_of_match[] = {
{ .compatible = "imagis,ist3032c", .data = &imagis_3032c_data },
{ .compatible = "imagis,ist3038", .data = &imagis_3038_data },
{ .compatible = "imagis,ist3038b", .data = &imagis_3038b_data },
{ .compatible = "imagis,ist3038c", .data = &imagis_3038c_data },
+ { .compatible = "imagis,ist3038h", .data = &imagis_3038h_data },
{ },
};
MODULE_DEVICE_TABLE(of, imagis_of_match);
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 27941245e962..88d376090e6e 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1153,11 +1153,13 @@ static const struct i2c_device_id wdt87xx_dev_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id wdt87xx_acpi_id[] = {
{ "WDHT0001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+#endif
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
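
The #ifdef above pairs with the usual ACPI_PTR() idiom: when CONFIG_ACPI is off the table is compiled out and the reference collapses to NULL, avoiding an unused-variable warning. A sketch of the pattern (the .acpi_match_table assignment is assumed, not shown in this hunk; demo names are hypothetical):

#include <linux/acpi.h>
#include <linux/i2c.h>

#ifdef CONFIG_ACPI
static const struct acpi_device_id demo_acpi_id[] = {
	{ "DEMO0001", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, demo_acpi_id);
#endif

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo",
		/* NULL when CONFIG_ACPI is disabled */
		.acpi_match_table = ACPI_PTR(demo_acpi_id),
	},
};
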
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index b956e4050f38..88f311c11020 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -116,6 +116,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
node->data = &qp->clocks[i];
icc_node_add(node, provider);
/* link to the next node, slave */
@@ -129,6 +134,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/* no data for slave node */
icc_node_add(node, provider);
onecell->nodes[j++] = node;
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 362fb9b0a198..1219f4f23d40 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -337,6 +337,15 @@ config INTERCONNECT_QCOM_SM8650
This is a driver for the Qualcomm Network-on-Chip on SM8650-based
platforms.
+config INTERCONNECT_QCOM_SM8750
+ tristate "Qualcomm SM8750 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8750-based
+ platforms.
+
config INTERCONNECT_QCOM_X1E80100
tristate "Qualcomm X1E80100 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 9997728c02bf..7887b1e8d69b 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -40,6 +40,7 @@ qnoc-sm8350-objs := sm8350.o
qnoc-sm8450-objs := sm8450.o
qnoc-sm8550-objs := sm8550.o
qnoc-sm8650-objs := sm8650.o
+qnoc-sm8750-objs := sm8750.o
qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
@@ -80,5 +81,6 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8650) += qnoc-sm8650.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8750) += qnoc-sm8750.o
obj-$(CONFIG_INTERCONNECT_QCOM_X1E80100) += qnoc-x1e80100.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index a8ed435f696c..ea1042d38128 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -503,6 +503,7 @@ int qnoc_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
qp->num_intf_clks = cd_num;
for (i = 0; i < cd_num; i++)
@@ -597,7 +598,6 @@ regmap_done:
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
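
The reordering above matters if struct icc_onecell_data annotates its nodes[] flex array with __counted_by(num_nodes), as assumed here: the count member must be valid before any element is written, or FORTIFY/UBSAN bounds checks fire. A generic sketch of the rule (demo_* names are hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_data {
	unsigned int num_nodes;
	void *nodes[] __counted_by(num_nodes);
};

static struct demo_data *demo_alloc(unsigned int n)
{
	struct demo_data *data = kzalloc(struct_size(data, nodes, n),
					 GFP_KERNEL);
	if (!data)
		return NULL;

	data->num_nodes = n;	/* set the count before touching nodes[] */
	return data;
}
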
diff --git a/drivers/interconnect/qcom/sm8750.c b/drivers/interconnect/qcom/sm8750.c
new file mode 100644
index 000000000000..69bc22222075
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8750.c
@@ -0,0 +1,1705 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8750-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+#define SM8750_MASTER_GPU_TCU 0
+#define SM8750_MASTER_SYS_TCU 1
+#define SM8750_MASTER_APPSS_PROC 2
+#define SM8750_MASTER_LLCC 3
+#define SM8750_MASTER_QDSS_BAM 4
+#define SM8750_MASTER_QSPI_0 5
+#define SM8750_MASTER_QUP_1 6
+#define SM8750_MASTER_QUP_2 7
+#define SM8750_MASTER_A1NOC_SNOC 8
+#define SM8750_MASTER_A2NOC_SNOC 9
+#define SM8750_MASTER_CAMNOC_HF 10
+#define SM8750_MASTER_CAMNOC_NRT_ICP_SF 11
+#define SM8750_MASTER_CAMNOC_RT_CDM_SF 12
+#define SM8750_MASTER_CAMNOC_SF 13
+#define SM8750_MASTER_GEM_NOC_CNOC 14
+#define SM8750_MASTER_GEM_NOC_PCIE_SNOC 15
+#define SM8750_MASTER_GFX3D 16
+#define SM8750_MASTER_LPASS_GEM_NOC 17
+#define SM8750_MASTER_LPASS_LPINOC 18
+#define SM8750_MASTER_LPIAON_NOC 19
+#define SM8750_MASTER_LPASS_PROC 20
+#define SM8750_MASTER_MDP 21
+#define SM8750_MASTER_MSS_PROC 22
+#define SM8750_MASTER_MNOC_HF_MEM_NOC 23
+#define SM8750_MASTER_MNOC_SF_MEM_NOC 24
+#define SM8750_MASTER_CDSP_PROC 25
+#define SM8750_MASTER_COMPUTE_NOC 26
+#define SM8750_MASTER_ANOC_PCIE_GEM_NOC 27
+#define SM8750_MASTER_SNOC_SF_MEM_NOC 28
+#define SM8750_MASTER_UBWC_P 29
+#define SM8750_MASTER_CDSP_HCP 30
+#define SM8750_MASTER_VIDEO_CV_PROC 31
+#define SM8750_MASTER_VIDEO_EVA 32
+#define SM8750_MASTER_VIDEO_MVP 33
+#define SM8750_MASTER_VIDEO_V_PROC 34
+#define SM8750_MASTER_CNOC_CFG 35
+#define SM8750_MASTER_CNOC_MNOC_CFG 36
+#define SM8750_MASTER_PCIE_ANOC_CFG 37
+#define SM8750_MASTER_QUP_CORE_0 38
+#define SM8750_MASTER_QUP_CORE_1 39
+#define SM8750_MASTER_QUP_CORE_2 40
+#define SM8750_MASTER_CRYPTO 41
+#define SM8750_MASTER_IPA 42
+#define SM8750_MASTER_QUP_3 43
+#define SM8750_MASTER_SOCCP_AGGR_NOC 44
+#define SM8750_MASTER_SP 45
+#define SM8750_MASTER_GIC 46
+#define SM8750_MASTER_PCIE_0 47
+#define SM8750_MASTER_QDSS_ETR 48
+#define SM8750_MASTER_QDSS_ETR_1 49
+#define SM8750_MASTER_SDCC_2 50
+#define SM8750_MASTER_SDCC_4 51
+#define SM8750_MASTER_UFS_MEM 52
+#define SM8750_MASTER_USB3_0 53
+#define SM8750_SLAVE_UBWC_P 54
+#define SM8750_SLAVE_EBI1 55
+#define SM8750_SLAVE_AHB2PHY_SOUTH 56
+#define SM8750_SLAVE_AHB2PHY_NORTH 57
+#define SM8750_SLAVE_AOSS 58
+#define SM8750_SLAVE_CAMERA_CFG 59
+#define SM8750_SLAVE_CLK_CTL 60
+#define SM8750_SLAVE_CRYPTO_0_CFG 61
+#define SM8750_SLAVE_DISPLAY_CFG 62
+#define SM8750_SLAVE_EVA_CFG 63
+#define SM8750_SLAVE_GFX3D_CFG 64
+#define SM8750_SLAVE_I2C 65
+#define SM8750_SLAVE_I3C_IBI0_CFG 66
+#define SM8750_SLAVE_I3C_IBI1_CFG 67
+#define SM8750_SLAVE_IMEM_CFG 68
+#define SM8750_SLAVE_IPA_CFG 69
+#define SM8750_SLAVE_IPC_ROUTER_CFG 70
+#define SM8750_SLAVE_CNOC_MSS 71
+#define SM8750_SLAVE_PCIE_CFG 72
+#define SM8750_SLAVE_PRNG 73
+#define SM8750_SLAVE_QDSS_CFG 74
+#define SM8750_SLAVE_QSPI_0 75
+#define SM8750_SLAVE_QUP_3 76
+#define SM8750_SLAVE_QUP_1 77
+#define SM8750_SLAVE_QUP_2 78
+#define SM8750_SLAVE_SDCC_2 79
+#define SM8750_SLAVE_SDCC_4 80
+#define SM8750_SLAVE_SOCCP 81
+#define SM8750_SLAVE_SPSS_CFG 82
+#define SM8750_SLAVE_TCSR 83
+#define SM8750_SLAVE_TLMM 84
+#define SM8750_SLAVE_TME_CFG 85
+#define SM8750_SLAVE_UFS_MEM_CFG 86
+#define SM8750_SLAVE_USB3_0 87
+#define SM8750_SLAVE_VENUS_CFG 88
+#define SM8750_SLAVE_VSENSE_CTRL_CFG 89
+#define SM8750_SLAVE_A1NOC_SNOC 90
+#define SM8750_SLAVE_A2NOC_SNOC 91
+#define SM8750_SLAVE_APPSS 92
+#define SM8750_SLAVE_GEM_NOC_CNOC 93
+#define SM8750_SLAVE_SNOC_GEM_NOC_SF 94
+#define SM8750_SLAVE_LLCC 95
+#define SM8750_SLAVE_LPASS_GEM_NOC 96
+#define SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC 97
+#define SM8750_SLAVE_LPICX_NOC_LPIAON_NOC 98
+#define SM8750_SLAVE_MNOC_HF_MEM_NOC 99
+#define SM8750_SLAVE_MNOC_SF_MEM_NOC 100
+#define SM8750_SLAVE_CDSP_MEM_NOC 101
+#define SM8750_SLAVE_MEM_NOC_PCIE_SNOC 102
+#define SM8750_SLAVE_ANOC_PCIE_GEM_NOC 103
+#define SM8750_SLAVE_CNOC_CFG 104
+#define SM8750_SLAVE_DDRSS_CFG 105
+#define SM8750_SLAVE_CNOC_MNOC_CFG 106
+#define SM8750_SLAVE_PCIE_ANOC_CFG 107
+#define SM8750_SLAVE_QUP_CORE_0 108
+#define SM8750_SLAVE_QUP_CORE_1 109
+#define SM8750_SLAVE_QUP_CORE_2 110
+#define SM8750_SLAVE_BOOT_IMEM 111
+#define SM8750_SLAVE_IMEM 112
+#define SM8750_SLAVE_BOOT_IMEM_2 113
+#define SM8750_SLAVE_SERVICE_CNOC 114
+#define SM8750_SLAVE_SERVICE_MNOC 115
+#define SM8750_SLAVE_SERVICE_PCIE_ANOC 116
+#define SM8750_SLAVE_PCIE_0 117
+#define SM8750_SLAVE_QDSS_STM 118
+#define SM8750_SLAVE_TCU 119
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8750_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8750_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_qup02 = {
+ .name = "qxm_qup02",
+ .id = SM8750_MASTER_QUP_3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8750_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8750_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8750_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8750_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8750_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8750_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8750_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .id = SM8750_MASTER_SOCCP_AGGR_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8750_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8750_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8750_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8750_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8750_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8750_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8750_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .id = SM8750_MASTER_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 33,
+ .links = { SM8750_SLAVE_AHB2PHY_SOUTH, SM8750_SLAVE_AHB2PHY_NORTH,
+ SM8750_SLAVE_CAMERA_CFG, SM8750_SLAVE_CLK_CTL,
+ SM8750_SLAVE_CRYPTO_0_CFG, SM8750_SLAVE_DISPLAY_CFG,
+ SM8750_SLAVE_EVA_CFG, SM8750_SLAVE_GFX3D_CFG,
+ SM8750_SLAVE_I2C, SM8750_SLAVE_I3C_IBI0_CFG,
+ SM8750_SLAVE_I3C_IBI1_CFG, SM8750_SLAVE_IMEM_CFG,
+ SM8750_SLAVE_CNOC_MSS, SM8750_SLAVE_PCIE_CFG,
+ SM8750_SLAVE_PRNG, SM8750_SLAVE_QDSS_CFG,
+ SM8750_SLAVE_QSPI_0, SM8750_SLAVE_QUP_3,
+ SM8750_SLAVE_QUP_1, SM8750_SLAVE_QUP_2,
+ SM8750_SLAVE_SDCC_2, SM8750_SLAVE_SDCC_4,
+ SM8750_SLAVE_SPSS_CFG, SM8750_SLAVE_TCSR,
+ SM8750_SLAVE_TLMM, SM8750_SLAVE_UFS_MEM_CFG,
+ SM8750_SLAVE_USB3_0, SM8750_SLAVE_VENUS_CFG,
+ SM8750_SLAVE_VSENSE_CTRL_CFG, SM8750_SLAVE_CNOC_MNOC_CFG,
+ SM8750_SLAVE_PCIE_ANOC_CFG, SM8750_SLAVE_QDSS_STM,
+ SM8750_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8750_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 12,
+ .links = { SM8750_SLAVE_AOSS, SM8750_SLAVE_IPA_CFG,
+ SM8750_SLAVE_IPC_ROUTER_CFG, SM8750_SLAVE_SOCCP,
+ SM8750_SLAVE_TME_CFG, SM8750_SLAVE_APPSS,
+ SM8750_SLAVE_CNOC_CFG, SM8750_SLAVE_DDRSS_CFG,
+ SM8750_SLAVE_BOOT_IMEM, SM8750_SLAVE_IMEM,
+ SM8750_SLAVE_BOOT_IMEM_2, SM8750_SLAVE_SERVICE_CNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8750_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8750_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8750_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8750_MASTER_APPSS_PROC,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 4,
+ .links = { SM8750_SLAVE_UBWC_P, SM8750_SLAVE_GEM_NOC_CNOC,
+ SM8750_SLAVE_LLCC, SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8750_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+ .name = "qnm_lpass_gemnoc",
+ .id = SM8750_MASTER_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8750_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8750_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8750_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8750_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8750_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8750_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_ubwc_p = {
+ .name = "qnm_ubwc_p",
+ .id = SM8750_MASTER_UBWC_P,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8750_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .id = SM8750_MASTER_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .id = SM8750_MASTER_LPASS_LPINOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .id = SM8750_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8750_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8750_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_nrt_icp_sf = {
+ .name = "qnm_camnoc_nrt_icp_sf",
+ .id = SM8750_MASTER_CAMNOC_NRT_ICP_SF,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_rt_cdm_sf = {
+ .name = "qnm_camnoc_rt_cdm_sf",
+ .id = SM8750_MASTER_CAMNOC_RT_CDM_SF,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8750_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8750_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8750_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8750_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_eva = {
+ .name = "qnm_video_eva",
+ .id = SM8750_MASTER_VIDEO_EVA,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_mvp = {
+ .name = "qnm_video_mvp",
+ .id = SM8750_MASTER_VIDEO_MVP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8750_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .id = SM8750_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .id = SM8750_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+ .name = "qsm_pcie_anoc_cfg",
+ .id = SM8750_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3 = {
+ .name = "xm_pcie3",
+ .id = SM8750_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8750_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8750_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8750_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8750_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8750_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8750_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8750_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8750_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8750_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8750_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_eva_cfg = {
+ .name = "qhs_eva_cfg",
+ .id = SM8750_SLAVE_EVA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8750_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+ .name = "qhs_i2c",
+ .id = SM8750_SLAVE_I2C,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
+ .name = "qhs_i3c_ibi0_cfg",
+ .id = SM8750_SLAVE_I3C_IBI0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
+ .name = "qhs_i3c_ibi1_cfg",
+ .id = SM8750_SLAVE_I3C_IBI1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8750_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8750_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .id = SM8750_SLAVE_PCIE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8750_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8750_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8750_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup02 = {
+ .name = "qhs_qup02",
+ .id = SM8750_SLAVE_QUP_3,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8750_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8750_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8750_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8750_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8750_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8750_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8750_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8750_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8750_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8750_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8750_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .id = SM8750_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+ .name = "qss_pcie_anoc_cfg",
+ .id = SM8750_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8750_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8750_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8750_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8750_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8750_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .id = SM8750_SLAVE_SOCCP,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8750_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .id = SM8750_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .id = SM8750_SLAVE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+ .name = "qss_ddrss_cfg",
+ .id = SM8750_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SM8750_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8750_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_modem_boot_imem = {
+ .name = "qxs_modem_boot_imem",
+ .id = SM8750_SLAVE_BOOT_IMEM_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc_main = {
+ .name = "srvc_cnoc_main",
+ .id = SM8750_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .id = SM8750_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node chs_ubwc_p = {
+ .name = "chs_ubwc_p",
+ .id = SM8750_SLAVE_UBWC_P,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8750_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8750_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8750_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .id = SM8750_SLAVE_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .id = SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .id = SM8750_SLAVE_LPICX_NOC_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8750_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8750_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8750_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8750_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8750_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8750_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8750_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8750_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .enable_mask = BIT(0),
+ .keepalive = true,
+ .num_nodes = 44,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_eva_cfg, &qhs_gpuss_cfg,
+ &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg,
+ &qhs_imem_cfg, &qhs_mss_cfg,
+ &qhs_pcie_cfg, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb3_0, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+ &qss_pcie_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie, &qhs_aoss,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qss_ddrss_cfg, &qxs_boot_imem,
+ &qxs_imem, &qxs_modem_boot_imem,
+ &srvc_cnoc_main, &xs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 5,
+ .nodes = { &qhs_display_cfg, &qhs_i2c,
+ &qhs_qup02, &qhs_qup1,
+ &qhs_qup2 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 9,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_nrt_icp_sf,
+ &qnm_camnoc_rt_cdm_sf, &qnm_camnoc_sf,
+ &qnm_vapss_hcp, &qnm_video_cv_cpu,
+ &qnm_video_mvp, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 14,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &chm_apps, &qnm_gpu,
+ &qnm_mdsp, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_gemnoc,
+ &qnm_pcie, &qnm_snoc_sf,
+ &xm_gic, &chs_ubwc_p,
+ &qns_gem_noc_cnoc, &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_ubw0 = {
+ .name = "UBW0",
+ .num_nodes = 1,
+ .nodes = { &qnm_ubwc_p },
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_3] = &qxm_qup02,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8750_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SOCCP_AGGR_NOC] = &qxm_soccp,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8750_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8750_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_EVA_CFG] = &qhs_eva_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_I2C] = &qhs_i2c,
+ [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg,
+ [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_3] = &qhs_qup02,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm8750_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_BOOT_IMEM_2] = &qxs_modem_boot_imem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc_main,
+ [SLAVE_PCIE_0] = &xs_pcie,
+};
+
+static const struct qcom_icc_desc sm8750_cnoc_main = {
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_ubw0,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_UBWC_P] = &qnm_ubwc_p,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_UBWC_P] = &chs_ubwc_p,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct qcom_icc_desc sm8750_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_lpiaon_noc = {
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_lpicx_noc = {
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sm8750_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_NRT_ICP_SF] = &qnm_camnoc_nrt_icp_sf,
+ [MASTER_CAMNOC_RT_CDM_SF] = &qnm_camnoc_rt_cdm_sf,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_EVA] = &qnm_video_eva,
+ [MASTER_VIDEO_MVP] = &qnm_video_mvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm8750_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8750_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct qcom_icc_desc sm8750_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc sm8750_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8750-aggre1-noc", .data = &sm8750_aggre1_noc},
+ { .compatible = "qcom,sm8750-aggre2-noc", .data = &sm8750_aggre2_noc},
+ { .compatible = "qcom,sm8750-clk-virt", .data = &sm8750_clk_virt},
+ { .compatible = "qcom,sm8750-config-noc", .data = &sm8750_config_noc},
+ { .compatible = "qcom,sm8750-cnoc-main", .data = &sm8750_cnoc_main},
+ { .compatible = "qcom,sm8750-gem-noc", .data = &sm8750_gem_noc},
+ { .compatible = "qcom,sm8750-lpass-ag-noc", .data = &sm8750_lpass_ag_noc},
+ { .compatible = "qcom,sm8750-lpass-lpiaon-noc", .data = &sm8750_lpass_lpiaon_noc},
+ { .compatible = "qcom,sm8750-lpass-lpicx-noc", .data = &sm8750_lpass_lpicx_noc},
+ { .compatible = "qcom,sm8750-mc-virt", .data = &sm8750_mc_virt},
+ { .compatible = "qcom,sm8750-mmss-noc", .data = &sm8750_mmss_noc},
+ { .compatible = "qcom,sm8750-nsp-noc", .data = &sm8750_nsp_noc},
+ { .compatible = "qcom,sm8750-pcie-anoc", .data = &sm8750_pcie_anoc},
+ { .compatible = "qcom,sm8750-system-noc", .data = &sm8750_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm8750",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("SM8750 NoC driver");
+MODULE_LICENSE("GPL");
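The tables above only describe the fabric topology; bandwidth requests come from consumer drivers via the interconnect core. A minimal consumer sketch (the device, path name and bandwidth numbers below are illustrative, not part of this patch) of how a path through these NoCs is requested with the generic API:

	#include <linux/interconnect.h>

	/* Hypothetical consumer: vote bandwidth on a UFS <-> DDR path. */
	static int example_icc_vote(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		/* Name must match "interconnect-names" in the consumer's DT node. */
		path = of_icc_get(dev, "ufs-ddr");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* Average/peak bandwidth in kBps; values are made up. */
		ret = icc_set_bw(path, 200000, 400000);
		if (ret) {
			icc_put(path);
			return ret;
		}

		/* A real driver keeps the path and calls icc_put() on teardown. */
		return 0;
	}

The icc_sync_state callback registered above defers dropping boot-time bandwidth floors until all such consumers have probed and cast their votes.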
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 47c46e4b739e..ec1b5e32b972 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -367,6 +367,18 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
'arm-smmu.disable_bypass' will continue to override this
config.
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ depends on ARM_SMMU
+ default y
+ help
+ Say Y here (the default) to apply the workaround that disables the
+ MMU-500's next-page prefetcher, working around 4 known errata.
+
+ Say N here only when it is certain that no errata related to
+ prefetch enablement apply to the platform. Refer to
+ silicon-errata.rst for details on the errata IDs.
+
config ARM_SMMU_QCOM
def_tristate y
depends on ARM_SMMU && ARCH_QCOM
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1bef5d55b2f9..68debf5ee2d7 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -16,7 +16,6 @@ irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
u8 cntrl_intr, u8 cntrl_log,
u32 status_run_mask, u32 status_overflow_mask);
@@ -41,13 +40,13 @@ void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
-extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern enum protection_domain_mode amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
+struct protection_domain *protection_domain_alloc(void);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
@@ -89,7 +88,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
*/
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
-void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
@@ -184,3 +182,6 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
#endif
+
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index fdb0357e0bb9..23caea22f8dc 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -175,6 +175,7 @@
#define CONTROL_GAM_EN 25
#define CONTROL_GALOG_EN 28
#define CONTROL_GAINT_EN 29
+#define CONTROL_EPH_EN 45
#define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
@@ -220,6 +221,8 @@
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
+#define DTE_DATA1_SYSMGT_MASK GENMASK_ULL(41, 40)
+
#define DEV_ENTRY_IRQ_TBL_EN 0x80
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
@@ -407,8 +410,7 @@
#define DTE_FLAG_HAD (3ULL << 7)
#define DTE_FLAG_GIOV BIT_ULL(54)
#define DTE_FLAG_GV BIT_ULL(55)
-#define DTE_GLX_SHIFT (56)
-#define DTE_GLX_MASK (3)
+#define DTE_GLX GENMASK_ULL(57, 56)
#define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW BIT_ULL(62)
@@ -416,18 +418,18 @@
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DEV_DOMID_MASK 0xffffULL
-#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
-#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
-
-#define DTE_GCR3_SHIFT_A 58
-#define DTE_GCR3_SHIFT_B 16
-#define DTE_GCR3_SHIFT_C 43
+#define DTE_GCR3_14_12 GENMASK_ULL(60, 58)
+#define DTE_GCR3_30_15 GENMASK_ULL(31, 16)
+#define DTE_GCR3_51_31 GENMASK_ULL(63, 43)
#define DTE_GPT_LEVEL_SHIFT 54
+#define DTE_GPT_LEVEL_MASK GENMASK_ULL(55, 54)
#define GCR3_VALID 0x01ULL
+/* DTE[128:179] | DTE[184:191] */
+#define DTE_DATA2_INTR_MASK ~GENMASK_ULL(55, 52)
+
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
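A note on the macro conversion above: the old DTE_GCR3_VAL_x/DTE_GCR3_SHIFT_x pairs are replaced by single GENMASK_ULL() field definitions that are packed with FIELD_PREP() and unpacked with FIELD_GET(). A minimal sketch of how the three GCR3 address slices land in the DTE words (it mirrors what set_dte_gcr3_table() does later in this series; the helper name itself is made up):

	#include <linux/bitfield.h>

	/* Illustrative only: split a 4KB-aligned GCR3 table address into
	 * the three DTE fields defined above. */
	static void pack_gcr3_example(u64 gcr3, u64 *data0, u64 *data1)
	{
		*data0 |= FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12); /* GCR3[14:12] -> DTE[60:58] */
		*data1 |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15); /* GCR3[30:15] -> DTE[31:16] */
		*data1 |= FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31); /* GCR3[51:31] -> DTE[63:43] */
	}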
@@ -468,7 +470,7 @@ extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
do { \
if (amd_iommu_dump) \
- pr_info("AMD-Vi: " format, ## arg); \
+ pr_info(format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -516,6 +518,9 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
+#define for_each_ivhd_dte_flags(entry) \
+ list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list)
+
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -837,6 +842,7 @@ struct devid_map {
struct iommu_dev_data {
/*Protect against attach/detach races */
struct mutex mutex;
+ spinlock_t dte_lock; /* DTE lock for 256-bit access */
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
@@ -881,7 +887,21 @@ extern struct list_head amd_iommu_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u64 data[4];
+ union {
+ u64 data[4];
+ u128 data128[2];
+ };
+};
+
+/*
+ * Structure to store persistent DTE flags from IVHD
+ */
+struct ivhd_dte_flags {
+ struct list_head list;
+ u16 segid;
+ u16 devid_first;
+ u16 devid_last;
+ struct dev_table_entry dte;
};
/*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 0e0a531042ac..cb536d372b12 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,7 +152,7 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
-enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -174,8 +174,8 @@ bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
-LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
- system */
+LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
+LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */
/* Number of IOMMUs present in the system */
static int amd_iommus_present;
@@ -984,36 +984,12 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
+static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- dev_table[devid].data[i] |= (1UL << _bit);
-}
-
-static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __set_dev_entry_bit(dev_table, devid, bit);
-}
-
-static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
-{
- int i = (bit >> 6) & 0x03;
- int _bit = bit & 0x3f;
-
- return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
-}
-
-static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __get_dev_entry_bit(dev_table, devid, bit);
+ dte->data[i] |= (1UL << _bit);
}
static bool __copy_device_table(struct amd_iommu *iommu)
@@ -1081,11 +1057,9 @@ static bool __copy_device_table(struct amd_iommu *iommu)
}
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
- tmp |= DTE_FLAG_GV;
+ tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1136,42 +1110,107 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
{
- int sysmgt;
+ struct ivhd_dte_flags *e;
+ unsigned int best_len = UINT_MAX;
+ struct dev_table_entry *dte = NULL;
+
+ for_each_ivhd_dte_flags(e) {
+ /*
+ * Need to go through the whole list to find the smallest range
+ * that contains the devid.
+ */
+ if ((e->segid == segid) &&
+ (e->devid_first <= devid) && (devid <= e->devid_last)) {
+ unsigned int len = e->devid_last - e->devid_first;
- sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+ if (len < best_len) {
+ dte = &(e->dte);
+ best_len = len;
+ }
+ }
+ }
+ return dte;
+}
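The lookup above deliberately scans the whole list rather than stopping at the first hit: IVHD ranges may nest (for instance a segment-wide DEV_ALL entry plus a narrower DEV_SELECT_RANGE), and the most specific range must win. A toy, self-contained restatement of that rule (the types and values are invented for illustration):

	#include <limits.h>

	struct devid_range { unsigned int first, last, flags; };

	/* Smallest enclosing range wins, as in amd_iommu_get_ivhd_dte_flags(). */
	static const struct devid_range *
	pick_smallest(const struct devid_range *r, int n, unsigned int devid)
	{
		const struct devid_range *best = NULL;
		unsigned int best_len = UINT_MAX;
		int i;

		for (i = 0; i < n; i++) {
			if (r[i].first <= devid && devid <= r[i].last &&
			    r[i].last - r[i].first < best_len) {
				best = &r[i];
				best_len = r[i].last - r[i].first;
			}
		}
		/* For {0x0000,0xffff} and {0x0010,0x001f}, devid 0x15 picks the latter. */
		return best;
	}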
- if (sysmgt == 0x01)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
+static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
+{
+ struct ivhd_dte_flags *e;
+
+ for_each_ivhd_dte_flags(e) {
+ if ((e->segid == segid) &&
+ (e->devid_first == first) &&
+ (e->devid_last == last))
+ return true;
+ }
+ return false;
}
/*
* This function takes the device specific flags read from the ACPI
* table and sets up the device table entry with that information
*/
-static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
- u16 devid, u32 flags, u32 ext_flags)
+static void __init
+set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
+ u32 flags, u32 ext_flags)
{
- if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
- if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
- if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
- if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
- if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
- if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
- if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
+ int i;
+ struct dev_table_entry dte = {};
+
+ /* Parse IVHD DTE setting flags and store information */
+ if (flags) {
+ struct ivhd_dte_flags *d;
+
+ if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
+ return;
- amd_iommu_apply_erratum_63(iommu, devid);
+ d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
+ if (!d)
+ return;
- amd_iommu_set_rlookup_table(iommu, devid);
+ pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
+
+ if (flags & ACPI_DEVFLAG_INITPASS)
+ set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
+ if (flags & ACPI_DEVFLAG_EXTINT)
+ set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
+ if (flags & ACPI_DEVFLAG_NMI)
+ set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
+ if (flags & ACPI_DEVFLAG_SYSMGT1)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
+ if (flags & ACPI_DEVFLAG_SYSMGT2)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
+ if (flags & ACPI_DEVFLAG_LINT0)
+ set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
+ if (flags & ACPI_DEVFLAG_LINT1)
+ set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
+
+ /* Apply erratum 63, which needs info in initial_dte */
+ if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
+ dte.data[0] |= DTE_FLAG_IW;
+
+ memcpy(&d->dte, &dte, sizeof(dte));
+ d->segid = iommu->pci_seg->id;
+ d->devid_first = first;
+ d->devid_last = last;
+ list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
+ }
+
+ for (i = first; i <= last; i++) {
+ if (flags) {
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ memcpy(&dev_table[i], &dte, sizeof(dte));
+ }
+ amd_iommu_set_rlookup_table(iommu, i);
+ }
+}
+
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ u16 devid, u32 flags, u32 ext_flags)
+{
+ set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
}
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
@@ -1239,7 +1278,7 @@ static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
entry->cmd_line = cmd_line;
entry->root_devid = (entry->devid & (~0x7));
- pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
+ pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
entry->cmd_line ? "cmd" : "ivrs",
entry->hid, entry->uid, entry->root_devid);
@@ -1331,15 +1370,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
switch (e->type) {
case IVHD_DEV_ALL:
- DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
-
- for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
- set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+ DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
+ set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x\n",
+ DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1350,8 +1386,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_SELECT_RANGE_START:
- DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1364,8 +1399,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x devid_to: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1382,9 +1416,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS_RANGE:
- DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %04x:%02x:%02x.%x flags: %02x "
- "devid_to: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1401,8 +1433,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1414,8 +1445,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT_RANGE:
- DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1428,21 +1458,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias) {
+ if (alias)
pci_seg->alias_table[dev_i] = devid_to;
- set_dev_entry_from_acpi(iommu,
- devid_to, flags, ext_flags);
- }
- set_dev_entry_from_acpi(iommu, dev_i,
- flags, ext_flags);
}
+ set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -1461,11 +1488,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
var, (int)handle,
seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
ret = add_special_device(type, handle, &devid, false);
if (ret)
@@ -1525,11 +1553,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
flags = e->flags;
@@ -1757,13 +1786,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling it.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ /* GAM requires GA mode. */
+ if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1773,13 +1797,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * XT, GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling them.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
+ /* XT and GAM require GA mode. */
+ if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
}
@@ -2145,7 +2164,7 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
pr_info("V2 page table enabled (Paging mode : %d level)\n",
amd_iommu_gpt_level);
}
@@ -2332,7 +2351,7 @@ static struct irq_chip intcapxt_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = intcapxt_set_affinity,
.irq_set_wake = intcapxt_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
};
static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2575,9 +2594,9 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
return;
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
if (!amd_iommu_snp_en)
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
}
}
@@ -2605,8 +2624,7 @@ static void init_device_table(void)
for_each_pci_segment(pci_seg) {
for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
- __set_dev_entry_bit(pci_seg->dev_table,
- devid, DEV_ENTRY_IRQ_TBL_EN);
+ set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
}
}
@@ -2635,6 +2653,10 @@ static void iommu_init_flags(struct amd_iommu *iommu)
/* Set IOTLB invalidation timeout to 1s */
iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+
+ /* Enable Enhanced Peripheral Page Request Handling */
+ if (check_feature(FEATURE_EPHSUP))
+ iommu_feature_enable(iommu, CONTROL_EPH_EN);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -3033,6 +3055,11 @@ static int __init early_amd_iommu_init(void)
return -EINVAL;
}
+ if (!boot_cpu_has(X86_FEATURE_CX16)) {
+ pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
+ return -EINVAL;
+ }
+
/*
* Validate checksum here so we don't need to do it when
* we actually parse the table
@@ -3059,10 +3086,10 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
if (!amd_iommu_v2_pgtbl_supported()) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
}
}
@@ -3171,7 +3198,7 @@ out:
return true;
}
-static void iommu_snp_enable(void)
+static __init void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
@@ -3185,7 +3212,7 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ if (amd_iommu_pgtable != PD_MODE_V1) {
pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
goto disable_snp;
}
@@ -3196,6 +3223,14 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
+ /*
+ * Enable host SNP support once SNP support has been checked on the IOMMU.
+ */
+ if (snp_rmptable_init()) {
+ pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
pr_info("IOMMU SNP support enabled.\n");
return;
@@ -3295,6 +3330,19 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
ret = state_next();
}
+ /*
+ * SNP platform initialization requires IOMMUs to be fully configured.
+ * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
+ * as unsupported. If the SNP support on IOMMUs has been checked and
+ * host SNP support enabled but RMP enforcement has not been enabled
+ * in IOMMUs, then the system is in a half-baked state, but can limp
+ * along as all memory should be Hypervisor-Owned in the RMP. WARN,
+ * but leave SNP as "supported" to avoid confusing the kernel.
+ */
+ if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
+ !WARN_ON_ONCE(amd_iommu_snp_en))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+
return ret;
}
@@ -3398,25 +3446,28 @@ static bool amd_iommu_sme_check(void)
* IOMMUs
*
****************************************************************************/
-int __init amd_iommu_detect(void)
+void __init amd_iommu_detect(void)
{
int ret;
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return -ENODEV;
+ goto disable_snp;
if (!amd_iommu_sme_check())
- return -ENODEV;
+ goto disable_snp;
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
- return ret;
+ goto disable_snp;
amd_iommu_detected = true;
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
+ return;
- return 1;
+disable_snp:
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
}
/****************************************************************************
@@ -3464,9 +3515,9 @@ static int __init parse_amd_iommu_options(char *str)
} else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V2;
+ amd_iommu_pgtable = PD_MODE_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
} else if (strncmp(str, "nohugepages", 11) == 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 16f40b8000d7..cd5116d8c3b2 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -83,12 +83,142 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data);
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
+
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
+
/****************************************************************************
*
* Helper functions
*
****************************************************************************/
+static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
+{
+ /*
+ * Note:
+ * We use arch_cmpxchg128_local() because:
+ * - The cmpxchg16b instruction is needed mainly for the atomic 128-bit
+ * store to the DTE (the compare-exchange itself is not required, since
+ * this function is already protected by a spin_lock for this DTE).
+ * - Neither the LOCK prefix nor a retry loop is needed, for the same reason.
+ */
+ arch_cmpxchg128_local(ptr, *ptr, val);
+}
+
+static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ struct dev_table_entry old;
+
+ old.data128[1] = ptr->data128[1];
+ /*
+ * Preserve DTE_DATA2_INTR_MASK. This needs to be
+ * done here since it requires to be inside
+ * spin_lock(&dev_data->dte_lock) context.
+ */
+ new->data[2] &= ~DTE_DATA2_INTR_MASK;
+ new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
+
+ amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
+}
+
+static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
+}
+
+/*
+ * Note:
+ * IOMMU reads the entire Device Table entry in a single 256-bit transaction
+ * but the driver programs the DTE using two 128-bit cmpxchg operations.
+ * So, the driver needs to ensure the following:
+ * - DTE[V|GV] bit is being written last when setting.
+ * - DTE[V|GV] bit is being written first when clearing.
+ *
+ * This function is used only by code that updates the DMA translation part of the DTE.
+ * So, only consider control bits related to DMA when updating the entry.
+ */
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
+{
+ unsigned long flags;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+
+ if (!(ptr->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is not valid. */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!(new->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is valid. New DTE is not valid. */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has no guest page table.
+ */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has a guest page table,
+ * new DTE has no guest page table.
+ */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
+ FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
+ /*
+ * Both DTEs are valid and have a guest page table,
+ * but with a different number of levels. So, we need
+ * to update both the upper and lower 128-bit values,
+ * which requires disabling and flushing the DTE.
+ */
+ struct dev_table_entry clear = {};
+
+ /* First disable DTE */
+ write_dte_lower128(ptr, &clear);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+
+ /* Then update DTE */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else {
+ /*
+ * Both DTEs are valid, have a guest page table, and use the
+ * same number of levels. We only need to update the lower
+ * 128 bits, so there is no need to disable the DTE.
+ */
+ write_dte_lower128(ptr, new);
+ }
+
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
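The branch structure above reduces to a small ordering rule: the lower 128 bits hold V and GV, so whichever half guards validity must be written last when setting and first when clearing. An illustrative restatement (not driver code; the enum and helper are invented) of which write plan each case selects:

	/* Mirrors the decision tree in update_dte256() above. */
	enum dte_write_plan {
		UPPER_THEN_LOWER,	/* V/GV being set: validity lands last */
		LOWER_THEN_UPPER,	/* V/GV being cleared: validity goes first */
		DISABLE_THEN_BOTH,	/* GPT level change: transit through V=0 */
		LOWER_ONLY,		/* upper half unchanged */
	};

	static enum dte_write_plan dte_plan(const struct dev_table_entry *cur,
					    const struct dev_table_entry *new)
	{
		if (!(cur->data[0] & DTE_FLAG_V))
			return UPPER_THEN_LOWER;
		if (!(new->data[0] & DTE_FLAG_V))
			return LOWER_THEN_UPPER;
		if (!FIELD_GET(DTE_FLAG_GV, cur->data[0]))
			return UPPER_THEN_LOWER;
		if (!FIELD_GET(DTE_FLAG_GV, new->data[0]))
			return LOWER_THEN_UPPER;
		if (FIELD_GET(DTE_GPT_LEVEL_MASK, cur->data[2]) !=
		    FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2]))
			return DISABLE_THEN_BOTH;
		return LOWER_ONLY;
	}

In the driver, every plan except LOWER_ONLY is followed by iommu_flush_dte_sync(), and DISABLE_THEN_BOTH flushes once after the disable and once after the rewrite.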
+
+static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *dte)
+{
+ unsigned long flags;
+ struct dev_table_entry *ptr;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+ dte->data128[0] = ptr->data128[0];
+ dte->data128[1] = ptr->data128[1];
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -209,6 +339,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return NULL;
mutex_init(&dev_data->mutex);
+ spin_lock_init(&dev_data->dte_lock);
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
@@ -216,7 +347,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
@@ -236,9 +367,11 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct dev_table_entry new;
struct amd_iommu *iommu;
- struct dev_table_entry *dev_table;
+ struct iommu_dev_data *dev_data, *alias_data;
u16 devid = pci_dev_id(pdev);
+ int ret = 0;
if (devid == alias)
return 0;
@@ -247,13 +380,27 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
if (!iommu)
return 0;
- amd_iommu_set_rlookup_table(iommu, alias);
- dev_table = get_dev_table(iommu);
- memcpy(dev_table[alias].data,
- dev_table[devid].data,
- sizeof(dev_table[alias].data));
+ /* Copy the data from pdev */
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ if (!dev_data) {
+ pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
+ ret = -EINVAL;
+ goto out;
+ }
+ get_dte256(iommu, dev_data, &new);
- return 0;
+ /* Setup alias */
+ alias_data = find_dev_data(iommu, alias);
+ if (!alias_data) {
+ pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
+ ret = -EINVAL;
+ goto out;
+ }
+ update_dte256(iommu, alias_data, &new);
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+out:
+ return ret;
}
static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
@@ -526,6 +673,12 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
return -ENOMEM;
dev_data->dev = dev;
+
+ /*
+ * dev_iommu_priv_set() needs to be called before setup_aliases().
+ * Otherwise, a subsequent call to dev_iommu_priv_get() will fail.
+ */
+ dev_iommu_priv_set(dev, dev_data);
setup_aliases(iommu, dev);
/*
@@ -539,8 +692,6 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
dev_data->flags = pdev_get_caps(to_pci_dev(dev));
}
- dev_iommu_priv_set(dev, dev_data);
-
return 0;
}
@@ -571,10 +722,13 @@ static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry dte;
+ struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
+
+ get_dte256(iommu, dev_data, &dte);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -1261,6 +1415,15 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
return iommu_queue_command(iommu, &cmd);
}
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
+{
+ int ret;
+
+ ret = iommu_flush_dte(iommu, devid);
+ if (!ret)
+ iommu_completion_wait(iommu);
+}
+
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
@@ -1603,15 +1766,6 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
domain_flush_complete(domain);
}
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -1826,90 +1980,109 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
return ret;
}
+static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
+ struct dev_table_entry *new)
+{
+ /* All existing DTE must have V bit set */
+ new->data128[0] = DTE_FLAG_V;
+ new->data128[1] = 0;
+}
+
+/*
+ * Note:
+ * The old values for the GCR3 table and GPT have been cleared by the caller.
+ */
+static void set_dte_gcr3_table(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data,
+ struct dev_table_entry *target)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 gcr3;
+
+ if (!gcr3_info->gcr3_tbl)
+ return;
+
+ pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
+ __func__, dev_data->devid, gcr3_info->glx,
+ (unsigned long long)gcr3_info->gcr3_tbl);
+
+ gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+
+ target->data[0] |= DTE_FLAG_GV |
+ FIELD_PREP(DTE_GLX, gcr3_info->glx) |
+ FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
+ if (pdom_is_v2_pgtbl_mode(dev_data->domain))
+ target->data[0] |= DTE_FLAG_GIOV;
+
+ target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
+ FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
+
+ /* Guest page table can only support 4 and 5 levels */
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
+ else
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+}
+
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- u64 pte_root = 0;
- u64 flags = 0;
- u32 old_domid;
- u16 devid = dev_data->devid;
u16 domid;
+ u32 old_domid;
+ struct dev_table_entry *initial_dte;
+ struct dev_table_entry new = {};
struct protection_domain *domain = dev_data->domain;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
if (gcr3_info && gcr3_info->gcr3_tbl)
domid = dev_data->gcr3_info.domid;
else
domid = domain->id;
+ make_clear_dte(dev_data, dte, &new);
+
if (domain->iop.mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] |= iommu_virt_to_phys(domain->iop.root);
- pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
+ new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
/*
- * When SNP is enabled, Only set TV bit when IOMMU
- * page translation is in use.
+ * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
+ * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in
+ * do_iommu_domain_alloc().
*/
- if (!amd_iommu_snp_en || (domid != 0))
- pte_root |= DTE_FLAG_TV;
-
- flags = dev_table[devid].data[1];
-
- if (dev_data->ats_enabled)
- flags |= DTE_FLAG_IOTLB;
+ WARN_ON(amd_iommu_snp_en && (domid == 0));
+ new.data[0] |= DTE_FLAG_TV;
if (dev_data->ppr)
- pte_root |= 1ULL << DEV_ENTRY_PPR;
+ new.data[0] |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
- pte_root |= DTE_FLAG_HAD;
-
- if (gcr3_info && gcr3_info->gcr3_tbl) {
- u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
- u64 glx = gcr3_info->glx;
- u64 tmp;
+ new.data[0] |= DTE_FLAG_HAD;
- pte_root |= DTE_FLAG_GV;
- pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
-
- /* First mask out possible old values for GCR3 table */
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- flags &= ~tmp;
-
- tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- flags &= ~tmp;
-
- /* Encode GCR3 table into DTE */
- tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
- pte_root |= tmp;
-
- tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
- flags |= tmp;
-
- tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
- flags |= tmp;
+ if (dev_data->ats_enabled)
+ new.data[1] |= DTE_FLAG_IOTLB;
- if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
- dev_table[devid].data[2] |=
- ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
- }
+ old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
+ new.data[1] |= domid;
- /* GIOV is supported with V2 page table mode only */
- if (pdom_is_v2_pgtbl_mode(domain))
- pte_root |= DTE_FLAG_GIOV;
+ /*
+ * Restore cached persistent DTE bits, which can be set by information
+ * in IVRS table. See set_dev_entry_from_acpi().
+ */
+ initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
+ if (initial_dte) {
+ new.data128[0] |= initial_dte->data128[0];
+ new.data128[1] |= initial_dte->data128[1];
}
- flags &= ~DEV_DOMID_MASK;
- flags |= domid;
+ set_dte_gcr3_table(iommu, dev_data, &new);
- old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
- dev_table[devid].data[1] = flags;
- dev_table[devid].data[0] = pte_root;
+ update_dte256(iommu, dev_data, &new);
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1921,19 +2094,16 @@ static void set_dte_entry(struct amd_iommu *iommu,
}
}
-static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+/*
+ * Clear DMA-remap related flags to block all DMA (blocked domain)
+ */
+static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- /* remove entry from the device table seen by the hardware */
- dev_table[devid].data[0] = DTE_FLAG_V;
-
- if (!amd_iommu_snp_en)
- dev_table[devid].data[0] |= DTE_FLAG_TV;
+ struct dev_table_entry new = {};
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
- dev_table[devid].data[1] &= DTE_FLAG_MASK;
-
- amd_iommu_apply_erratum_63(iommu, devid);
+ make_clear_dte(dev_data, dte, &new);
+ update_dte256(iommu, dev_data, &new);
}
/* Update and flush DTE for the given device */
@@ -1944,7 +2114,7 @@ static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
if (set)
set_dte_entry(iommu, dev_data);
else
- clear_dte_entry(iommu, dev_data->devid);
+ clear_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
@@ -2007,7 +2177,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
struct protection_domain *pdom)
{
struct pdom_iommu_info *pdom_iommu_info, *curr;
- struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
unsigned long flags;
int ret = 0;
@@ -2036,10 +2205,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
goto out_unlock;
}
- /* Update NUMA Node ID */
- if (cfg->amd.nid == NUMA_NO_NODE)
- cfg->amd.nid = dev_to_node(&iommu->dev->dev);
-
out_unlock:
spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
@@ -2276,16 +2441,15 @@ void protection_domain_free(struct protection_domain *domain)
kfree(domain);
}
-static void protection_domain_init(struct protection_domain *domain, int nid)
+static void protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
xa_init(&domain->iommu_array);
- domain->iop.pgtbl.cfg.amd.nid = nid;
}
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
+struct protection_domain *protection_domain_alloc(void)
{
struct protection_domain *domain;
int domid;
@@ -2301,42 +2465,37 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
}
domain->id = domid;
- protection_domain_init(domain, nid);
+ protection_domain_init(domain);
return domain;
}
static int pdom_setup_pgtable(struct protection_domain *domain,
- unsigned int type, int pgtable)
+ struct device *dev)
{
struct io_pgtable_ops *pgtbl_ops;
+ enum io_pgtable_fmt fmt;
- /* No need to allocate io pgtable ops in passthrough mode */
- if (!(type & __IOMMU_DOMAIN_PAGING))
- return 0;
-
- switch (pgtable) {
- case AMD_IOMMU_V1:
- domain->pd_mode = PD_MODE_V1;
+ switch (domain->pd_mode) {
+ case PD_MODE_V1:
+ fmt = AMD_IOMMU_V1;
break;
- case AMD_IOMMU_V2:
- domain->pd_mode = PD_MODE_V2;
+ case PD_MODE_V2:
+ fmt = AMD_IOMMU_V2;
break;
- default:
- return -EINVAL;
}
- pgtbl_ops =
- alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
+ domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
if (!pgtbl_ops)
return -ENOMEM;
return 0;
}
-static inline u64 dma_max_address(int pgtable)
+static inline u64 dma_max_address(enum protection_domain_mode pgtable)
{
- if (pgtable == AMD_IOMMU_V1)
+ if (pgtable == PD_MODE_V1)
return ~0ULL;
/* V2 with 4/5 level page table */
@@ -2348,31 +2507,21 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
return iommu && (iommu->features & FEATURE_HDSUP);
}
-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
- struct device *dev,
- u32 flags, int pgtable)
+static struct iommu_domain *
+do_iommu_domain_alloc(struct device *dev, u32 flags,
+ enum protection_domain_mode pgtable)
{
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
struct protection_domain *domain;
- struct amd_iommu *iommu = NULL;
int ret;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
- /*
- * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
- * default to use IOMMU_DOMAIN_DMA[_FQ].
- */
- if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
- return ERR_PTR(-EINVAL);
-
- domain = protection_domain_alloc(type,
- dev ? dev_to_node(dev) : NUMA_NO_NODE);
+ domain = protection_domain_alloc();
if (!domain)
return ERR_PTR(-ENOMEM);
- ret = pdom_setup_pgtable(domain, type, pgtable);
+ domain->pd_mode = pgtable;
+ ret = pdom_setup_pgtable(domain, dev);
if (ret) {
pdom_id_free(domain->id);
kfree(domain);
@@ -2384,72 +2533,45 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
domain->domain.geometry.force_aperture = true;
domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
- if (iommu) {
- domain->domain.type = type;
- domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ domain->domain.ops = iommu->iommu.ops->default_domain_ops;
- if (dirty_tracking)
- domain->domain.dirty_ops = &amd_dirty_ops;
- }
+ if (dirty_tracking)
+ domain->domain.dirty_ops = &amd_dirty_ops;
return &domain->domain;
}
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
-{
- struct iommu_domain *domain;
- int pgtable = amd_iommu_pgtable;
-
- /*
- * Force IOMMU v1 page table when allocating
- * domain for pass-through devices.
- */
- if (type == IOMMU_DOMAIN_UNMANAGED)
- pgtable = AMD_IOMMU_V1;
-
- domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
- if (IS_ERR(domain))
- return NULL;
-
- return domain;
-}
-
static struct iommu_domain *
amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
- unsigned int type = IOMMU_DOMAIN_UNMANAGED;
- struct amd_iommu *iommu = NULL;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
if ((flags & ~supported_flags) || user_data)
return ERR_PTR(-EOPNOTSUPP);
- /* Allocate domain with v2 page table if IOMMU supports PASID. */
- if (flags & IOMMU_HWPT_ALLOC_PASID) {
+ switch (flags & supported_flags) {
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ /* Allocate domain with v1 page table for dirty tracking */
+ if (!amd_iommu_hd_support(iommu))
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
+ case IOMMU_HWPT_ALLOC_PASID:
+ /* Allocate domain with v2 page table if IOMMU supports PASID. */
if (!amd_iommu_pasid_supported())
- return ERR_PTR(-EOPNOTSUPP);
-
- return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
- }
-
- /* Allocate domain with v1 page table for dirty tracking */
- if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
- if (iommu && amd_iommu_hd_support(iommu)) {
- return do_iommu_domain_alloc(type, dev,
- flags, AMD_IOMMU_V1);
- }
-
- return ERR_PTR(-EOPNOTSUPP);
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
+ case 0:
+ /* If nothing specific is required, use the kernel command-line default */
+ return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
+ default:
+ break;
}
-
- /* If nothing specific is required use the kernel commandline default */
- return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
+ return ERR_PTR(-EOPNOTSUPP);
}
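The switch above matches exact flag combinations rather than testing bits independently, so any unsupported mix falls through to -EOPNOTSUPP. A condensed summary of the resulting dispatch, in comment form (derived from the cases above, not separate driver code; the DIRTY_TRACKING|PASID row illustrates the default path):

	/*
	 * flags & supported_flags           -> result
	 * 0                                 -> amd_iommu_pgtable (cmdline default)
	 * IOMMU_HWPT_ALLOC_DIRTY_TRACKING   -> PD_MODE_V1, needs amd_iommu_hd_support()
	 * IOMMU_HWPT_ALLOC_PASID            -> PD_MODE_V2, needs amd_iommu_pasid_supported()
	 * DIRTY_TRACKING | PASID            -> -EOPNOTSUPP (no case matches)
	 */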
void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -2475,10 +2597,19 @@ static int blocked_domain_attach_device(struct iommu_domain *domain,
return 0;
}
+static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ amd_iommu_remove_dev_pasid(dev, pasid, old);
+ return 0;
+}
+
static struct iommu_domain blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocked_domain_attach_device,
+ .set_dev_pasid = blocked_domain_set_dev_pasid,
}
};
@@ -2498,7 +2629,7 @@ void amd_iommu_init_identity_domain(void)
identity_domain.id = pdom_id_alloc();
- protection_domain_init(&identity_domain, NUMA_NO_NODE);
+ protection_domain_init(&identity_domain);
}
/* Same as blocked domain except it supports only ops->attach_dev() */
@@ -2666,12 +2797,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct protection_domain *pdomain = to_pdomain(domain);
- struct dev_table_entry *dev_table;
+ struct dev_table_entry *dte;
struct iommu_dev_data *dev_data;
bool domain_flush = false;
struct amd_iommu *iommu;
unsigned long flags;
- u64 pte_root;
+ u64 new;
spin_lock_irqsave(&pdomain->lock, flags);
if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2680,16 +2811,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+ spin_lock(&dev_data->dte_lock);
iommu = get_amd_iommu_from_dev_data(dev_data);
-
- dev_table = get_dev_table(iommu);
- pte_root = dev_table[dev_data->devid].data[0];
-
- pte_root = (enable ? pte_root | DTE_FLAG_HAD :
- pte_root & ~DTE_FLAG_HAD);
+ dte = &get_dev_table(iommu)[dev_data->devid];
+ new = dte->data[0];
+ new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+ dte->data[0] = new;
+ spin_unlock(&dev_data->dte_lock);
/* Flush device DTE */
- dev_table[dev_data->devid].data[0] = pte_root;
device_flush_dte(dev_data);
domain_flush = true;
}
@@ -2890,7 +3020,6 @@ const struct iommu_ops amd_iommu_ops = {
.blocked_domain = &blocked_domain,
.release_domain = &release_domain,
.identity_domain = &identity_domain.domain,
- .domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device,
@@ -2901,7 +3030,6 @@ const struct iommu_ops amd_iommu_ops = {
.def_domain_type = amd_iommu_def_domain_type,
.dev_enable_feat = amd_iommu_dev_enable_feature,
.dev_disable_feat = amd_iommu_dev_disable_feature,
- .remove_dev_pasid = amd_iommu_remove_dev_pasid,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -2956,17 +3084,23 @@ out:
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- u64 dte;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ u64 new;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+ struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
+
+ if (dev_data)
+ spin_lock(&dev_data->dte_lock);
- dte = dev_table[devid].data[2];
- dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
- dte |= iommu_virt_to_phys(table->table);
- dte |= DTE_IRQ_REMAP_INTCTL;
- dte |= DTE_INTTABLEN;
- dte |= DTE_IRQ_REMAP_ENABLE;
+ new = READ_ONCE(dte->data[2]);
+ new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+ new |= iommu_virt_to_phys(table->table);
+ new |= DTE_IRQ_REMAP_INTCTL;
+ new |= DTE_INTTABLEN;
+ new |= DTE_IRQ_REMAP_ENABLE;
+ WRITE_ONCE(dte->data[2], new);
- dev_table[devid].data[2] = dte;
+ if (dev_data)
+ spin_unlock(&dev_data->dte_lock);
}
static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
@@ -3540,7 +3674,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
- irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index 8c73a30c2800..11150cfd6718 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -185,12 +185,13 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct protection_domain *pdom;
int ret;
- pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
+ pdom = protection_domain_alloc();
if (!pdom)
return ERR_PTR(-ENOMEM);
pdom->domain.ops = &amd_sva_domain_ops;
pdom->mn.ops = &sva_mn;
+ pdom->domain.type = IOMMU_DOMAIN_SVA;
ret = mmu_notifier_register(&pdom->mn, mm);
if (ret) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index c7cc613050d9..5aa2e7af58b4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -178,18 +178,12 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
- const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID;
struct arm_smmu_nested_domain *nested_domain;
struct iommu_hwpt_arm_smmuv3 arg;
bool enable_ats = false;
int ret;
- /*
- * Faults delivered to the nested domain are faults that originated by
- * the S1 in the domain. The core code will match all PASIDs when
- * delivering the fault due to user_pasid_table
- */
- if (flags & ~SUPPORTED_FLAGS)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
ret = iommu_copy_struct_from_user(&arg, user_data,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1d3e71569775..9ba596430e7c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -112,6 +112,15 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
* from the current CPU register
*/
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+
+ /*
+ * Note that we don't bother with S1PIE on the SMMU; we just rely on
+ * our default encoding scheme matching direct permissions anyway.
+ * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
+ * either is enabled for CPUs, just in case anyone imagines otherwise.
+ */
+ if (system_supports_poe() || system_supports_gcs())
+ dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
@@ -206,8 +215,12 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
unsigned long asid_bits;
u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
- if (vabits_actual == 52)
+ if (vabits_actual == 52) {
+ /* We don't support LPA2 */
+ if (PAGE_SIZE != SZ_64K)
+ return false;
feat_mask |= ARM_SMMU_FEAT_VAX;
+ }
if ((smmu->features & feat_mask) != feat_mask)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index a5c7002ff75b..358072b4e293 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <kunit/visibility.h>
#include <uapi/linux/iommufd.h>
@@ -83,8 +84,28 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
-static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_device *smmu, u32 flags);
+static const char * const event_str[] = {
+ [EVT_ID_BAD_STREAMID_CONFIG] = "C_BAD_STREAMID",
+ [EVT_ID_STE_FETCH_FAULT] = "F_STE_FETCH",
+ [EVT_ID_BAD_STE_CONFIG] = "C_BAD_STE",
+ [EVT_ID_STREAM_DISABLED_FAULT] = "F_STREAM_DISABLED",
+ [EVT_ID_BAD_SUBSTREAMID_CONFIG] = "C_BAD_SUBSTREAMID",
+ [EVT_ID_CD_FETCH_FAULT] = "F_CD_FETCH",
+ [EVT_ID_BAD_CD_CONFIG] = "C_BAD_CD",
+ [EVT_ID_TRANSLATION_FAULT] = "F_TRANSLATION",
+ [EVT_ID_ADDR_SIZE_FAULT] = "F_ADDR_SIZE",
+ [EVT_ID_ACCESS_FAULT] = "F_ACCESS",
+ [EVT_ID_PERMISSION_FAULT] = "F_PERMISSION",
+ [EVT_ID_VMS_FETCH_FAULT] = "F_VMS_FETCH",
+};
+
+static const char * const event_class_str[] = {
+ [0] = "CD fetch",
+ [1] = "Stage 1 translation table fetch",
+ [2] = "Input address caused fault",
+ [3] = "Reserved",
+};
+
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
static void parse_driver_options(struct arm_smmu_device *smmu)
@@ -1759,17 +1780,49 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
}
/* IRQ and event handlers */
-static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ struct arm_smmu_master *master;
+
+ event->id = FIELD_GET(EVTQ_0_ID, raw[0]);
+ event->sid = FIELD_GET(EVTQ_0_SID, raw[0]);
+ event->ssv = FIELD_GET(EVTQ_0_SSV, raw[0]);
+ event->ssid = event->ssv ? FIELD_GET(EVTQ_0_SSID, raw[0]) : IOMMU_NO_PASID;
+ event->privileged = FIELD_GET(EVTQ_1_PnU, raw[1]);
+ event->instruction = FIELD_GET(EVTQ_1_InD, raw[1]);
+ event->s2 = FIELD_GET(EVTQ_1_S2, raw[1]);
+ event->read = FIELD_GET(EVTQ_1_RnW, raw[1]);
+ event->stag = FIELD_GET(EVTQ_1_STAG, raw[1]);
+ event->stall = FIELD_GET(EVTQ_1_STALL, raw[1]);
+ event->class = FIELD_GET(EVTQ_1_CLASS, raw[1]);
+ event->iova = FIELD_GET(EVTQ_2_ADDR, raw[2]);
+ event->ipa = raw[3] & EVTQ_3_IPA;
+ event->fetch_addr = raw[3] & EVTQ_3_FETCH_ADDR;
+ event->ttrnw = FIELD_GET(EVTQ_1_TT_READ, raw[1]);
+ event->class_tt = false;
+ event->dev = NULL;
+
+ if (event->id == EVT_ID_PERMISSION_FAULT)
+ event->class_tt = (event->class == EVTQ_1_CLASS_TT);
+
+ mutex_lock(&smmu->streams_mutex);
+ master = arm_smmu_find_master(smmu, event->sid);
+ if (master)
+ event->dev = get_device(master->dev);
+ mutex_unlock(&smmu->streams_mutex);
+}
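arm_smmu_decode_event() extracts each field from the raw event-queue entry with FIELD_GET() against the GENMASK-based layout macros. A self-contained illustration of that extraction pattern, with an invented input value:

	#include <linux/bitfield.h>

	/*
	 * Sketch: FIELD_GET() masks and shifts per the GENMASK definition.
	 * With EVTQ_0_ID = GENMASK_ULL(7, 0) and raw0 = 0xabcd0010ULL, this
	 * returns 0x10, i.e. EVT_ID_TRANSLATION_FAULT.
	 */
	static u8 evt_id_sketch(u64 raw0)
	{
		return FIELD_GET(EVTQ_0_ID, raw0);
	}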
+
+static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
+ struct arm_smmu_event *event)
{
int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
- bool ssid_valid = evt[0] & EVTQ_0_SSV;
- u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
- switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+ switch (event->id) {
case EVT_ID_TRANSLATION_FAULT:
case EVT_ID_ADDR_SIZE_FAULT:
case EVT_ID_ACCESS_FAULT:
@@ -1779,35 +1832,35 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
return -EOPNOTSUPP;
}
- if (!(evt[1] & EVTQ_1_STALL))
+ if (!event->stall)
return -EOPNOTSUPP;
- if (evt[1] & EVTQ_1_RnW)
+ if (event->read)
perm |= IOMMU_FAULT_PERM_READ;
else
perm |= IOMMU_FAULT_PERM_WRITE;
- if (evt[1] & EVTQ_1_InD)
+ if (event->instruction)
perm |= IOMMU_FAULT_PERM_EXEC;
- if (evt[1] & EVTQ_1_PnU)
+ if (event->privileged)
perm |= IOMMU_FAULT_PERM_PRIV;
flt->type = IOMMU_FAULT_PAGE_REQ;
flt->prm = (struct iommu_fault_page_request) {
.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .grpid = event->stag,
.perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ .addr = event->iova,
};
- if (ssid_valid) {
+ if (event->ssv) {
flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ flt->prm.pasid = event->ssid;
}
mutex_lock(&smmu->streams_mutex);
- master = arm_smmu_find_master(smmu, sid);
+ master = arm_smmu_find_master(smmu, event->sid);
if (!master) {
ret = -EINVAL;
goto out_unlock;
@@ -1819,29 +1872,82 @@ out_unlock:
return ret;
}
+static void arm_smmu_dump_raw_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ int i;
+
+ dev_err(smmu->dev, "event 0x%02x received:\n", event->id);
+
+ for (i = 0; i < EVTQ_ENT_DWORDS; ++i)
+ dev_err(smmu->dev, "\t0x%016llx\n", raw[i]);
+}
+
+#define ARM_SMMU_EVT_KNOWN(e) ((e)->id < ARRAY_SIZE(event_str) && event_str[(e)->id])
+#define ARM_SMMU_LOG_EVT_STR(e) (ARM_SMMU_EVT_KNOWN(e) ? event_str[(e)->id] : "UNKNOWN")
+#define ARM_SMMU_LOG_CLIENT(e) ((e)->dev ? dev_name((e)->dev) : "(unassigned sid)")
+
+static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *evt,
+ struct ratelimit_state *rs)
+{
+ if (!__ratelimit(rs))
+ return;
+
+ arm_smmu_dump_raw_event(smmu, raw, evt);
+
+ switch (evt->id) {
+ case EVT_ID_TRANSLATION_FAULT:
+ case EVT_ID_ADDR_SIZE_FAULT:
+ case EVT_ID_ACCESS_FAULT:
+ case EVT_ID_PERMISSION_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->iova, evt->ipa);
+
+ dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x",
+ evt->privileged ? "priv" : "unpriv",
+ evt->instruction ? "inst" : "data",
+ str_read_write(evt->read),
+ evt->s2 ? "s2" : "s1", event_class_str[evt->class],
+ evt->class_tt ? (evt->ttrnw ? " ttd_read" : " ttd_write") : "",
+ evt->stall ? " stall" : "", evt->stag);
+
+ break;
+
+ case EVT_ID_STE_FETCH_FAULT:
+ case EVT_ID_CD_FETCH_FAULT:
+ case EVT_ID_VMS_FETCH_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->fetch_addr);
+
+ break;
+
+ default:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid);
+ }
+}
+
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
- int i, ret;
+ u64 evt[EVTQ_ENT_DWORDS];
+ struct arm_smmu_event event = {0};
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->evtq.q;
struct arm_smmu_ll_queue *llq = &q->llq;
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- u64 evt[EVTQ_ENT_DWORDS];
do {
while (!queue_remove_raw(q, evt)) {
- u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
-
- ret = arm_smmu_handle_evt(smmu, evt);
- if (!ret || !__ratelimit(&rs))
- continue;
-
- dev_info(smmu->dev, "event 0x%02x received:\n", id);
- for (i = 0; i < ARRAY_SIZE(evt); ++i)
- dev_info(smmu->dev, "\t0x%016llx\n",
- (unsigned long long)evt[i]);
+ arm_smmu_decode_event(smmu, evt, &event);
+ if (arm_smmu_handle_event(smmu, &event))
+ arm_smmu_dump_event(smmu, evt, &event, &rs);
+ put_device(event.dev);
cond_resched();
}
@@ -2353,39 +2459,12 @@ struct arm_smmu_domain *arm_smmu_domain_alloc(void)
if (!smmu_domain)
return ERR_PTR(-ENOMEM);
- mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
return smmu_domain;
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain;
-
- /*
- * Allocate the domain and initialise some of its data structures.
- * We can't really do anything meaningful until we've added a
- * master.
- */
- smmu_domain = arm_smmu_domain_alloc();
- if (IS_ERR(smmu_domain))
- return ERR_CAST(smmu_domain);
-
- if (dev) {
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- int ret;
-
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0);
- if (ret) {
- kfree(smmu_domain);
- return ERR_PTR(ret);
- }
- }
- return &smmu_domain->domain;
-}
-
static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2451,12 +2530,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_domain *smmu_domain);
bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- /* Restrict the stage to what we can actually support */
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
@@ -2745,9 +2818,14 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
* Translation Requests and Translated transactions are denied
* as though ATS is disabled for the stream (STE.EATS == 0b00),
* causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
- * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
- * enabled if we have arm_smmu_domain, those always have page
- * tables.
+ * (IHI0070Ea 5.2 Stream Table Entry).
+ *
+ * However, if we have installed a CD table and are using S1DSS
+ * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
+ * skipping stage 1".
+ *
+ * Disable ATS if we are going to create a normal 0b100 bypass
+ * STE.
*/
state->ats_enabled = !state->disable_ats &&
arm_smmu_ats_supported(master);
@@ -2853,15 +2931,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
state.master = master = dev_iommu_priv_get(dev);
smmu = master->smmu;
- mutex_lock(&smmu_domain->init_mutex);
-
- if (!smmu_domain->smmu) {
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- } else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
-
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
+ if (smmu_domain->smmu != smmu)
return ret;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
@@ -2918,16 +2988,9 @@ static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_cd target_cd;
- int ret = 0;
- mutex_lock(&smmu_domain->init_mutex);
- if (!smmu_domain->smmu)
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
- return ret;
+ if (smmu_domain->smmu != smmu)
+ return -EINVAL;
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return -EINVAL;
@@ -3016,13 +3079,12 @@ out_unlock:
return ret;
}
-static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old_domain)
{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(old_domain);
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- struct arm_smmu_domain *smmu_domain;
-
- smmu_domain = to_smmu_domain(domain);
mutex_lock(&arm_smmu_asid_lock);
arm_smmu_clear_cd(master, pasid);
@@ -3043,6 +3105,7 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
sid_domain->ops->attach_dev(sid_domain, dev);
}
+ return 0;
}
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
@@ -3070,8 +3133,10 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
if (arm_smmu_ssids_in_use(&master->cd_table)) {
/*
* If a CD table has to be present then we need to run with ATS
- * on even though the RID will fail ATS queries with UR. This is
- * because we have no idea what the PASID's need.
+ * on because we have to assume a PASID is using ATS. For
+ * IDENTITY this will set things up so that S1DSS=bypass, which
+ * follows the explanation in "13.6.4 Full ATS skipping stage 1"
+ * and allows for ATS on the RID to work.
*/
state.cd_needs_ats = true;
arm_smmu_attach_prepare(&state, domain);
@@ -3124,6 +3189,7 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
static const struct iommu_domain_ops arm_smmu_blocked_ops = {
.attach_dev = arm_smmu_attach_dev_blocked,
+ .set_dev_pasid = arm_smmu_blocking_set_dev_pasid,
};
static struct iommu_domain arm_smmu_blocked_domain = {
@@ -3136,6 +3202,7 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID |
IOMMU_HWPT_ALLOC_NEST_PARENT;
@@ -3147,25 +3214,43 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
- if (flags & IOMMU_HWPT_ALLOC_PASID)
- return arm_smmu_domain_alloc_paging(dev);
-
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
- if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT) {
- if (!(master->smmu->features & ARM_SMMU_FEAT_NESTING)) {
+ switch (flags) {
+ case 0:
+ /* Prefer S1 if available */
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ break;
+ case IOMMU_HWPT_ALLOC_NEST_PARENT:
+ if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) {
ret = -EOPNOTSUPP;
goto err_free;
}
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
smmu_domain->nest_parent = true;
+ break;
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID:
+ case IOMMU_HWPT_ALLOC_PASID:
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) {
+ ret = -EOPNOTSUPP;
+ goto err_free;
+ }
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_free;
}
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags);
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu, flags);
if (ret)
goto err_free;
return &smmu_domain->domain;
@@ -3237,8 +3322,8 @@ static struct platform_driver arm_smmu_driver;
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -3543,7 +3628,6 @@ static struct iommu_ops arm_smmu_ops = {
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.hw_info = arm_smmu_hw_info,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
.domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
.probe_device = arm_smmu_probe_device,
@@ -3551,7 +3635,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .remove_dev_pasid = arm_smmu_remove_dev_pasid,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
@@ -4239,7 +4322,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
*/
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
- coherent ? "true" : "false");
+ str_true_false(coherent));
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
@@ -4663,7 +4746,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Initialise in-memory data structures */
ret = arm_smmu_init_structures(smmu);
if (ret)
- return ret;
+ goto err_free_iopf;
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
@@ -4674,22 +4757,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
- return ret;
+ goto err_disable;
/* And we're up. Go go go! */
ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
"smmu3.%pa", &ioaddr);
if (ret)
- return ret;
+ goto err_disable;
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
+ goto err_free_sysfs;
}
return 0;
+
+err_free_sysfs:
+ iommu_device_sysfs_remove(&smmu->iommu);
+err_disable:
+ arm_smmu_device_disable(smmu);
+err_free_iopf:
+ iopf_queue_free(smmu->evtq.iopf);
+ return ret;
}
static void arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 0107d3f333a1..bd9d7c85576a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -452,10 +452,18 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
+#define EVT_ID_BAD_STREAMID_CONFIG 0x02
+#define EVT_ID_STE_FETCH_FAULT 0x03
+#define EVT_ID_BAD_STE_CONFIG 0x04
+#define EVT_ID_STREAM_DISABLED_FAULT 0x06
+#define EVT_ID_BAD_SUBSTREAMID_CONFIG 0x08
+#define EVT_ID_CD_FETCH_FAULT 0x09
+#define EVT_ID_BAD_CD_CONFIG 0x0a
#define EVT_ID_TRANSLATION_FAULT 0x10
#define EVT_ID_ADDR_SIZE_FAULT 0x11
#define EVT_ID_ACCESS_FAULT 0x12
#define EVT_ID_PERMISSION_FAULT 0x13
+#define EVT_ID_VMS_FETCH_FAULT 0x25
#define EVTQ_0_SSV (1UL << 11)
#define EVTQ_0_SSID GENMASK_ULL(31, 12)
@@ -467,9 +475,11 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_1_RnW (1UL << 35)
#define EVTQ_1_S2 (1UL << 39)
#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
+#define EVTQ_1_CLASS_TT 0x01
#define EVTQ_1_TT_READ (1UL << 44)
#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
#define EVTQ_3_IPA GENMASK_ULL(51, 12)
+#define EVTQ_3_FETCH_ADDR GENMASK_ULL(51, 3)
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
@@ -789,6 +799,26 @@ struct arm_smmu_stream {
struct rb_node node;
};
+struct arm_smmu_event {
+ u8 stall : 1,
+ ssv : 1,
+ privileged : 1,
+ instruction : 1,
+ s2 : 1,
+ read : 1,
+ ttrnw : 1,
+ class_tt : 1;
+ u8 id;
+ u8 class;
+ u16 stag;
+ u32 sid;
+ u32 ssid;
+ u64 iova;
+ u64 ipa;
+ u64 fetch_addr;
+ struct device *dev;
+};
+
/* SMMU private data for each master */
struct arm_smmu_master {
struct arm_smmu_device *smmu;
@@ -813,7 +843,6 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
- struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
atomic_t nr_ats_masters;
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 6e41ddaa24d6..d525ab43a4ae 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -79,7 +79,6 @@
#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define VCMDQ_ADDR GENMASK(47, 5)
#define VCMDQ_LOG2SIZE GENMASK(4, 0)
-#define VCMDQ_LOG2SIZE_MAX 19
#define TEGRA241_VCMDQ_BASE 0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008
@@ -505,12 +504,15 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
struct arm_smmu_queue *q = &cmdq->q;
char name[16];
+ u32 regval;
int ret;
snprintf(name, 16, "vcmdq%u", vcmdq->idx);
- /* Queue size, capped to ensure natural alignment */
- q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
+ /* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
+ regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
+ q->llq.max_n_shift =
+ min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
/* Use the common helper to init the VCMDQ, and then... */
ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
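Capping the queue to IDR1.CMDQS ties the VCMDQ size to what the SMMU actually advertises instead of a fixed constant. A worked example with invented numbers:

	/*
	 * Invented numbers for illustration: if FIELD_GET(IDR1_CMDQS, regval)
	 * reads back 7 and CMDQ_MAX_SZ_SHIFT is larger, then
	 *   q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, 7) = 7,
	 * giving a 2^7 = 128-entry virtual command queue.
	 */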
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 99030e6b16e7..db9b9a8e139c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -110,7 +110,6 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
- int i;
/*
* On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
* writes to the context bank ACTLRs will stick. And we just hope that
@@ -128,11 +127,12 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+#ifdef CONFIG_ARM_SMMU_MMU_500_CPRE_ERRATA
/*
* Disable MMU-500's not-particularly-beneficial next-page
* prefetcher for the sake of at least 5 known errata.
*/
- for (i = 0; i < smmu->num_context_banks; ++i) {
+ for (int i = 0; i < smmu->num_context_banks; ++i) {
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
reg &= ~ARM_MMU500_ACTLR_CPRE;
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
@@ -140,6 +140,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
if (reg & ARM_MMU500_ACTLR_CPRE)
dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n");
}
+#endif
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index 548783f3f8e8..d03b2239baad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -73,7 +73,7 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
if (__ratelimit(&rs)) {
dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
- cfg = qsmmu->cfg;
+ cfg = qsmmu->data->cfg;
if (!cfg)
return;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 6372f3e25c4b..59d02687280e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -16,6 +16,40 @@
#define QCOM_DUMMY_VAL -1
+/*
+ * SMMU-500 TRM defines BIT(0) as CMTLB (Enable context caching in the
+ * macro TLB) and BIT(1) as CPRE (Enable context caching in the prefetch
+ * buffer). The remaining bits are implementation defined and vary across
+ * SoCs.
+ */
+
+#define CPRE (1 << 1)
+#define CMTLB (1 << 0)
+#define PREFETCH_SHIFT 8
+#define PREFETCH_DEFAULT 0
+#define PREFETCH_SHALLOW (1 << PREFETCH_SHIFT)
+#define PREFETCH_MODERATE (2 << PREFETCH_SHIFT)
+#define PREFETCH_DEEP (3 << PREFETCH_SHIFT)
+#define GFX_ACTLR_PRR (1 << 5)
+
+static const struct of_device_id qcom_smmu_actlr_client_of_match[] = {
+ { .compatible = "qcom,adreno",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-gmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-smmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,fastrpc",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-mdss",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-venus",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sm8550-mdss",
+ .data = (const void *) (PREFETCH_DEFAULT | CMTLB) },
+ { }
+};
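Each client's ACTLR value is packed directly into the of_device_id .data pointer using the bit layout described above. A worked example for the "qcom,adreno" entry, written as a compile-time sanity check one could add (static_assert comes from <linux/build_bug.h>):

	#include <linux/build_bug.h>

	/*
	 * PREFETCH_DEEP | CPRE | CMTLB == (3 << 8) | (1 << 1) | (1 << 0)
	 * == 0x303, the raw value qcom_smmu_set_actlr_dev() later writes
	 * to ARM_SMMU_CB_ACTLR for matching clients.
	 */
	static_assert((PREFETCH_DEEP | CPRE | CMTLB) == 0x303);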
+
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
@@ -99,6 +133,47 @@ static void qcom_adreno_smmu_resume_translation(const void *cookie, bool termina
arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ u32 reg = 0;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+ reg &= ~GFX_ACTLR_PRR;
+ if (set)
+ reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ writel_relaxed(lower_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+ writel_relaxed(upper_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
#define QCOM_ADRENO_SMMU_GPU_SID 0
static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -207,13 +282,37 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
return true;
}
+static void qcom_smmu_set_actlr_dev(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
+ const struct of_device_id *client_match)
+{
+ const struct of_device_id *match =
+ of_match_device(client_match, dev);
+
+ if (!match) {
+ dev_dbg(dev, "no ACTLR settings present\n");
+ return;
+ }
+
+ arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR, (unsigned long)match->data);
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ const struct device_node *np = smmu_domain->smmu->dev->of_node;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
struct adreno_smmu_priv *priv;
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
/* Only enable split pagetables for the GPU device (SID 0) */
if (!qcom_adreno_smmu_is_gpu_device(dev))
return 0;
@@ -239,6 +338,14 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;
+ priv->set_prr_bit = NULL;
+ priv->set_prr_addr = NULL;
+
+ if (of_device_is_compatible(np, "qcom,smmu-500") &&
+ of_device_is_compatible(np, "qcom,adreno-smmu")) {
+ priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+ priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+ }
return 0;
}
@@ -269,8 +376,18 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
+
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
return 0;
}
@@ -507,7 +624,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
- qsmmu->cfg = data->cfg;
+ qsmmu->data = data;
return &qsmmu->smmu;
}
@@ -550,6 +667,7 @@ static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
.impl = &qcom_smmu_500_impl,
.adreno_impl = &qcom_adreno_smmu_500_impl,
.cfg = &qcom_smmu_impl0_cfg,
+ .client_match = qcom_smmu_actlr_client_of_match,
};
/*
@@ -567,6 +685,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 3c134d1a6277..8addd453f5f1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -8,7 +8,7 @@
struct qcom_smmu {
struct arm_smmu_device smmu;
- const struct qcom_smmu_config *cfg;
+ const struct qcom_smmu_match_data *data;
bool bypass_quirk;
u8 bypass_cbndx;
u32 stall_enabled;
@@ -28,6 +28,7 @@ struct qcom_smmu_match_data {
const struct qcom_smmu_config *cfg;
const struct arm_smmu_impl *impl;
const struct arm_smmu_impl *adreno_impl;
+ const struct of_device_id * const client_match;
};
irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 650664e0f6e3..de205a34ffc6 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -34,6 +34,7 @@
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/fsl/mc.h>
@@ -1411,8 +1412,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1437,17 +1438,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
goto out_free;
} else {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-
- /*
- * Defer probe if the relevant SMMU instance hasn't finished
- * probing yet. This is a fragile hack and we'd ideally
- * avoid this race in the core code. Until that's ironed
- * out, however, this is the most pragmatic option on the
- * table.
- */
- if (!smmu)
- return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
- "smmu dev has not bound yet\n"));
}
ret = -EINVAL;
@@ -2117,7 +2107,7 @@ static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
}
dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
- cnt == 1 ? "" : "s");
+ str_plural(cnt));
iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}
@@ -2227,29 +2217,26 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
i, irq);
}
+ platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
+ arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
+
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
"smmu.%pa", &smmu->ioaddr);
- if (err) {
- dev_err(dev, "Failed to register iommu in sysfs\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register iommu in sysfs\n");
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
using_legacy_binding ? NULL : dev);
if (err) {
- dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);
- return err;
+ return dev_err_probe(dev, err, "Failed to register iommu\n");
}
- platform_set_drvdata(pdev, smmu);
-
- /* Check for RMRs and install bypass SMRs if any */
- arm_smmu_rmr_install_bypass_smr(smmu);
-
- arm_smmu_device_reset(smmu);
- arm_smmu_test_smr_masks(smmu);
-
/*
* We want to avoid touching dev->power.lock in fastpaths unless
* it's really going to do something useful - pm_runtime_enabled()
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index e2aeb511ae90..2dbf3243b5ad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -154,6 +154,8 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_SCTLR_M BIT(0)
#define ARM_SMMU_CB_ACTLR 0x4
+#define ARM_SMMU_GFX_PRR_CFG_LADDR 0x6008
+#define ARM_SMMU_GFX_PRR_CFG_UADDR 0x600C
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_RESUME_TERMINATE BIT(0)
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c666ecab955d..69e23e017d9e 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -249,7 +249,7 @@ struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.domain_node */
sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
- spinlock_t lock; /* lock for modyfying list of clients */
+ spinlock_t lock; /* lock for modifying list of clients */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
struct iommu_domain domain; /* generic domain data structure */
};
@@ -292,7 +292,7 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
- spinlock_t lock; /* lock for modyfying state */
+ spinlock_t lock; /* lock for modifying state */
bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
@@ -746,7 +746,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
dev_name(dev), data);
if (ret) {
- dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ dev_err(dev, "Unable to register handler of irq %d\n", irq);
return ret;
}
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 8a5c17b97310..2a86aa5d54c6 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -164,8 +164,8 @@ static int __init hyperv_prepare_irq_remapping(void)
* max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu
* into ioapic_max_cpumask if its APIC ID is less than 256.
*/
- for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
- if (cpu_physical_id(i) < 256)
+ for (i = min_t(unsigned int, nr_cpu_ids - 1, 255); i >= 0; i--)
+ if (cpu_possible(i) && cpu_physical_id(i) < 256)
cpumask_set_cpu(i, &ioapic_max_cpumask);
return 0;
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index d3bb0798092d..6c7528130cf9 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 09694cca8752..fc35cba59145 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -47,6 +47,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct cache_tag *tag, *temp;
+ struct list_head *prev;
unsigned long flags;
tag = kzalloc(sizeof(*tag), GFP_KERNEL);
@@ -65,6 +66,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
tag->dev = iommu->iommu.dev;
spin_lock_irqsave(&domain->cache_lock, flags);
+ prev = &domain->cache_tags;
list_for_each_entry(temp, &domain->cache_tags, node) {
if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
temp->users++;
@@ -73,8 +75,15 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
trace_cache_tag_assign(temp);
return 0;
}
+ if (temp->iommu == iommu)
+ prev = &temp->node;
}
- list_add_tail(&tag->node, &domain->cache_tags);
+ /*
+ * Link cache tags of the same iommu unit together, so the
+ * corresponding flush ops can be batched per iommu unit.
+ */
+ list_add(&tag->node, prev);
+
spin_unlock_irqrestore(&domain->cache_lock, flags);
trace_cache_tag_assign(tag);
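With same-unit tags now adjacent, a flush walk can batch invalidations per IOMMU unit by detecting unit boundaries in a single pass. A hedged sketch of that consumer-side idea; the helper name and batch steps are invented for illustration and are not part of this hunk:

	/* Illustrative only: walk the grouped tag list under cache_lock and
	 * start a new batch whenever the iommu unit changes. */
	static void flush_cache_tags_batched_sketch(struct dmar_domain *domain)
	{
		struct intel_iommu *cur = NULL;
		struct cache_tag *tag;
		unsigned long flags;

		spin_lock_irqsave(&domain->cache_lock, flags);
		list_for_each_entry(tag, &domain->cache_tags, node) {
			if (tag->iommu != cur) {
				/* submit the previous unit's batch here */
				cur = tag->iommu;
			}
			/* accumulate this tag into the batch for 'cur' */
		}
		/* submit the final batch */
		spin_unlock_irqrestore(&domain->cache_lock, flags);
	}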
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
deleted file mode 100644
index 9862dc20b35e..000000000000
--- a/drivers/iommu/intel/cap_audit.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * cap_audit.c - audit iommu capabilities for boot time and hot plug
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- * Lu Baolu <baolu.lu@linux.intel.com>
- */
-
-#define pr_fmt(fmt) "DMAR: " fmt
-
-#include "iommu.h"
-#include "cap_audit.h"
-
-static u64 intel_iommu_cap_sanity;
-static u64 intel_iommu_ecap_sanity;
-
-static inline void check_irq_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
-}
-
-static inline void check_dmar_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
-
- CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
-}
-
-static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- bool mismatch = false;
- u64 old_cap = intel_iommu_cap_sanity;
- u64 old_ecap = intel_iommu_ecap_sanity;
-
- if (type == CAP_AUDIT_HOTPLUG_IRQR) {
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
- goto out;
- }
-
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
-
- /* Abort hot plug if the hot plug iommu feature is smaller than global */
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
-
-out:
- if (mismatch) {
- intel_iommu_cap_sanity = old_cap;
- intel_iommu_ecap_sanity = old_ecap;
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- struct dmar_drhd_unit *d;
- struct intel_iommu *i;
- int rc = 0;
-
- rcu_read_lock();
- if (list_empty(&dmar_drhd_units))
- goto out;
-
- for_each_active_iommu(i, d) {
- if (!iommu) {
- intel_iommu_ecap_sanity = i->ecap;
- intel_iommu_cap_sanity = i->cap;
- iommu = i;
- continue;
- }
-
- if (type == CAP_AUDIT_STATIC_DMAR)
- check_dmar_capabilities(iommu, i);
- else
- check_irq_capabilities(iommu, i);
- }
-
- /*
- * If the system is sane to support scalable mode, either SL or FL
- * should be sane.
- */
- if (intel_cap_smts_sanity() &&
- !intel_cap_flts_sanity() && !intel_cap_slts_sanity())
- rc = -EOPNOTSUPP;
-
-out:
- rcu_read_unlock();
- return rc;
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
-{
- switch (type) {
- case CAP_AUDIT_STATIC_DMAR:
- case CAP_AUDIT_STATIC_IRQR:
- return cap_audit_static(iommu, type);
- case CAP_AUDIT_HOTPLUG_DMAR:
- case CAP_AUDIT_HOTPLUG_IRQR:
- return cap_audit_hotplug(iommu, type);
- default:
- break;
- }
-
- return -EFAULT;
-}
-
-bool intel_cap_smts_sanity(void)
-{
- return ecap_smts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_pasid_sanity(void)
-{
- return ecap_pasid(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_nest_sanity(void)
-{
- return ecap_nest(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_flts_sanity(void)
-{
- return ecap_flts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_slts_sanity(void)
-{
- return ecap_slts(intel_iommu_ecap_sanity);
-}
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
deleted file mode 100644
index d07b75938961..000000000000
--- a/drivers/iommu/intel/cap_audit.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * cap_audit.h - audit iommu capabilities header
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- */
-
-/*
- * Capability Register Mask
- */
-#define CAP_FL5LP_MASK BIT_ULL(60)
-#define CAP_PI_MASK BIT_ULL(59)
-#define CAP_FL1GP_MASK BIT_ULL(56)
-#define CAP_RD_MASK BIT_ULL(55)
-#define CAP_WD_MASK BIT_ULL(54)
-#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
-#define CAP_NFR_MASK GENMASK_ULL(47, 40)
-#define CAP_PSI_MASK BIT_ULL(39)
-#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
-#define CAP_FRO_MASK GENMASK_ULL(33, 24)
-#define CAP_ZLR_MASK BIT_ULL(22)
-#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
-#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
-#define CAP_CM_MASK BIT_ULL(7)
-#define CAP_PHMR_MASK BIT_ULL(6)
-#define CAP_PLMR_MASK BIT_ULL(5)
-#define CAP_RWBF_MASK BIT_ULL(4)
-#define CAP_AFL_MASK BIT_ULL(3)
-#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
-
-/*
- * Extended Capability Register Mask
- */
-#define ECAP_RPS_MASK BIT_ULL(49)
-#define ECAP_SMPWC_MASK BIT_ULL(48)
-#define ECAP_FLTS_MASK BIT_ULL(47)
-#define ECAP_SLTS_MASK BIT_ULL(46)
-#define ECAP_SLADS_MASK BIT_ULL(45)
-#define ECAP_VCS_MASK BIT_ULL(44)
-#define ECAP_SMTS_MASK BIT_ULL(43)
-#define ECAP_PDS_MASK BIT_ULL(42)
-#define ECAP_DIT_MASK BIT_ULL(41)
-#define ECAP_PASID_MASK BIT_ULL(40)
-#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
-#define ECAP_EAFS_MASK BIT_ULL(34)
-#define ECAP_NWFS_MASK BIT_ULL(33)
-#define ECAP_SRS_MASK BIT_ULL(31)
-#define ECAP_ERS_MASK BIT_ULL(30)
-#define ECAP_PRS_MASK BIT_ULL(29)
-#define ECAP_NEST_MASK BIT_ULL(26)
-#define ECAP_MTS_MASK BIT_ULL(25)
-#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
-#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
-#define ECAP_SC_MASK BIT_ULL(7)
-#define ECAP_PT_MASK BIT_ULL(6)
-#define ECAP_EIM_MASK BIT_ULL(4)
-#define ECAP_DT_MASK BIT_ULL(2)
-#define ECAP_QI_MASK BIT_ULL(1)
-#define ECAP_C_MASK BIT_ULL(0)
-
-/*
- * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
- * IOMMU gets audited.
- */
-#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(a) != cap##_##feature(b)) { \
- intel_iommu_##cap##_sanity &= ~(MASK); \
- pr_info("IOMMU feature %s inconsistent", #feature); \
- } \
-} while (0)
-
-#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
- DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
-
-#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(intel_iommu_##cap##_sanity)) \
- DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
- (b)->cap, cap, feature, MASK); \
-} while (0)
-
-#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
-do { \
- u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
- min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
- intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
- min_feature; \
-} while (0)
-
-#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
-do { \
- if ((intel_iommu_##cap##_sanity & (MASK)) > \
- (cap##_##feature((iommu)->cap))) \
- mismatch = true; \
- else \
- (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
- (intel_iommu_##cap##_sanity & (MASK)); \
-} while (0)
-
-enum cap_audit_type {
- CAP_AUDIT_STATIC_DMAR,
- CAP_AUDIT_STATIC_IRQR,
- CAP_AUDIT_HOTPLUG_DMAR,
- CAP_AUDIT_HOTPLUG_IRQR,
-};
-
-bool intel_cap_smts_sanity(void);
-bool intel_cap_pasid_sanity(void);
-bool intel_cap_nest_sanity(void);
-bool intel_cap_flts_sanity(void);
-bool intel_cap_slts_sanity(void);
-
-static inline bool scalable_mode_support(void)
-{
- return (intel_iommu_sm && intel_cap_smts_sanity());
-}
-
-static inline bool pasid_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_pasid_sanity();
-}
-
-static inline bool nested_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_nest_sanity();
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
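The removed machinery above reduces to one pattern: start from the first unit's raw capability word as the global "sanity" value and clear every feature bit on which a later unit disagrees, so the surviving bits are the intersection of all units' capabilities. A minimal user-space sketch of that pattern, with illustrative feature masks rather than the real VT-d register layout:

	#include <stdint.h>
	#include <stdio.h>

	#define FEAT_A (1ULL << 0)	/* illustrative feature bits */
	#define FEAT_B (1ULL << 1)
	#define FEAT_C (1ULL << 2)

	static uint64_t cap_sanity;

	/* Clear a feature from the sanity word when two units disagree on it. */
	static void check_feature(uint64_t a, uint64_t b, uint64_t mask, const char *name)
	{
		if ((a & mask) != (b & mask)) {
			cap_sanity &= ~mask;
			printf("feature %s inconsistent\n", name);
		}
	}

	int main(void)
	{
		uint64_t unit[] = { FEAT_A | FEAT_B | FEAT_C, FEAT_A | FEAT_C, FEAT_A };
		size_t i;

		cap_sanity = unit[0];
		for (i = 1; i < sizeof(unit) / sizeof(unit[0]); i++) {
			check_feature(cap_sanity, unit[i], FEAT_A, "A");
			check_feature(cap_sanity, unit[i], FEAT_B, "B");
			check_feature(cap_sanity, unit[i], FEAT_C, "C");
		}
		printf("sanity = %#llx\n", (unsigned long long)cap_sanity); /* FEAT_A only */
		return 0;
	}

The hotplug variant was the same comparison made against the already-computed sanity word, aborting instead of shrinking when a numeric field of the new unit was smaller than the global minimum.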
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9f424acf474e..e540092d664d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
/*
* Enable fault control interrupt.
*/
+ guard(rwsem_read)(&dmar_global_lock);
for_each_iommu(iommu, drhd) {
u32 fault_status;
int ret;
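The added guard(rwsem_read)(&dmar_global_lock) uses the kernel's scope-based cleanup helpers from <linux/cleanup.h>: the read lock is taken at the declaration and released automatically on every exit from the enclosing scope, so enable_drhd_fault_handling() needs no explicit up_read() on any return path. A rough user-space analogue of the mechanism, built on the compiler's cleanup attribute (pthread names are only for the demo, and this toy allows one guard per scope):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

	static void read_unlock_cleanup(pthread_rwlock_t **lockp)
	{
		pthread_rwlock_unlock(*lockp);
		printf("read lock dropped on scope exit\n");
	}

	/* Very loosely what guard(rwsem_read)(&lock) expands to. */
	#define guard_read(lock) \
		pthread_rwlock_t *scope_guard \
			__attribute__((cleanup(read_unlock_cleanup))) = \
			(pthread_rwlock_rdlock(lock), (lock))

	int main(void)
	{
		guard_read(&demo_lock);
		printf("critical section under read lock\n");
		return 0;	/* unlock runs automatically here */
	}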
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 79e0da9eb626..bf1f0c814348 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -29,7 +29,6 @@
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
-#include "cap_audit.h"
#include "perfmon.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -2118,10 +2117,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
- if (ret)
- goto free_iommu;
-
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -2617,10 +2612,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
struct intel_iommu *iommu = dmaru->iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
- if (ret)
- goto out;
-
/*
* Disable translation if already enabled prior to OS handover.
*/
@@ -3155,7 +3146,14 @@ int __init intel_iommu_init(void)
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
+ /*
+ * The iommu device probe is protected by the iommu_probe_device_lock.
+ * Release the dmar_global_lock before entering the device probe path
+ * to avoid unnecessary lock order splat.
+ */
+ up_read(&dmar_global_lock);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+ down_read(&dmar_global_lock);
iommu_pmu_register(iommu);
}
@@ -3250,10 +3248,15 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
return 0;
}
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old);
+
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocking_domain_attach_dev,
+ .set_dev_pasid = blocking_domain_set_dev_pasid,
}
};
@@ -3342,8 +3345,7 @@ intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool first_stage;
if (flags &
- (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING
- | IOMMU_HWPT_FAULT_ID_VALID)))
+ (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
return ERR_PTR(-EOPNOTSUPP);
if (nested_parent && !nested_supported(iommu))
return ERR_PTR(-EOPNOTSUPP);
@@ -4090,22 +4092,26 @@ void domain_remove_dev_pasid(struct iommu_domain *domain,
break;
}
}
- WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
- intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
- kfree(dev_pasid);
+ if (!WARN_ON_ONCE(!dev_pasid)) {
+ intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
+ kfree(dev_pasid);
+ }
}
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
- domain_remove_dev_pasid(domain, dev, pasid);
+ domain_remove_dev_pasid(old, dev, pasid);
+
+ return 0;
}
struct dev_pasid_info *
@@ -4379,9 +4385,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
{
struct device *dev = data;
- if (dev != &pdev->dev)
- return 0;
-
return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
@@ -4445,21 +4448,6 @@ static struct iommu_domain identity_domain = {
},
};
-static struct iommu_domain *intel_iommu_domain_alloc_paging(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
- struct dmar_domain *dmar_domain;
- bool first_stage;
-
- first_stage = first_level_by_default(iommu);
- dmar_domain = paging_domain_alloc(dev, first_stage);
- if (IS_ERR(dmar_domain))
- return ERR_CAST(dmar_domain);
-
- return &dmar_domain->domain;
-}
-
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
@@ -4468,7 +4456,6 @@ const struct iommu_ops intel_iommu_ops = {
.hw_info = intel_iommu_hw_info,
.domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = intel_svm_domain_alloc,
- .domain_alloc_paging = intel_iommu_domain_alloc_paging,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
.release_device = intel_iommu_release_device,
@@ -4478,7 +4465,6 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
- .remove_dev_pasid = intel_iommu_remove_dev_pasid,
.pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 466c1412dd45..ad795c772f21 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -24,7 +24,6 @@
#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
-#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -727,9 +726,6 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
- if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
- return -ENODEV;
-
if (!dmar_ir_support())
return -ENODEV;
@@ -1463,7 +1459,6 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
else
irq_data->chip = &intel_ir_chip;
intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
- irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;
@@ -1534,10 +1529,6 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
- if (ret)
- return ret;
-
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 5b7d85f1e143..fb59a7d35958 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -244,11 +244,31 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ if (WARN_ON(!pte)) {
spin_unlock(&iommu->lock);
return;
}
+ if (!pasid_pte_is_present(pte)) {
+ if (!pasid_pte_is_fault_disabled(pte)) {
+ WARN_ON(READ_ONCE(pte->val[0]) != 0);
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ /*
+ * When a PASID is used for SVA by a device, it's possible
+ * that the pasid entry is non-present with the Fault
+ * Processing Disabled bit set. Clear the pasid entry and
+ * drain the PRQ for the PASID before returning.
+ */
+ pasid_clear_entry(pte);
+ spin_unlock(&iommu->lock);
+ intel_iommu_drain_pasid_prq(dev, pasid);
+
+ return;
+ }
+
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 082f4fe20216..668d8ece6b14 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -73,6 +73,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
+/* Get FPD (Fault Processing Disable) bit of a PASID table entry */
+static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
+{
+ return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
+}
+
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index c2d792db52c3..064194399b38 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -87,7 +87,9 @@ prq_retry:
struct page_req_dsc *req;
req = &iommu->prq[head / sizeof(*req)];
- if (!req->pasid_present || req->pasid != pasid) {
+ if (req->rid != sid ||
+ (req->pasid_present && pasid != req->pasid) ||
+ (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
head = (head + sizeof(*req)) & PRQ_RING_MASK;
continue;
}
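The reworked skip condition in the PRQ drain loop reads better in its positive form: a queued request belongs to the (sid, pasid) pair being drained only when the RID matches and the PASID field agrees with the request's pasid_present flag. The old test never matched PASID-less requests and did not check the RID at all. A stand-alone restatement of the predicate (types simplified, NO_PASID is an illustrative stand-in for IOMMU_NO_PASID):

	#include <stdbool.h>
	#include <stdint.h>
	#include <assert.h>
	#include <stdio.h>

	#define NO_PASID 0u	/* illustrative stand-in for IOMMU_NO_PASID */

	struct page_req {
		uint16_t rid;
		bool pasid_present;
		uint32_t pasid;
	};

	/* True when this queued request belongs to the (sid, pasid) being drained. */
	static bool prq_matches(const struct page_req *req, uint16_t sid, uint32_t pasid)
	{
		if (req->rid != sid)
			return false;
		if (req->pasid_present)
			return req->pasid == pasid;
		return pasid == NO_PASID;	/* PASID-less requests drain as NO_PASID */
	}

	int main(void)
	{
		struct page_req with = { .rid = 7, .pasid_present = true, .pasid = 42 };
		struct page_req without = { .rid = 7, .pasid_present = false };

		assert(prq_matches(&with, 7, 42));
		assert(!prq_matches(&with, 7, 41));
		assert(prq_matches(&without, 7, NO_PASID)); /* old predicate dropped these */
		assert(!prq_matches(&without, 7, 42));
		puts("predicate ok");
		return 0;
	}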
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 4674e618797c..8b5926c1452e 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -478,6 +478,7 @@ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
ops->page_response(dev, iopf, &resp);
list_del_init(&group->pending_node);
+ iopf_free_group(group);
}
mutex_unlock(&fault_param->lock);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6b9bb58a414f..7632c80edea6 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -223,6 +223,34 @@ static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
return ptes_per_table - (i & (ptes_per_table - 1));
}
+/*
+ * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a):
+ * 1) R_DXBSH: For a 16KB granule and 48-bit input size, use level 1 instead of 0.
+ * 2) R_SRKBC: Per the table of PA sizes and valid initial lookup levels:
+ * a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
+ * b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
+ * c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
+ * d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
+ */
+static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+{
+ unsigned int ias = cfg->ias;
+ unsigned int oas = cfg->oas;
+
+ /* Covers 1 and 2.d */
+ if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
+ return (oas == 48) || (ias == 48);
+
+ /* Covers 2.a and 2.c */
+ if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
+ return (oas == 40) || (oas == 42);
+
+ /* Case 2.b */
+ return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
+ (data->start_level == 1) && (oas == 40);
+}
+
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
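The four cases in arm_lpae_concat_mandatory() reduce to a small predicate over (granule, start level, ias, oas). A quick user-space restatement for checking the cases against the spec references above (SZ_* spelled out; all values illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	#define SZ_4K  0x1000u
	#define SZ_16K 0x4000u

	/* Stand-alone restatement of arm_lpae_concat_mandatory(). */
	static bool concat_mandatory(unsigned granule, int start_level,
				     unsigned ias, unsigned oas)
	{
		if (granule == SZ_16K && start_level == 0)
			return oas == 48 || ias == 48;		/* 1 and 2.d */
		if (granule == SZ_4K && start_level == 0)
			return oas == 40 || oas == 42;		/* 2.a and 2.c */
		return granule == SZ_16K && start_level == 1 && oas == 40; /* 2.b */
	}

	int main(void)
	{
		printf("16K/L0 ias=48: %d\n", concat_mandatory(SZ_16K, 0, 48, 44)); /* 1 */
		printf("4K/L0  oas=40: %d\n", concat_mandatory(SZ_4K, 0, 40, 40));  /* 2.a */
		printf("16K/L1 oas=40: %d\n", concat_mandatory(SZ_16K, 1, 40, 40)); /* 2.b */
		printf("4K/L0  oas=48: %d\n", concat_mandatory(SZ_4K, 0, 48, 48));  /* none */
		return 0;
	}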
@@ -676,85 +704,107 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
data->start_level, ptep);
}
+struct io_pgtable_walk_data {
+ struct io_pgtable *iop;
+ void *data;
+ int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size);
+ unsigned long flags;
+ u64 addr;
+ const u64 end;
+};
+
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl);
+
+struct iova_to_phys_data {
+ arm_lpae_iopte pte;
+ int lvl;
+};
+
+static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iova_to_phys_data *data = walk_data->data;
+ data->pte = *ptep;
+ data->lvl = lvl;
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = data->start_level;
-
- do {
- /* Valid IOPTE pointer? */
- if (!ptep)
- return 0;
-
- /* Grab the IOPTE we're interested in */
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
- pte = READ_ONCE(*ptep);
-
- /* Valid entry? */
- if (!pte)
- return 0;
+ struct iova_to_phys_data d;
+ struct io_pgtable_walk_data walk_data = {
+ .data = &d,
+ .visit = visit_iova_to_phys,
+ .addr = iova,
+ .end = iova + 1,
+ };
+ int ret;
- /* Leaf entry? */
- if (iopte_leaf(pte, lvl, data->iop.fmt))
- goto found_translation;
+ ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+ if (ret)
+ return 0;
- /* Take it to the next level */
- ptep = iopte_deref(pte, data);
- } while (++lvl < ARM_LPAE_MAX_LEVELS);
+ iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
+ return iopte_to_paddr(d.pte, data) | iova;
+}
- /* Ran out of page tables to walk */
+static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
+ data->ptes[lvl] = *ptep;
return 0;
-
-found_translation:
- iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
- return iopte_to_paddr(pte, data) | iova;
}
-struct io_pgtable_walk_data {
- struct iommu_dirty_bitmap *dirty;
- unsigned long flags;
- u64 addr;
- const u64 end;
-};
+static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
+ void *wd)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_walk_data walk_data = {
+ .data = wd,
+ .visit = visit_pgtable_walk,
+ .addr = iova,
+ .end = iova + 1,
+ };
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+}
-static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep, int lvl)
+static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
- if (iopte_leaf(pte, lvl, iop->fmt)) {
- size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ int ret = walk_data->visit(walk_data, lvl, ptep, size);
+ if (ret)
+ return ret;
- if (iopte_writeable_dirty(pte)) {
- iommu_dirty_bitmap_record(walk_data->dirty,
- walk_data->addr, size);
- if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
- iopte_set_writeable_clean(ptep);
- }
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
walk_data->addr += size;
return 0;
}
- if (WARN_ON(!iopte_table(pte, lvl)))
+ if (!iopte_table(pte, lvl)) {
return -EINVAL;
+ }
ptep = iopte_deref(pte, data);
- return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+ return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl)
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl)
{
u32 idx;
int max_entries, ret;
@@ -769,7 +819,7 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
- ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
@@ -777,6 +827,23 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
return 0;
}
+static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iommu_dirty_bitmap *dirty = walk_data->data;
+
+ if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
+ return 0;
+
+ if (iopte_writeable_dirty(*ptep)) {
+ iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
+ if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ iopte_set_writeable_clean(ptep);
+ }
+
+ return 0;
+}
+
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
@@ -785,7 +852,9 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
- .dirty = dirty,
+ .iop = &data->iop,
+ .data = dirty,
+ .visit = visit_dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
@@ -800,7 +869,7 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
- return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -882,6 +951,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
+ .pgtable_walk = arm_lpae_pgtable_walk,
};
return data;
@@ -1006,18 +1076,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
if (!data)
return NULL;
- /*
- * Concatenate PGDs at level 1 if possible in order to reduce
- * the depth of the stage-2 walk.
- */
- if (data->start_level == 0) {
- unsigned long pgd_pages;
-
- pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
- if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_bits += data->bits_per_level;
- data->start_level++;
- }
+ if (arm_lpae_concat_mandatory(cfg, data)) {
+ if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
+ ARM_LPAE_S2_MAX_CONCAT_PAGES))
+ return NULL;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
/* VTCR */
@@ -1364,15 +1428,14 @@ static int __init arm_lpae_do_selftests(void)
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] __initconst = {
+ static const unsigned int address_size[] __initconst = {
32, 36, 40, 42, 44, 48,
};
- int i, j, pass = 0, fail = 0;
+ int i, j, k, pass = 0, fail = 0;
struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
- .oas = 48,
.coherent_walk = true,
.iommu_dev = &dev,
};
@@ -1381,15 +1444,19 @@ static int __init arm_lpae_do_selftests(void)
set_dev_node(&dev, NUMA_NO_NODE);
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
- for (j = 0; j < ARRAY_SIZE(ias); ++j) {
- cfg.pgsize_bitmap = pgsize[i];
- cfg.ias = ias[j];
- pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
- pgsize[i], ias[j]);
- if (arm_lpae_run_tests(&cfg))
- fail++;
- else
- pass++;
+ for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+ /* Don't use ias > oas as it is not valid for stage-2. */
+ for (k = 0; k <= j; ++k) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = address_size[k];
+ cfg.oas = address_size[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+ pgsize[i], cfg.ias, cfg.oas);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
}
}
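The walker refactor above replaces the dirty-only traversal with a single recursive walk parameterized by a visit() callback, so iova_to_phys, the new pgtable_walk op, and dirty read-out all share one traversal. A toy two-level model of the same shape (everything here is illustrative; the real code walks hardware page-table memory):

	#include <stdio.h>

	#define ENTRIES	4
	#define LEAF	1	/* illustrative "leaf" tag in the low bit */

	struct walk_data {
		int (*visit)(struct walk_data *wd, int lvl, unsigned long *pte);
		void *data;
	};

	static unsigned long l1[ENTRIES];
	static unsigned long l0[ENTRIES];

	static int walk(struct walk_data *wd, unsigned long *table, int lvl)
	{
		for (int i = 0; i < ENTRIES; i++) {
			unsigned long pte = table[i];

			if (!pte)
				continue;
			if (wd->visit(wd, lvl, &table[i]))
				return -1;
			if (!(pte & LEAF))	/* table entry: recurse */
				if (walk(wd, (unsigned long *)(pte & ~1UL), lvl + 1))
					return -1;
		}
		return 0;
	}

	static int count_leaves(struct walk_data *wd, int lvl, unsigned long *pte)
	{
		if (*pte & LEAF)
			(*(int *)wd->data)++;
		return 0;	/* non-zero would abort the walk, as in the driver */
	}

	int main(void)
	{
		int leaves = 0;
		struct walk_data wd = { .visit = count_leaves, .data = &leaves };

		l1[2] = 0xAAA0 | LEAF;		/* leaf at level 1 */
		l0[1] = (unsigned long)l1;	/* table pointer, low bit clear */
		l0[3] = 0xBBB0 | LEAF;		/* leaf at level 0 */

		walk(&wd, l0, 0);
		printf("leaves: %d\n", leaves);	/* 2 */
		return 0;
	}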
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 599030e1e890..60aed01e54f2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1756,7 +1756,7 @@ static int iommu_get_def_domain_type(struct iommu_group *group,
group->id);
/*
- * Try to recover, drivers are allowed to force IDENITY or DMA, IDENTITY
+ * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
* takes precedence.
*/
if (type == IOMMU_DOMAIN_IDENTITY)
@@ -2819,7 +2819,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!ops)
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
if (fwspec)
return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
@@ -3312,6 +3312,16 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *blocked_domain = ops->blocked_domain;
+
+ WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain,
+ dev, pasid, domain));
+}
+
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
@@ -3330,11 +3340,9 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain,
err_revert:
last_gdev = device;
for_each_group_device(group, device) {
- const struct iommu_ops *ops = dev_iommu_ops(device->dev);
-
if (device == last_gdev)
break;
- ops->remove_dev_pasid(device->dev, pasid, domain);
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
return ret;
}
@@ -3344,12 +3352,9 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
struct iommu_domain *domain)
{
struct group_device *device;
- const struct iommu_ops *ops;
- for_each_group_device(group, device) {
- ops = dev_iommu_ops(device->dev);
- ops->remove_dev_pasid(device->dev, pasid, domain);
- }
+ for_each_group_device(group, device)
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
/*
@@ -3368,16 +3373,20 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
+ const struct iommu_ops *ops;
int ret;
- if (!domain->ops->set_dev_pasid)
- return -EOPNOTSUPP;
-
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
- pasid == IOMMU_NO_PASID)
+ ops = dev_iommu_ops(dev);
+
+ if (!domain->ops->set_dev_pasid ||
+ !ops->blocked_domain ||
+ !ops->blocked_domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ if (ops != domain->owner || pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
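With the per-driver remove_dev_pasid op gone, the core detaches a PASID by installing the driver's blocked_domain on it, which is why iommu_attach_device_pasid() now refuses drivers whose blocked_domain lacks set_dev_pasid. A schematic model of that contract (all names invented for the demo):

	#include <stdio.h>

	struct domain;
	struct domain_ops {
		int (*set_dev_pasid)(struct domain *d, int pasid, struct domain *old);
	};
	struct domain {
		const struct domain_ops *ops;
		const char *name;
	};

	static int blocked_set_dev_pasid(struct domain *d, int pasid, struct domain *old)
	{
		/* tear down whatever 'old' had installed for this pasid */
		printf("pasid %d: %s -> %s\n", pasid, old ? old->name : "none", d->name);
		return 0;
	}

	static const struct domain_ops blocked_ops = {
		.set_dev_pasid = blocked_set_dev_pasid,
	};
	static struct domain blocked = { .ops = &blocked_ops, .name = "blocked" };
	static struct domain paging = { .name = "paging" };

	/* What the core's iommu_remove_dev_pasid() now reduces to. */
	static void remove_dev_pasid(int pasid, struct domain *old)
	{
		blocked.ops->set_dev_pasid(&blocked, pasid, old);
	}

	int main(void)
	{
		remove_dev_pasid(5, &paging);
		return 0;
	}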
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index 1fe804e28a86..d9a937450e55 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;
+ struct list_head free_list;
unsigned long index;
if (!fault)
return;
+ INIT_LIST_HEAD(&free_list);
mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_for_each_entry_safe(group, next, &fault->deliver, node) {
if (group->attach_handle != &handle->handle)
continue;
+ list_move(&group->node, &free_list);
+ }
+ spin_unlock(&fault->lock);
+
+ list_for_each_entry_safe(group, next, &free_list, node) {
list_del(&group->node);
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
@@ -213,6 +221,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
{
struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
struct iopf_group *group, *next;
+ unsigned long index;
/*
* The iommufd object's reference count is zero at this point.
@@ -225,6 +234,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
}
+ xa_for_each(&fault->response, index, group) {
+ xa_erase(&fault->response, index);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ xa_destroy(&fault->response);
+ mutex_destroy(&fault->mutex);
}
static void iommufd_compose_fault_message(struct iommu_fault *fault,
@@ -247,7 +263,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
{
size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
struct iommufd_fault *fault = filep->private_data;
- struct iommu_hwpt_pgfault data;
+ struct iommu_hwpt_pgfault data = {};
struct iommufd_device *idev;
struct iopf_group *group;
struct iopf_fault *iopf;
@@ -258,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
return -ESPIPE;
mutex_lock(&fault->mutex);
- while (!list_empty(&fault->deliver) && count > done) {
- group = list_first_entry(&fault->deliver,
- struct iopf_group, node);
-
- if (group->fault_count * fault_size > count - done)
+ while ((group = iommufd_fault_deliver_fetch(fault))) {
+ if (done >= count ||
+ group->fault_count * fault_size > count - done) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
rc = xa_alloc(&fault->response, &group->cookie, group,
xa_limit_32b, GFP_KERNEL);
- if (rc)
+ if (rc) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
idev = to_iommufd_handle(group->attach_handle)->idev;
list_for_each_entry(iopf, &group->faults, list) {
@@ -277,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
group->cookie);
if (copy_to_user(buf + done, &data, fault_size)) {
xa_erase(&fault->response, group->cookie);
+ iommufd_fault_deliver_restore(fault, group);
rc = -EFAULT;
break;
}
done += fault_size;
}
-
- list_del(&group->node);
}
mutex_unlock(&fault->mutex);
@@ -341,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
__poll_t pollflags = EPOLLOUT;
poll_wait(filep, &fault->wait_queue, wait);
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
if (!list_empty(&fault->deliver))
pollflags |= EPOLLIN | EPOLLRDNORM;
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
return pollflags;
}
@@ -386,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
INIT_LIST_HEAD(&fault->deliver);
xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
mutex_init(&fault->mutex);
+ spin_lock_init(&fault->lock);
init_waitqueue_head(&fault->wait_queue);
filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
@@ -434,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
hwpt = group->attach_handle->domain->fault_data;
fault = hwpt->fault;
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_add_tail(&group->node, &fault->deliver);
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
wake_up_interruptible(&fault->wait_queue);

diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index ce03c3804651..598be26a14e2 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -140,8 +140,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (ops->domain_alloc_paging_flags) {
- hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
- user_data);
+ hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -280,6 +280,8 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
struct iommufd_hw_pagetable *hwpt;
int rc;
+ if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ return ERR_PTR(-EOPNOTSUPP);
if (!user_data->len)
return ERR_PTR(-EOPNOTSUPP);
if (!viommu->ops || !viommu->ops->alloc_domain_nested)
@@ -296,7 +298,9 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
hwpt_nested->parent = viommu->hwpt;
hwpt->domain =
- viommu->ops->alloc_domain_nested(viommu, flags, user_data);
+ viommu->ops->alloc_domain_nested(viommu,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID,
+ user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index b6d706cf2c66..0b1bafc7fd99 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -443,14 +443,39 @@ struct iommufd_fault {
struct iommufd_ctx *ictx;
struct file *filep;
- /* The lists of outstanding faults protected by below mutex. */
- struct mutex mutex;
+ spinlock_t lock; /* protects the deliver list */
struct list_head deliver;
+ struct mutex mutex; /* serializes response flows */
struct xarray response;
struct wait_queue_head wait_queue;
};
+/* Fetch the first node out of the fault->deliver list */
+static inline struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+ struct list_head *list = &fault->deliver;
+ struct iopf_group *group = NULL;
+
+ spin_lock(&fault->lock);
+ if (!list_empty(list)) {
+ group = list_first_entry(list, struct iopf_group, node);
+ list_del(&group->node);
+ }
+ spin_unlock(&fault->lock);
+ return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+ struct iopf_group *group)
+{
+ spin_lock(&fault->lock);
+ list_add(&group->node, &fault->deliver);
+ spin_unlock(&fault->lock);
+}
+
struct iommufd_attach_handle {
struct iommu_attach_handle handle;
struct iommufd_device *idev;
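Splitting the deliver list out under its own spinlock keeps the IRQ-path producer off the mutex, and the new fetch/restore helpers give the reader a pop-then-push-back discipline: the head is removed up front, and every failure path puts it back so no fault is lost. A user-space analogue of that discipline:

	#include <pthread.h>
	#include <stdio.h>

	struct node { struct node *next; int id; };

	static pthread_spinlock_t lock;
	static struct node *head;

	/* Pop the first pending node, or NULL; mirrors iommufd_fault_deliver_fetch(). */
	static struct node *fetch(void)
	{
		struct node *n;

		pthread_spin_lock(&lock);
		n = head;
		if (n)
			head = n->next;
		pthread_spin_unlock(&lock);
		return n;
	}

	/* Put a node back at the head; mirrors iommufd_fault_deliver_restore(). */
	static void restore(struct node *n)
	{
		pthread_spin_lock(&lock);
		n->next = head;
		head = n;
		pthread_spin_unlock(&lock);
	}

	int main(void)
	{
		struct node a = { .id = 1 }, b = { .id = 2 };
		struct node *n;

		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		restore(&b);
		restore(&a);		/* list: a, b */

		n = fetch();		/* a */
		restore(n);		/* simulate a full user buffer: put it back */
		while ((n = fetch()))
			printf("delivered %d\n", n->id);	/* 1 then 2 */
		return 0;
	}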
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index ab665cf38ef4..39a86a4a1d3a 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -130,7 +130,7 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
- unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+ unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
}
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 97c5e3567d33..ccf616462a1c 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -104,7 +104,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
if (wait_event_timeout(ictx->destroy_wait,
refcount_read(&to_destroy->shortterm_users) ==
0,
- msecs_to_jiffies(10000)))
+ msecs_to_jiffies(60000)))
return 0;
pr_crit("Time out waiting for iommufd object to become free\n");
@@ -307,9 +307,9 @@ union ucmd_buffer {
struct iommu_ioas_map map;
struct iommu_ioas_unmap unmap;
struct iommu_option option;
+ struct iommu_vdevice_alloc vdev;
struct iommu_vfio_ioas vfio_ioas;
struct iommu_viommu_alloc viommu;
- struct iommu_vdevice_alloc vdev;
#ifdef CONFIG_IOMMUFD_TEST
struct iommu_test_cmd test;
#endif
@@ -333,8 +333,8 @@ struct iommufd_ioctl_op {
}
static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
- IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc,
- out_fault_fd),
+ IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc,
+ struct iommu_fault_alloc, out_fault_fd),
IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
__reserved),
IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
@@ -355,20 +355,18 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
src_iova),
IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
struct iommu_ioas_iova_ranges, out_iova_alignment),
- IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map,
- iova),
+ IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova),
IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file,
struct iommu_ioas_map_file, iova),
IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
length),
- IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option,
- val64),
+ IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
+ IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
+ struct iommu_vdevice_alloc, virt_id),
IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
__reserved),
IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
struct iommu_viommu_alloc, out_viommu_id),
- IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
- struct iommu_vdevice_alloc, virt_id),
#ifdef CONFIG_IOMMUFD_TEST
IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
#endif
@@ -490,8 +488,8 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_DEVICE] = {
.destroy = iommufd_device_destroy,
},
- [IOMMUFD_OBJ_IOAS] = {
- .destroy = iommufd_ioas_destroy,
+ [IOMMUFD_OBJ_FAULT] = {
+ .destroy = iommufd_fault_destroy,
},
[IOMMUFD_OBJ_HWPT_PAGING] = {
.destroy = iommufd_hwpt_paging_destroy,
@@ -501,15 +499,15 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
.destroy = iommufd_hwpt_nested_destroy,
.abort = iommufd_hwpt_nested_abort,
},
- [IOMMUFD_OBJ_FAULT] = {
- .destroy = iommufd_fault_destroy,
- },
- [IOMMUFD_OBJ_VIOMMU] = {
- .destroy = iommufd_viommu_destroy,
+ [IOMMUFD_OBJ_IOAS] = {
+ .destroy = iommufd_ioas_destroy,
},
[IOMMUFD_OBJ_VDEVICE] = {
.destroy = iommufd_vdevice_destroy,
},
+ [IOMMUFD_OBJ_VIOMMU] = {
+ .destroy = iommufd_viommu_destroy,
+ },
#ifdef CONFIG_IOMMUFD_TEST
[IOMMUFD_OBJ_SELFTEST] = {
.destroy = iommufd_selftest_destroy,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a0de6d6d4e68..d40deb0a4f06 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -311,25 +311,6 @@ static const struct iommu_dirty_ops dirty_ops = {
.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};
-static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
-{
- struct mock_dev *mdev = to_mock_dev(dev);
- struct mock_iommu_domain *mock;
-
- mock = kzalloc(sizeof(*mock), GFP_KERNEL);
- if (!mock)
- return NULL;
- mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
- mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
- mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
- if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
- mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
- mock->domain.ops = mock_ops.default_domain_ops;
- mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- xa_init(&mock->pfns);
- return &mock->domain;
-}
-
static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
@@ -385,21 +366,30 @@ mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_NEST_PARENT;
- bool no_dirty_ops = to_mock_dev(dev)->flags &
- MOCK_FLAGS_DEVICE_NO_DIRTY;
- struct iommu_domain *domain;
+ struct mock_dev *mdev = to_mock_dev(dev);
+ bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+ struct mock_iommu_domain *mock;
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(dev);
- if (!domain)
+ mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+ if (!mock)
return ERR_PTR(-ENOMEM);
+ mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+ mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
+ mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
+ mock->domain.ops = mock_ops.default_domain_ops;
+ mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ xa_init(&mock->pfns);
+
if (has_dirty_flag)
- domain->dirty_ops = &dirty_ops;
- return domain;
+ mock->domain.dirty_ops = &dirty_ops;
+ return &mock->domain;
}
static void mock_domain_free(struct iommu_domain *domain)
@@ -595,7 +585,7 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
struct mock_iommu_domain_nested *mock_nested;
- if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
mock_nested = __mock_domain_alloc_nested(user_data);
@@ -713,7 +703,6 @@ static const struct iommu_ops mock_ops = {
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.hw_info = mock_domain_hw_info,
- .domain_alloc_paging = mock_domain_alloc_paging,
.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
.domain_alloc_nested = mock_domain_alloc_nested,
.capable = mock_domain_capable,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index ce40f0a419ea..2769e4544038 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -725,47 +725,32 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu->dev = &pdev->dev;
INIT_LIST_HEAD(&iommu->ctx_list);
- iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ iommu->pclk = devm_clk_get_prepared(iommu->dev, "smmu_pclk");
if (IS_ERR(iommu->pclk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
"could not get smmu_pclk\n");
- ret = clk_prepare(iommu->pclk);
- if (ret)
- return dev_err_probe(iommu->dev, ret,
- "could not prepare smmu_pclk\n");
-
- iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
- if (IS_ERR(iommu->clk)) {
- clk_unprepare(iommu->pclk);
+ iommu->clk = devm_clk_get_prepared(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
"could not get iommu_clk\n");
- }
-
- ret = clk_prepare(iommu->clk);
- if (ret) {
- clk_unprepare(iommu->pclk);
- return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
- goto fail;
+ return ret;
}
ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
- if (iommu->irq < 0) {
- ret = -ENODEV;
- goto fail;
- }
+ if (iommu->irq < 0)
+ return -ENODEV;
ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
if (ret) {
dev_err(iommu->dev, "could not get ncb\n");
- goto fail;
+ return ret;
}
iommu->ncb = val;
@@ -780,8 +765,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (!par) {
pr_err("Invalid PAR value detected\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
@@ -791,7 +775,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
- goto fail;
+ return ret;
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
@@ -800,23 +784,19 @@ static int msm_iommu_probe(struct platform_device *pdev)
"msm-smmu.%pa", &ioaddr);
if (ret) {
pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
- goto fail;
+ return ret;
}
ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
- goto fail;
+ return ret;
}
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
return ret;
-fail:
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
- return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
@@ -824,20 +804,11 @@ static const struct of_device_id msm_iommu_dt_match[] = {
{}
};
-static void msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
-
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
-}
-
static struct platform_driver msm_iommu_driver = {
.driver = {
.name = "msm_iommu",
.of_match_table = msm_iommu_dt_match,
},
.probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);
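devm_clk_get_prepared() folds clk_get() and clk_prepare() into one managed operation whose undo (unprepare plus put) runs automatically on probe failure or device unbind, which is what lets every goto-fail path and the whole msm_iommu_remove() callback disappear. A crude user-space model of devres-style registration (all names invented for the demo):

	#include <stdio.h>
	#include <stdlib.h>

	typedef void (*release_fn)(void *);

	struct devres { release_fn release; void *data; struct devres *next; };
	static struct devres *devres_list;

	/* Register an undo action, newest first, like devres_add(). */
	static void devm_add(release_fn release, void *data)
	{
		struct devres *dr = malloc(sizeof(*dr));

		dr->release = release;
		dr->data = data;
		dr->next = devres_list;
		devres_list = dr;
	}

	/* Run all undo actions in reverse order, as on unbind or probe failure. */
	static void devres_release_all(void)
	{
		while (devres_list) {
			struct devres *dr = devres_list;

			devres_list = dr->next;
			dr->release(dr->data);
			free(dr);
		}
	}

	static void clk_unprepare_put(void *clk)
	{
		printf("unprepare+put %s\n", (const char *)clk);
	}

	static void *devm_clk_get_prepared_demo(const char *name)
	{
		printf("get+prepare %s\n", name);
		devm_add(clk_unprepare_put, (void *)name);
		return (void *)name;
	}

	int main(void)
	{
		devm_clk_get_prepared_demo("smmu_pclk");
		devm_clk_get_prepared_demo("iommu_clk");
		/* probe fails or the device unbinds: */
		devres_release_all();	/* iommu_clk first, then smmu_pclk */
		return 0;
	}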
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index ab60901f8f92..034b0e670384 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -510,7 +511,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
bank->parent_dev,
"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
- layer, write ? "write" : "read");
+ layer, str_write_read(write));
}
/* Interrupt clear */
@@ -602,7 +603,7 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ str_enable_disable(enable), dev_name(larb_mmu->dev),
portid_msk, regionid, upper_32_bits(region->iova_base));
if (enable)
@@ -630,8 +631,8 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
}
if (ret)
dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
- enable ? "enable" : "disable",
- dev_name(data->dev), portid_msk, ret);
+ str_enable_disable(enable), dev_name(data->dev),
+ portid_msk, ret);
}
return ret;
}
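str_write_read() and str_enable_disable() come from <linux/string_choices.h> and replace the open-coded ternaries in log messages. They behave like the following sketch (a sketch, not the kernel header itself):

	#include <stdbool.h>
	#include <stdio.h>

	static inline const char *str_write_read(bool v)
	{
		return v ? "write" : "read";
	}

	static inline const char *str_enable_disable(bool v)
	{
		return v ? "enable" : "disable";
	}

	int main(void)
	{
		bool write = true, enable = false;

		printf("fault type: %s\n", str_write_read(write));
		printf("%s iommu port\n", str_enable_disable(enable));
		return 0;
	}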
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b6de1ca00cef..a565b9e40f4a 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
@@ -243,7 +244,7 @@ static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
- enable ? "enable" : "disable", portid);
+ str_enable_disable(enable), portid);
if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e7a6a1611d19..97987cd78da9 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,8 +29,6 @@ static int of_iommu_xlate(struct device *dev,
return -ENODEV;
ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np));
- if (ret == -EPROBE_DEFER)
- return driver_deferred_probe_check_state(dev);
if (ret)
return ret;
diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c
index c7a89143014c..d82d2b00904c 100644
--- a/drivers/iommu/riscv/iommu-pci.c
+++ b/drivers/iommu/riscv/iommu-pci.c
@@ -101,6 +101,13 @@ static void riscv_iommu_pci_remove(struct pci_dev *pdev)
riscv_iommu_remove(iommu);
}
+static void riscv_iommu_pci_shutdown(struct pci_dev *pdev)
+{
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+
+ riscv_iommu_disable(iommu);
+}
+
static const struct pci_device_id riscv_iommu_pci_tbl[] = {
{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0},
{PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0},
@@ -112,6 +119,7 @@ static struct pci_driver riscv_iommu_pci_driver = {
.id_table = riscv_iommu_pci_tbl,
.probe = riscv_iommu_pci_probe,
.remove = riscv_iommu_pci_remove,
+ .shutdown = riscv_iommu_pci_shutdown,
.driver = {
.suppress_bind_attrs = true,
},
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 382ba2841849..725e919b97ef 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -11,18 +11,43 @@
*/
#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "iommu-bits.h"
#include "iommu.h"
+static void riscv_iommu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct riscv_iommu_device *iommu = dev_get_drvdata(dev);
+ u16 idx = desc->msi_index;
+ u64 addr;
+
+ addr = ((u64)msg->address_hi << 32) | msg->address_lo;
+
+ if (addr != (addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR)) {
+ dev_err_once(dev,
+ "uh oh, the IOMMU can't send MSIs to 0x%llx, sending to 0x%llx instead\n",
+ addr, addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR);
+ }
+
+ addr &= RISCV_IOMMU_MSI_CFG_TBL_ADDR;
+
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(idx), addr);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(idx), msg->data);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(idx), 0);
+}
+
static int riscv_iommu_platform_probe(struct platform_device *pdev)
{
+ enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
struct resource *res = NULL;
- int vec;
+ int vec, ret;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -40,16 +65,6 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES);
iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
- /* For now we only support WSI */
- switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) {
- case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
- case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
- break;
- default:
- return dev_err_probe(dev, -ENODEV,
- "unable to use wire-signaled interrupts\n");
- }
-
iommu->irqs_count = platform_irq_count(pdev);
if (iommu->irqs_count <= 0)
return dev_err_probe(dev, -ENODEV,
@@ -57,13 +72,58 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT)
iommu->irqs_count = RISCV_IOMMU_INTR_COUNT;
- for (vec = 0; vec < iommu->irqs_count; vec++)
- iommu->irqs[vec] = platform_get_irq(pdev, vec);
+ igs = FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps);
+ switch (igs) {
+ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
+ case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
+ if (is_of_node(dev->fwnode))
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+
+ if (!dev_get_msi_domain(dev)) {
+ dev_warn(dev, "failed to find an MSI domain\n");
+ goto msi_fail;
+ }
+
+ ret = platform_device_msi_init_and_alloc_irqs(dev, iommu->irqs_count,
+ riscv_iommu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ goto msi_fail;
+ }
+
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = msi_get_virq(dev, vec);
+
+ /* Enable message-signaled interrupts, fctl.WSI */
+ if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) {
+ iommu->fctl ^= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+
+ dev_info(dev, "using MSIs\n");
+ break;
+
+msi_fail:
+ if (igs != RISCV_IOMMU_CAPABILITIES_IGS_BOTH) {
+ return dev_err_probe(dev, -ENODEV,
+ "unable to use wire-signaled interrupts\n");
+ }
- /* Enable wire-signaled interrupts, fctl.WSI */
- if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
- iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
- riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ fallthrough;
+
+ case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = platform_get_irq(pdev, vec);
+
+ /* Enable wire-signaled interrupts, fctl.WSI */
+ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
+ iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+ dev_info(dev, "using wire-signaled interrupts\n");
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "invalid IGS\n");
}
return riscv_iommu_init(iommu);
@@ -71,7 +131,18 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
static void riscv_iommu_platform_remove(struct platform_device *pdev)
{
- riscv_iommu_remove(dev_get_drvdata(&pdev->dev));
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+ bool msi = !(iommu->fctl & RISCV_IOMMU_FCTL_WSI);
+
+ riscv_iommu_remove(iommu);
+
+ if (msi)
+ platform_device_msi_free_irqs_all(&pdev->dev);
+}
+
+static void riscv_iommu_platform_shutdown(struct platform_device *pdev)
+{
+ riscv_iommu_disable(dev_get_drvdata(&pdev->dev));
};
static const struct of_device_id riscv_iommu_of_match[] = {
@@ -82,6 +153,7 @@ static const struct of_device_id riscv_iommu_of_match[] = {
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
+ .shutdown = riscv_iommu_platform_shutdown,
.driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
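The reworked probe prefers MSIs whenever the capability field allows them and falls back to wire-signaled interrupts only when the hardware advertises both. A compact restatement of that control flow (enum values and helpers invented for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	enum igs { IGS_MSI, IGS_WSI, IGS_BOTH, IGS_INVALID };

	static bool setup_msi(bool msi_works)
	{
		if (!msi_works)
			printf("MSI setup failed\n");
		return msi_works;
	}

	static int probe_irqs(enum igs igs, bool msi_works)
	{
		switch (igs) {
		case IGS_BOTH:
		case IGS_MSI:
			if (setup_msi(msi_works)) {
				printf("using MSIs\n");
				break;
			}
			if (igs != IGS_BOTH)
				return -1;	/* MSI-only hardware, no fallback */
			/* fall through to wire-signaled interrupts */
		case IGS_WSI:
			printf("using wire-signaled interrupts\n");
			break;
		default:
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		probe_irqs(IGS_BOTH, false);	/* falls back to WSI */
		probe_irqs(IGS_MSI, true);	/* uses MSIs */
		return 0;
	}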
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8a05def774bd..8f049d4a0e2c 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -240,6 +240,12 @@ static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu,
return rc;
}
+ /* Empty queue before enabling it */
+ if (queue->qid == RISCV_IOMMU_INTR_CQ)
+ riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0);
+ else
+ riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0);
+
/*
* Enable queue with interrupts, clear any memory fault if any.
* Wait for the hardware to acknowledge request and activate queue
@@ -645,9 +651,11 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
* This is best effort IOMMU translation shutdown flow.
* Disable IOMMU without waiting for hardware response.
*/
-static void riscv_iommu_disable(struct riscv_iommu_device *iommu)
+void riscv_iommu_disable(struct riscv_iommu_device *iommu)
{
- riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0);
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
+ FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE,
+ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE));
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0);
@@ -1270,7 +1278,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
dma_addr_t iova)
{
struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
- unsigned long pte_size;
+ size_t pte_size;
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
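Writing DDTP to plain 0 selects the Off mode, which blocks all DMA during shutdown; the fix programs the mode field to BARE (passthrough) instead, using FIELD_PREP to place the value under the field's mask. A minimal model of FIELD_PREP (mask and mode values illustrative, not the RISC-V IOMMU spec's):

	#include <stdint.h>
	#include <stdio.h>

	/* Minimal FIELD_PREP: shift a value into the position given by the mask. */
	#define FIELD_PREP(mask, val) \
		(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

	#define DDTP_MODE	0xFULL	/* illustrative 4-bit mode field */
	#define MODE_OFF	0	/* block all DMA */
	#define MODE_BARE	1	/* passthrough */

	int main(void)
	{
		uint64_t ddtp = FIELD_PREP(DDTP_MODE, MODE_BARE);

		printf("ddtp = %#llx\n", (unsigned long long)ddtp); /* 0x1, not 0x0 */
		return 0;
	}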
diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h
index b1c4664542b4..46df79dd5495 100644
--- a/drivers/iommu/riscv/iommu.h
+++ b/drivers/iommu/riscv/iommu.h
@@ -64,6 +64,7 @@ struct riscv_iommu_device {
int riscv_iommu_init(struct riscv_iommu_device *iommu);
void riscv_iommu_remove(struct riscv_iommu_device *iommu);
+void riscv_iommu_disable(struct riscv_iommu_device *iommu);
#define riscv_iommu_readl(iommu, addr) \
readl_relaxed((iommu)->reg + (addr))
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4b369419b32c..323cc665c357 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include "iommu-pages.h"
@@ -611,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
dev_err(iommu->dev, "Page fault at %pad of type %s\n",
&iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ str_write_read(flags == IOMMU_FAULT_WRITE));
log_iova(iommu, i, iova);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9bee02db1643..083fa9578bb7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -169,6 +169,7 @@ config IXP4XX_IRQ
config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
+ depends on MCHP_LAN966X_PCI || COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
@@ -534,8 +535,9 @@ config LS1X_IRQ
Support for the Loongson-1 platform Interrupt Controller.
config TI_SCI_INTR_IRQCHIP
- bool
+ tristate "TI SCI INTR Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || COMPILE_TEST
select IRQ_DOMAIN_HIERARCHY
help
This enables the irqchip driver support for K3 Interrupt router
@@ -544,8 +546,9 @@ config TI_SCI_INTR_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_SCI_INTA_IRQCHIP
- bool
+ tristate "TI SCI INTA Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || (COMPILE_TEST && ARM64)
select IRQ_DOMAIN_HIERARCHY
select TI_SCI_INTA_MSI_DOMAIN
help
@@ -587,13 +590,7 @@ config RISCV_IMSIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_MSI_IRQ
-
-config RISCV_IMSIC_PCI
- bool
- depends on RISCV_IMSIC
- depends on PCI
- depends on PCI_MSI
- default RISCV_IMSIC
+ select IRQ_MSI_LIB
config SIFIVE_PLIC
bool
@@ -749,6 +746,18 @@ config MCHP_EIC
help
Support for Microchip External Interrupt Controller.
+config SOPHGO_SG2042_MSI
+ bool "Sophgo SG2042 MSI Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
+ select PCI_MSI
+ help
+ Support for the Sophgo SG2042 MSI Controller.
+ This on-chip interrupt controller enables MSI sources to be
+	  routed to the primary PLIC controller on the SoC.
+
config SUNPLUS_SP7021_INTC
bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
default SOC_SP7021
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 25e9ad29b8c4..dd60e597491d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -128,4 +128,5 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SOPHGO_SG2042_MSI) += irq-sg2042-msi.o
obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index da5250f0155c..2b1684c60e3c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
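The hunk above tightens the PMU FIQ check: the interrupt is handled only when the PMCR0 mode field actually selects FIQ delivery and the active bit is set, instead of firing on IACT alone. A standalone sketch of the masked compare; the bit positions (IMODE in bits 10:8, FIQ encoding 4, IACT at bit 11) are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>

#define PMCR0_IMODE      (0x7ULL << 8)  /* assumed 3-bit interrupt-mode field */
#define PMCR0_IMODE_FIQ  (4ULL << 8)    /* assumed "deliver as FIQ" encoding */
#define PMCR0_IACT       (1ULL << 11)   /* assumed interrupt-active flag */

static bool pmu_fiq_pending(uint64_t pmcr0)
{
	/* Mask both fields, then require mode == FIQ *and* active together. */
	return (pmcr0 & (PMCR0_IMODE | PMCR0_IACT)) ==
	       (PMCR0_IMODE_FIQ | PMCR0_IACT);
}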
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index c988886917f7..db4c9721fcf2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -61,32 +61,6 @@ struct brcmstb_l2_intc_data {
u32 saved_mask; /* for suspend/resume */
};
-/**
- * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
- * @d: irq_data
- *
- * Chip has separate enable/disable registers instead of a single mask
- * register and pending interrupt is acknowledged by setting a bit.
- *
- * Note: This function is generic and could easily be added to the
- * generic irqchip implementation if there ever becomes a will to do so.
- * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
- *
- * e.g.: https://patchwork.kernel.org/patch/9831047/
- */
-static void brcmstb_l2_mask_and_ack(struct irq_data *d)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = d->mask;
-
- irq_gc_lock(gc);
- irq_reg_writel(gc, mask, ct->regs.disable);
- *ct->mask_cache &= ~mask;
- irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
-}
-
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
@@ -248,7 +222,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
if (init_params->cpu_clear >= 0) {
ct->regs.ack = init_params->cpu_clear;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
} else {
/* No Ack - but still slightly more efficient to define this */
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index f4f8e9fadbbf..d7948c55f542 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -154,24 +153,20 @@ static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static int __init
-davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
- struct device_node *node)
+static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num_irqs,
+ struct device_node *node)
{
- unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ unsigned int num_regs = BITS_TO_LONGS(num_irqs);
int offset, irq_base;
void __iomem *req;
- req = request_mem_region(config->reg.start,
- resource_size(&config->reg),
- "davinci-cp-intc");
+ req = request_mem_region(res->start, resource_size(res), "davinci-cp-intc");
if (!req) {
pr_err("%s: register range busy\n", __func__);
return -EBUSY;
}
- davinci_cp_intc_base = ioremap(config->reg.start,
- resource_size(&config->reg));
+ davinci_cp_intc_base = ioremap(res->start, resource_size(res));
if (!davinci_cp_intc_base) {
pr_err("%s: unable to ioremap register range\n", __func__);
return -EINVAL;
@@ -184,8 +179,7 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Disable system interrupts */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
/* Set to normal mode, no nesting, no priority hold */
davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
@@ -193,28 +187,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Clear system interrupt status */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
/* Enable nIRQ (what about nFIQ?) */
davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+ /* 4 channels per register */
+ num_regs = (num_irqs + 3) >> 2;
/* Default all priorities to channel 7. */
- num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(0x07070707,
- DAVINCI_CP_INTC_CHAN_MAP(offset));
+ davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset));
- irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (irq_base < 0) {
- pr_err("%s: unable to allocate interrupt descriptors: %d\n",
- __func__, irq_base);
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base);
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(
- node, config->num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
@@ -229,31 +220,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
return 0;
}
-int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
-{
- return davinci_cp_intc_do_init(config, NULL);
-}
-
static int __init davinci_cp_intc_of_init(struct device_node *node,
struct device_node *parent)
{
- struct davinci_cp_intc_config config = { };
+ unsigned int num_irqs;
+ struct resource res;
int ret;
- ret = of_address_to_resource(node, 0, &config.reg);
+ ret = of_address_to_resource(node, 0, &res);
if (ret) {
- pr_err("%s: unable to get the register range from device-tree\n",
- __func__);
+ pr_err("%s: unable to get the register range from device-tree\n", __func__);
return ret;
}
- ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ ret = of_property_read_u32(node, "ti,intc-size", &num_irqs);
if (ret) {
- pr_err("%s: unable to read the 'ti,intc-size' property\n",
- __func__);
+ pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__);
return ret;
}
- return davinci_cp_intc_do_init(&config, node);
+ return davinci_cp_intc_do_init(&res, num_irqs, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..1e3476c335ca 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -255,6 +255,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index e150365fbe89..bdb04c808148 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -203,6 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 92244cfa0464..f30ed281882f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -205,13 +205,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static gfp_t gfp_flags_quirk;
+
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
unsigned int order)
{
struct page *page;
int ret = 0;
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
if (!page)
return NULL;
@@ -2045,7 +2047,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
- guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
@@ -4887,6 +4889,17 @@ static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
return true;
}
+static bool __maybe_unused its_enable_rk3568002(void *data)
+{
+ if (!of_machine_is_compatible("rockchip,rk3566") &&
+ !of_machine_is_compatible("rockchip,rk3568"))
+ return false;
+
+ gfp_flags_quirk |= GFP_DMA32;
+
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4954,6 +4967,14 @@ static const struct gic_quirk its_quirks[] = {
.property = "dma-noncoherent",
.init = its_set_non_coherent,
},
+#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
+ {
+ .desc = "ITS: Rockchip erratum RK3568002",
+ .iidr = 0x0201743b,
+ .mask = 0xffffffff,
+ .init = its_enable_rk3568002,
+ },
+#endif
{
}
};
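The rk3568002 quirk works because every ITS table allocation funnels through its_alloc_pages_node(), so OR-ing GFP_DMA32 into a single global constrains all of them to memory below 4 GiB on the affected Rockchip parts. A standalone sketch of that shape, with stand-in types and names for illustration:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int gfp_t;
#define GFP_DMA32 0x1u             /* stand-in: "allocate below 4 GiB" */

static gfp_t gfp_flags_quirk;      /* consulted by every table allocation */

static bool enable_rk3568002(const char *machine)
{
	if (strcmp(machine, "rockchip,rk3566") &&
	    strcmp(machine, "rockchip,rk3568"))
		return false;

	gfp_flags_quirk |= GFP_DMA32;  /* later allocations inherit the limit */
	return true;
}

static void *its_table_alloc(size_t size, gfp_t gfp)
{
	gfp |= gfp_flags_quirk;        /* the quirk applies to every caller */
	(void)gfp;                     /* a real allocator would honor this */
	return malloc(size);
}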
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 3fe870f8ee17..3e1d8a1cda5e 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -201,6 +201,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 79d8cc80693c..270d7a4d85a6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
+#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+static bool nmi_support_forbidden;
+
/*
* There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
* are potentially stolen by the secure side. Some code, especially code dealing
@@ -163,21 +166,27 @@ static void __init gic_prio_init(void)
{
bool ds;
- ds = gic_dist_security_disabled();
- if (!ds) {
- u32 val;
-
- val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
- val |= GICD_CTLR_DS;
- writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+ cpus_have_group0 = gic_has_group0();
- ds = gic_dist_security_disabled();
- if (ds)
- pr_warn("Broken GIC integration, security disabled");
+ ds = gic_dist_security_disabled();
+ if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
+ if (cpus_have_group0) {
+ u32 val;
+
+ val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+ val |= GICD_CTLR_DS;
+ writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+
+ ds = gic_dist_security_disabled();
+ if (ds)
+ pr_warn("Broken GIC integration, security disabled\n");
+ } else {
+ pr_warn("Broken GIC integration, pNMI forbidden\n");
+ nmi_support_forbidden = true;
+ }
}
cpus_have_security_disabled = ds;
- cpus_have_group0 = gic_has_group0();
/*
* How priority values are used by the GIC depends on two things:
@@ -209,7 +218,7 @@ static void __init gic_prio_init(void)
* be in the non-secure range, we program the non-secure values into
* the distributor to match the PMR values we want.
*/
- if (cpus_have_group0 & !cpus_have_security_disabled) {
+ if (cpus_have_group0 && !cpus_have_security_disabled) {
dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
}
@@ -1522,7 +1531,7 @@ static int gic_retrigger(struct irq_data *data)
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
gic_cpu_sys_reg_enable();
@@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
+static bool gic_enable_quirk_rk3399(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ if (of_machine_is_compatible("rockchip,rk3399")) {
+ d->flags |= FLAGS_WORKAROUND_INSECURE;
+ return true;
+ }
+
+ return false;
+}
+
static bool rd_set_non_coherent(void *data)
{
struct gic_chip_data *d = data;
@@ -1997,6 +2018,12 @@ static const struct gic_quirk gic_quirks[] = {
.init = rd_set_non_coherent,
},
{
+ .desc = "GICv3: Insecure RK3399 integration",
+ .iidr = 0x0000043b,
+ .mask = 0xff000fff,
+ .init = gic_enable_quirk_rk3399,
+ },
+ {
}
};
@@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void)
{
int i;
- if (!gic_prio_masking_enabled())
+ if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
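Background for the gic_prio_init() rework: when Group 0 is usable but security is enabled, priority values written through the non-secure register view are compressed into the upper half of the range, so values programmed into the distributor must be pre-translated. A sketch assuming the kernel's __gicv3_prio_to_ns() semantics (left shift by one, truncated to 8 bits):

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: the non-secure view of a secure priority value. */
static inline uint8_t gicv3_prio_to_ns(uint8_t prio)
{
	return (uint8_t)(prio << 1);
}

int main(void)
{
	/* e.g. a secure-view default priority 0xa0 is written as 0x40. */
	printf("0xa0 -> 0x%02x\n", gicv3_prio_to_ns(0xa0));
	return 0;
}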
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index b0e9788c0045..afbfcce3b1e3 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -24,7 +24,7 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
-#define CHAN_MAX_OUTPUT_INT 0x8
+#define CHAN_MAX_OUTPUT_INT 0xF
struct irqsteer_data {
void __iomem *regs;
@@ -228,10 +228,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
for (i = 0; i < data->irq_count; i++) {
data->irq[i] = irq_of_parse_and_map(np, i);
- if (!data->irq[i]) {
- ret = -EINVAL;
- goto out;
- }
+ if (!data->irq[i])
+ break;
irq_set_chained_handler_and_data(data->irq[i],
imx_irqsteer_irq_handler,
@@ -254,9 +252,13 @@ static void imx_irqsteer_remove(struct platform_device *pdev)
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < irqsteer_data->irq_count; i++)
+ for (i = 0; i < irqsteer_data->irq_count; i++) {
+ if (!irqsteer_data->irq[i])
+ break;
+
irq_set_chained_handler_and_data(irqsteer_data->irq[i],
NULL, NULL);
+ }
irq_domain_remove(irqsteer_data->domain);
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 4342a21de1eb..69aacdfc8bef 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -214,6 +214,7 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
static const struct msi_parent_ops imx_mu_msi_parent_ops = {
.supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
.required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "MU-MSI-",
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index b9dcc8e78c75..1f613eb7b7f0 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
static void handle_jcore_irq(struct irq_desc *desc)
{
if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
- handle_percpu_irq(desc);
+ handle_percpu_devid_irq(desc);
else
handle_simple_irq(desc);
}
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 808c781e2548..37e1a03fcbb4 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -141,18 +141,11 @@ static int keystone_irq_probe(struct platform_device *pdev)
if (!kirq)
return -ENOMEM;
- kirq->devctrl_regs =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ kirq->devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &kirq->devctrl_offset);
if (IS_ERR(kirq->devctrl_regs))
return PTR_ERR(kirq->devctrl_regs);
- ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &kirq->devctrl_offset);
- if (ret) {
- dev_err(dev, "couldn't read the devctrl_offset offset!\n");
- return ret;
- }
-
kirq->irq = platform_get_irq(pdev, 0);
if (kirq->irq < 0)
return kirq->irq;
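The keystone change collapses a regmap lookup plus a separate of_property_read_u32_index() into one call: syscon_regmap_lookup_by_phandle_args() returns the regmap and copies the phandle's cell arguments in a single step. A kernel-C usage sketch, assuming the binding's usual shape (ti,syscon-dev = <&devctrl offset>):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_lookup(struct device_node *np, struct regmap **map,
			  u32 *offset)
{
	/* One call: regmap lookup plus the single phandle argument. */
	*map = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
						    1, offset);
	if (IS_ERR(*map))
		return PTR_ERR(*map);  /* also covers a missing property */

	return 0;
}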
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index 0f6e465dd309..80e55955a29f 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -56,6 +56,15 @@ struct avecintc_data {
unsigned int moving;
};
+static inline void avecintc_enable(void)
+{
+ u64 value;
+
+ value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ value |= IOCSR_MISC_FUNC_AVEC_EN;
+ iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
static inline void avecintc_ack_irq(struct irq_data *d)
{
}
@@ -127,6 +136,8 @@ static int avecintc_cpu_online(unsigned int cpu)
guard(raw_spinlock)(&loongarch_avec.lock);
+ avecintc_enable();
+
irq_matrix_online(loongarch_avec.vector_matrix);
pending_list_init(cpu);
@@ -339,7 +350,6 @@ static int __init irq_matrix_init(void)
static int __init avecintc_init(struct irq_domain *parent)
{
int ret, parent_irq;
- unsigned long value;
raw_spin_lock_init(&loongarch_avec.lock);
@@ -378,9 +388,7 @@ static int __init avecintc_init(struct irq_domain *parent)
"irqchip/loongarch/avecintc:starting",
avecintc_cpu_online, avecintc_cpu_offline);
#endif
- value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
- value |= IOCSR_MISC_FUNC_AVEC_EN;
- iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+ avecintc_enable();
return ret;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index bd337ecddb40..9c62108b3ad5 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -146,6 +146,7 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
static struct msi_parent_ops pch_msi_parent_ops = {
.required_flags = PCH_MSI_FLAGS_REQUIRED,
.supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "PCH-",
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index d8e29fc0d406..51464c6257f3 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -28,6 +28,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct msi_domain_info *info)
{
const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ struct irq_chip *chip = info->chip;
u32 required_flags;
/* Parent ops available? */
@@ -92,10 +93,10 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
info->flags |= required_flags;
/* Chip updates for all child bus types */
- if (!info->chip->irq_eoi)
- info->chip->irq_eoi = irq_chip_eoi_parent;
- if (!info->chip->irq_ack)
- info->chip->irq_ack = irq_chip_ack_parent;
+ if (!chip->irq_eoi && (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI))
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_ack && (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK))
+ chip->irq_ack = irq_chip_ack_parent;
/*
* The device MSI domain can never have a set affinity callback. It
@@ -105,7 +106,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
*/
- info->chip->irq_set_affinity = msi_domain_set_affinity;
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
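Taken together, the msi-lib change inverts the old default: irq_eoi/irq_ack are wired to the parent callbacks only when the parent opts in via chip_flags, which is why every msi_parent_ops in this series gains the new bits. A composite sketch of the resulting shape (flag values illustrative, not from any one driver):

static const struct msi_parent_ops example_msi_parent_ops = {
	.supported_flags   = MSI_GENERIC_FLAGS_MASK,        /* illustrative */
	.required_flags    = MSI_FLAG_USE_DEF_DOM_OPS |
			     MSI_FLAG_USE_DEF_CHIP_OPS,     /* illustrative */
	/* Without these two bits, msi_lib_init_dev_msi_info() now leaves
	 * irq_eoi/irq_ack untouched instead of defaulting to the parent. */
	.chip_flags        = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token  = DOMAIN_BUS_NEXUS,
	.bus_select_mask   = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix            = "EXAMPLE-",
	.init_dev_msi_info = msi_lib_init_dev_msi_info,
};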
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 2b6183919ea4..d67f93f6d750 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -161,6 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index b337f6c05f18..4eebed39880a 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = d->host_data;
+ struct msi_domain_info *info = d->host_data;
+ struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index ff19bfd258dc..28f7e81df94f 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -157,6 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 065166ab5dbc..ebd4a9014e8d 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -356,6 +356,7 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
static const struct msi_parent_ops sei_msi_parent_ops = {
.supported_flags = SEI_MSI_FLAGS_SUPPORTED,
.required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PLATFORM_MSI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.prefix = "SEI-",
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 8e76d2913e6b..4441ffe149ea 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
- seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+ seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}
static struct irq_chip partition_irq_chip = {
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 99e27e01b0b1..6a2e41f02446 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -541,43 +541,36 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
return -ENODEV;
parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- dev_err(&pdev->dev, "cannot find parent domain\n");
- return -ENODEV;
- }
+ if (!parent_domain)
+ return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
- rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ rzg2l_irqc_data = devm_kzalloc(dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
if (!rzg2l_irqc_data)
return -ENOMEM;
rzg2l_irqc_data->irqchip = irq_chip;
- rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ rzg2l_irqc_data->base = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
- if (ret) {
- dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- return ret;
- }
-
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn))
- return PTR_ERR(resetn);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- return ret;
+ resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(resetn)) {
+ return dev_err_probe(dev, PTR_ERR(resetn),
+ "failed to acquire deasserted reset: %d\n", ret);
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
- }
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
raw_spin_lock_init(&rzg2l_irqc_data->lock);
@@ -585,9 +578,8 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
node, &rzg2l_irqc_domain_ops,
rzg2l_irqc_data);
if (!irq_domain) {
- dev_err(&pdev->dev, "failed to add irq domain\n");
- ret = -ENOMEM;
- goto pm_put;
+ pm_runtime_put(dev);
+ return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
@@ -604,13 +596,6 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
dev = NULL;
return 0;
-
-pm_put:
- pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
- return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
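Both Renesas conversions (this one and the RZ/V2H one below) lean on devres: devm_reset_control_get_exclusive_deasserted() acquires and deasserts the reset in one call and re-asserts it automatically on detach, which is what lets the manual pm_disable/put_dev unwind labels disappear. A before/after sketch in kernel C:

/* Before: acquire, deassert, and re-assert on every error path. */
resetn = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(resetn))
	return PTR_ERR(resetn);
ret = reset_control_deassert(resetn);
if (ret)
	return ret;
/* ...each later failure must remember reset_control_assert(resetn)... */

/* After: one devres-managed call; re-assert happens automatically. */
resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
if (IS_ERR(resetn))
	return dev_err_probe(dev, PTR_ERR(resetn),
			     "failed to acquire deasserted reset\n");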
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index fe2d29e91026..3d5b5fdf9bde 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -64,11 +64,18 @@
#define ICU_TINT_LEVEL_HIGH 2
#define ICU_TINT_LEVEL_LOW 3
-#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
-#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
-#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
-#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
-#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
+#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width) ((tssel) << ((n) * (field_width)))
+#define ICU_TSSR_TSSEL_MASK(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \
+})
+
+#define ICU_TSSR_TIEN(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ BIT((_field_width) - 1) << ((n) * (_field_width)); \
+})
#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
@@ -78,20 +85,36 @@
#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-#define ICU_PB5_TINT 0x55
+#define ICU_RZG3E_TINT_OFFSET 0x800
+#define ICU_RZG3E_TSSEL_MAX_VAL 0x8c
+#define ICU_RZV2H_TSSEL_MAX_VAL 0x55
+
+/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @tssel_lut: TINT lookup table
+ * @t_offs: TINT offset
+ * @max_tssel: TSSEL max value
+ * @field_width: TSSR field width
+ */
+struct rzv2h_hw_info {
+ const u8 *tssel_lut;
+ u16 t_offs;
+ u8 max_tssel;
+ u8 field_width;
+};
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
- * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
- const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +134,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -130,21 +153,23 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u32 tint_nr, tssel_n, k, tssr;
+ u8 nr_tint;
if (hw_irq < ICU_TINT_START)
return;
tint_nr = hw_irq - ICU_TINT_START;
- k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
- tssr |= ICU_TSSR_TIEN(tssel_n);
+ tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
else
- tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +272,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +282,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -268,6 +293,7 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
unsigned int hwirq;
u32 tint, sense;
int tint_nr;
+ u8 nr_tint;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
@@ -290,39 +316,42 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
+ priv = irq_data_to_priv(d);
tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
- if (tint > ICU_PB5_TINT)
+ if (tint > priv->info->max_tssel)
return -EINVAL;
- priv = irq_data_to_priv(d);
- hwirq = irqd_to_hwirq(d);
+ if (priv->info->tssel_lut)
+ tint = priv->info->tssel_lut[tint];
+ hwirq = irqd_to_hwirq(d);
tint_nr = hwirq - ICU_TINT_START;
- tssr_k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ tssr_k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
+ tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
titsr_k = ICU_TITSR_K(tint_nr);
titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
- tien = ICU_TSSR_TIEN(titsel_n);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
- tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
- tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -390,7 +419,7 @@ static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigne
if (hwirq > (ICU_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -421,7 +450,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,50 +468,48 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
-
- rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(resetn)) {
ret = PTR_ERR(resetn);
- goto put_dev;
+ dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret);
+ return ret;
}
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret);
+ return ret;
}
- pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
+ return ret;
}
raw_spin_lock_init(&rzv2h_icu_data->lock);
@@ -489,6 +522,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -497,16 +532,61 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
pm_put:
pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+/* Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4 */
+static const u8 rzg3e_tssel_lut[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */
+ 89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */
+ 111, 112, /* P20-P21 */
+ 97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */
+ 105, 106, 107, 108, 109, 110, /* P40-P45 */
+ 113, 114, 115, 116, 117, 118, 119, /* P50-P56 */
+ 120, 121, 122, 123, 124, 125, 126, /* P60-P66 */
+ 127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */
+ 135, 136, 137, 138, 139, 140, /* P80-P85 */
+ 43, 44, 45, 46, 47, 48, 49, 50, /* PA0-PA7 */
+ 51, 52, 53, 54, 55, 56, 57, 58, /* PB0-PB7 */
+ 59, 60, 61, /* PC0-PC2 */
+ 62, 63, 64, 65, 66, 67, 68, 69, /* PD0-PD7 */
+ 70, 71, 72, 73, 74, 75, 76, 77, /* PE0-PE7 */
+ 78, 79, 80, /* PF0-PF2 */
+ 25, 26, 27, 28, 29, 30, 31, 32, /* PG0-PG7 */
+ 33, 34, 35, 36, 37, 38, /* PH0-PH5 */
+ 4, 5, 6, 7, 8, /* PJ0-PJ4 */
+ 39, 40, 41, 42, /* PK0-PK3 */
+ 9, 10, 11, 12, 21, 22, 23, 24, /* PL0-PL7 */
+ 13, 14, 15, 16, 17, 18, 19, 20, /* PM0-PM7 */
+ 0, 1, 2, 3 /* PS0-PS3 */
+};
+
+static const struct rzv2h_hw_info rzg3e_hw_params = {
+ .tssel_lut = rzg3e_tssel_lut,
+ .t_offs = ICU_RZG3E_TINT_OFFSET,
+ .max_tssel = ICU_RZG3E_TSSEL_MAX_VAL,
+ .field_width = 16,
+};
+
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+ .max_tssel = ICU_RZV2H_TSSEL_MAX_VAL,
+ .field_width = 8,
+};
+
+static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+}
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
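The parameterized TSSR macros above exist because RZ/V2H packs four 8-bit TSSEL fields per 32-bit register while RZ/G3E packs two 16-bit fields. A standalone sketch of the per-variant slot arithmetic the driver now performs:

#include <stdint.h>
#include <stdio.h>

static void tssr_slot(unsigned int tint_nr, unsigned int field_width)
{
	unsigned int nr_tint = 32 / field_width;  /* fields per 32-bit TSSR */
	unsigned int k = tint_nr / nr_tint;       /* which TSSR register */
	unsigned int n = tint_nr % nr_tint;       /* field index within it */
	uint32_t tien = (1u << (field_width - 1)) << (n * field_width);

	printf("TINT%u -> TSSR(%u) field %u, TIEN mask 0x%08x\n",
	       tint_nr, k, n, tien);
}

int main(void)
{
	tssr_slot(5, 8);   /* RZ/V2H: 4 fields/reg -> TSSR(1), field 1 */
	tssr_slot(5, 16);  /* RZ/G3E: 2 fields/reg -> TSSR(2), field 1 */
	return 0;
}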
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..205ad61d15e4 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
};
struct aplic_idc {
- unsigned int hart_index;
+ u32 hart_index;
void __iomem *regs;
struct aplic_direct *direct;
};
@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
return 0;
}
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+ u32 *hart_index)
+{
+ const char *prop_hart_index = "riscv,hart-indexes";
+ struct device_node *np = to_of_node(dev->fwnode);
+
+ if (!np || !of_property_present(np, prop_hart_index)) {
+ *hart_index = logical_index;
+ return 0;
+ }
+
+ return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
+}
+
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu);
- idc->hart_index = i;
- idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+ if (rc) {
+ dev_warn(dev, "hart index not found for IDC%d\n", i);
+ continue;
+ }
+ idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
idc->direct = direct;
aplic_idc_set_delivery(idc, true);
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index c5c2e6929a2f..d9ae87808651 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -27,7 +27,7 @@ static void imsic_ipi_send(unsigned int cpu)
{
struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
- writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+ writel(IMSIC_IPI_ID, local->msi_va);
}
static void imsic_ipi_starting_cpu(void)
@@ -73,10 +73,16 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
static void imsic_handle_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- int err, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -97,9 +103,7 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ generic_handle_irq(vec->irq);
}
chained_irq_exit(chip, desc);
@@ -120,7 +124,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
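The IPI send above moves from writel_relaxed() to writel() for ordering: data written for the remote hart must be visible before the MSI store lands. A comment-only sketch of the assumed rationale:

/*
 * WRITE_ONCE(shared->work, w);    payload for the remote hart
 * writel(IMSIC_IPI_ID, msi_va);   barrier + store: payload visible first
 *
 * writel_relaxed() carries no such barrier, so the MSI could overtake
 * the payload writes and the target hart could observe stale data.
 */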
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index c708780e8760..b8ae67c25b37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include "irq-msi-lib.h"
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
@@ -63,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
return 0;
}
+static void imsic_irq_ack(struct irq_data *d)
+{
+ irq_move_irq(d);
+}
+
static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
phys_addr_t msi_addr;
@@ -96,9 +102,23 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
- struct irq_data *pd = d->parent_data;
+ struct imsic_vector tmp_vec;
+
+ /*
+ * Requirements for the downstream irqdomains (or devices):
+ *
+ * 1) Downstream irqdomains (or devices) with atomic MSI update can
+ * happily do imsic_irq_set_affinity() in the process-context on
+ * any CPU so the irqchip of such irqdomains must not set the
+ * IRQCHIP_MOVE_DEFERRED flag.
+ *
+ * 2) Downstream irqdomains (or devices) with non-atomic MSI update
+	 *    must use imsic_irq_set_affinity() in interrupt-context upon
+ * the next device interrupt so the irqchip of such irqdomains
+ * must set the IRQCHIP_MOVE_DEFERRED flag.
+ */
- old_vec = irq_data_get_irq_chip_data(pd);
+ old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
return -ENOENT;
@@ -111,34 +131,95 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
return -EBUSY;
/* Get a new vector on the desired set of CPUs */
- new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ new_vec = imsic_vector_alloc(old_vec->irq, mask_val);
if (!new_vec)
return -ENOSPC;
+ /*
+ * Device having non-atomic MSI update might see an intermediate
+ * state when changing target IMSIC vector from one CPU to another.
+ *
+	 * To avoid losing an interrupt in such an intermediate state, do
+	 * the
+ * following (just like x86 APIC):
+ *
+	 * 1) First write a temporary IMSIC vector to the device, with
+	 *    the same MSI address as the old IMSIC vector but MSI data
+	 *    matching the new IMSIC vector.
+ *
+ * 2) Next write the new IMSIC vector to the device.
+ *
+ * Based on the above, __imsic_local_sync() must check pending
+ * status of both old MSI data and new MSI data on the old CPU.
+ */
+ if (!irq_can_move_in_process_context(d) &&
+ new_vec->local_id != old_vec->local_id) {
+ /* Setup temporary vector */
+ tmp_vec.cpu = old_vec->cpu;
+ tmp_vec.local_id = new_vec->local_id;
+
+ /* Point device to the temporary vector */
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ }
+
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
- pd->chip_data = new_vec;
+ d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */
- irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */
imsic_vector_move(old_vec, new_vec);
return IRQ_SET_MASK_OK_DONE;
}
+
+static void imsic_irq_force_complete_move(struct irq_data *d)
+{
+ struct imsic_vector *mvec, *vec = irq_data_get_irq_chip_data(d);
+ unsigned int cpu = smp_processor_id();
+
+ if (WARN_ON(!vec))
+ return;
+
+ /* Do nothing if there is no in-flight move */
+ mvec = imsic_vector_get_move(vec);
+ if (!mvec)
+ return;
+
+ /* Do nothing if the old IMSIC vector does not belong to current CPU */
+ if (mvec->cpu != cpu)
+ return;
+
+ /*
+ * The best we can do is force cleanup the old IMSIC vector.
+ *
+ * The challenges over here are same as x86 vector domain so
+ * refer to the comments in irq_force_complete_move() function
+ * implemented at arch/x86/kernel/apic/vector.c.
+ */
+
+ /* Force cleanup in-flight move */
+ pr_info("IRQ fixup: irq %d move in progress, old vector cpu %d local_id %d\n",
+ d->irq, mvec->cpu, mvec->local_id);
+ imsic_vector_force_move_cleanup(vec);
+}
#endif
static struct irq_chip imsic_irq_base_chip = {
- .name = "IMSIC",
- .irq_mask = imsic_irq_mask,
- .irq_unmask = imsic_irq_unmask,
- .irq_retrigger = imsic_irq_retrigger,
- .irq_compose_msi_msg = imsic_irq_compose_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+ .irq_force_complete_move = imsic_irq_force_complete_move,
+#endif
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_ack = imsic_irq_ack,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -155,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOSPC;
irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
- handle_simple_irq, NULL, NULL);
+ handle_edge_irq, NULL, NULL);
irq_set_noprobe(virq);
irq_set_affinity(virq, cpu_online_mask);
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
@@ -172,22 +253,6 @@ static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- const struct msi_parent_ops *ops = domain->msi_parent_ops;
- u32 busmask = BIT(bus_token);
-
- if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
- return 0;
-
- /* Handle pure domain searches */
- if (bus_token == ops->bus_select_token)
- return 1;
-
- return !!(ops->bus_select_mask & busmask);
-}
-
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
@@ -204,107 +269,37 @@ static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
static const struct irq_domain_ops imsic_base_domain_ops = {
.alloc = imsic_irq_domain_alloc,
.free = imsic_irq_domain_free,
- .select = imsic_irq_domain_select,
+ .select = msi_lib_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
.debug_show = imsic_irq_debug_show,
#endif
};
-#ifdef CONFIG_RISCV_IMSIC_PCI
-
-static void imsic_pci_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void imsic_pci_unmask_irq(struct irq_data *d)
+static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-
-#else
-
-#define MATCH_PCI_MSI 0
-
-#endif
-
-static bool imsic_init_dev_msi_info(struct device *dev,
- struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info)
-{
- const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
-
- /* MSI parent domain specific settings */
- switch (real_parent->bus_token) {
- case DOMAIN_BUS_NEXUS:
- if (WARN_ON_ONCE(domain != real_parent))
- return false;
-#ifdef CONFIG_SMP
- info->chip->irq_set_affinity = imsic_irq_set_affinity;
-#endif
- break;
- default:
- WARN_ON_ONCE(1);
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
- }
- /* Is the target supported? */
switch (info->bus_token) {
-#ifdef CONFIG_RISCV_IMSIC_PCI
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
- info->chip->irq_mask = imsic_pci_mask_irq;
- info->chip->irq_unmask = imsic_pci_unmask_irq;
- break;
-#endif
- case DOMAIN_BUS_DEVICE_MSI:
- /*
- * Per-device MSI should never have any MSI feature bits
- * set. It's sole purpose is to create a dumb interrupt
- * chip which has a device specific irq_write_msi_msg()
- * callback.
- */
- if (WARN_ON_ONCE(info->flags))
- return false;
-
- /* Core managed MSI descriptors */
- info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
- break;
- case DOMAIN_BUS_WIRED_TO_MSI:
+ info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
break;
default:
- WARN_ON_ONCE(1);
- return false;
+ break;
}
- /* Use hierarchial chip operations re-trigger */
- info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
-
- /*
- * Mask out the domain specific MSI feature flags which are not
- * supported by the real parent.
- */
- info->flags &= pops->supported_flags;
-
- /* Enforce the required flags */
- info->flags |= pops->required_flags;
-
return true;
}
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
static const struct msi_parent_ops imsic_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS,
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.init_dev_msi_info = imsic_init_dev_msi_info,
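To summarize the affinity-move protocol above for devices whose MSI address and data cannot be updated atomically, a comment-only sketch of the sequence (intent only, not additional driver code):

/*
 * Moving an interrupt from (cpu A, id X) to (cpu B, id Y), where the
 * device updates MSI address and data in two separate writes:
 *
 *   1. write temporary vector (cpu A, id Y): only the MSI data changes
 *   2. write final vector     (cpu B, id Y): only the MSI address changes
 *
 * An MSI fired mid-update can only land on (A,X), (A,Y) or (B,Y).
 * __imsic_local_sync() on cpu A checks pending state for both X and Y
 * and retriggers on cpu B via an MMIO write, so nothing is lost in the
 * intermediate state.
 */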
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index b97e6cd89ed7..bdf5cd2037f2 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
- struct imsic_local_config *mlocal;
- struct imsic_vector *vec, *mvec;
+ struct imsic_local_config *tlocal, *mlocal;
+ struct imsic_vector *vec, *tvec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,97 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
- if (__imsic_id_read_clear_pending(i)) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU then we can get a MSI during the movement so check the
+ * ID pending bit and re-trigger the new ID on other CPU using
+ * MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
+ /*
+ * Devices having non-atomic MSI update might see
+ * an intermediate state so check both old ID and
+ * new ID for pending interrupts.
+ *
+ * For details, see imsic_irq_set_affinity().
+ */
+ tvec = vec->local_id == mvec->local_id ?
+ NULL : &lpriv->vectors[mvec->local_id];
+
+ if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) &&
+ __imsic_id_read_clear_pending(tvec->local_id)) {
+ /* Retrigger temporary vector if it was already in-use */
+ if (READ_ONCE(tvec->enable)) {
+ tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
+ writel_relaxed(tvec->local_id, tlocal->msi_va);
+ }
+
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ if (__imsic_id_read_clear_pending(vec->local_id)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
- imsic_vector_free(&lpriv->vectors[i]);
+ WRITE_ONCE(vec->move_next, NULL);
+ imsic_vector_free(vec);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, smp_processor_id());
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv);
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +253,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +274,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv);
}
}
#else
@@ -278,8 +333,26 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ unsigned long flags;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ mvec = READ_ONCE(vec->move_prev);
+ WRITE_ONCE(vec->move_prev, NULL);
+ if (mvec)
+ imsic_vector_free(mvec);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +362,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +398,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -368,7 +444,7 @@ struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int l
return &lpriv->vectors[local_id];
}
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
struct imsic_local_priv *lpriv;
@@ -384,9 +460,10 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
vec = &lpriv->vectors[local_id];
- vec->hwirq = hwirq;
+ vec->irq = irq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
@@ -396,7 +473,7 @@ void imsic_vector_free(struct imsic_vector *vec)
unsigned long flags;
raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}
@@ -455,7 +532,7 @@ static int __init imsic_local_init(void)
vec = &lpriv->vectors[i];
vec->cpu = cpu;
vec->local_id = i;
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
}
}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 391e44280827..3202ffa4e849 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -20,10 +20,11 @@ struct imsic_vector {
unsigned int cpu;
unsigned int local_id;
/* Details saved by driver in the vector */
- unsigned int hwirq;
+ unsigned int irq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,14 +88,15 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
new file mode 100644
index 000000000000..ee682e87eb8b
--- /dev/null
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SG2042 MSI Controller
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
+#define SG2042_MAX_MSI_VECTOR 32
+
+struct sg2042_msi_chipdata {
+ void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+
+ phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
+
+ u32 irq_first; // The vector number where MSIs start
+ u32 num_irqs; // The number of vectors for MSIs
+
+ DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
+ struct mutex msi_map_lock; // lock for msi_map
+};
+
+static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+{
+ int first;
+
+ guard(mutex)(&data->msi_map_lock);
+ first = bitmap_find_free_region(data->msi_map, data->num_irqs,
+ get_count_order(num_req));
+ return first >= 0 ? first : -ENOSPC;
+}
+
+static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+{
+ guard(mutex)(&data->msi_map_lock);
+ bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
+}
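
For reference, bitmap_find_free_region() hands out naturally aligned
power-of-two regions, so a multi-MSI request is rounded up through
get_count_order(). A small userspace model of that rounding:

#include <stdio.h>

static int count_order(unsigned int n)  /* mirrors get_count_order() */
{
        int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        /* a request for 3 MSIs consumes a 4-vector aligned region */
        printf("order(3) = %d -> %u vectors\n", count_order(3), 1u << count_order(3));
        return 0;
}
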
+
+static void sg2042_msi_irq_ack(struct irq_data *d)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ int bit_off = d->hwirq;
+
+ writel(1 << bit_off, data->reg_clr);
+
+ irq_chip_ack_parent(d);
+}
+
+static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = upper_32_bits(data->doorbell_addr);
+ msg->address_lo = lower_32_bits(data->doorbell_addr);
+ msg->data = 1 << d->hwirq;
+}
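
For illustration, this message makes the endpoint write a one-hot vector
mask to the doorbell; the address below is purely hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t doorbell = 0x7030010000ull;    /* hypothetical GP_INTR0_SET address */
        int hwirq = 5;

        /* address_hi:address_lo <- doorbell, data <- BIT(hwirq) */
        printf("addr=%#llx data=%#x\n", (unsigned long long)doorbell, 1u << hwirq);
        return 0;
}
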
+
+static const struct irq_chip sg2042_msi_middle_irq_chip = {
+ .name = "SG2042 MSI",
+ .irq_ack = sg2042_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
+};
+
+static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = data->irq_first + hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &sg2042_msi_middle_irq_chip, data);
+ }
+
+ return 0;
+
+err_hwirq:
+ sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
+ .alloc = sg2042_msi_middle_domain_alloc,
+ .free = sg2042_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
+
+static const struct msi_parent_ops sg2042_msi_parent_ops = {
+ .required_flags = SG2042_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2042-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+ struct irq_domain *plic_domain, struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct irq_domain *middle_domain;
+
+ middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
+ &sg2042_msi_middle_domain_ops, data);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
+
+ return 0;
+}
+
+static int sg2042_msi_probe(struct platform_device *pdev)
+{
+ struct fwnode_reference_args args = { };
+ struct sg2042_msi_chipdata *data;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *plic_domain;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
+ if (IS_ERR(data->reg_clr)) {
+ dev_err(dev, "Failed to map clear register\n");
+ return PTR_ERR(data->reg_clr);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "doorbell");
+ if (!res) {
+ dev_err(dev, "Failed get resource from set\n");
+ return -EINVAL;
+ }
+ data->doorbell_addr = res->start;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges",
+ "#interrupt-cells", 0, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec base\n");
+ return ret;
+ }
+ fwnode_handle_put(args.fwnode);
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges", NULL,
+ args.nargs + 1, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec number\n");
+ return ret;
+ }
+
+ plic_domain = irq_find_matching_fwnode(args.fwnode, DOMAIN_BUS_ANY);
+ fwnode_handle_put(args.fwnode);
+ if (!plic_domain) {
+ pr_err("Failed to find the PLIC domain\n");
+ return -ENXIO;
+ }
+
+ data->irq_first = (u32)args.args[0];
+ data->num_irqs = (u32)args.args[args.nargs - 1];
+
+ mutex_init(&data->msi_map_lock);
+
+ return sg2042_msi_init_domains(data, plic_domain, dev);
+}
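
As a worked example, assuming a hypothetical msi-ranges = <&plic 64
IRQ_TYPE_EDGE_RISING 32>: irq_first is 64 and num_irqs is 32, so hwirq n of
the middle domain maps onto parent (PLIC) interrupt 64 + n:

#include <stdio.h>

int main(void)
{
        unsigned int irq_first = 64, num_irqs = 32;     /* hypothetical DT values */

        /* mirrors fwspec.param[0] = data->irq_first + hwirq above */
        printf("hwirq 0 -> parent irq %u\n", irq_first);
        printf("hwirq %u -> parent irq %u\n", num_irqs - 1, irq_first + num_irqs - 1);
        return 0;
}
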
+
+static const struct of_device_id sg2042_msi_of_match[] = {
+ { .compatible = "sophgo,sg2042-msi" },
+ { }
+};
+
+static struct platform_driver sg2042_msi_driver = {
+ .driver = {
+ .name = "sg2042-msi",
+ .of_match_table = sg2042_msi_of_match,
+ },
+ .probe = sg2042_msi_probe,
+};
+builtin_platform_driver(sg2042_msi_driver);
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index bb92fd85e975..01b0d8321728 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -48,32 +48,41 @@ enum {
SUNXI_SRC_TYPE_EDGE_RISING,
};
-struct sunxi_sc_nmi_reg_offs {
- u32 ctrl;
- u32 pend;
- u32 enable;
+struct sunxi_sc_nmi_data {
+ struct {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+ } reg_offs;
+ u32 enable_val;
};
-static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
- .ctrl = SUN6I_NMI_CTRL,
- .pend = SUN6I_NMI_PENDING,
- .enable = SUN6I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
+ .reg_offs.ctrl = SUN6I_NMI_CTRL,
+ .reg_offs.pend = SUN6I_NMI_PENDING,
+ .reg_offs.enable = SUN6I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
- .ctrl = SUN7I_NMI_CTRL,
- .pend = SUN7I_NMI_PENDING,
- .enable = SUN7I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
+ .reg_offs.ctrl = SUN7I_NMI_CTRL,
+ .reg_offs.pend = SUN7I_NMI_PENDING,
+ .reg_offs.enable = SUN7I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
- .ctrl = SUN9I_NMI_CTRL,
- .pend = SUN9I_NMI_PENDING,
- .enable = SUN9I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
};
-static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
- u32 val)
+static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
+ .enable_val = BIT(31),
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
irq_reg_writel(gc, val, off);
}
@@ -143,15 +152,13 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
}
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
- const struct sunxi_sc_nmi_reg_offs *reg_offs)
+ const struct sunxi_sc_nmi_data *data)
{
- struct irq_domain *domain;
+ unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
- unsigned int irq;
- unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_domain *domain;
int ret;
-
domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
@@ -186,26 +193,28 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
- gc->chip_types[0].regs.ack = reg_offs->pend;
- gc->chip_types[0].regs.mask = reg_offs->enable;
- gc->chip_types[0].regs.type = reg_offs->ctrl;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_SKIP_SET_WAKE;
+ gc->chip_types[0].regs.ack = data->reg_offs.pend;
+ gc->chip_types[0].regs.mask = data->reg_offs.enable;
+ gc->chip_types[0].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[1].regs.ack = reg_offs->pend;
- gc->chip_types[1].regs.mask = reg_offs->enable;
- gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].regs.ack = data->reg_offs.pend;
+ gc->chip_types[1].regs.mask = data->reg_offs.enable;
+ gc->chip_types[1].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].handler = handle_edge_irq;
/* Disable any active interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);
/* Clear any pending NMI interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+ sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);
irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
@@ -220,20 +229,27 @@ fail_irqd_remove:
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
static int __init sun9i_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
+
+static int __init sun55i_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
+}
+IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);
diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
index b0e366ade427..8ff6e7a1363b 100644
--- a/drivers/irqchip/irq-thead-c900-aclint-sswi.c
+++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
@@ -31,7 +31,7 @@ static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
static void thead_aclint_sswi_ipi_send(unsigned int cpu)
{
- writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu));
+ writel(0x1, per_cpu(sswi_cpu_regs, cpu));
}
static void thead_aclint_sswi_ipi_clear(void)
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index b83f5cbab123..a887efba262c 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -743,3 +743,4 @@ module_platform_driver(ti_sci_inta_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index c027cd9e4a69..b49a73106c69 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -303,3 +303,4 @@ module_platform_driver(ti_sci_intr_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index cc219f28d317..960c343d5781 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -52,7 +52,7 @@ static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
- seq_printf(p, "%s", dev_name(&data->pdev->dev));
+ seq_puts(p, dev_name(&data->pdev->dev));
}
static const struct irq_chip ts4800_chip = {
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1eeb0d0156ce..0ee7b6b71f5f 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *par_np = of_irq_find_parent(np);
+ struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
if (!irq_init_cb) {
- of_node_put(par_np);
return -EINVAL;
}
@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller can check for specific domains as necessary.
*/
if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
- of_node_put(par_np);
return -EPROBE_DEFER;
}
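
The __free(device_node) annotation uses the scoped-cleanup helpers from
<linux/cleanup.h>: of_node_put() now runs automatically when par_np goes out
of scope, which is why the explicit puts on the error paths are dropped. A
minimal sketch of the same pattern:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_probe(struct device_node *np)
{
        struct device_node *parent __free(device_node) = of_get_parent(np);

        if (!parent)
                return -ENODEV; /* no explicit of_node_put() needed */

        /* ... use parent; of_node_put() runs at every return ... */
        return 0;
}
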
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 74b2f124116e..52d77546aacb 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,9 +21,11 @@
#include <linux/types.h>
#define PDC_MAX_GPIO_IRQS 256
+#define PDC_DRV_OFFSET 0x10000
/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
+#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS))
#define IRQ_i_CFG 0x110
/* Valid only on HW version >= 3.2 */
@@ -46,13 +48,20 @@ struct pdc_pin_region {
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
+static void __iomem *pdc_prev_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static unsigned int pdc_version;
+static bool pdc_x1e_quirk;
+
+static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val)
+{
+ writel_relaxed(val, base + reg + i * sizeof(u32));
+}
static void pdc_reg_write(int reg, u32 i, u32 val)
{
- writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+ pdc_base_reg_write(pdc_base, reg, i, val);
}
static u32 pdc_reg_read(int reg, u32 i)
@@ -60,6 +69,34 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static void pdc_x1e_irq_enable_write(u32 bank, u32 enable)
+{
+ void __iomem *base;
+
+ /* Remap the write access to work around a hardware bug on X1E */
+ switch (bank) {
+ case 0 ... 1:
+ /* Use previous DRV (client) region and shift to bank 3-4 */
+ base = pdc_prev_base;
+ bank += 3;
+ break;
+ case 2 ... 4:
+ /* Use our own region and shift to bank 0-2 */
+ base = pdc_base;
+ bank -= 2;
+ break;
+ case 5:
+ /* No fixup required for bank 5 */
+ base = pdc_base;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable);
+}
+
static void __pdc_enable_intr(int pin_out, bool on)
{
unsigned long enable;
@@ -72,7 +109,11 @@ static void __pdc_enable_intr(int pin_out, bool on)
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
- pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+
+ if (pdc_x1e_quirk)
+ pdc_x1e_irq_enable_write(index, enable);
+ else
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
} else {
enable = pdc_reg_read(IRQ_i_CFG, pin_out);
__assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
@@ -324,10 +365,29 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
if (res_size > resource_size(&res))
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+ /*
+ * PDC has multiple DRV regions, each one provides the same set of
+ * registers for a particular client in the system. Due to a hardware
+ * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be
+ * issued inside the previous region. This region belongs to
+ * a different client and is not described in the device tree. Map the
+ * region with the expected offset to preserve support for old DTs.
+ */
+ if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) {
+ pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX);
+ if (!pdc_prev_base) {
+ pr_err("%pOF: unable to map previous PDC DRV region\n", node);
+ return -ENXIO;
+ }
+
+ pdc_x1e_quirk = true;
+ }
+
pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
@@ -363,6 +423,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
fail:
kfree(pdc_region);
iounmap(pdc_base);
+ iounmap(pdc_prev_base);
return ret;
}
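
A worked example of where each bank's IRQ_ENABLE_BANK write lands under the
X1E quirk:

#include <stdio.h>

int main(void)
{
        for (unsigned int bank = 0; bank <= 5; bank++) {
                const char *base = "pdc_base";
                unsigned int mapped = bank;

                if (bank <= 1) {        /* shift into the previous DRV region */
                        base = "pdc_prev_base";
                        mapped = bank + 3;
                } else if (bank <= 4) { /* shift down inside our own region */
                        mapped = bank - 2;
                }       /* bank 5 is written where it is */
                printf("bank %u -> %s, bank %u\n", bank, base, mapped);
        }
        return 0;
}
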
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index e34a7a46754e..8ec2d4d4f135 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -294,20 +294,6 @@ get_Bprotocol4mask(u_int m)
return NULL;
}
-struct Bprotocol *
-get_Bprotocol4id(u_int id)
-{
- u_int m;
-
- if (id < ISDN_P_B_START || id > 63) {
- printk(KERN_WARNING "%s id not in range %d\n",
- __func__, id);
- return NULL;
- }
- m = 1 << (id & ISDN_P_B_MASK);
- return get_Bprotocol4mask(m);
-}
-
int
mISDN_register_Bprotocol(struct Bprotocol *bp)
{
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 42599f49c189..5617c06de8e4 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -55,7 +55,6 @@ extern void __add_layer2(struct mISDNchannel *, struct mISDNstack *);
extern u_int get_all_Bprotocols(void);
struct Bprotocol *get_Bprotocol4mask(u_int);
-struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b784bb74a837..2b27d043921c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -217,6 +217,8 @@ config LEDS_TURRIS_OMNIA
depends on I2C
depends on MACH_ARMADA_38X || COMPILE_TEST
depends on OF
+ depends on TURRIS_OMNIA_MCU
+ depends on TURRIS_OMNIA_MCU_GPIO
select LEDS_TRIGGERS
help
This option enables basic support for the LEDs found on the front
@@ -511,6 +513,18 @@ config LEDS_LP8860
on the LP8860 4 channel LED driver using the I2C communication
bus.
+config LEDS_LP8864
+ tristate "LED support for the TI LP8864/LP8866 4/6 channel LED drivers"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI LP8864-Q1,
+ LP8864S-Q1, LP8866-Q1, LP8866S-Q1 4/6 channel LED backlight
+ drivers with I2C interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp8864.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
depends on LEDS_CLASS && BROKEN
@@ -580,6 +594,17 @@ config LEDS_PCA995X
LED driver chips accessed via the I2C bus. Supported
devices include PCA9955BTW, PCA9952TW and PCA9955TW.
+config LEDS_QNAP_MCU
+ tristate "LED Support for QNAP MCU controllers"
+ depends on LEDS_CLASS
+ depends on MFD_QNAP_MCU
+ help
+ This option enables support for LEDs available on embedded
+ controllers used in QNAP NAS devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called qnap-mcu-leds.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
@@ -815,6 +840,15 @@ config LEDS_SC27XX_BLTC
This driver can also be built as a module. If so the module will be
called leds-sc27xx-bltc.
+config LEDS_UPBOARD
+ tristate "LED support for the UP board"
+ depends on LEDS_CLASS && MFD_UPBOARD_FPGA
+ help
+ This option enables support for the UP board LEDs.
+
+ This driver can also be built as a module. If so the module will be
+ called leds-upboard.
+
comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
config LEDS_BLINKM
@@ -931,6 +965,16 @@ config LEDS_LM36274
Say Y to enable the LM36274 LED driver for TI LMU devices.
This supports the LED device LM36274.
+config LEDS_ST1202
+ tristate "LED Support for STMicroelectronics LED1202 I2C chips"
+ depends on LEDS_CLASS
+ depends on I2C
+ depends on OF
+ select LEDS_TRIGGERS
+ help
+ Say Y to enable support for LEDs connected to LED1202
+ LED driver chips accessed via the I2C bus.
+
config LEDS_TPS6105X
tristate "LED support for TI TPS6105X"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 18afbb5a23ee..6ad52e219ec6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
+obj-$(CONFIG_LEDS_LP8864) += leds-lp8864.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_MAX5970) += leds-max5970.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
@@ -79,8 +80,10 @@ obj-$(CONFIG_LEDS_PCA995X) += leds-pca995x.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_QNAP_MCU) += leds-qnap-mcu.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_ST1202) += leds-st1202.o
obj-$(CONFIG_LEDS_SUN50I_A100) += leds-sun50i-a100.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
@@ -89,6 +92,7 @@ obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o
obj-$(CONFIG_LEDS_TURRIS_OMNIA) += leds-turris-omnia.o
+obj-$(CONFIG_LEDS_UPBOARD) += leds-upboard.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 2a04ac61574d..c20ac8ccf52b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -85,13 +85,13 @@ static ssize_t max_brightness_show(struct device *dev,
static DEVICE_ATTR_RO(max_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
-static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
-static struct bin_attribute *led_trigger_bin_attrs[] = {
+static const BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+static const struct bin_attribute *const led_trigger_bin_attrs[] = {
&bin_attr_trigger,
NULL,
};
static const struct attribute_group led_trigger_group = {
- .bin_attrs = led_trigger_bin_attrs,
+ .bin_attrs_new = led_trigger_bin_attrs,
};
#endif
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 78eb20093b2c..b2d40f87a5ff 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -34,7 +34,7 @@ trigger_relevant(struct led_classdev *led_cdev, struct led_trigger *trig)
}
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -123,7 +123,7 @@ static int led_trigger_format(char *buf, size_t size,
* copy it.
*/
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c
index 8246f048edcb..9a609dd5acdc 100644
--- a/drivers/leds/leds-cht-wcove.c
+++ b/drivers/leds/leds-cht-wcove.c
@@ -394,7 +394,7 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
led->cdev.pattern_clear = cht_wc_leds_pattern_clear;
led->cdev.max_brightness = 255;
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
}
@@ -406,10 +406,6 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
static void cht_wc_leds_remove(struct platform_device *pdev)
{
struct cht_wc_leds *leds = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < CHT_WC_LED_COUNT; i++)
- led_classdev_unregister(&leds->leds[i].cdev);
/* Restore LED1 regs if hw-control was active else leave LED1 off */
if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 7a136fd81720..06196d851ade 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -265,7 +265,7 @@ static int lp8860_init(struct lp8860_led *led)
goto out;
}
- reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs) / sizeof(lp8860_eeprom_disp_regs[0]);
+ reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
for (i = 0; i < reg_count; i++) {
ret = regmap_write(led->eeprom_regmap,
lp8860_eeprom_disp_regs[i].reg,
diff --git a/drivers/leds/leds-lp8864.c b/drivers/leds/leds-lp8864.c
new file mode 100644
index 000000000000..3afd729d2f8a
--- /dev/null
+++ b/drivers/leds/leds-lp8864.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI LP8864/LP8866 4/6 Channel LED Driver
+ *
+ * Copyright (C) 2024 Siemens AG
+ *
+ * Based on LP8860 driver by Dan Murphy <dmurphy@ti.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define LP8864_BRT_CONTROL 0x00
+#define LP8864_USER_CONFIG1 0x04
+#define LP8864_BRT_MODE_MASK GENMASK(9, 8)
+#define LP8864_BRT_MODE_REG BIT(9) /* Brightness control by DISPLAY_BRT reg */
+#define LP8864_SUPPLY_STATUS 0x0e
+#define LP8864_BOOST_STATUS 0x10
+#define LP8864_LED_STATUS 0x12
+#define LP8864_LED_STATUS_WR_MASK GENMASK(14, 9) /* Writeable bits in the LED_STATUS reg */
+
+/* Textual meaning for status bits, starting from bit 1 */
+static const char *const lp8864_supply_status_msg[] = {
+ "Vin under-voltage fault",
+ "Vin over-voltage fault",
+ "Vdd under-voltage fault",
+ "Vin over-current fault",
+ "Missing charge pump fault",
+ "Charge pump fault",
+ "Missing boost sync fault",
+ "CRC error fault ",
+};
+
+/* Textual meaning for status bits, starting from bit 1 */
+static const char *const lp8864_boost_status_msg[] = {
+ "Boost OVP low fault",
+ "Boost OVP high fault",
+ "Boost over-current fault",
+ "Missing boost FSET resistor fault",
+ "Missing MODE SEL resistor fault",
+ "Missing LED resistor fault",
+ "ISET resistor short to ground fault",
+ "Thermal shutdown fault",
+};
+
+/* Textual meaning for every register bit */
+static const char *const lp8864_led_status_msg[] = {
+ "LED 1 fault",
+ "LED 2 fault",
+ "LED 3 fault",
+ "LED 4 fault",
+ "LED 5 fault",
+ "LED 6 fault",
+ "LED open fault",
+ "LED internal short fault",
+ "LED short to GND fault",
+ NULL, NULL, NULL,
+ "Invalid string configuration fault",
+ NULL,
+ "I2C time out fault",
+};
+
+/**
+ * struct lp8864_led
+ * @client: Pointer to the I2C client
+ * @led_dev: led class device pointer
+ * @regmap: Device's register map
+ * @led_status_mask: Helps to report LED fault only once
+ */
+struct lp8864_led {
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ u16 led_status_mask;
+};
+
+static int lp8864_fault_check(struct lp8864_led *led)
+{
+ int ret, i;
+ unsigned int val;
+
+ ret = regmap_read(led->regmap, LP8864_SUPPLY_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /* Odd bits are status bits, even bits are clear bits */
+ for (i = 0; i < ARRAY_SIZE(lp8864_supply_status_msg); i++)
+ if (val & BIT(i * 2 + 1))
+ dev_warn(&led->client->dev, "%s\n", lp8864_supply_status_msg[i]);
+
+ /*
+ * Clear bits have an index preceding the corresponding Status bits;
+ * both have to be written "1" simultaneously to clear the corresponding
+ * Status bit.
+ */
+ if (val)
+ ret = regmap_write(led->regmap, LP8864_SUPPLY_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(led->regmap, LP8864_BOOST_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /* Odd bits are status bits, even bits are clear bits */
+ for (i = 0; i < ARRAY_SIZE(lp8864_boost_status_msg); i++)
+ if (val & BIT(i * 2 + 1))
+ dev_warn(&led->client->dev, "%s\n", lp8864_boost_status_msg[i]);
+
+ if (val)
+ ret = regmap_write(led->regmap, LP8864_BOOST_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(led->regmap, LP8864_LED_STATUS, &val);
+ if (ret)
+ goto err;
+
+ /*
+ * Clear already reported faults that maintain their value until device
+ * power-down
+ */
+ val &= ~led->led_status_mask;
+
+ for (i = 0; i < ARRAY_SIZE(lp8864_led_status_msg); i++)
+ if (lp8864_led_status_msg[i] && val & BIT(i))
+ dev_warn(&led->client->dev, "%s\n", lp8864_led_status_msg[i]);
+
+ /*
+ * Mark those which maintain their value until device power-down as
+ * "already reported"
+ */
+ led->led_status_mask |= val & ~LP8864_LED_STATUS_WR_MASK;
+
+ /*
+ * Only bits 14, 12, 10 have to be cleared here; the others are RO,
+ * so it does not matter what we write to them.
+ */
+ if (val & LP8864_LED_STATUS_WR_MASK)
+ ret = regmap_write(led->regmap, LP8864_LED_STATUS, val >> 1 | val);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&led->client->dev, "Failed to read/clear faults (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
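
Each fault occupies an odd status bit whose clear bit sits in the even bit
just below it, so writing val >> 1 | val raises every latched status bit
together with its clear bit in one go. A small model of that trick:

#include <stdio.h>

int main(void)
{
        unsigned int val = (1u << 3) | (1u << 7);       /* two faults latched */

        /* sets bits 2,3 and 6,7: each status bit plus its clear bit */
        printf("write %#x to clear %#x\n", val >> 1 | val, val);
        return 0;
}
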
+
+static int lp8864_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lp8864_led *led = container_of(led_cdev, struct lp8864_led, led_dev);
+ /* Scale 0..LED_FULL into 16-bit HW brightness */
+ unsigned int val = brt_val * 0xffff / LED_FULL;
+ int ret;
+
+ ret = lp8864_fault_check(led);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(led->regmap, LP8864_BRT_CONTROL, val);
+ if (ret)
+ dev_err(&led->client->dev, "Failed to write brightness value\n");
+
+ return ret;
+}
+
+static enum led_brightness lp8864_brightness_get(struct led_classdev *led_cdev)
+{
+ struct lp8864_led *led = container_of(led_cdev, struct lp8864_led, led_dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(led->regmap, LP8864_BRT_CONTROL, &val);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to read brightness value\n");
+ return ret;
+ }
+
+ /* Scale 16-bit HW brightness into 0..LED_FULL */
+ return val * LED_FULL / 0xffff;
+}
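
Since LED_FULL is 255 and 0xffff == 255 * 257, the 8-bit to 16-bit scaling
above round-trips exactly; a quick check:

#include <stdio.h>

int main(void)
{
        for (unsigned int brt = 0; brt <= 255; brt++) {
                unsigned int hw = brt * 0xffff / 255;   /* set path */
                unsigned int back = hw * 255 / 0xffff;  /* get path */

                if (back != brt)
                        printf("mismatch at %u\n", brt);        /* never hit */
        }
        return 0;
}
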
+
+static const struct regmap_config lp8864_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static void lp8864_disable_gpio(void *data)
+{
+ struct gpio_desc *gpio = data;
+
+ gpiod_set_value(gpio, 0);
+}
+
+static int lp8864_probe(struct i2c_client *client)
+{
+ int ret;
+ struct lp8864_led *led;
+ struct device_node *np = dev_of_node(&client->dev);
+ struct device_node *child_node;
+ struct led_init_data init_data = {};
+ struct gpio_desc *enable_gpio;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ child_node = of_get_next_available_child(np, NULL);
+ if (!child_node) {
+ dev_err(&client->dev, "No LED function defined\n");
+ return -EINVAL;
+ }
+
+ ret = devm_regulator_get_enable_optional(&client->dev, "vled");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&client->dev, ret, "Failed to enable vled regulator\n");
+
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ ret = devm_add_action_or_reset(&client->dev, lp8864_disable_gpio, enable_gpio);
+ if (ret)
+ return ret;
+
+ led->client = client;
+ led->led_dev.brightness_set_blocking = lp8864_brightness_set;
+ led->led_dev.brightness_get = lp8864_brightness_get;
+
+ led->regmap = devm_regmap_init_i2c(client, &lp8864_regmap_config);
+ if (IS_ERR(led->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(led->regmap),
+ "Failed to allocate regmap\n");
+
+ /* Control brightness by DISPLAY_BRT register */
+ ret = regmap_update_bits(led->regmap, LP8864_USER_CONFIG1, LP8864_BRT_MODE_MASK,
+ LP8864_BRT_MODE_REG);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to set brightness control mode\n");
+ return ret;
+ }
+
+ ret = lp8864_fault_check(led);
+ if (ret)
+ return ret;
+
+ init_data.fwnode = of_fwnode_handle(child_node);
+ init_data.devicename = "lp8864";
+ init_data.default_label = ":display_cluster";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_dev, &init_data);
+ if (ret)
+ dev_err(&client->dev, "Failed to register LED device (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
+
+static const struct i2c_device_id lp8864_id[] = {
+ { "lp8864" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lp8864_id);
+
+static const struct of_device_id of_lp8864_leds_match[] = {
+ { .compatible = "ti,lp8864" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_lp8864_leds_match);
+
+static struct i2c_driver lp8864_driver = {
+ .driver = {
+ .name = "lp8864",
+ .of_match_table = of_lp8864_leds_match,
+ },
+ .probe = lp8864_probe,
+ .id_table = lp8864_id,
+};
+module_i2c_driver(lp8864_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8864/LP8866 LED driver");
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@siemens.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index af5a908b8d9e..e95287416ef8 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -439,6 +439,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
if (!gpio_ext_pdev) {
+ of_node_put(gpio_ext_np);
dev_err(dev, "Failed to find platform device for gpio-ext\n");
return -ENODEV;
}
diff --git a/drivers/leds/leds-qnap-mcu.c b/drivers/leds/leds-qnap-mcu.c
new file mode 100644
index 000000000000..4e4709456261
--- /dev/null
+++ b/drivers/leds/leds-qnap-mcu.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for LEDs found on QNAP MCU devices
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/leds.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+enum qnap_mcu_err_led_mode {
+ QNAP_MCU_ERR_LED_ON = 0,
+ QNAP_MCU_ERR_LED_OFF = 1,
+ QNAP_MCU_ERR_LED_BLINK_FAST = 2,
+ QNAP_MCU_ERR_LED_BLINK_SLOW = 3,
+};
+
+struct qnap_mcu_err_led {
+ struct qnap_mcu *mcu;
+ struct led_classdev cdev;
+ char name[LED_MAX_NAME_SIZE];
+ u8 num;
+ u8 mode;
+};
+
+static inline struct qnap_mcu_err_led *
+ cdev_to_qnap_mcu_err_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_err_led, cdev);
+}
+
+static int qnap_mcu_err_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_err_led *err_led = cdev_to_qnap_mcu_err_led(led_cdev);
+ u8 cmd[] = { '@', 'R', '0' + err_led->num, '0' };
+
+ /* Don't disturb a possible set blink-mode if LED stays on */
+ if (brightness != 0 && err_led->mode >= QNAP_MCU_ERR_LED_BLINK_FAST)
+ return 0;
+
+ err_led->mode = brightness ? QNAP_MCU_ERR_LED_ON : QNAP_MCU_ERR_LED_OFF;
+ cmd[3] = '0' + err_led->mode;
+
+ return qnap_mcu_exec_with_ack(err_led->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_err_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_err_led *err_led = cdev_to_qnap_mcu_err_led(led_cdev);
+ u8 cmd[] = { '@', 'R', '0' + err_led->num, '0' };
+
+ /* LED is off, nothing to do */
+ if (err_led->mode == QNAP_MCU_ERR_LED_OFF)
+ return 0;
+
+ if (*delay_on < 500) {
+ *delay_on = 100;
+ *delay_off = 100;
+ err_led->mode = QNAP_MCU_ERR_LED_BLINK_FAST;
+ } else {
+ *delay_on = 500;
+ *delay_off = 500;
+ err_led->mode = QNAP_MCU_ERR_LED_BLINK_SLOW;
+ }
+
+ cmd[3] = '0' + err_led->mode;
+
+ return qnap_mcu_exec_with_ack(err_led->mcu, cmd, sizeof(cmd));
+}
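
The MCU commands are short ASCII strings; for error LED 2 in slow-blink mode
the bytes built above come out as "@R23". A tiny model:

#include <stdio.h>

int main(void)
{
        int num = 2, mode = 3;  /* QNAP_MCU_ERR_LED_BLINK_SLOW */
        char cmd[] = { '@', 'R', '0' + num, '0' + mode };

        printf("%.4s\n", cmd);  /* prints "@R23" */
        return 0;
}
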
+
+static int qnap_mcu_register_err_led(struct device *dev, struct qnap_mcu *mcu, int num_err_led)
+{
+ struct qnap_mcu_err_led *err_led;
+ int ret;
+
+ err_led = devm_kzalloc(dev, sizeof(*err_led), GFP_KERNEL);
+ if (!err_led)
+ return -ENOMEM;
+
+ err_led->mcu = mcu;
+ err_led->num = num_err_led;
+ err_led->mode = QNAP_MCU_ERR_LED_OFF;
+
+ scnprintf(err_led->name, LED_MAX_NAME_SIZE, "hdd%d:red:status", num_err_led + 1);
+ err_led->cdev.name = err_led->name;
+
+ err_led->cdev.brightness_set_blocking = qnap_mcu_err_led_set;
+ err_led->cdev.blink_set = qnap_mcu_err_led_blink_set;
+ err_led->cdev.brightness = 0;
+ err_led->cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &err_led->cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_err_led_set(&err_led->cdev, 0);
+}
+
+enum qnap_mcu_usb_led_mode {
+ QNAP_MCU_USB_LED_ON = 1,
+ QNAP_MCU_USB_LED_OFF = 3,
+ QNAP_MCU_USB_LED_BLINK = 2,
+};
+
+struct qnap_mcu_usb_led {
+ struct qnap_mcu *mcu;
+ struct led_classdev cdev;
+ u8 mode;
+};
+
+static inline struct qnap_mcu_usb_led *
+ cdev_to_qnap_mcu_usb_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_usb_led, cdev);
+}
+
+static int qnap_mcu_usb_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_usb_led *usb_led = cdev_to_qnap_mcu_usb_led(led_cdev);
+ u8 cmd[] = { '@', 'C', 0 };
+
+ /* Don't disturb a possible set blink-mode if LED stays on */
+ if (brightness != 0 && usb_led->mode == QNAP_MCU_USB_LED_BLINK)
+ return 0;
+
+ usb_led->mode = brightness ? QNAP_MCU_USB_LED_ON : QNAP_MCU_USB_LED_OFF;
+
+ /*
+ * Byte 3 is shared between the USB LED target on/off/blink
+ * and also the buzzer control (in the input driver)
+ */
+ cmd[2] = 'D' + usb_led->mode;
+
+ return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_usb_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_usb_led *usb_led = cdev_to_qnap_mcu_usb_led(led_cdev);
+ u8 cmd[] = { '@', 'C', 0 };
+
+ /* LED is off, nothing to do */
+ if (usb_led->mode == QNAP_MCU_USB_LED_OFF)
+ return 0;
+
+ *delay_on = 250;
+ *delay_off = 250;
+ usb_led->mode = QNAP_MCU_USB_LED_BLINK;
+
+ /*
+ * Byte 3 is shared between the USB LED target on/off/blink
+ * and also the buzzer control (in the input driver)
+ */
+ cmd[2] = 'D' + usb_led->mode;
+
+ return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_register_usb_led(struct device *dev, struct qnap_mcu *mcu)
+{
+ struct qnap_mcu_usb_led *usb_led;
+ int ret;
+
+ usb_led = devm_kzalloc(dev, sizeof(*usb_led), GFP_KERNEL);
+ if (!usb_led)
+ return -ENOMEM;
+
+ usb_led->mcu = mcu;
+ usb_led->mode = QNAP_MCU_USB_LED_OFF;
+ usb_led->cdev.name = "usb:blue:disk";
+ usb_led->cdev.brightness_set_blocking = qnap_mcu_usb_led_set;
+ usb_led->cdev.blink_set = qnap_mcu_usb_led_blink_set;
+ usb_led->cdev.brightness = 0;
+ usb_led->cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &usb_led->cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_usb_led_set(&usb_led->cdev, 0);
+}
+
+static int qnap_mcu_leds_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ const struct qnap_mcu_variant *variant = pdev->dev.platform_data;
+ int ret;
+
+ for (int i = 0; i < variant->num_drives; i++) {
+ ret = qnap_mcu_register_err_led(&pdev->dev, mcu, i);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register error LED %d\n", i);
+ }
+
+ if (variant->usb_led) {
+ ret = qnap_mcu_register_usb_led(&pdev->dev, mcu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register USB LED\n");
+ }
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_leds_driver = {
+ .probe = qnap_mcu_leds_probe,
+ .driver = {
+ .name = "qnap-mcu-leds",
+ },
+};
+module_platform_driver(qnap_mcu_leds_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-leds");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU LEDs driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
new file mode 100644
index 000000000000..e894b3f9a0f4
--- /dev/null
+++ b/drivers/leds/leds-st1202.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LED driver for STMicroelectronics LED1202 chip
+ *
+ * Copyright (C) 2024 Remote-Tech Ltd. UK
+ */
+
+#include <linux/cleanup.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define ST1202_CHAN_DISABLE_ALL 0x00
+#define ST1202_CHAN_ENABLE_HIGH 0x03
+#define ST1202_CHAN_ENABLE_LOW 0x02
+#define ST1202_CONFIG_REG 0x04
+/* PATS: Pattern sequence feature enable */
+#define ST1202_CONFIG_REG_PATS BIT(7)
+/* PATSR: Pattern sequence runs (self-clear when sequence is finished) */
+#define ST1202_CONFIG_REG_PATSR BIT(6)
+#define ST1202_CONFIG_REG_SHFT BIT(3)
+#define ST1202_DEV_ENABLE 0x01
+#define ST1202_DEV_ENABLE_ON BIT(0)
+#define ST1202_DEV_ENABLE_RESET BIT(7)
+#define ST1202_DEVICE_ID 0x00
+#define ST1202_ILED_REG0 0x09
+#define ST1202_MAX_LEDS 12
+#define ST1202_MAX_PATTERNS 8
+#define ST1202_MILLIS_PATTERN_DUR_MAX 5660
+#define ST1202_MILLIS_PATTERN_DUR_MIN 22
+#define ST1202_PATTERN_DUR 0x16
+#define ST1202_PATTERN_PWM 0x1E
+#define ST1202_PATTERN_REP 0x15
+
+struct st1202_led {
+ struct fwnode_handle *fwnode;
+ struct led_classdev led_cdev;
+ struct st1202_chip *chip;
+ bool is_active;
+ int led_num;
+};
+
+struct st1202_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct st1202_led leds[ST1202_MAX_LEDS];
+};
+
+static struct st1202_led *cdev_to_st1202_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct st1202_led, led_cdev);
+}
+
+static int st1202_read_reg(struct st1202_chip *chip, int reg, uint8_t *val)
+{
+ struct device *dev = &chip->client->dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read register [0x%x]: %d\n", reg, ret);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ return 0;
+}
+
+static int st1202_write_reg(struct st1202_chip *chip, int reg, uint8_t val)
+{
+ struct device *dev = &chip->client->dev;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ if (ret != 0)
+ dev_err(dev, "Failed to write %d to register [0x%x]: %d\n", val, reg, ret);
+
+ return ret;
+}
+
+static uint8_t st1202_prescaler_to_milliseconds(unsigned int value)
+{
+ return value / ST1202_MILLIS_PATTERN_DUR_MIN - 1;
+}
+
+static int st1202_pwm_pattern_write(struct st1202_chip *chip, int led_num,
+ int pattern, unsigned int value)
+{
+ u8 value_l, value_h;
+ int ret;
+
+ value_l = (u8)value;
+ value_h = (u8)(value >> 8);
+
+ /*
+ * Datasheet: Register address low = 1Eh + 2*(xh) + 18h*(yh),
+ * where x is the channel number (led number) in hexadecimal (x = 00h .. 0Bh)
+ * and y is the pattern number in hexadecimal (y = 00h .. 07h)
+ */
+ ret = st1202_write_reg(chip, (ST1202_PATTERN_PWM + (led_num * 2) + 0x18 * pattern),
+ value_l);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Datasheet: Register address high = 1Eh + 01h + 2(xh) +18h*(yh),
+ * where x is the channel number in hexadecimal (x = 00h .. 0Bh)
+ * and y is the pattern number in hexadecimal (y = 00h .. 07h)
+ */
+ ret = st1202_write_reg(chip, (ST1202_PATTERN_PWM + 0x1 + (led_num * 2) + 0x18 * pattern),
+ value_h);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
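
Plugging the datasheet formula in for channel x = 2 and pattern y = 1 gives
register 0x3a for the low byte and 0x3b for the high byte:

#include <stdio.h>

int main(void)
{
        unsigned int led = 2, pattern = 1;
        unsigned int lo = 0x1e + 2 * led + 0x18 * pattern;

        printf("low=%#x high=%#x\n", lo, lo + 1);       /* 0x3a / 0x3b */
        return 0;
}
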
+
+static int st1202_duration_pattern_write(struct st1202_chip *chip, int pattern,
+ unsigned int value)
+{
+ return st1202_write_reg(chip, (ST1202_PATTERN_DUR + pattern),
+ st1202_prescaler_to_milliseconds(value));
+}
+
+static void st1202_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct st1202_led *led = cdev_to_st1202_led(led_cdev);
+ struct st1202_chip *chip = led->chip;
+
+ guard(mutex)(&chip->lock);
+
+ st1202_write_reg(chip, ST1202_ILED_REG0 + led->led_num, value);
+}
+
+static enum led_brightness st1202_brightness_get(struct led_classdev *led_cdev)
+{
+ struct st1202_led *led = cdev_to_st1202_led(led_cdev);
+ struct st1202_chip *chip = led->chip;
+ u8 value = 0;
+
+ guard(mutex)(&chip->lock);
+
+ st1202_read_reg(chip, ST1202_ILED_REG0 + led->led_num, &value);
+
+ return value;
+}
+
+static int st1202_channel_set(struct st1202_chip *chip, int led_num, bool active)
+{
+ u8 chan_low, chan_high;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ if (led_num <= 7) {
+ ret = st1202_read_reg(chip, ST1202_CHAN_ENABLE_LOW, &chan_low);
+ if (ret < 0)
+ return ret;
+
+ chan_low = active ? chan_low | BIT(led_num) : chan_low & ~BIT(led_num);
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_LOW, chan_low);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ ret = st1202_read_reg(chip, ST1202_CHAN_ENABLE_HIGH, &chan_high);
+ if (ret < 0)
+ return ret;
+
+ chan_high = active ? chan_high | (BIT(led_num) >> 8) :
+ chan_high & ~(BIT(led_num) >> 8);
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_HIGH, chan_high);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
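
Channels 8..11 land in the high enable register; e.g. channel 9 becomes
bit 1 there:

#include <stdio.h>

int main(void)
{
        unsigned int led_num = 9;

        /* mirrors BIT(led_num) >> 8 above */
        printf("high-reg bit mask: %#x\n", (1u << led_num) >> 8);       /* 0x2 */
        return 0;
}
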
+
+static int st1202_led_set(struct led_classdev *ldev, enum led_brightness value)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+
+ return st1202_channel_set(chip, led->led_num, value == LED_OFF ? false : true);
+}
+
+static int st1202_led_pattern_clear(struct led_classdev *ldev)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ for (int patt = 0; patt < ST1202_MAX_PATTERNS; patt++) {
+ ret = st1202_pwm_pattern_write(chip, led->led_num, patt, LED_OFF);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_duration_pattern_write(chip, patt, ST1202_MILLIS_PATTERN_DUR_MIN);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int st1202_led_pattern_set(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct st1202_led *led = cdev_to_st1202_led(ldev);
+ struct st1202_chip *chip = led->chip;
+ int ret;
+
+ if (len > ST1202_MAX_PATTERNS)
+ return -EINVAL;
+
+ guard(mutex)(&chip->lock);
+
+ for (int patt = 0; patt < len; patt++) {
+ if (pattern[patt].delta_t < ST1202_MILLIS_PATTERN_DUR_MIN ||
+ pattern[patt].delta_t > ST1202_MILLIS_PATTERN_DUR_MAX)
+ return -EINVAL;
+
+ ret = st1202_pwm_pattern_write(chip, led->led_num, patt, pattern[patt].brightness);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_duration_pattern_write(chip, patt, pattern[patt].delta_t);
+ if (ret != 0)
+ return ret;
+ }
+
+ ret = st1202_write_reg(chip, ST1202_PATTERN_REP, repeat);
+ if (ret != 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG, (ST1202_CONFIG_REG_PATSR |
+ ST1202_CONFIG_REG_PATS | ST1202_CONFIG_REG_SHFT));
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int st1202_dt_init(struct st1202_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+ struct st1202_led *led;
+ int err, reg;
+
+ for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
+ err = of_property_read_u32(child, "reg", &reg);
+ if (err)
+ return dev_err_probe(dev, err, "Invalid register\n");
+
+ led = &chip->leds[reg];
+ led->is_active = true;
+ led->fwnode = of_fwnode_handle(child);
+
+ led->led_cdev.max_brightness = U8_MAX;
+ led->led_cdev.brightness_set_blocking = st1202_led_set;
+ led->led_cdev.pattern_set = st1202_led_pattern_set;
+ led->led_cdev.pattern_clear = st1202_led_pattern_clear;
+ led->led_cdev.default_trigger = "pattern";
+ led->led_cdev.brightness_set = st1202_brightness_set;
+ led->led_cdev.brightness_get = st1202_brightness_get;
+ }
+
+ return 0;
+}
+
+static int st1202_setup(struct st1202_chip *chip)
+{
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ /*
+ * Once the supply voltage is applied, the LED1202 executes some internal checks,
+ * afterwards it stops the oscillator and puts the internal LDO in quiescent mode.
+ * To start the device, the EN bit must be set inside the “Device Enable” register at
+ * address 01h. As soon as EN is set, the LED1202 loads the adjustment parameters
+ * from the internal non-volatile memory and performs an auto-calibration procedure
+ * in order to increase the output current precision.
+ * Such initialization lasts about 6.5 ms.
+ */
+
+ /* Reset the chip during setup */
+ ret = st1202_write_reg(chip, ST1202_DEV_ENABLE, ST1202_DEV_ENABLE_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Enable phase-shift delay feature */
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG, ST1202_CONFIG_REG_SHFT);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the device */
+ ret = st1202_write_reg(chip, ST1202_DEV_ENABLE, ST1202_DEV_ENABLE_ON);
+ if (ret < 0)
+ return ret;
+
+ /* Duration of initialization */
+ usleep_range(6500, 10000);
+
+ /* Deactivate all LEDS (channels) and activate only the ones found in Device Tree */
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_LOW, ST1202_CHAN_DISABLE_ALL);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CHAN_ENABLE_HIGH, ST1202_CHAN_DISABLE_ALL);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_write_reg(chip, ST1202_CONFIG_REG,
+ ST1202_CONFIG_REG_PATS | ST1202_CONFIG_REG_PATSR);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int st1202_probe(struct i2c_client *client)
+{
+ struct st1202_chip *chip;
+ struct st1202_led *led;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return dev_err_probe(&client->dev, -EIO, "SMBUS Byte Data not Supported\n");
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ devm_mutex_init(&client->dev, &chip->lock);
+ chip->client = client;
+
+ ret = st1202_dt_init(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = st1202_setup(chip);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < ST1202_MAX_LEDS; i++) {
+ struct led_init_data init_data = {};
+ led = &chip->leds[i];
+ led->chip = chip;
+ led->led_num = i;
+
+ if (!led->is_active)
+ continue;
+
+ ret = st1202_channel_set(led->chip, led->led_num, true);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to activate LED channel\n");
+
+ ret = st1202_led_pattern_clear(&led->led_cdev);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to clear LED pattern\n");
+
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "st1202";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_cdev, &init_data);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register LED class device\n");
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id st1202_id[] = {
+ { "st1202-i2c" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, st1202_id);
+
+static const struct of_device_id st1202_dt_ids[] = {
+ { .compatible = "st,led1202" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st1202_dt_ids);
+
+static struct i2c_driver st1202_driver = {
+ .driver = {
+ .name = "leds-st1202",
+ .of_match_table = of_match_ptr(st1202_dt_ids),
+ },
+ .probe = st1202_probe,
+ .id_table = st1202_id,
+};
+module_i2c_driver(st1202_driver);
+
+MODULE_AUTHOR("Remote Tech LTD");
+MODULE_DESCRIPTION("STMicroelectronics LED1202 : 12-channel constant current LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 2de825ac08b3..4fe1a9c0bc1b 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -2,7 +2,7 @@
/*
* CZ.NIC's Turris Omnia LEDs driver
*
- * 2020, 2023 by Marek Behún <kabel@kernel.org>
+ * 2020, 2023, 2024 by Marek Behún <kabel@kernel.org>
*/
#include <linux/i2c.h>
@@ -10,35 +10,23 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/turris-omnia-mcu-interface.h>
#define OMNIA_BOARD_LEDS 12
#define OMNIA_LED_NUM_CHANNELS 3
-/* MCU controller commands at I2C address 0x2a */
-#define OMNIA_MCU_I2C_ADDR 0x2a
-
-#define CMD_GET_STATUS_WORD 0x01
-#define STS_FEATURES_SUPPORTED BIT(2)
-
-#define CMD_GET_FEATURES 0x10
-#define FEAT_LED_GAMMA_CORRECTION BIT(5)
-
-/* LED controller commands at I2C address 0x2b */
-#define CMD_LED_MODE 0x03
-#define CMD_LED_MODE_LED(l) ((l) & 0x0f)
-#define CMD_LED_MODE_USER 0x10
-
-#define CMD_LED_STATE 0x04
-#define CMD_LED_STATE_LED(l) ((l) & 0x0f)
-#define CMD_LED_STATE_ON 0x10
-
-#define CMD_LED_COLOR 0x05
-#define CMD_LED_SET_BRIGHTNESS 0x07
-#define CMD_LED_GET_BRIGHTNESS 0x08
-
-#define CMD_SET_GAMMA_CORRECTION 0x30
-#define CMD_GET_GAMMA_CORRECTION 0x31
-
+/* MCU controller I2C address 0x2a, needed for detecting MCU features */
+#define OMNIA_MCU_I2C_ADDR 0x2a
+
+/**
+ * struct omnia_led - per-LED part of driver private data structure
+ * @mc_cdev: multi-color LED class device
+ * @subled_info: per-channel information
+ * @cached_channels: cached values of per-channel brightness that were sent to the MCU
+ * @on: whether the LED was set on
+ * @hwtrig: whether the LED blinking was offloaded to the MCU
+ * @reg: LED identifier to the MCU
+ */
struct omnia_led {
struct led_classdev_mc mc_cdev;
struct mc_subled subled_info[OMNIA_LED_NUM_CHANNELS];
@@ -49,73 +37,38 @@ struct omnia_led {
#define to_omnia_led(l) container_of(l, struct omnia_led, mc_cdev)
+/**
+ * struct omnia_leds - driver private data structure
+ * @client: I2C client device
+ * @lock: mutex to protect cached state
+ * @has_gamma_correction: whether the MCU firmware supports gamma correction
+ * @brightness_knode: kernfs node of the "brightness" device sysfs attribute (this is the
+ * driver-specific global brightness, not the LED classdev brightness)
+ * @leds: flexible array of per-LED data
+ */
struct omnia_leds {
struct i2c_client *client;
struct mutex lock;
bool has_gamma_correction;
+ struct kernfs_node *brightness_knode;
struct omnia_led leds[];
};
-static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
-{
- u8 buf[2] = { cmd, val };
- int ret;
-
- ret = i2c_master_send(client, buf, sizeof(buf));
-
- return ret < 0 ? ret : 0;
-}
-
-static int omnia_cmd_read_raw(struct i2c_adapter *adapter, u8 addr, u8 cmd,
- void *reply, size_t len)
-{
- struct i2c_msg msgs[2];
- int ret;
-
- msgs[0].addr = addr;
- msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &cmd;
- msgs[1].addr = addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = reply;
-
- ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
- if (likely(ret == ARRAY_SIZE(msgs)))
- return 0;
- else if (ret < 0)
- return ret;
- else
- return -EIO;
-}
-
-static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
+static int omnia_cmd_set_color(const struct i2c_client *client, u8 led, u8 r, u8 g, u8 b)
{
- u8 reply;
- int err;
+ u8 buf[5] = { OMNIA_CMD_LED_COLOR, led, r, g, b };
- err = omnia_cmd_read_raw(client->adapter, client->addr, cmd, &reply, 1);
- if (err)
- return err;
-
- return reply;
+ return omnia_cmd_write(client, buf, sizeof(buf));
}
static int omnia_led_send_color_cmd(const struct i2c_client *client,
struct omnia_led *led)
{
- char cmd[5];
int ret;
- cmd[0] = CMD_LED_COLOR;
- cmd[1] = led->reg;
- cmd[2] = led->subled_info[0].brightness;
- cmd[3] = led->subled_info[1].brightness;
- cmd[4] = led->subled_info[2].brightness;
-
/* Send the color change command */
- ret = i2c_master_send(client, cmd, 5);
+ ret = omnia_cmd_set_color(client, led->reg, led->subled_info[0].brightness,
+ led->subled_info[1].brightness, led->subled_info[2].brightness);
if (ret < 0)
return ret;
@@ -170,12 +123,12 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
* is not being blinked by HW.
*/
if (!err && !led->hwtrig && !brightness != !led->on) {
- u8 state = CMD_LED_STATE_LED(led->reg);
+ u8 state = OMNIA_CMD_LED_STATE_LED(led->reg);
if (brightness)
- state |= CMD_LED_STATE_ON;
+ state |= OMNIA_CMD_LED_STATE_ON;
- err = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_STATE, state);
if (!err)
led->on = !!brightness;
}
@@ -210,8 +163,8 @@ static int omnia_hwtrig_activate(struct led_classdev *cdev)
if (!err) {
/* Put the LED into MCU controlled mode */
- err = omnia_cmd_write_u8(leds->client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg));
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_MODE,
+ OMNIA_CMD_LED_MODE_LED(led->reg));
if (!err)
led->hwtrig = true;
}
@@ -232,9 +185,8 @@ static void omnia_hwtrig_deactivate(struct led_classdev *cdev)
led->hwtrig = false;
/* Put the LED into software mode */
- err = omnia_cmd_write_u8(leds->client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg) |
- CMD_LED_MODE_USER);
+ err = omnia_cmd_write_u8(leds->client, OMNIA_CMD_LED_MODE,
+ OMNIA_CMD_LED_MODE_LED(led->reg) | OMNIA_CMD_LED_MODE_USER);
mutex_unlock(&leds->lock);
@@ -300,38 +252,26 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
*/
cdev->default_trigger = omnia_hw_trigger.name;
- /* put the LED into software mode */
- ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
- CMD_LED_MODE_LED(led->reg) |
- CMD_LED_MODE_USER);
- if (ret) {
- dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
- ret);
- return ret;
- }
+ /* Put the LED into software mode */
+ ret = omnia_cmd_write_u8(client, OMNIA_CMD_LED_MODE, OMNIA_CMD_LED_MODE_LED(led->reg) |
+ OMNIA_CMD_LED_MODE_USER);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF to software mode\n", np);
- /* disable the LED */
- ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
- CMD_LED_STATE_LED(led->reg));
- if (ret) {
- dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
- return ret;
- }
+ /* Disable the LED */
+ ret = omnia_cmd_write_u8(client, OMNIA_CMD_LED_STATE, OMNIA_CMD_LED_STATE_LED(led->reg));
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF brightness\n", np);
/* Set initial color and cache it */
ret = omnia_led_send_color_cmd(client, led);
- if (ret < 0) {
- dev_err(dev, "Cannot set LED %pOF initial color: %i\n", np,
- ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot set LED %pOF initial color\n", np);
ret = devm_led_classdev_multicolor_register_ext(dev, &led->mc_cdev,
&init_data);
- if (ret < 0) {
- dev_err(dev, "Cannot register LED %pOF: %i\n", np, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot register LED %pOF\n", np);
return 1;
}
@@ -351,14 +291,14 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
- int ret;
-
- ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+ u8 reply;
+ int err;
- if (ret < 0)
- return ret;
+ err = omnia_cmd_read_u8(client, OMNIA_CMD_GET_BRIGHTNESS, &reply);
+ if (err < 0)
+ return err;
- return sysfs_emit(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", reply);
}
static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
@@ -374,7 +314,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
if (brightness > 100)
return -EINVAL;
- err = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+ err = omnia_cmd_write_u8(client, OMNIA_CMD_SET_BRIGHTNESS, brightness);
return err ?: count;
}
@@ -385,17 +325,16 @@ static ssize_t gamma_correction_show(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct omnia_leds *leds = i2c_get_clientdata(client);
- int ret;
+ u8 reply = 0;
+ int err;
if (leds->has_gamma_correction) {
- ret = omnia_cmd_read_u8(client, CMD_GET_GAMMA_CORRECTION);
- if (ret < 0)
- return ret;
- } else {
- ret = 0;
+ err = omnia_cmd_read_u8(client, OMNIA_CMD_GET_GAMMA_CORRECTION, &reply);
+ if (err < 0)
+ return err;
}
- return sysfs_emit(buf, "%d\n", !!ret);
+ return sysfs_emit(buf, "%d\n", !!reply);
}
static ssize_t gamma_correction_store(struct device *dev,
@@ -413,7 +352,7 @@ static ssize_t gamma_correction_store(struct device *dev,
if (kstrtobool(buf, &val) < 0)
return -EINVAL;
- err = omnia_cmd_write_u8(client, CMD_SET_GAMMA_CORRECTION, val);
+ err = omnia_cmd_write_u8(client, OMNIA_CMD_SET_GAMMA_CORRECTION, val);
return err ?: count;
}
@@ -426,26 +365,104 @@ static struct attribute *omnia_led_controller_attrs[] = {
};
ATTRIBUTE_GROUPS(omnia_led_controller);
-static int omnia_mcu_get_features(const struct i2c_client *client)
+static irqreturn_t omnia_brightness_changed_threaded_fn(int irq, void *data)
+{
+ struct omnia_leds *leds = data;
+
+ if (unlikely(!leds->brightness_knode)) {
+ /*
+ * Note that sysfs_get_dirent() may sleep. This is okay, because we are in threaded
+ * context.
+ */
+ leds->brightness_knode = sysfs_get_dirent(leds->client->dev.kobj.sd, "brightness");
+ if (!leds->brightness_knode)
+ return IRQ_NONE;
+ }
+
+ sysfs_notify_dirent(leds->brightness_knode);
+
+ return IRQ_HANDLED;
+}
+
+static void omnia_brightness_knode_put(void *data)
+{
+ struct omnia_leds *leds = data;
+
+ if (leds->brightness_knode)
+ sysfs_put(leds->brightness_knode);
+}
+
+static int omnia_request_brightness_irq(struct omnia_leds *leds)
+{
+ struct device *dev = &leds->client->dev;
+ int ret;
+
+ if (!leds->client->irq) {
+ dev_info(dev,
+ "Brightness change interrupt supported by MCU firmware but not described in device-tree\n");
+
+ return 0;
+ }
+
+ /*
+ * Registering the brightness_knode destructor before requesting the IRQ ensures that on
+ * removal the brightness_knode sysfs node is put only after the IRQ is freed.
+ * This is needed because the interrupt handler uses the knode.
+ */
+ ret = devm_add_action(dev, omnia_brightness_knode_put, leds);
+ if (ret < 0)
+ return ret;
+
+ return devm_request_threaded_irq(dev, leds->client->irq, NULL,
+ omnia_brightness_changed_threaded_fn, IRQF_ONESHOT,
+ "leds-turris-omnia", leds);
+}
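
The ordering constraint described in the comment above follows from the devm core's last-in, first-out teardown; a short sketch of the assumed removal sequence (paraphrased, not driver code):

	/*
	 * Registration order:              Teardown order on unbind:
	 *   1. devm_add_action(put-knode)    2. omnia_brightness_knode_put()
	 *   2. devm_request_threaded_irq()   1. free_irq() (handler quiesced)
	 *
	 * The IRQ is freed before the kernfs node is put, so the threaded
	 * handler can never dereference a node that sysfs_put() released.
	 */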
+
+static int omnia_mcu_get_features(const struct i2c_client *mcu_client)
{
u16 reply;
int err;
- err = omnia_cmd_read_raw(client->adapter, OMNIA_MCU_I2C_ADDR,
- CMD_GET_STATUS_WORD, &reply, sizeof(reply));
+ err = omnia_cmd_read_u16(mcu_client, OMNIA_CMD_GET_STATUS_WORD, &reply);
if (err)
return err;
- /* Check whether MCU firmware supports the CMD_GET_FEAUTRES command */
- if (!(le16_to_cpu(reply) & STS_FEATURES_SUPPORTED))
+ /* Check whether MCU firmware supports the OMNIA_CMD_GET_FEATURES command */
+ if (!(reply & OMNIA_STS_FEATURES_SUPPORTED))
return 0;
- err = omnia_cmd_read_raw(client->adapter, OMNIA_MCU_I2C_ADDR,
- CMD_GET_FEATURES, &reply, sizeof(reply));
+ err = omnia_cmd_read_u16(mcu_client, OMNIA_CMD_GET_FEATURES, &reply);
if (err)
return err;
- return le16_to_cpu(reply);
+ return reply;
+}
+
+static int omnia_match_mcu_client(struct device *dev, const void *data)
+{
+ struct i2c_client *client;
+
+ client = i2c_verify_client(dev);
+ if (!client)
+ return 0;
+
+ return client->addr == OMNIA_MCU_I2C_ADDR;
+}
+
+static int omnia_find_mcu_and_get_features(struct device *dev)
+{
+ struct device *mcu_dev;
+ int ret;
+
+ mcu_dev = device_find_child(dev->parent, NULL, omnia_match_mcu_client);
+ if (!mcu_dev)
+ return -ENODEV;
+
+ ret = omnia_mcu_get_features(i2c_verify_client(mcu_dev));
+
+ put_device(mcu_dev);
+
+ return ret;
}
static int omnia_leds_probe(struct i2c_client *client)
@@ -457,13 +474,10 @@ static int omnia_leds_probe(struct i2c_client *client)
int ret, count;
count = of_get_available_child_count(np);
- if (!count) {
- dev_err(dev, "LEDs are not defined in device tree!\n");
- return -ENODEV;
- } else if (count > OMNIA_BOARD_LEDS) {
- dev_err(dev, "Too many LEDs defined in device tree!\n");
- return -EINVAL;
- }
+ if (count == 0)
+ return dev_err_probe(dev, -ENODEV, "LEDs are not defined in device tree!\n");
+ if (count > OMNIA_BOARD_LEDS)
+ return dev_err_probe(dev, -EINVAL, "Too many LEDs defined in device tree!\n");
leds = devm_kzalloc(dev, struct_size(leds, leds, count), GFP_KERNEL);
if (!leds)
@@ -472,28 +486,23 @@ static int omnia_leds_probe(struct i2c_client *client)
leds->client = client;
i2c_set_clientdata(client, leds);
- ret = omnia_mcu_get_features(client);
- if (ret < 0) {
- dev_err(dev, "Cannot determine MCU supported features: %d\n",
- ret);
- return ret;
- }
+ ret = omnia_find_mcu_and_get_features(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot determine MCU supported features\n");
- leds->has_gamma_correction = ret & FEAT_LED_GAMMA_CORRECTION;
- if (!leds->has_gamma_correction) {
- dev_info(dev,
- "Your board's MCU firmware does not support the LED gamma correction feature.\n");
- dev_info(dev,
- "Consider upgrading MCU firmware with the omnia-mcutool utility.\n");
+ leds->has_gamma_correction = ret & OMNIA_FEAT_LED_GAMMA_CORRECTION;
+
+ if (ret & OMNIA_FEAT_BRIGHTNESS_INT) {
+ ret = omnia_request_brightness_irq(leds);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot request brightness IRQ\n");
}
mutex_init(&leds->lock);
ret = devm_led_trigger_register(dev, &omnia_hw_trigger);
- if (ret < 0) {
- dev_err(dev, "Cannot register private LED trigger: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot register private LED trigger\n");
led = &leds->leds[0];
for_each_available_child_of_node_scoped(np, child) {
@@ -509,20 +518,11 @@ static int omnia_leds_probe(struct i2c_client *client)
static void omnia_leds_remove(struct i2c_client *client)
{
- u8 buf[5];
-
- /* put all LEDs into default (HW triggered) mode */
- omnia_cmd_write_u8(client, CMD_LED_MODE,
- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
-
- /* set all LEDs color to [255, 255, 255] */
- buf[0] = CMD_LED_COLOR;
- buf[1] = OMNIA_BOARD_LEDS;
- buf[2] = 255;
- buf[3] = 255;
- buf[4] = 255;
+ /* Put all LEDs into default (HW triggered) mode */
+ omnia_cmd_write_u8(client, OMNIA_CMD_LED_MODE, OMNIA_CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
- i2c_master_send(client, buf, 5);
+ /* Set all LEDs color to [255, 255, 255] */
+ omnia_cmd_set_color(client, OMNIA_BOARD_LEDS, 255, 255, 255);
}
static const struct of_device_id of_omnia_leds_match[] = {
diff --git a/drivers/leds/leds-upboard.c b/drivers/leds/leds-upboard.c
new file mode 100644
index 000000000000..b350eb294280
--- /dev/null
+++ b/drivers/leds/leds-upboard.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UP board LED driver.
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define led_cdev_to_led_upboard(c) container_of(c, struct upboard_led, cdev)
+
+struct upboard_led {
+ struct regmap_field *field;
+ struct led_classdev cdev;
+};
+
+struct upboard_led_profile {
+ const char *name;
+ unsigned int bit;
+};
+
+static struct upboard_led_profile upboard_up_led_profile[] = {
+ { "upboard:yellow:" LED_FUNCTION_STATUS, 0 },
+ { "upboard:green:" LED_FUNCTION_STATUS, 1 },
+ { "upboard:red:" LED_FUNCTION_STATUS, 2 },
+};
+
+static struct upboard_led_profile upboard_up2_led_profile[] = {
+ { "upboard:blue:" LED_FUNCTION_STATUS, 0 },
+ { "upboard:yellow:" LED_FUNCTION_STATUS, 1 },
+ { "upboard:green:" LED_FUNCTION_STATUS, 2 },
+ { "upboard:red:" LED_FUNCTION_STATUS, 3 },
+};
+
+static enum led_brightness upboard_led_brightness_get(struct led_classdev *cdev)
+{
+ struct upboard_led *led = led_cdev_to_led_upboard(cdev);
+ int brightness, ret;
+
+ ret = regmap_field_read(led->field, &brightness);
+
+ return ret ? LED_OFF : brightness;
+};
+
+static int upboard_led_brightness_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct upboard_led *led = led_cdev_to_led_upboard(cdev);
+
+ return regmap_field_write(led->field, brightness != LED_OFF);
+};
+
+static int upboard_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga = dev_get_drvdata(dev->parent);
+ struct upboard_led_profile *led_profile;
+ struct upboard_led *led;
+ int led_instances, ret, i;
+
+ switch (fpga->fpga_data->type) {
+ case UPBOARD_UP_FPGA:
+ led_profile = upboard_up_led_profile;
+ led_instances = ARRAY_SIZE(upboard_up_led_profile);
+ break;
+ case UPBOARD_UP2_FPGA:
+ led_profile = upboard_up2_led_profile;
+ led_instances = ARRAY_SIZE(upboard_up2_led_profile);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unknown device type %d\n",
+ fpga->fpga_data->type);
+ }
+
+ for (i = 0; i < led_instances; i++) {
+ const struct reg_field fldconf = {
+ .reg = UPBOARD_REG_FUNC_EN0,
+ .lsb = led_profile[i].bit,
+ .msb = led_profile[i].bit,
+ };
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->field = devm_regmap_field_alloc(&pdev->dev, fpga->regmap, fldconf);
+ if (IS_ERR(led->field))
+ return PTR_ERR(led->field);
+
+ led->cdev.brightness_get = upboard_led_brightness_get;
+ led->cdev.brightness_set_blocking = upboard_led_brightness_set;
+ led->cdev.max_brightness = LED_ON;
+
+ led->cdev.name = led_profile[i].name;
+
+ ret = devm_led_classdev_register(dev, &led->cdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver upboard_led_driver = {
+ .driver = {
+ .name = "upboard-leds",
+ },
+ .probe = upboard_led_probe,
+};
+
+module_platform_driver(upboard_led_driver);
+
+MODULE_AUTHOR("Gary Wang <garywang@aaeon.com.tw>");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("UP Board LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:upboard-led");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index d7999e7372a4..bee46651e068 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -22,10 +22,10 @@ void led_stop_software_blink(struct led_classdev *led_cdev);
void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value);
void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value);
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count);
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count);
extern struct rw_semaphore leds_list_lock;
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index e1a81e0109e8..f80a06cc31f8 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -50,7 +50,13 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty = priv->leds[i].state.period - duty;
priv->leds[i].state.duty_cycle = duty;
- priv->leds[i].state.enabled = duty > 0;
+ /*
+ * Disabling a PWM doesn't guarantee that it emits the inactive level.
+ * So keep it on. Only for suspending the PWM should be disabled because
+ * otherwise it refuses to suspend. The possible downside is that the
+ * LED might stay (or even go) on.
+ */
+ priv->leds[i].state.enabled = !(cdev->flags & LED_SUSPENDED);
ret = pwm_apply_might_sleep(priv->leds[i].pwm,
&priv->leds[i].state);
if (ret)
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 33cbf8413658..b3ee33aed36e 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -156,7 +156,7 @@ static ssize_t led_invert_show(struct device *dev,
{
struct activity_data *activity_data = led_trigger_get_drvdata(dev);
- return sprintf(buf, "%u\n", activity_data->invert);
+ return sprintf(buf, "%d\n", activity_data->invert);
}
static ssize_t led_invert_store(struct device *dev,
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 4b0863db901a..c15efe3e5078 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -605,6 +605,8 @@ static int netdev_trig_notify(struct notifier_block *nb,
trigger_data->net_dev = NULL;
break;
case NETDEV_UP:
+ trigger_data->hw_control = can_hw_control(trigger_data);
+ fallthrough;
case NETDEV_CHANGE:
get_device_state(trigger_data);
/* Refresh link_speed visibility */
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index aad48c2540fc..a594bd5e2233 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -483,8 +483,8 @@ static int pattern_trig_activate(struct led_classdev *led_cdev)
data->led_cdev = led_cdev;
led_set_trigger_data(led_cdev, data);
timer_setup(&data->timer, pattern_trig_timer_function, 0);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = pattern_trig_hrtimer_function;
+ hrtimer_setup(&data->hrtimer, pattern_trig_hrtimer_function, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
led_cdev->activated = true;
if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index b461b1bed25b..369d72f59b3c 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -215,7 +215,7 @@ static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
}
/* file(s) in /proc/sys/dev/mac_hid */
-static struct ctl_table mac_hid_files[] = {
+static const struct ctl_table mac_hid_files[] = {
{
.procname = "mouse_button_emulation",
.data = &mouse_emulate_buttons,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index a01bc5090cdf..a1534cc6c641 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -492,11 +492,7 @@ int __init smu_init (void)
goto fail_np;
}
- smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
- if (!smu)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct smu_device));
-
+ smu = memblock_alloc_or_panic(sizeof(struct smu_device), SMP_CACHE_BYTES);
spin_lock_init(&smu->lock);
INIT_LIST_HEAD(&smu->cmd_list);
INIT_LIST_HEAD(&smu->cmd_i2c_list);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 8ecba7fb999e..ed52db272f4d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,17 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config EXYNOS_MBOX
+ tristate "Exynos Mailbox"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ help
+ Say Y here if you want to build the Samsung Exynos Mailbox controller
+ driver. The controller has 16 flag bits for hardware interrupt
+ generation and a shared register for passing mailbox messages.
+ When the controller is used by the ACPM interface the shared register
+ is ignored and the mailbox controller acts as a doorbell that raises
+ the interrupt to the ACPM firmware.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -178,6 +189,19 @@ config POLARFIRE_SOC_MAILBOX
If unsure, say N.
+config MCHP_SBI_IPC_MBOX
+ tristate "Microchip Inter-processor Communication (IPC) SBI driver"
+ depends on RISCV_SBI || COMPILE_TEST
+ depends on ARCH_MICROCHIP
+ help
+ Mailbox implementation for Microchip devices with an
+ inter-processor communication (IPC) controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mailbox-mchp-ipc-sbi.
+
+ If unsure, say N.
+
config QCOM_APCS_IPC
tristate "Qualcomm APCS IPC driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5f4f5b0ce2cc..9a1542b55539 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
@@ -45,6 +47,8 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+obj-$(CONFIG_MCHP_SBI_IPC_MBOX) += mailbox-mchp-ipc-sbi.o
+
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c
new file mode 100644
index 000000000000..20049f0ec5ff
--- /dev/null
+++ b/drivers/mailbox/exynos-mailbox.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define EXYNOS_MBOX_MCUCTRL 0x0 /* Mailbox Control Register */
+#define EXYNOS_MBOX_INTCR0 0x24 /* Interrupt Clear Register 0 */
+#define EXYNOS_MBOX_INTMR0 0x28 /* Interrupt Mask Register 0 */
+#define EXYNOS_MBOX_INTSR0 0x2c /* Interrupt Status Register 0 */
+#define EXYNOS_MBOX_INTMSR0 0x30 /* Interrupt Mask Status Register 0 */
+#define EXYNOS_MBOX_INTGR1 0x40 /* Interrupt Generation Register 1 */
+#define EXYNOS_MBOX_INTMR1 0x48 /* Interrupt Mask Register 1 */
+#define EXYNOS_MBOX_INTSR1 0x4c /* Interrupt Status Register 1 */
+#define EXYNOS_MBOX_INTMSR1 0x50 /* Interrupt Mask Status Register 1 */
+
+#define EXYNOS_MBOX_INTMR0_MASK GENMASK(15, 0)
+#define EXYNOS_MBOX_INTGR1_MASK GENMASK(15, 0)
+
+#define EXYNOS_MBOX_CHAN_COUNT HWEIGHT32(EXYNOS_MBOX_INTGR1_MASK)
+
+/**
+ * struct exynos_mbox - driver's private data.
+ * @regs: mailbox registers base address.
+ * @mbox: pointer to the mailbox controller.
+ * @pclk: pointer to the mailbox peripheral clock.
+ */
+struct exynos_mbox {
+ void __iomem *regs;
+ struct mbox_controller *mbox;
+ struct clk *pclk;
+};
+
+static int exynos_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct exynos_mbox *exynos_mbox = dev_get_drvdata(dev);
+ struct exynos_mbox_msg *msg = data;
+
+ if (msg->chan_id >= exynos_mbox->mbox->num_chans) {
+ dev_err(dev, "Invalid channel ID %d\n", msg->chan_id);
+ return -EINVAL;
+ }
+
+ if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) {
+ dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type);
+ return -EINVAL;
+ }
+
+ writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops exynos_mbox_chan_ops = {
+ .send_data = exynos_mbox_send_data,
+};
+
+static struct mbox_chan *exynos_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int i;
+
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Return the first available channel. When we don't pass the
+ * channel ID from device tree, each channel populated by the driver is
+ * just a software construct or a virtual channel. We use 'void *data'
+ * in send_data() to pass the channel identifiers.
+ */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (mbox->chans[i].cl == NULL)
+ return &mbox->chans[i];
+ return ERR_PTR(-EINVAL);
+}
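
A hedged client-side sketch of ringing a doorbell through this controller; mbox_request_channel() and mbox_send_message() are the generic mailbox client API, and the client setup is assumed:

	struct exynos_mbox_msg msg = {
		.chan_id = 0,	/* doorbell bit to raise in INTGR1, illustrative */
		.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL,
	};

	/* 'chan' previously obtained with mbox_request_channel(&client, 0) */
	ret = mbox_send_message(chan, &msg);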
+
+static const struct of_device_id exynos_mbox_match[] = {
+ { .compatible = "google,gs101-mbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mbox_match);
+
+static int exynos_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_mbox *exynos_mbox;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL);
+ if (!exynos_mbox)
+ return -ENOMEM;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, EXYNOS_MBOX_CHAN_COUNT, sizeof(*chans),
+ GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ exynos_mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(exynos_mbox->regs))
+ return PTR_ERR(exynos_mbox->regs);
+
+ exynos_mbox->pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(exynos_mbox->pclk))
+ return dev_err_probe(dev, PTR_ERR(exynos_mbox->pclk),
+ "Failed to enable clock.\n");
+
+ mbox->num_chans = EXYNOS_MBOX_CHAN_COUNT;
+ mbox->chans = chans;
+ mbox->dev = dev;
+ mbox->ops = &exynos_mbox_chan_ops;
+ mbox->of_xlate = exynos_mbox_of_xlate;
+
+ for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++)
+ chans[i].mbox = mbox;
+
+ exynos_mbox->mbox = mbox;
+
+ platform_set_drvdata(pdev, exynos_mbox);
+
+ /* Mask out all interrupts. We support just polling channels for now. */
+ writel(EXYNOS_MBOX_INTMR0_MASK, exynos_mbox->regs + EXYNOS_MBOX_INTMR0);
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static struct platform_driver exynos_mbox_driver = {
+ .probe = exynos_mbox_probe,
+ .driver = {
+ .name = "exynos-acpm-mbox",
+ .of_match_table = exynos_mbox_match,
+ },
+};
+module_platform_driver(exynos_mbox_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox-mchp-ipc-sbi.c b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
new file mode 100644
index 000000000000..a6e52009a424
--- /dev/null
+++ b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Inter-Processor communication (IPC) driver
+ *
+ * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
+ *
+ * Author: Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/mchp-ipc.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define IRQ_STATUS_BITS 12
+#define NUM_CHANS_PER_CLUSTER 5
+#define IPC_DMA_BIT_MASK 32
+#define SBI_EXT_MICROCHIP_TECHNOLOGY (SBI_EXT_VENDOR_START | \
+ MICROCHIP_VENDOR_ID)
+
+enum {
+ SBI_EXT_IPC_PROBE = 0x100,
+ SBI_EXT_IPC_CH_INIT,
+ SBI_EXT_IPC_SEND,
+ SBI_EXT_IPC_RECEIVE,
+ SBI_EXT_IPC_STATUS,
+};
+
+enum ipc_hw {
+ MIV_IHC,
+};
+
+/**
+ * struct mchp_ipc_mbox_info - IPC probe message format
+ *
+ * @hw_type: IPC implementation available in the hardware
+ * @num_channels: number of IPC channels available in the hardware
+ *
+ * Used to retrieve information on the IPC implementation
+ * using the SBI_EXT_IPC_PROBE SBI function id.
+ */
+struct mchp_ipc_mbox_info {
+ enum ipc_hw hw_type;
+ u8 num_channels;
+};
+
+/**
+ * struct mchp_ipc_init - IPC channel init message format
+ *
+ * @max_msg_size: maximum message size in bytes of a given channel
+ *
+ * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
+ * the max message size in bytes of the initialized channel.
+ */
+struct mchp_ipc_init {
+ u16 max_msg_size;
+};
+
+/**
+ * struct mchp_ipc_status - IPC status message format
+ *
+ * @status: interrupt status for all channels associated to a cluster
+ * @cluster: specifies the cluster instance that originated an irq
+ *
+ * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
+ * the message present and message clear interrupt status for all the
+ * channels associated to a cluster.
+ */
+struct mchp_ipc_status {
+ u32 status;
+ u8 cluster;
+};
+
+/**
+ * struct mchp_ipc_sbi_msg - IPC SBI payload message
+ *
+ * @buf_addr: physical address where the received data should be copied to
+ * @size: maximum size (in bytes) that can be stored in the buffer pointed to by @buf_addr
+ * @irq_type: mask representing the irq types that triggered an irq
+ *
+ * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function
+ * ids to send/receive a message from an associated processor using
+ * the IPC.
+ */
+struct mchp_ipc_sbi_msg {
+ u64 buf_addr;
+ u16 size;
+ u8 irq_type;
+};
+
+struct mchp_ipc_cluster_cfg {
+ void *buf_base;
+ phys_addr_t buf_base_addr;
+ int irq;
+};
+
+struct mchp_ipc_sbi_mbox {
+ struct device *dev;
+ struct mbox_chan *chans;
+ struct mchp_ipc_cluster_cfg *cluster_cfg;
+ void *buf_base;
+ unsigned long buf_base_addr;
+ struct mbox_controller controller;
+ enum ipc_hw hw_type;
+};
+
+static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel,
+ address, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static int mchp_ipc_sbi_send(u32 command, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address,
+ 0, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
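
The two wrappers above implement the shared-buffer convention used throughout this driver: a request struct is staged in a kernel buffer, the buffer's physical address is handed to the firmware via the ecall, and the reply is read back from the same buffer afterwards. A hedged sketch of one round trip, with the channel id assumed and allocation-failure handling trimmed:

	struct mchp_ipc_init reply;
	void *buf = kmalloc(sizeof(reply), GFP_KERNEL);

	ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_id, __pa(buf));
	if (ret >= 0) {
		memcpy(&reply, buf, sizeof(reply));
		/* reply.max_msg_size now holds the firmware-reported limit */
	}
	kfree(buf);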
+
+static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct mchp_ipc_sbi_mbox, controller);
+}
+
+static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg request;
+
+ request.buf_addr = chan_info->msg_buf_rx_addr;
+ request.size = chan_info->max_msg_size;
+ memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg));
+}
+
+static inline void mchp_ipc_process_received_data(struct mbox_chan *chan,
+ struct mchp_ipc_msg *ipc_msg)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg sbi_msg;
+
+ memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg));
+ ipc_msg->buf = (u32 *)chan_info->msg_buf_rx;
+ ipc_msg->size = sbi_msg.size;
+}
+
+static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct mchp_ipc_sbi_chan *chan_info;
+ struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data;
+ struct mchp_ipc_msg ipc_msg;
+ struct mchp_ipc_status status_msg;
+ int ret;
+ unsigned long hartid;
+ u32 i, chan_index, chan_id;
+
+ /* Find out the hart that originated the irq */
+ for_each_online_cpu(i) {
+ hartid = cpuid_to_hartid_map(i);
+ if (irq == ipc->cluster_cfg[hartid].irq)
+ break;
+ }
+
+ status_msg.cluster = hartid;
+ memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status));
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr);
+ if (ret < 0) {
+ dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status));
+
+ /*
+ * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify
+ * the channel(s) that have a message to be processed/acknowledged.
+ * The bits are organized in alternating format, where each pair of bits represents
+ * the status of the message present and message clear interrupts for each cluster/hart
+ * (from hart 0 to hart 5). Each cluster can have up to 5 fixed channels associated.
+ */
+
+ for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) {
+ /* Find out the destination hart that triggered the interrupt */
+ chan_index = i / 2;
+
+ /*
+ * The IP has no loopback channels, so we need to decrement the index when
+ * the target hart has a greater index than our own
+ */
+ if (chan_index >= status_msg.cluster)
+ chan_index--;
+
+ /*
+ * Calculate the channel id given the hart and channel index. Channel IDs
+ * are unique across all clusters of an IPC, and iterate contiguously
+ * across all clusters.
+ */
+ chan_id = status_msg.cluster * NUM_CHANS_PER_CLUSTER + chan_index;
+
+ chan = &ipc->chans[chan_id];
+ chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ if (i % 2 == 0) {
+ mchp_ipc_prepare_receive_req(chan);
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ if (ret < 0)
+ continue;
+
+ mchp_ipc_process_received_data(chan, &ipc_msg);
+ mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg);
+
+ } else {
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ mbox_chan_txdone(&ipc->chans[chan_id], ret);
+ }
+ }
+ return IRQ_HANDLED;
+}
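
A worked example of the decoding above, using made-up register contents: assume status = BIT(4) and cluster = 1.

	/*
	 * i = 4, even bit      -> "message present" interrupt
	 * chan_index = 4 / 2   -> 2 (source hart 2)
	 * 2 >= cluster (1)     -> decrement to 1 (no loopback channel)
	 * chan_id              -> 1 * NUM_CHANS_PER_CLUSTER + 1 = 6
	 * so ipc->chans[6] is handed the SBI_EXT_IPC_RECEIVE payload.
	 */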
+
+static int mchp_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ const struct mchp_ipc_msg *msg = data;
+ struct mchp_ipc_sbi_msg sbi_payload;
+
+ memcpy(chan_info->msg_buf_tx, msg->buf, msg->size);
+ sbi_payload.buf_addr = chan_info->msg_buf_tx_addr;
+ sbi_payload.size = msg->size;
+ memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload));
+
+ return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr);
+}
+
+static int mchp_ipc_startup(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox);
+ struct mchp_ipc_init ch_init_msg;
+ int ret;
+
+ /*
+ * The TX base buffer is used to transmit two types of messages:
+ * - struct mchp_ipc_init to initialize the channel
+ * - struct mchp_ipc_sbi_msg to transmit user data/payload
+ * Ensure the TX buffer size is large enough to accommodate either message type.
+ */
+ size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg));
+
+ chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx);
+
+ chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_tx;
+ }
+
+ chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx);
+
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id,
+ chan_info->buf_base_tx_addr);
+ if (ret < 0) {
+ dev_err(ipc->dev, "channel %u init failed\n", chan_info->id);
+ goto fail_free_buf_base_rx;
+ }
+
+ memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init));
+ chan_info->max_msg_size = ch_init_msg.max_msg_size;
+
+ chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_tx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_rx;
+ }
+
+ chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx);
+
+ chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_msg_tx;
+ }
+
+ chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx);
+
+ switch (ipc->hw_type) {
+ case MIV_IHC:
+ return 0;
+ default:
+ /* Unknown IPC hardware: fail instead of falling through with ret >= 0 */
+ ret = -EINVAL;
+ goto fail_free_buf_msg_rx;
+ }
+
+fail_free_buf_msg_rx:
+ kfree(chan_info->msg_buf_rx);
+fail_free_buf_msg_tx:
+ kfree(chan_info->msg_buf_tx);
+fail_free_buf_base_rx:
+ kfree(chan_info->buf_base_rx);
+fail_free_buf_base_tx:
+ kfree(chan_info->buf_base_tx);
+fail:
+ return ret;
+}
+
+static void mchp_ipc_shutdown(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ kfree(chan_info->buf_base_tx);
+ kfree(chan_info->buf_base_rx);
+ kfree(chan_info->msg_buf_tx);
+ kfree(chan_info->msg_buf_rx);
+}
+
+static const struct mbox_chan_ops mchp_ipc_ops = {
+ .startup = mchp_ipc_startup,
+ .send_data = mchp_ipc_send_data,
+ .shutdown = mchp_ipc_shutdown,
+};
+
+static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller);
+ unsigned int chan_id = spec->args[0];
+
+ if (chan_id >= ipc->controller.num_chans) {
+ dev_err(ipc->dev, "invalid channel id %d\n", chan_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ipc->chans[chan_id];
+}
+
+static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc)
+{
+ struct platform_device *pdev = to_platform_device(ipc->dev);
+ char *irq_name;
+ int cpuid, ret;
+ unsigned long hartid;
+ bool irq_found = false;
+
+ for_each_online_cpu(cpuid) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid);
+ ret = platform_get_irq_byname_optional(pdev, irq_name);
+ if (ret <= 0)
+ continue;
+
+ ipc->cluster_cfg[hartid].irq = ret;
+ ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq,
+ mchp_ipc_cluster_aggr_isr, IRQF_SHARED,
+ "miv-ihc-irq", ipc);
+ if (ret)
+ return ret;
+
+ ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev,
+ sizeof(struct mchp_ipc_status),
+ GFP_KERNEL);
+
+ if (!ipc->cluster_cfg[hartid].buf_base)
+ return -ENOMEM;
+
+ ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base);
+
+ irq_found = true;
+ }
+
+ return irq_found;
+}
+
+static int mchp_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_ipc_mbox_info ipc_info;
+ struct mchp_ipc_sbi_mbox *ipc;
+ struct mchp_ipc_sbi_chan *priv;
+ bool irq_avail = false;
+ int ret;
+ u32 chan_id;
+
+ ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY);
+ if (ret <= 0)
+ return dev_err_probe(dev, ret, "Microchip SBI extension not detected\n");
+
+ ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ipc);
+
+ ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL);
+ if (!ipc->buf_base)
+ return -ENOMEM;
+
+ ipc->buf_base_addr = __pa(ipc->buf_base);
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "could not probe IPC SBI service\n");
+
+ memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info));
+ ipc->controller.num_chans = ipc_info.num_channels;
+ ipc->hw_type = ipc_info.hw_type;
+
+ ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL);
+ if (!ipc->chans)
+ return -ENOMEM;
+
+ ipc->dev = dev;
+ ipc->controller.txdone_irq = true;
+ ipc->controller.dev = ipc->dev;
+ ipc->controller.ops = &mchp_ipc_ops;
+ ipc->controller.chans = ipc->chans;
+ ipc->controller.of_xlate = mchp_ipc_mbox_xlate;
+
+ for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipc->chans[chan_id].con_priv = priv;
+ priv->id = chan_id;
+ }
+
+ if (ipc->hw_type == MIV_IHC) {
+ ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(),
+ sizeof(struct mchp_ipc_cluster_cfg),
+ GFP_KERNEL);
+ if (!ipc->cluster_cfg)
+ return -ENOMEM;
+
+ if (mchp_ipc_get_cluster_aggr_irq(ipc))
+ irq_avail = true;
+ }
+
+ if (!irq_avail)
+ return dev_err_probe(dev, -ENODEV, "missing interrupt property\n");
+
+ ret = devm_mbox_controller_register(dev, &ipc->controller);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Inter-Processor communication (IPC) registration failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id mchp_ipc_of_match[] = {
+ {.compatible = "microchip,sbi-ipc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp_ipc_of_match);
+
+static struct platform_driver mchp_ipc_driver = {
+ .driver = {
+ .name = "microchip_ipc",
+ .of_match_table = mchp_ipc_of_match,
+ },
+ .probe = mchp_ipc_probe,
+};
+
+module_platform_driver(mchp_ipc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Valentina Fernandez <valentina.fernandezalanis@microchip.com>");
+MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver");
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 4df546e3b7ea..d5d9effece97 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -251,7 +251,7 @@ static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform
return PTR_ERR(mbox->sysreg_scb);
mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->ctrl_base))
+ if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
return 0;
diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
index 4e84640ac3b8..a6b2aa9ae952 100644
--- a/drivers/mailbox/mailbox-th1520.c
+++ b/drivers/mailbox/mailbox-th1520.c
@@ -41,7 +41,7 @@
#ifdef CONFIG_PM_SLEEP
/* store MBOX context across system-wide suspend/resume transitions */
struct th1520_mbox_context {
- u32 intr_mask[TH_1520_MBOX_CHANS - 1];
+ u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif
@@ -387,8 +387,10 @@ static void __iomem *th1520_map_mmio(struct platform_device *pdev,
mapped = devm_ioremap(&pdev->dev, res->start + offset,
resource_size(res) - offset);
- if (IS_ERR(mapped))
+ if (!mapped) {
dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
+ return ERR_PTR(-ENOMEM);
+ }
return mapped;
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..118beaf447aa 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -534,9 +534,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f0d1fc0fb9ff..11c41e935a36 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -157,6 +157,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq5424-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 14c7907c6632..0b17a38ea6bf 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -14,6 +14,7 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
/* IPCC Register offsets */
+#define IPCC_REG_CONFIG 0x08
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
@@ -21,6 +22,7 @@
#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
#define IPCC_REG_CLIENT_CLEAR 0x38
+#define IPCC_CLEAR_ON_RECV_RD BIT(0)
#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
@@ -274,6 +276,7 @@ static int qcom_ipcc_pm_resume(struct device *dev)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ u32 config_value;
static int id;
char *name;
int ret;
@@ -288,6 +291,19 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (IS_ERR(ipcc->base))
return PTR_ERR(ipcc->base);
+ /*
+ * The boot firmware may have been using the same IPCC instance as the
+ * HLOS and left CLEAR_ON_RECV_RD set, which means the interrupt
+ * pending registers are cleared when RECV_ID is read.
+ * The register automatically updates to the next pending interrupt/client
+ * status based on priority.
+ */
+ config_value = readl(ipcc->base + IPCC_REG_CONFIG);
+ if (config_value & IPCC_CLEAR_ON_RECV_RD) {
+ config_value &= ~(IPCC_CLEAR_ON_RECV_RD);
+ writel(config_value, ipcc->base + IPCC_REG_CONFIG);
+ }
+
ipcc->irq = platform_get_irq(pdev, 0);
if (ipcc->irq < 0)
return ipcc->irq;
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 8d5e2d7dc03b..c1981f091bd1 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -388,7 +388,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
value &= ~HSP_SM_SHRD_MBOX_FULL;
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Need to clear all bits here since some producers, such as TCU, depend
@@ -398,6 +397,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
* explicitly, so we have to make sure we cover all possible cases.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
@@ -433,7 +434,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Clear data registers and tag.
@@ -443,6 +443,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index aa5249da59b2..0c143beaafda 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -905,7 +905,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
- struct zynqmp_ipi_pdata __percpu *pdata;
+ struct zynqmp_ipi_pdata *pdata;
struct of_phandle_args out_irq;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e9db8e4acdf..0b1870a09e1f 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -61,6 +61,19 @@ config MD_BITMAP_FILE
various kernel APIs and can only work with files on a file system not
actually sitting on the MD device.
+config MD_LINEAR
+ tristate "Linear (append) mode"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, then your multiple devices driver will be able to
+ use the so-called linear mode, i.e. it will combine the hard disk
+ partitions by simply appending one to the other.
+
+ To compile this as a module, choose M here: the module
+ will be called linear.
+
+ If unsure, say Y.
+
config MD_RAID0
tristate "RAID-0 (striping) mode"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 476a214e4bdc..87bdfc9fe14c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,12 +29,14 @@ dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
+linear-y += md-linear.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
# themselves, and md.o may use the personalities when it
# auto-initialised.
+obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index ef6abf33f926..45ca134cbf02 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_private = &io->cl;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c1d28e365910..453efbbdc8ee 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -334,7 +334,7 @@ static void dirty_init(struct keybuf_key *w)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_private = w;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1ae2c71bb383..02a2919f4e5a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -59,6 +59,7 @@ struct convert_context {
struct bio *bio_out;
struct bvec_iter iter_out;
atomic_t cc_pending;
+ unsigned int tag_offset;
u64 cc_sector;
union {
struct skcipher_request *req;
@@ -1187,7 +1188,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
- bip->bip_iter.bi_sector = io->cc->start + io->sector;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
tag_len, offset_in_page(io->integrity_metadata));
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
if (bio_out)
ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
+ ctx->tag_offset = 0;
init_completion(&ctx->restart);
}
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
static blk_status_t crypt_convert(struct crypt_config *cc,
struct convert_context *ctx, bool atomic, bool reset_pending)
{
- unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
- r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+ r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
else
- r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+ r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
switch (r) {
/*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* exit and continue processing in a workqueue
*/
ctx->r.req = NULL;
+ ctx->tag_offset++;
ctx->cc_sector += sector_step;
- tag_offset++;
return BLK_STS_DEV_RESOURCE;
}
} else {
@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
*/
case -EINPROGRESS:
ctx->r.req = NULL;
+ ctx->tag_offset++;
ctx->cc_sector += sector_step;
- tag_offset++;
continue;
/*
* The request was already processed (synchronously).
@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
case 0:
atomic_dec(&ctx->cc_pending);
ctx->cc_sector += sector_step;
- tag_offset++;
+ ctx->tag_offset++;
if (!atomic)
cond_resched();
continue;
@@ -1719,6 +1720,7 @@ retry:
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_ioprio = io->base_bio->bi_ioprio;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
remaining_size = size;
@@ -1909,7 +1911,6 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_dec_pending(io);
return 1;
}
- clone->bi_iter.bi_sector = cc->start + io->sector;
crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
io->saved_bi_iter = clone->bi_iter;
dm_submit_bio_remap(io->base_bio, clone);
@@ -1925,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+
+ clone->bi_iter.bi_sector = cc->start + io->sector;
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
crypt_inc_pending(io);
- clone->bi_iter.bi_sector = cc->start + io->sector;
-
if (dm_crypt_integrity_io_alloc(io, clone)) {
crypt_dec_pending(io);
bio_put(clone);
@@ -2039,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
/* crypt_convert should have filled the clone bio */
BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_iter.bi_sector = cc->start + io->sector;
-
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
dm_submit_bio_remap(io->base_bio, clone);
@@ -2092,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
struct crypt_config *cc = io->cc;
struct convert_context *ctx = &io->ctx;
int crypt_finished;
- sector_t sector = io->sector;
blk_status_t r;
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- r = crypt_convert(cc, &io->ctx, true, false);
+ r = crypt_convert(cc, &io->ctx, false, false);
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -2109,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
}
/* Encryption was already finished, submit io now */
- if (crypt_finished) {
+ if (crypt_finished)
kcryptd_crypt_write_io_submit(io, 0);
- io->sector = sector;
- }
crypt_dec_pending(io);
}
@@ -2123,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct convert_context *ctx = &io->ctx;
struct bio *clone;
int crypt_finished;
- sector_t sector = io->sector;
blk_status_t r;
/*
* Prevent io from disappearing until this function completes.
*/
crypt_inc_pending(io);
- crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+ crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
if (unlikely(!clone)) {
@@ -2147,8 +2142,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.iter_in = clone->bi_iter;
}
- sector += bio_sectors(clone);
-
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2172,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
/* Encryption was already finished, submit io now */
- if (crypt_finished) {
+ if (crypt_finished)
kcryptd_crypt_write_io_submit(io, 0);
- io->sector = sector;
- }
dec:
crypt_dec_pending(io);
@@ -2203,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work)
wait_for_completion(&io->ctx.restart);
reinit_completion(&io->ctx.restart);
- r = crypt_convert(cc, &io->ctx, true, false);
+ r = crypt_convert(cc, &io->ctx, false, false);
if (r)
io->error = r;
@@ -2221,7 +2212,6 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
if (io->ctx.aead_recheck) {
- io->ctx.cc_sector = io->sector + cc->iv_offset;
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
} else {
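
The dm-crypt hunks above are one logical fix. crypt_convert() can suspend early (returning BLK_STS_DEV_RESOURCE, or hitting -EINPROGRESS when the crypto request goes asynchronous) and is then re-entered from kcryptd_crypt_write_continue() or kcryptd_crypt_read_continue(); a stack-local tag_offset restarted at zero on every re-entry and indexed the wrong integrity tags, so the counter moves into struct convert_context where it survives the suspend/resume cycle. The same re-entry argument explains the continue paths now passing atomic=false (they run in workqueue context) and the clone's bi_sector being set at allocation time, so dm_crypt_integrity_io_alloc() can take bip_iter.bi_sector straight from the bio. A minimal sketch of the pattern, with invented names:

/* State that must survive a paused conversion belongs in the
 * per-request context, never on the conversion loop's stack.
 */
struct resume_ctx {
	unsigned int tag_offset;	/* progress through integrity tags */
};

static int convert_one_block(struct resume_ctx *ctx)
{
	ctx->tag_offset++;		/* recorded in the context ... */
	return -EINPROGRESS;		/* ... before we go asynchronous */
}

static void continue_from_workqueue(struct resume_ctx *ctx)
{
	convert_one_block(ctx);		/* re-entry sees the saved offset */
}
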
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 731467d4ed10..b690905ab89f 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
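
The dm-flakey change above fixes an aliasing bug: bio_init() was handed the source bio's bi_inline_vecs, so the clone's bvec table pointed into storage owned by a different bio, and pages added to the clone corrupted the original. bi_inline_vecs is the flexible array at the end of struct bio, so each bio must be initialised with its own:

/* wrong: the clone borrows the source bio's bvec storage */
bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
/* right: the clone owns its inline vector array */
bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
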
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee9f7cecd78e..c45464b6576a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3790,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE: {
- __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
-
- watermark_percentage += ic->journal_entries / 2;
- do_div(watermark_percentage, ic->journal_entries);
- arg_count = 3;
+ arg_count = 1; /* buffer_sectors */
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'B';
- arg_count += ic->mode == 'B';
+ arg_count += ic->mode != 'I'; /* interleave_sectors */
+ arg_count += ic->mode == 'J'; /* journal_sectors */
+ arg_count += ic->mode == 'J'; /* journal_watermark */
+ arg_count += ic->mode == 'J'; /* commit_time */
+ arg_count += ic->mode == 'B'; /* sectors_per_bit */
+ arg_count += ic->mode == 'B'; /* bitmap_flush_interval */
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -3822,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
- DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
- DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ if (ic->mode != 'I')
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
+ __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
+ watermark_percentage += ic->journal_entries / 2;
+ do_div(watermark_percentage, ic->journal_entries);
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
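
After this rework the STATUSTYPE_TABLE argument count is assembled per mode rather than unconditionally. A worked example, assuming a hypothetical 'J'-mode table with no meta device, 1-sector blocks, no discard and no optional algorithms:

/*
 * arg_count = 1			buffer_sectors (always emitted)
 *           + 1  (mode != 'I')		interleave_sectors
 *           + 3  (mode == 'J')		journal_sectors, journal_watermark,
 *					commit_time
 *           = 5 key:value arguments
 */
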
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index d7a8e2f40db3..c37668790577 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -379,6 +379,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
atomic_inc(&io->count);
submit_bio(bio);
+ WARN_ON_ONCE(opf & REQ_ATOMIC && remaining);
} while (remaining);
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 49fb0f684193..66318aba4bdb 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -199,9 +199,10 @@ static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
- DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
+ DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO |
+ DM_TARGET_ATOMIC_WRITES,
.report_zones = linear_report_zones,
.module = THIS_MODULE,
.ctr = linear_ctr,
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index 461ee6b2044d..716807e511ee 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -116,7 +116,7 @@ static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
if (!s)
return -ENOMEM;
- s->path_map = kzalloc(nr_cpu_ids * sizeof(struct path_info *),
+ s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),
GFP_KERNEL);
if (!s->path_map)
goto free_selector;
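
The dm-ps-io-affinity hunk swaps an open-coded nr_cpu_ids * sizeof() multiplication for kcalloc(), whose multiply is overflow-checked: on overflow it returns NULL instead of a short allocation that would be overrun later. A minimal sketch with a hypothetical helper:

#include <linux/slab.h>

struct path_info;	/* opaque here; owned by the path selector */

static struct path_info **alloc_path_map(unsigned int nr)
{
	/* kcalloc(n, size, flags) is kmalloc_array() + __GFP_ZERO:
	 * it fails cleanly if n * size would overflow.
	 */
	return kcalloc(nr, sizeof(struct path_info *), GFP_KERNEL);
}
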
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1e0d3b9b75d6..6adc55fd90d3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3196,7 +3196,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (reshape_sectors || rs_is_raid1(rs)) {
/*
* We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
+ * raid set needs to run to provide the respective reshape
* check functions via its MD personality instance.
*
* So do the reshape check after md_run() succeeded.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9511dae5b556..8c6f1f7e6456 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
unsigned int i;
struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
struct mirror *m;
- blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
+ blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH | REQ_ATOMIC);
struct dm_io_request io_req = {
.bi_opf = REQ_OP_WRITE | op_flags,
.mem.type = DM_IO_BIO,
@@ -1483,8 +1483,9 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
+ .features = DM_TARGET_ATOMIC_WRITES,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
.map = mirror_map,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 499f8cc8a39f..e23076f7ece2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+ md->tag_set->flags = BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4112071de0be..3786ac67cefe 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -465,8 +465,9 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 6, 0},
- .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
+ .version = {1, 7, 0},
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
+ DM_TARGET_ATOMIC_WRITES,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd8b796ae683..0ef5203387b2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1806,6 +1806,32 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
+static int device_not_atomic_write_capable(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ return !bdev_can_atomic_write(dev->bdev);
+}
+
+static bool dm_table_supports_atomic_writes(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_supports_atomic_writes(ti->type))
+ return false;
+
+ if (!ti->type->iterate_devices)
+ return false;
+
+ if (ti->type->iterate_devices(ti,
+ device_not_atomic_write_capable, NULL)) {
+ return false;
+ }
+ }
+ return true;
+}
+
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1854,6 +1880,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return r;
}
+ if (dm_table_supports_atomic_writes(t))
+ limits->features |= BLK_FEAT_ATOMIC_WRITES;
+
r = queue_limits_set(q, limits);
if (r)
return r;
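
dm_table_supports_atomic_writes() gives the flag AND-semantics: every target in the table must advertise DM_TARGET_ATOMIC_WRITES, provide iterate_devices, and sit only on devices for which bdev_can_atomic_write() is true before the table sets BLK_FEAT_ATOMIC_WRITES. That is exactly what the dm-linear, dm-raid1 and dm-stripe hunks in this series opt into. A hedged sketch of the opt-in side for a hypothetical target:

static struct target_type example_target = {
	.name     = "example",		/* hypothetical */
	.version  = {1, 0, 0},
	.features = DM_TARGET_ATOMIC_WRITES,
	.module   = THIS_MODULE,
	/* .ctr, .dtr, .map and .iterate_devices omitted for brevity;
	 * the table-level check rejects targets without
	 * .iterate_devices, so a real target must implement it.
	 */
};
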
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index b6f8e2dc7729..3f3d29af1be4 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+ spin_lock_init(&zones->lock);
/*
* Since we will save up the timeouts that would have been reported but were ratelimited,
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index e61855da6461..0c41949db784 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -122,7 +122,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
par = fec_read_parity(v, rsb, block_offset, &offset,
- par_buf_offset, &buf, bio_prio(bio));
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
@@ -164,7 +164,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset,
- par_buf_offset, &buf, bio_prio(bio));
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -254,7 +254,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
+ bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio->bi_ioprio);
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 47d595f6a76e..e86c1431b108 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -321,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
} else {
data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
- &buf, bio_prio(bio));
+ &buf, bio->bi_ioprio);
}
if (IS_ERR(data))
@@ -789,7 +789,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_fec_init_io(io);
- verity_submit_prefetch(v, io, bio_prio(bio));
+ verity_submit_prefetch(v, io, bio->bi_ioprio);
submit_bio_noacct(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 12ecf07a3841..4d1e42891d24 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1479,12 +1479,12 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
struct dm_target *ti, unsigned int num_bios,
- unsigned *len, gfp_t gfp_flag)
+ unsigned *len)
{
struct bio *bio;
- int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
+ int try;
- for (; try < 2; try++) {
+ for (try = 0; try < 2; try++) {
int bio_nr;
if (try && num_bios > 1)
@@ -1508,8 +1508,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned int *len,
- gfp_t gfp_flag)
+ unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
@@ -1526,7 +1525,7 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe
* Using alloc_multiple_bios(), even if num_bios is 1, to consistently
* support allocating using GFP_NOWAIT with GFP_NOIO fallback.
*/
- alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
while ((clone = bio_list_pop(&blist))) {
if (num_bios > 1)
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
@@ -1564,7 +1563,7 @@ static void __send_empty_flush(struct clone_info *ci)
atomic_add(ti->num_flush_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
- NULL, GFP_NOWAIT);
+ NULL);
atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
}
} else {
@@ -1612,7 +1611,7 @@ static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
__max_io_len(ti, ci->sector, max_granularity, max_sectors));
atomic_add(num_bios, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
+ bios = __send_duplicate_bios(ci, ti, num_bios, &len);
/*
* alloc_io() takes one extra reference for submission, so the
* reference won't reach 0 without the following (+1) subtraction
@@ -1746,6 +1745,9 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+ if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
+ return BLK_STS_IOERR;
+
setup_split_accounting(ci, len);
if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
@@ -1849,7 +1851,7 @@ static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
* not go crazy with the clone allocation.
*/
alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
- NULL, GFP_NOIO);
+ NULL);
}
/* Get a clone and change it to a regular reset operation. */
@@ -1881,7 +1883,7 @@ static void __send_zone_reset_all_native(struct clone_info *ci,
unsigned int bios;
atomic_add(1, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO);
+ bios = __send_duplicate_bios(ci, ti, 1, NULL);
atomic_sub(1 - bios, &ci->io->io_count);
ci->sector_count = 0;
@@ -1969,6 +1971,15 @@ static void dm_split_and_process_bio(struct mapped_device *md,
/* Only support nowait for normal IO */
if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
+ /*
+ * Don't support NOWAIT for FLUSH because it may allocate
+ * multiple bios and there's no easy way to undo the
+ * allocations.
+ */
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ bio_wouldblock_error(bio);
+ return;
+ }
io = alloc_io(md, bio, GFP_NOWAIT);
if (unlikely(!io)) {
/* Unable to do anything without dm_io. */
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index b2a00f213c2c..4b80165afd23 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -49,6 +49,7 @@ static int md_setup_ents __initdata;
* instead of just one. -- KTK
* 18May2000: Added support for persistent-superblock arrays:
* md=n,0,factor,fault,device-list uses RAID0 for device n
+ * md=n,-1,factor,fault,device-list uses LINEAR for device n
* md=n,device-list reads a RAID superblock from the devices
* elements in device-list are read by name_to_kdev_t so can be
* a hex number or something like /dev/hda1 /dev/sdb
@@ -87,7 +88,7 @@ static int __init md_setup(char *str)
md_setup_ents++;
switch (get_option(&str, &level)) { /* RAID level */
case 2: /* could be 0 or -1.. */
- if (level == 0) {
+ if (level == 0 || level == LEVEL_LINEAR) {
if (get_option(&str, &factor) != 2 || /* Chunk Size */
get_option(&str, &fault) != 2) {
printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
@@ -95,7 +96,10 @@ static int __init md_setup(char *str)
}
md_setup_args[ent].level = level;
md_setup_args[ent].chunk = 1 << (factor+12);
- pername = "raid0";
+ if (level == LEVEL_LINEAR)
+ pername = "linear";
+ else
+ pername = "raid0";
break;
}
fallthrough;
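
With this parsing change a boot-assembled array can be declared linear again. Level -1 is LEVEL_LINEAR in the md UAPI header, and factor selects the rounding as 1 << (factor + 12) bytes, the same encoding the RAID0 branch uses. A hypothetical example (device names invented):

/* include/uapi/linux/raid/md_u.h: */
#define LEVEL_LINEAR	(-1)

/*
 * Boot line:	md=0,-1,4,0,/dev/sda1,/dev/sdb1
 *	level  = -1	-> "linear" personality
 *	factor =  4	-> chunk/rounding = 1 << (4 + 12) = 64 KiB
 *	fault  =  0	-> parsed but unused for linear/raid0
 */
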
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index c3a42dd66ce5..23c09d22fcdb 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -682,7 +682,7 @@ static void bitmap_update_sb(void *data)
return;
if (!bitmap->storage.sb_page) /* no superblock */
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */
@@ -702,7 +702,7 @@ static void bitmap_update_sb(void *data)
sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (bitmap->storage.file)
write_file_page(bitmap, bitmap->storage.sb_page, 1);
@@ -717,7 +717,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %u\n", le32_to_cpu(sb->version));
@@ -736,7 +736,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
- kunmap_atomic(sb);
+ kunmap_local(sb);
}
/*
@@ -760,7 +760,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_index = 0;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -768,7 +768,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
chunksize = bitmap->mddev->bitmap_info.chunksize;
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
- kunmap_atomic(sb);
+ kunmap_local(sb);
pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
@@ -803,7 +803,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
bitmap->mddev->bitmap_info.nodes = 0;
- kunmap_atomic(sb);
+ kunmap_local(sb);
return 0;
}
@@ -865,7 +865,7 @@ re_read:
return err;
err = -EINVAL;
- sb = kmap_atomic(sb_page);
+ sb = kmap_local_page(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -932,7 +932,7 @@ re_read:
err = 0;
out:
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
@@ -1161,12 +1161,12 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
bit = file_page_offset(&bitmap->storage, chunk);
/* set the bit */
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set_bit(bit, kaddr);
else
set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
@@ -1190,12 +1190,12 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
clear_bit(bit, paddr);
else
clear_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
@@ -1214,12 +1214,12 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return -EINVAL;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set = test_bit(bit, paddr);
else
set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
return set;
}
@@ -1388,9 +1388,9 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
* If the bitmap is out of date, dirty the whole page
* and write it out
*/
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
memset(paddr + offset, 0xff, PAGE_SIZE - offset);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
filemap_write_page(bitmap, i, true);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
@@ -1406,12 +1406,12 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
void *paddr;
bool was_set;
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
was_set = test_bit(bit, paddr);
else
was_set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (was_set) {
/* if the disk bit is set, set the memory bit */
@@ -1546,10 +1546,10 @@ static void bitmap_daemon_work(struct mddev *mddev)
bitmap_super_t *sb;
bitmap->need_sync = 0;
if (bitmap->storage.filemap) {
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events_cleared =
cpu_to_le64(bitmap->events_cleared);
- kunmap_atomic(sb);
+ kunmap_local(sb);
set_page_attr(bitmap, 0,
BITMAP_PAGE_NEEDWRITE);
}
@@ -1671,24 +1671,13 @@ __acquires(bitmap->lock)
}
static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- if (behind) {
- int bw;
- atomic_inc(&bitmap->behind_writes);
- bw = atomic_read(&bitmap->behind_writes);
- if (bw > bitmap->behind_writes_used)
- bitmap->behind_writes_used = bw;
-
- pr_debug("inc write-behind count %d/%lu\n",
- bw, bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
bitmap_counter_t *bmc;
@@ -1737,21 +1726,13 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
}
static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
- if (behind) {
- if (atomic_dec_and_test(&bitmap->behind_writes))
- wake_up(&bitmap->behind_wait);
- pr_debug("dec write-behind count %d/%lu\n",
- atomic_read(&bitmap->behind_writes),
- bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
unsigned long flags;
@@ -1764,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
return;
}
- if (success && !bitmap->mddev->degraded &&
- bitmap->events_cleared < bitmap->mddev->events) {
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->need_sync = 1;
- sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
- }
-
- if (!success && !NEEDED(*bmc))
+ if (!bitmap->mddev->degraded) {
+ if (bitmap->events_cleared < bitmap->mddev->events) {
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->need_sync = 1;
+ sysfs_notify_dirent_safe(
+ bitmap->sysfs_can_clear);
+ }
+ } else if (!NEEDED(*bmc)) {
*bmc |= NEEDED_MASK;
+ }
if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
@@ -2062,6 +2044,37 @@ static void md_bitmap_free(void *data)
kfree(bitmap);
}
+static void bitmap_start_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ int bw;
+
+ if (!bitmap)
+ return;
+
+ atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
+ pr_debug("inc write-behind count %d/%lu\n",
+ bw, bitmap->mddev->bitmap_info.max_write_behind);
+}
+
+static void bitmap_end_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
+
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
+ pr_debug("dec write-behind count %d/%lu\n",
+ atomic_read(&bitmap->behind_writes),
+ bitmap->mddev->bitmap_info.max_write_behind);
+}
+
static void bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -2342,7 +2355,10 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
if (!bitmap)
return -ENOENT;
-
+ if (bitmap->mddev->bitmap_info.external)
+ return -ENOENT;
+ if (!bitmap->storage.sb_page) /* no superblock */
+ return -EINVAL;
sb = kmap_local_page(bitmap->storage.sb_page);
stats->sync_size = le64_to_cpu(sb->sync_size);
kunmap_local(sb);
@@ -2981,6 +2997,9 @@ static struct bitmap_operations bitmap_ops = {
.dirty_bits = bitmap_dirty_bits,
.unplug = bitmap_unplug,
.daemon_work = bitmap_daemon_work,
+
+ .start_behind_write = bitmap_start_behind_write,
+ .end_behind_write = bitmap_end_behind_write,
.wait_behind_writes = bitmap_wait_behind_writes,
.startwrite = bitmap_startwrite,
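
The kmap conversions through md-bitmap.c (and raid5-cache.c below) are mechanical: kmap_local_page() provides the same cheap, CPU-local temporary mapping as kmap_atomic(), but leaves preemption and pagefaults enabled, so the mapped region no longer drags in atomic-context rules. The constraint that remains is that unmaps must nest in reverse order of maps. A before/after sketch with a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

static void dirty_tail(struct page *page, size_t offset)
{
	void *addr = kmap_local_page(page);	/* was: kmap_atomic(page) */

	memset(addr + offset, 0xff, PAGE_SIZE - offset);
	kunmap_local(addr);			/* was: kunmap_atomic(addr) */
}
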
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 662e6fc141a7..31c93019c76b 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -84,12 +84,15 @@ struct bitmap_operations {
unsigned long e);
void (*unplug)(struct mddev *mddev, bool sync);
void (*daemon_work)(struct mddev *mddev);
+
+ void (*start_behind_write)(struct mddev *mddev);
+ void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
int (*startwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind);
+ unsigned long sectors);
void (*endwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind);
+ unsigned long sectors);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
new file mode 100644
index 000000000000..369aed044b40
--- /dev/null
+++ b/drivers/md/md-linear.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linear.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc
+ * ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
+ */
+
+#include <linux/blkdev.h>
+#include <linux/raid/md_u.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <trace/events/block.h>
+#include "md.h"
+
+struct dev_info {
+ struct md_rdev *rdev;
+ sector_t end_sector;
+};
+
+struct linear_conf {
+ struct rcu_head rcu;
+ sector_t array_sectors;
+ /* a copy of mddev->raid_disks */
+ int raid_disks;
+ struct dev_info disks[] __counted_by(raid_disks);
+};
+
+/*
+ * find which device holds a particular offset
+ */
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+{
+ int lo, mid, hi;
+ struct linear_conf *conf;
+
+ lo = 0;
+ hi = mddev->raid_disks - 1;
+ conf = mddev->private;
+
+ /*
+ * Binary Search
+ */
+
+ while (hi > lo) {
+
+ mid = (hi + lo) / 2;
+ if (sector < conf->disks[mid].end_sector)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ return conf->disks + lo;
+}
+
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
+{
+ struct linear_conf *conf;
+ sector_t array_sectors;
+
+ conf = mddev->private;
+ WARN_ONCE(sectors || raid_disks,
+ "%s does not support generic reshape\n", __func__);
+ array_sectors = conf->array_sectors;
+
+ return array_sectors;
+}
+
+static int linear_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+ int err;
+
+ md_init_stacking_limits(&lim);
+ lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.io_min = mddev->chunk_sectors << 9;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err)
+ return err;
+
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
+static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
+{
+ struct linear_conf *conf;
+ struct md_rdev *rdev;
+ int ret = -EINVAL;
+ int cnt;
+ int i;
+
+ conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * conf->raid_disks is a copy of mddev->raid_disks. The reason to
+ * keep a copy of mddev->raid_disks in struct linear_conf is that
+ * mddev->raid_disks may not be consistent with the number of
+ * pointers in conf->disks[] when it is updated in linear_add() and
+ * used to iterate the old conf->disks[] array in linear_congested().
+ * Here conf->raid_disks is always consistent with the number of
+ * pointers in the conf->disks[] array, and mddev->private is updated
+ * with rcu_assign_pointer() in linear_add(), so such a race can be
+ * avoided.
+ */
+ conf->raid_disks = raid_disks;
+
+ cnt = 0;
+ conf->array_sectors = 0;
+
+ rdev_for_each(rdev, mddev) {
+ int j = rdev->raid_disk;
+ struct dev_info *disk = conf->disks + j;
+ sector_t sectors;
+
+ if (j < 0 || j >= raid_disks || disk->rdev) {
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ disk->rdev = rdev;
+ if (mddev->chunk_sectors) {
+ sectors = rdev->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev->sectors = sectors * mddev->chunk_sectors;
+ }
+
+ conf->array_sectors += rdev->sectors;
+ cnt++;
+ }
+ if (cnt != raid_disks) {
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ /*
+ * Here we calculate the device offsets.
+ */
+ conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
+
+ for (i = 1; i < raid_disks; i++)
+ conf->disks[i].end_sector =
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
+
+ if (!mddev_is_dm(mddev)) {
+ ret = linear_set_limits(mddev);
+ if (ret)
+ goto out;
+ }
+
+ return conf;
+
+out:
+ kfree(conf);
+ return ERR_PTR(ret);
+}
+
+static int linear_run(struct mddev *mddev)
+{
+ struct linear_conf *conf;
+ int ret;
+
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
+ conf = linear_conf(mddev, mddev->raid_disks);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
+ mddev->private = conf;
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ kfree(conf);
+ mddev->private = NULL;
+ }
+ return ret;
+}
+
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+{
+ /* Adding a drive to a linear array allows the array to grow.
+ * It is permitted if the new drive has a matching superblock
+ * already on it, with raid_disk equal to raid_disks.
+ * It is achieved by creating a new linear_private_data structure
+ * and swapping it in place of the current one.
+ * The current one is never freed until the array is stopped.
+ * This avoids races.
+ */
+ struct linear_conf *newconf, *oldconf;
+
+ if (rdev->saved_raid_disk != mddev->raid_disks)
+ return -EINVAL;
+
+ rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
+
+ newconf = linear_conf(mddev, mddev->raid_disks + 1);
+ if (IS_ERR(newconf))
+ return PTR_ERR(newconf);
+
+ /* newconf->raid_disks already keeps a copy of the increased
+ * value of mddev->raid_disks; WARN_ONCE() is just used to make
+ * sure of this. It is possible that oldconf is still referenced
+ * in linear_congested(), therefore kfree_rcu() is used to defer
+ * freeing oldconf until no one uses it anymore.
+ */
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
+ mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
+ rcu_assign_pointer(mddev->private, newconf);
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
+ kfree_rcu(oldconf, rcu);
+ return 0;
+}
+
+static void linear_free(struct mddev *mddev, void *priv)
+{
+ struct linear_conf *conf = priv;
+
+ kfree(conf);
+}
+
+static bool linear_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct dev_info *tmp_dev;
+ sector_t start_sector, end_sector, data_offset;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
+ return true;
+
+ tmp_dev = which_dev(mddev, bio_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
+ md_error(mddev, tmp_dev->rdev);
+ bio_io_error(bio);
+ return true;
+ }
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to split it */
+ struct bio *split = bio_split(bio, end_sector - bio_sector,
+ GFP_NOIO, &mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
+
+ bio_chain(split, bio);
+ submit_bio_noacct(bio);
+ bio = split;
+ }
+
+ md_account_bio(mddev, &bio);
+ bio_set_dev(bio, tmp_dev->rdev->bdev);
+ bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !bdev_max_discard_sectors(bio->bi_bdev))) {
+ /* Just ignore it */
+ bio_endio(bio);
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+ bio_sector);
+ mddev_check_write_zeroes(mddev, bio);
+ submit_bio_noacct(bio);
+ }
+ return true;
+
+out_of_bounds:
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ tmp_dev->rdev->bdev,
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
+ return true;
+}
+
+static void linear_status(struct seq_file *seq, struct mddev *mddev)
+{
+ seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
+}
+
+static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
+{
+ if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
+ char *md_name = mdname(mddev);
+
+ pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
+ md_name, rdev->bdev);
+ }
+}
+
+static void linear_quiesce(struct mddev *mddev, int state)
+{
+}
+
+static struct md_personality linear_personality = {
+ .name = "linear",
+ .level = LEVEL_LINEAR,
+ .owner = THIS_MODULE,
+ .make_request = linear_make_request,
+ .run = linear_run,
+ .free = linear_free,
+ .status = linear_status,
+ .hot_add_disk = linear_add,
+ .size = linear_size,
+ .quiesce = linear_quiesce,
+ .error_handler = linear_error,
+};
+
+static int __init linear_init(void)
+{
+ return register_md_personality(&linear_personality);
+}
+
+static void linear_exit(void)
+{
+ unregister_md_personality(&linear_personality);
+}
+
+module_init(linear_init);
+module_exit(linear_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
+MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
+MODULE_ALIAS("md-linear");
+MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aebe12b0ee27..30b3dbbce2d2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -294,7 +294,7 @@ void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
static struct ctl_table_header *raid_table_header;
-static struct ctl_table raid_table[] = {
+static const struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -8124,7 +8124,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0)
+ if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
@@ -8376,6 +8376,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
return 0;
spin_unlock(&all_mddevs_lock);
+
+ /* prevent the bitmap from being freed while we check it */
+ mutex_lock(&mddev->bitmap_info.mutex);
+
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : ", mdname(mddev));
@@ -8451,6 +8455,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
spin_unlock(&mddev->lock);
+ mutex_unlock(&mddev->bitmap_info.mutex);
spin_lock(&all_mddevs_lock);
if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
@@ -8745,12 +8750,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+static void md_bitmap_start(struct mddev *mddev,
+ struct md_io_clone *md_io_clone)
+{
+ if (mddev->pers->bitmap_sector)
+ mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
+ &md_io_clone->sectors);
+
+ mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
+static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
+{
+ mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
static void md_end_clone_io(struct bio *bio)
{
struct md_io_clone *md_io_clone = bio->bi_private;
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
@@ -8775,6 +8800,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
+ if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ md_io_clone->offset = (*bio)->bi_iter.bi_sector;
+ md_io_clone->sectors = bio_sectors(*bio);
+ md_bitmap_start(mddev, md_io_clone);
+ }
+
clone->bi_end_io = md_end_clone_io;
clone->bi_private = md_io_clone;
*bio = clone;
@@ -8793,6 +8824,9 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
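
Taken together, the md.c hunks hoist bitmap write accounting out of the personalities and into the common clone path; raid1/raid10/raid5 below only keep their behind-write and stripe-alignment specifics. Using the names from the hunks above, the ordering is:

/*
 * md_clone_bio()			on submitting a WRITE:
 *	md_io_clone->offset  = bio->bi_iter.bi_sector;
 *	md_io_clone->sectors = bio_sectors(bio);
 *	md_bitmap_start()	-> optional ->bitmap_sector() remap
 *				   (e.g. raid5 widening to full stripes)
 *				-> bitmap_ops->startwrite()
 *
 * md_end_clone_io() / md_free_cloned_bio()	on completion:
 *	md_bitmap_end()		-> bitmap_ops->endwrite()
 */
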
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4ba93af36126..def808064ad8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -746,6 +746,9 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
+ /* convert I/O ranges from array sectors to bitmap sectors */
+ void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors);
};
struct md_sysfs_entry {
@@ -828,6 +831,8 @@ struct md_io_clone {
struct mddev *mddev;
struct bio *orig_bio;
unsigned long start_time;
+ sector_t offset;
+ unsigned long sectors;
struct bio bio_clone;
};
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index c7ba4e6cbbc7..98c745d90f48 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
@@ -77,7 +78,7 @@ static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
/*----------------------------------------------------------------*/
struct shadow_info {
- struct hlist_node hlist;
+ struct rb_node node;
dm_block_t where;
};
@@ -95,7 +96,7 @@ struct dm_transaction_manager {
struct dm_space_map *sm;
spinlock_t lock;
- struct hlist_head buckets[DM_HASH_SIZE];
+ struct rb_root buckets[DM_HASH_SIZE];
struct prefetch_set prefetches;
};
@@ -106,14 +107,22 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
- struct shadow_info *si;
+ struct rb_node **node;
spin_lock(&tm->lock);
- hlist_for_each_entry(si, tm->buckets + bucket, hlist)
- if (si->where == b) {
+ node = &tm->buckets[bucket].rb_node;
+ while (*node) {
+ struct shadow_info *si =
+ rb_entry(*node, struct shadow_info, node);
+ if (b == si->where) {
r = 1;
break;
}
+ if (b < si->where)
+ node = &si->node.rb_left;
+ else
+ node = &si->node.rb_right;
+ }
spin_unlock(&tm->lock);
return r;
@@ -130,30 +139,41 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
si = kmalloc(sizeof(*si), GFP_NOIO);
if (si) {
+ struct rb_node **node, *parent;
si->where = b;
bucket = dm_hash_block(b, DM_HASH_MASK);
+
spin_lock(&tm->lock);
- hlist_add_head(&si->hlist, tm->buckets + bucket);
+ node = &tm->buckets[bucket].rb_node;
+ parent = NULL;
+ while (*node) {
+ struct shadow_info *si =
+ rb_entry(*node, struct shadow_info, node);
+ parent = *node;
+ if (b < si->where)
+ node = &si->node.rb_left;
+ else
+ node = &si->node.rb_right;
+ }
+ rb_link_node(&si->node, parent, node);
+ rb_insert_color(&si->node, &tm->buckets[bucket]);
spin_unlock(&tm->lock);
}
}
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
- struct shadow_info *si;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- int i;
+ unsigned int i;
spin_lock(&tm->lock);
for (i = 0; i < DM_HASH_SIZE; i++) {
- bucket = tm->buckets + i;
- hlist_for_each_entry_safe(si, tmp, bucket, hlist)
+ while (!RB_EMPTY_ROOT(&tm->buckets[i])) {
+ struct shadow_info *si =
+ rb_entry(tm->buckets[i].rb_node, struct shadow_info, node);
+ rb_erase(&si->node, &tm->buckets[i]);
kfree(si);
-
- INIT_HLIST_HEAD(bucket);
+ }
}
-
spin_unlock(&tm->lock);
}
@@ -162,7 +182,7 @@ static void wipe_shadow_table(struct dm_transaction_manager *tm)
static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
struct dm_space_map *sm)
{
- int i;
+ unsigned int i;
struct dm_transaction_manager *tm;
tm = kmalloc(sizeof(*tm), GFP_KERNEL);
@@ -176,7 +196,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
spin_lock_init(&tm->lock);
for (i = 0; i < DM_HASH_SIZE; i++)
- INIT_HLIST_HEAD(tm->buckets + i);
+ tm->buckets[i] = RB_ROOT;
prefetch_init(&tm->prefetches);
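
Moving the shadow table from hash buckets of hlists to hash buckets of rbtrees bounds the lookup at O(log n) per bucket even when many blocks collide. The open-coded descent above could also be written with the generic helpers from <linux/rbtree.h> (rb_find()/rb_add()); a hedged sketch of the lookup side, not what the patch itself does:

static int shadow_cmp(const void *key, const struct rb_node *node)
{
	dm_block_t b = *(const dm_block_t *)key;
	const struct shadow_info *si = rb_entry(node, struct shadow_info, node);

	if (b < si->where)
		return -1;
	return b > si->where;
}

/* is_shadow() would then reduce to:
 *	r = rb_find(&b, tm->buckets + bucket, shadow_cmp) != NULL;
 */
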
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 7049ec7fb8eb..70bcc3cdf2cd 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,12 +384,10 @@ static int raid0_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 519c56f0ee3d..10ea3af40991 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -420,10 +420,8 @@ static void close_write(struct r1bio *r1_bio)
r1_bio->behind_master_bio = NULL;
}
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
md_write_end(mddev);
}
@@ -480,8 +478,6 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
- /* Fail the request */
- set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
@@ -1535,11 +1531,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
write_behind = true;
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)) {
- if (i < conf->raid_disks)
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
- }
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -1558,16 +1551,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- /* We don't set R1BIO_Degraded as that
- * only applies if the disk is
- * missing, so it might be re-added,
- * and we want to know to recover this
- * chunk.
- * In this case the device is here,
- * and the fact that this chunk is not
- * in-sync is recorded in the bad
- * block log
- */
continue;
}
if (is_bad) {
@@ -1645,9 +1628,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
stats.behind_writes < max_write_behind)
alloc_behind_master_bio(r1_bio, bio);
- mddev->bitmap_ops->startwrite(
- mddev, r1_bio->sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
first_clone = 0;
}
@@ -2614,12 +2596,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* errors.
*/
fail = true;
- if (!narrow_write_error(r1_bio, m)) {
+ if (!narrow_write_error(r1_bio, m))
md_error(conf->mddev,
conf->mirrors[m].rdev);
/* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- }
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
@@ -2710,8 +2690,6 @@ static void raid1d(struct md_thread *thread)
list_del(&r1_bio->retry_list);
idx = sector_to_idx(r1_bio->sector);
atomic_dec(&conf->nr_queued[idx]);
- if (mddev->degraded)
- set_bit(R1BIO_Degraded, &r1_bio->state);
if (test_bit(R1BIO_WriteError, &r1_bio->state))
close_write(r1_bio);
raid_end_bio_io(r1_bio);
@@ -3239,12 +3217,10 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5300cbaa58a4..33f318fcc268 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -188,7 +188,6 @@ struct r1bio {
enum r1bio_state {
R1BIO_Uptodate,
R1BIO_IsSync,
- R1BIO_Degraded,
R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7d7a8a2524dc..15b9ae5bf84d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -428,10 +428,6 @@ static void close_write(struct r10bio *r10_bio)
{
struct mddev *mddev = r10_bio->mddev;
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- false);
md_write_end(mddev);
}
@@ -501,7 +497,6 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
/* Fail the request */
- set_bit(R10BIO_Degraded, &r10_bio->state);
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
@@ -1438,10 +1433,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev && !rrdev) {
- set_bit(R10BIO_Degraded, &r10_bio->state);
+ if (!rdev && !rrdev)
continue;
- }
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
@@ -1458,14 +1451,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* to other devices yet
*/
max_sectors = bad_sectors;
- /* We don't set R10BIO_Degraded as that
- * only applies if the disk is missing,
- * so it might be re-added, and we want to
- * know to recover this chunk.
- * In this case the device is here, and the
- * fact that this chunk is not in-sync is
- * recorded in the bad block log.
- */
continue;
}
if (is_bad) {
@@ -1519,8 +1504,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
atomic_set(&r10_bio->remaining, 1);
- mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
- false);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@@ -2966,11 +2949,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL && bio->bi_status) {
fail = true;
- if (!narrow_write_error(r10_bio, m)) {
+ if (!narrow_write_error(r10_bio, m))
md_error(conf->mddev, rdev);
- set_bit(R10BIO_Degraded,
- &r10_bio->state);
- }
rdev_dec_pending(rdev, conf->mddev);
}
bio = r10_bio->devs[m].repl_bio;
@@ -3029,8 +3009,6 @@ static void raid10d(struct md_thread *thread)
r10_bio = list_first_entry(&tmp, struct r10bio,
retry_list);
list_del(&r10_bio->retry_list);
- if (mddev->degraded)
- set_bit(R10BIO_Degraded, &r10_bio->state);
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -4040,12 +4018,10 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
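
Two independent cleanups repeat across the raid0, raid1 and raid10 limit helpers: BLK_FEAT_ATOMIC_WRITES_STACKED is folded into plain BLK_FEAT_ATOMIC_WRITES, and the error path drops queue_limits_cancel_update(). The cancel call pairs with queue_limits_start_update(), which takes q->limits_lock; these helpers only fill a local struct queue_limits and commit it via queue_limits_set(), so there was never an update in flight to cancel, and calling it would unbalance the lock. The two valid pairings, as a sketch:

/*
 * Read-modify-write of the live limits:
 *	lim = queue_limits_start_update(q);	takes q->limits_lock
 *	... modify lim ...
 *	queue_limits_commit_update(q, &lim);	or, on failure:
 *	queue_limits_cancel_update(q);		drops the lock
 *
 * Building fresh limits in a local variable (the md pattern):
 *	struct queue_limits lim;
 *	md_init_stacking_limits(&lim);		no lock taken
 *	...
 *	return queue_limits_set(q, &lim);	locks and commits internally
 */
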
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 2e75e88d0802..3f16ad6904a9 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -161,7 +161,6 @@ enum r10bio_state {
R10BIO_IsSync,
R10BIO_IsRecover,
R10BIO_IsReshape,
- R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index b4f7b79fd187..e530271cb86b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
}
}
}
@@ -1023,10 +1019,10 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
@@ -1979,9 +1975,9 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
u32 checksum;
r5l_recovery_read_page(log, ctx, page, log_offset);
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2381,11 +2377,11 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
payload->size = cpu_to_le32(BLOCK_SECTORS);
payload->location = cpu_to_le64(
raid5_compute_blocknr(sh, i, 0));
- addr = kmap_atomic(dev->page);
+ addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
crc32c_le(log->uuid_checksum, addr,
PAGE_SIZE));
- kunmap_atomic(addr);
+ kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
@@ -2888,10 +2884,10 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
pages++;
}
WARN_ON(pages == 0);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f09e7677ee9f..5c79429acc64 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
- !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
- is_full_stripe_write(sh);
+ is_full_stripe_write(sh);
}
/* we only do back search */
@@ -1345,8 +1344,6 @@ again:
submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
- if (op_is_write(op))
- set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -2884,7 +2881,6 @@ static void raid5_end_write_request(struct bio *bi)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
- set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -3548,29 +3544,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
(*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
sh->dev[dd_idx].sector);
- if (conf->mddev->bitmap && firstwrite) {
- /* Cannot hold spinlock over bitmap_startwrite,
- * but must ensure this isn't added to a batch until
- * we have added to the bitmap and set bm_seq.
- * So set STRIPE_BITMAP_PENDING to prevent
- * batching.
- * If multiple __add_stripe_bio() calls race here they
- * much all set STRIPE_BITMAP_PENDING. So only the first one
- * to complete "bitmap_startwrite" gets to set
- * STRIPE_BIT_DELAY. This is important as once a stripe
- * is added to a batch, STRIPE_BIT_DELAY cannot be changed
- * any more.
- */
- set_bit(STRIPE_BITMAP_PENDING, &sh->state);
- spin_unlock_irq(&sh->stripe_lock);
- conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
- spin_lock_irq(&sh->stripe_lock);
- clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
- if (!sh->batch_head) {
- sh->bm_seq = conf->seq_flush+1;
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
+ if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
+ sh->bm_seq = conf->seq_flush+1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
@@ -3621,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
- int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
struct md_rdev *rdev = conf->disks[i].rdev;
@@ -3646,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].towrite = NULL;
sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
- if (bi)
- bitmap_end = 1;
log_stripe_write_finished(sh);
@@ -3662,11 +3635,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bio_io_error(bi);
bi = nextbi;
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
- bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
@@ -3675,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].page = sh->dev[i].orig_page;
}
- if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@@ -3709,10 +3676,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -4061,10 +4024,7 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
+
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@@ -4341,7 +4301,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
s->locked++;
set_bit(R5_Wantwrite, &dev->flags);
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
case check_state_run:
@@ -4498,7 +4457,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
clear_bit(R5_Wantwrite, &dev->flags);
s->locked--;
}
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
@@ -4891,8 +4849,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_COMPUTE_RUN) |
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
- (1 << STRIPE_BATCH_ERR) |
- (1 << STRIPE_BITMAP_PENDING)),
+ (1 << STRIPE_BATCH_ERR)),
"stripe state: %lx\n", sh->state);
WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
(1 << STRIPE_REPLACED)),
@@ -4900,7 +4857,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED) |
(1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -5784,10 +5740,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
- for (d = 0; d < conf->raid_disks - conf->max_degraded;
- d++)
- mddev->bitmap_ops->startwrite(mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@@ -5928,6 +5880,54 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
return LOC_BEHIND_RESHAPE;
}
+static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors)
+{
+ struct r5conf *conf = mddev->private;
+ sector_t start = *offset;
+ sector_t end = start + *sectors;
+ sector_t prev_start = start;
+ sector_t prev_end = end;
+ int sectors_per_chunk;
+ enum reshape_loc loc;
+ int dd_idx;
+
+ sectors_per_chunk = conf->chunk_sectors *
+ (conf->raid_disks - conf->max_degraded);
+ start = round_down(start, sectors_per_chunk);
+ end = round_up(end, sectors_per_chunk);
+
+ start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
+ end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
+
+ /*
+ * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
+ * progress, hence it's the same as LOC_BEHIND_RESHAPE.
+ */
+ loc = get_reshape_loc(mddev, conf, prev_start);
+ if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
+ *offset = start;
+ *sectors = end - start;
+ return;
+ }
+
+ sectors_per_chunk = conf->prev_chunk_sectors *
+ (conf->previous_raid_disks - conf->max_degraded);
+ prev_start = round_down(prev_start, sectors_per_chunk);
+ prev_end = round_down(prev_end, sectors_per_chunk);
+
+ prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
+ prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
+
+ /*
+ * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
+ * is handled in make_stripe_request(); we can't know that here, hence
+ * set bits for both the old and the new location.
+ */
+ *offset = min(start, prev_start);
+ *sectors = max(end, prev_end) - *offset;
+}
+
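[Editor's note] raid5_bitmap_sector() above first widens [start, end) to full data-stripe boundaries and only then maps the logical sectors to physical ones. The rounding itself is plain modular arithmetic; a standalone sketch with illustrative numbers (a 4-disk RAID5 with 512-sector chunks, i.e. 1536 data sectors per full stripe):

#include <stdio.h>

/* Division-based rounding, valid for any alignment. */
static unsigned long long align_down(unsigned long long x, unsigned long long a)
{
	return x - (x % a);
}

static unsigned long long align_up(unsigned long long x, unsigned long long a)
{
	return align_down(x + a - 1, a);
}

int main(void)
{
	unsigned long long spc = 512 * (4 - 1);	/* data sectors per full stripe */

	/* an IO covering [2000, 2100) is widened to [1536, 3072) */
	printf("[%llu, %llu)\n", align_down(2000, spc), align_up(2100, spc));
	return 0;
}
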
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@@ -8976,6 +8976,7 @@ static struct md_personality raid6_personality =
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid5_personality =
{
@@ -9001,6 +9002,7 @@ static struct md_personality raid5_personality =
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid4_personality =
@@ -9027,6 +9029,7 @@ static struct md_personality raid4_personality =
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static int __init raid5_init(void)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d174e586698f..eafc6e9ed6ee 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -358,7 +358,6 @@ enum {
STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
- STRIPE_DEGRADED,
STRIPE_BIT_DELAY,
STRIPE_EXPANDING,
STRIPE_EXPAND_SOURCE,
@@ -372,9 +371,6 @@ enum {
STRIPE_ON_RELEASE_LIST,
STRIPE_BATCH_READY,
STRIPE_BATCH_ERR,
- STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
- * to batch yet.
- */
STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
* this bit is used in two scenarios:
*
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index c7d36010c890..ba6828ef540e 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -7,12 +7,13 @@
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
-#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index ca0db8d457b4..e10bd588a586 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -5,13 +5,14 @@
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
-#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/media/cec/core/cec-pin-error-inj.c b/drivers/media/cec/core/cec-pin-error-inj.c
index fc0968b9d40e..6e61a04b8168 100644
--- a/drivers/media/cec/core/cec-pin-error-inj.c
+++ b/drivers/media/cec/core/cec-pin-error-inj.c
@@ -4,8 +4,9 @@
*/
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/sched/types.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 330d5d5d86ab..bebaa40e0eb5 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -4,8 +4,9 @@
*/
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/sched/types.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
@@ -1345,9 +1346,8 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
if (pin == NULL)
return ERR_PTR(-ENOMEM);
pin->ops = pin_ops;
- hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_set(&pin->work_pin_num_events, 0);
- pin->timer.function = cec_pin_timer;
+ hrtimer_setup(&pin->timer, cec_pin_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
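[Editor's note] The hrtimer change above is part of the conversion from the two-step hrtimer_init() plus open-coded .function assignment to hrtimer_setup(), which takes the callback up front so a timer can never be armed half-initialized. A minimal sketch of the new-style usage (names are illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_cb(struct hrtimer *t)
{
	/* periodic work would go here */
	return HRTIMER_NORESTART;
}

static void demo_start(struct hrtimer *timer)
{
	/* callback, clock and mode are bound in one call */
	hrtimer_setup(timer, demo_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}
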
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index cf64e8871fe5..50cdc557c943 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -3,11 +3,12 @@
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <media/cec-notifier.h>
#include <media/cec-pin.h>
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index f944c59cf495..a468ea7e77a1 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -125,8 +125,6 @@ void flexcop_dma_free(struct flexcop_dma *dma);
int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
flexcop_dma_index_t no, int onoff);
-int flexcop_dma_control_size_irq(struct flexcop_device *fc,
- flexcop_dma_index_t no, int onoff);
int flexcop_dma_config(struct flexcop_device *fc, struct flexcop_dma *dma,
flexcop_dma_index_t dma_idx);
int flexcop_dma_xfer_control(struct flexcop_device *fc,
@@ -170,8 +168,6 @@ int flexcop_sram_init(struct flexcop_device *fc);
void flexcop_determine_revision(struct flexcop_device *fc);
void flexcop_device_name(struct flexcop_device *fc,
const char *prefix, const char *suffix);
-void flexcop_dump_reg(struct flexcop_device *fc,
- flexcop_ibi_register reg, int num);
/* from flexcop-hw-filter.c */
int flexcop_pid_feed_control(struct flexcop_device *fc,
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index 83d01d3a81cc..251c4f731ed1 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -70,16 +70,3 @@ void flexcop_device_name(struct flexcop_device *fc,
flexcop_bus_names[fc->bus_type],
flexcop_revision_names[fc->rev], suffix);
}
-
-void flexcop_dump_reg(struct flexcop_device *fc,
- flexcop_ibi_register reg, int num)
-{
- flexcop_ibi_value v;
- int i;
- for (i = 0; i < num; i++) {
- v = fc->read_ibi_reg(fc, reg+4*i);
- deb_rdump("0x%03x: %08x, ", reg+4*i, v.raw);
- }
- deb_rdump("\n");
-}
-EXPORT_SYMBOL(flexcop_dump_reg);
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 9ce5f010de3f..6063782e937a 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -731,7 +731,7 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
if (ret < 0) {
dvb_dmxdev_feed_restart(filter);
- filter->feed.sec->start_filtering(*secfeed);
+ *secfeed = NULL;
dprintk("could not get filter\n");
return ret;
}
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index d925ca24183b..415f1f91cc30 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
{
- u64 tmp;
-
- tmp = (u64) ifhz * 16777216;
- do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
-
- return (u32) tmp;
+ return div_u64(ifhz * 16777216ull,
+ (xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
}
static u32 cxd2841er_calc_iffreq(u32 ifhz)
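[Editor's note] In the rewrite above, the ull suffix promotes the multiply to 64 bits before it can wrap (ifhz * 2^24 exceeds 32 bits for any ifhz above 255), and div_u64() performs the division portably on 32-bit kernels. The same arithmetic as a quick standalone check (the 5 MHz IF value is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ifhz = 5000000;		/* 5 MHz IF, illustrative */
	uint64_t tmp = ifhz * 16777216ull;	/* 2^24 scaling, done in 64 bits */

	printf("%u\n", (uint32_t)(tmp / 48000000));	/* 24 MHz xtal: 1747626 */
	return 0;
}
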
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 05254d8717db..0357624968f1 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1363,6 +1363,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ dev->vb_queue.lock = &dev->vb_queue_lock;
ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
@@ -1421,7 +1422,6 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
/* Init video_device structure */
dev->vdev = rtl2832_sdr_template;
dev->vdev.queue = &dev->vb_queue;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index e1ae0f9fad43..2cdab2f3d9dc 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3335,9 +3335,11 @@ static int ccs_probe(struct i2c_client *client)
rval = request_firmware(&fw, filename, &client->dev);
if (!rval) {
- ccs_data_parse(&sensor->sdata, fw->data, fw->size, &client->dev,
- true);
+ rval = ccs_data_parse(&sensor->sdata, fw->data, fw->size,
+ &client->dev, true);
release_firmware(fw);
+ if (rval)
+ goto out_power_off;
}
if (!(ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA) ||
@@ -3351,9 +3353,11 @@ static int ccs_probe(struct i2c_client *client)
rval = request_firmware(&fw, filename, &client->dev);
if (!rval) {
- ccs_data_parse(&sensor->mdata, fw->data, fw->size,
- &client->dev, true);
+ rval = ccs_data_parse(&sensor->mdata, fw->data,
+ fw->size, &client->dev, true);
release_firmware(fw);
+ if (rval)
+ goto out_release_sdata;
}
}
@@ -3566,15 +3570,15 @@ out_disable_runtime_pm:
out_cleanup:
ccs_cleanup(sensor);
+out_free_ccs_limits:
+ kfree(sensor->ccs_limits);
+
out_release_mdata:
kvfree(sensor->mdata.backing);
out_release_sdata:
kvfree(sensor->sdata.backing);
-out_free_ccs_limits:
- kfree(sensor->ccs_limits);
-
out_power_off:
ccs_power_off(&client->dev);
mutex_destroy(&sensor->mutex);
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 08400edf77ce..f469afcea680 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -10,6 +10,7 @@
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include "ccs-data-defs.h"
@@ -97,7 +98,7 @@ ccs_data_parse_length_specifier(const struct __ccs_data_length_specifier *__len,
plen = ((size_t)
(__len3->length[0] &
((1 << CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT) - 1))
- << 16) + (__len3->length[0] << 8) + __len3->length[1];
+ << 16) + (__len3->length[1] << 8) + __len3->length[2];
break;
}
default:
@@ -948,15 +949,15 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, verbose);
if (rval)
- return rval;
+ goto out_cleanup;
rval = bin_backing_alloc(&bin);
if (rval)
- return rval;
+ goto out_cleanup;
rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, false);
if (rval)
- goto out_free;
+ goto out_cleanup;
if (verbose && ccsdata->version)
print_ccs_data_version(dev, ccsdata->version);
@@ -965,15 +966,17 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
rval = -EPROTO;
dev_dbg(dev, "parsing mismatch; base %p; now %p; end %p\n",
bin.base, bin.now, bin.end);
- goto out_free;
+ goto out_cleanup;
}
ccsdata->backing = bin.base;
return 0;
-out_free:
+out_cleanup:
kvfree(bin.base);
+ memset(ccsdata, 0, sizeof(*ccsdata));
+ dev_warn(dev, "failed to parse CCS static data: %d\n", rval);
return rval;
}
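[Editor's note] The first ccs-data.c hunk fixes a copy-paste bug in the 3-byte length specifier: the middle and low bytes must come from length[1] and length[2], not from length[0] and length[1] again. The decode as a standalone sketch; the shift value 6 is an assumption about CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define SIZE_SHIFT	6	/* assumed CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT */

int main(void)
{
	uint8_t len[3] = { 0x01, 0x02, 0x03 };	/* illustrative bytes */
	size_t plen = ((size_t)(len[0] & ((1 << SIZE_SHIFT) - 1)) << 16)
		    + (len[1] << 8) + len[2];

	printf("0x%zx\n", plen);	/* prints 0x10203 */
	return 0;
}
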
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index 79bddfee2e2e..fd2d2d5272bf 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -8,6 +8,7 @@
* Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
return ret;
}
+static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
+ u8 val)
+{
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret < 0)
+ dev_err(&priv->client->dev,
+ "Cannot update register 0x%02x %d!\n", reg, ret);
+
+ return ret;
+}
+
/*
* GPIO chip
*/
@@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c master init failed\n");
- ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
- v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
- v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
- ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+ ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
+ UB913_REG_GENERAL_CFG_PCLK_RISING,
+ FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
+ priv->pclk_polarity_rising));
+
+ if (ret)
+ return ret;
return 0;
}
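[Editor's note] ub913_update_bits() turns the open-coded read-modify-write it replaces into a single locked regmap_update_bits() call, with FIELD_PREP() building the value from the mask. A hedged sketch of the bit manipulation involved (the demo macro is illustrative, not the driver's real register layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CFG_PCLK_RISING	BIT(0)	/* illustrative one-bit field */

/* What regmap_update_bits(map, reg, mask, FIELD_PREP(mask, val))
 * computes before writing the register back. */
static u8 demo_apply(u8 old, bool rising)
{
	return (old & ~DEMO_CFG_PCLK_RISING) |
	       FIELD_PREP(DEMO_CFG_PCLK_RISING, rising);
}
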
@@ -793,7 +810,6 @@ static void ub913_subdev_uninit(struct ub913_data *priv)
v4l2_async_unregister_subdev(&priv->sd);
ub913_v4l2_nf_unregister(priv);
v4l2_subdev_cleanup(&priv->sd);
- fwnode_handle_put(priv->sd.fwnode);
media_entity_cleanup(&priv->sd.entity);
}
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 725589b3e1c5..46569381b332 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -65,6 +65,9 @@
#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+#define UB953_REG_BC_CTRL 0x49
+#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
+
#define UB953_REG_REV_MASK_ID 0x50
#define UB953_REG_GENERAL_STATUS 0x52
@@ -397,8 +400,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
int ret;
/* Set all GPIOs to local input mode */
- ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
- ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ if (ret)
+ return ret;
gc->label = dev_name(dev);
gc->parent = dev;
@@ -618,6 +626,12 @@ static int ub953_log_status(struct v4l2_subdev *sd)
ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2);
dev_info(dev, "CRC error count %u\n", v1 | (v2 << 8));
+ /* Clear CRC error counter */
+ if (v1 || v2)
+ regmap_update_bits(priv->regmap, UB953_REG_BC_CTRL,
+ UB953_REG_BC_CTRL_CRC_ERR_CLR,
+ UB953_REG_BC_CTRL_CRC_ERR_CLR);
+
ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v);
dev_info(dev, "CSI error count %u\n", v);
@@ -958,10 +972,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
clkout_data->rate = clkout_rate;
}
-static void ub953_write_clkout_regs(struct ub953_data *priv,
- const struct ub953_clkout_data *clkout_data)
+static int ub953_write_clkout_regs(struct ub953_data *priv,
+ const struct ub953_clkout_data *clkout_data)
{
u8 clkout_ctrl0, clkout_ctrl1;
+ int ret;
if (priv->hw_data->is_ub971)
clkout_ctrl0 = clkout_data->m;
@@ -971,8 +986,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
clkout_ctrl1 = clkout_data->n;
- ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
- ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ if (ret)
+ return ret;
+
+ return 0;
}
static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
@@ -1052,9 +1074,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
clkout_data.rate, rate);
- ub953_write_clkout_regs(priv, &clkout_data);
-
- return 0;
+ return ub953_write_clkout_regs(priv, &clkout_data);
}
static const struct clk_ops ub953_clkout_ops = {
@@ -1079,7 +1099,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
/* Initialize clkout to 25MHz by default */
ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
- ub953_write_clkout_regs(priv, &clkout_data);
+ ret = ub953_write_clkout_regs(priv, &clkout_data);
+ if (ret)
+ return ret;
priv->clkout_clk_hw.init = &init;
@@ -1226,10 +1248,15 @@ static int ub953_hw_init(struct ub953_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c init failed\n");
- ub953_write(priv, UB953_REG_GENERAL_CFG,
- (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
- ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
- UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
+ v = 0;
+ v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
+ v |= (priv->num_data_lanes - 1) <<
+ UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
+ v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
+
+ ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
+ if (ret)
+ return ret;
return 0;
}
@@ -1288,7 +1315,6 @@ static void ub953_subdev_uninit(struct ub953_data *priv)
v4l2_async_unregister_subdev(&priv->sd);
ub953_v4l2_notifier_unregister(priv);
v4l2_subdev_cleanup(&priv->sd);
- fwnode_handle_put(priv->sd.fwnode);
media_entity_cleanup(&priv->sd.entity);
}
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 1b1ff7f7505b..5dde8452739b 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -43,6 +43,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <linux/workqueue.h>
#include <media/i2c/ds90ub9xx.h>
@@ -51,7 +52,16 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define MHZ(v) ((u32)((v) * 1000000U))
+#define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
+
+/*
+ * If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to
+ * UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers
+ * directly.
+ *
+ * Only for debug purposes.
+ */
+/* #define UB960_DEBUG_I2C_RX_ID 0x40 */
#define UB960_POLL_TIME_MS 500
@@ -349,12 +359,13 @@
#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
#define UB960_SR_FPD3_RX_ID_LEN 6
-#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
+#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n))
+
+#define UB9702_SR_REFCLK_FREQ 0x3d
/* Indirect register blocks */
#define UB960_IND_TARGET_PAT_GEN 0x00
#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
-#define UB960_IND_TARGET_CSI_CSIPLL_REG_1 0x92 /* UB9702 */
#define UB960_IND_TARGET_CSI_ANA 0x07
/* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
@@ -568,11 +579,23 @@ struct ub960_format_info {
};
static const struct ub960_format_info ub960_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },
+
{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+
{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
@@ -1552,7 +1575,12 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (missing == 0)
break;
- msleep(50);
+ /*
+ * The sleep time of 10 ms was found by testing to give a lock
+ * within a few iterations. It can be decreased if, on some setups,
+ * the lock is achieved much faster.
+ */
+ fsleep(10 * USEC_PER_MSEC);
}
if (lock_mask)
@@ -1574,16 +1602,24 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
- ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
- if (ret)
- return ret;
+ if (priv->hw_data->is_ub9702) {
+ dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
+ nport, ((u64)v * HZ_PER_MHZ) >> 8);
+ } else {
+ ret = ub960_rxport_get_strobe_pos(priv, nport,
+ &strobe_pos);
+ if (ret)
+ return ret;
- ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
- if (ret)
- return ret;
+ ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+ if (ret)
+ return ret;
- dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
- nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
+ dev_dbg(dev,
+ "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
+ nport, strobe_pos, eq_level,
+ ((u64)v * HZ_PER_MHZ) >> 8);
+ }
}
return 0;
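[Editor's note] RX_FREQ_HIGH/LOW form an 8.8 fixed-point value in MHz, and the explicit (u64) cast is the substance of the change: HZ_PER_MHZ is a UL constant, so on 32-bit kernels v * HZ_PER_MHZ alone would wrap for any v >= 4295 (about 16.8 MHz). A standalone check with an illustrative reading:

#include <stdio.h>
#include <stdint.h>

#define HZ_PER_MHZ	1000000UL

int main(void)
{
	uint16_t v = 0x5080;	/* 8.8 fixed point: 0x50.80 MHz = 80.5 MHz */

	/* cast first, then multiply, then drop the fractional byte */
	printf("%llu Hz\n",
	       (unsigned long long)(((uint64_t)v * HZ_PER_MHZ) >> 8));
	return 0;	/* prints 80500000 Hz */
}
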
@@ -2412,7 +2448,6 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
} rx_data[UB960_MAX_RX_NPORTS] = {};
u8 vc_map[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
- unsigned int nport;
int ret;
ret = ub960_validate_stream_vcs(priv);
@@ -2482,7 +2517,8 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
*/
fwd_ctl = GENMASK(7, 4);
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for (unsigned int nport = 0; nport < priv->hw_data->num_rxports;
+ nport++) {
struct ub960_rxport *rxport = priv->rxports[nport];
u8 vc = vc_map[nport];
@@ -2522,7 +2558,7 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
for (i = 0; i < 8; i++)
ub960_rxport_write(priv, nport,
UB960_RR_VC_ID_MAP(i),
- nport);
+ (nport << 4) | nport);
}
break;
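[Editor's note] The VC map write above packs the port number into both nibbles: each UB960_RR_VC_ID_MAP(i) byte appears to carry two 4-bit VC fields, so programming only the low nibble (as before) left the other field at zero. The resulting values, as a quick check:

#include <stdio.h>

int main(void)
{
	for (unsigned int nport = 0; nport < 4; nport++)
		printf("rx%u: VC_ID_MAP <- 0x%02x\n",
		       nport, (nport << 4) | nport);	/* 0x00 0x11 0x22 0x33 */
	return 0;
}
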
@@ -2939,20 +2975,78 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
.set_fmt = ub960_set_fmt,
};
+static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 eq_level;
+ s8 strobe_pos;
+ int ret;
+ u8 v;
+
+ /* Strobe */
+
+ ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\t%s strobe\n",
+ (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
+ "Manual");
+
+ if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
+ ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tStrobe range [%d, %d]\n",
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
+ }
+
+ ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
+
+ /* EQ */
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\t%s EQ\n",
+ (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
+ "Adaptive");
+
+ if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tEQ range [%u, %u]\n",
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
+ }
+
+ if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
+ dev_info(dev, "\tEQ level %u\n", eq_level);
+}
+
static int ub960_log_status(struct v4l2_subdev *sd)
{
struct ub960_data *priv = sd_to_ub960(sd);
struct device *dev = &priv->client->dev;
struct v4l2_subdev_state *state;
unsigned int nport;
- unsigned int i;
u16 v16 = 0;
u8 v = 0;
u8 id[UB960_SR_FPD3_RX_ID_LEN];
state = v4l2_subdev_lock_and_get_active_state(sd);
- for (i = 0; i < sizeof(id); i++)
+ for (unsigned int i = 0; i < sizeof(id); i++)
ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
@@ -2986,9 +3080,6 @@ static int ub960_log_status(struct v4l2_subdev *sd)
for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
struct ub960_rxport *rxport = priv->rxports[nport];
- u8 eq_level;
- s8 strobe_pos;
- unsigned int i;
dev_info(dev, "RX %u\n", nport);
@@ -3009,7 +3100,7 @@ static int ub960_log_status(struct v4l2_subdev *sd)
dev_info(dev, "\trx_port_sts2 %#02x\n", v);
ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
- dev_info(dev, "\tlink freq %llu Hz\n", (v16 * 1000000ULL) >> 8);
+ dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
dev_info(dev, "\tparity errors %u\n", v16);
@@ -3023,47 +3114,11 @@ static int ub960_log_status(struct v4l2_subdev *sd)
ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
dev_info(dev, "\tcsi_err_counter %u\n", v);
- /* Strobe */
-
- ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
-
- dev_info(dev, "\t%s strobe\n",
- (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
- "Manual");
-
- if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
- ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
-
- dev_info(dev, "\tStrobe range [%d, %d]\n",
- ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
- ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
- }
-
- ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
-
- dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
-
- /* EQ */
-
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
-
- dev_info(dev, "\t%s EQ\n",
- (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
- "Adaptive");
-
- if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
-
- dev_info(dev, "\tEQ range [%u, %u]\n",
- (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
- (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
- }
-
- if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
- dev_info(dev, "\tEQ level %u\n", eq_level);
+ if (!priv->hw_data->is_ub9702)
+ ub960_log_status_ub960_sp_eq(priv, nport);
/* GPIOs */
- for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
+ for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
u8 ctl_reg;
u8 ctl_shift;
@@ -3834,13 +3889,16 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
if (ret)
goto err_pd_gpio;
- ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
+ else
+ ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
if (ret)
goto err_pd_gpio;
dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
!!(dev_sts & BIT(4)), refclk_freq,
- clk_get_rate(priv->refclk) / 1000000);
+ clk_get_rate(priv->refclk) / HZ_PER_MHZ);
/* Disable all RX ports by default */
ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
@@ -3974,6 +4032,12 @@ static int ub960_probe(struct i2c_client *client)
schedule_delayed_work(&priv->poll_work,
msecs_to_jiffies(UB960_POLL_TIME_MS));
+#ifdef UB960_DEBUG_I2C_RX_ID
+ for (unsigned int i = 0; i < priv->hw_data->num_rxports; i++)
+ ub960_write(priv, UB960_SR_I2C_RX_ID(i),
+ (UB960_DEBUG_I2C_RX_ID + i) << 1);
+#endif
+
return 0;
err_free_sers:
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 2184c90f7864..2b5a6ce7b1ae 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -814,7 +814,7 @@ out_unlock:
}
static ssize_t otp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index f5ee6bd3b52d..fbf7eba3d71d 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -170,12 +170,15 @@ enum imx290_model {
IMX290_MODEL_IMX290LQR,
IMX290_MODEL_IMX290LLR,
IMX290_MODEL_IMX327LQR,
+ IMX290_MODEL_IMX462LQR,
+ IMX290_MODEL_IMX462LLR,
};
struct imx290_model_info {
enum imx290_colour_variant colour_variant;
const struct cci_reg_sequence *init_regs;
size_t init_regs_num;
+ unsigned int max_analog_gain;
const char *name;
};
@@ -267,7 +270,6 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
{ IMX290_WINWV, 1097 },
{ IMX290_XSOUTSEL, IMX290_XSOUTSEL_XVSOUTSEL_VSYNC |
IMX290_XSOUTSEL_XHSOUTSEL_HSYNC },
- { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x3012), 0x64 },
{ CCI_REG8(0x3013), 0x00 },
};
@@ -275,6 +277,51 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
static const struct cci_reg_sequence imx290_global_init_settings_290[] = {
{ CCI_REG8(0x300f), 0x00 },
{ CCI_REG8(0x3010), 0x21 },
+ { CCI_REG8(0x3011), 0x00 },
+ { CCI_REG8(0x3016), 0x09 },
+ { CCI_REG8(0x3070), 0x02 },
+ { CCI_REG8(0x3071), 0x11 },
+ { CCI_REG8(0x309b), 0x10 },
+ { CCI_REG8(0x309c), 0x22 },
+ { CCI_REG8(0x30a2), 0x02 },
+ { CCI_REG8(0x30a6), 0x20 },
+ { CCI_REG8(0x30a8), 0x20 },
+ { CCI_REG8(0x30aa), 0x20 },
+ { CCI_REG8(0x30ac), 0x20 },
+ { CCI_REG8(0x30b0), 0x43 },
+ { CCI_REG8(0x3119), 0x9e },
+ { CCI_REG8(0x311c), 0x1e },
+ { CCI_REG8(0x311e), 0x08 },
+ { CCI_REG8(0x3128), 0x05 },
+ { CCI_REG8(0x313d), 0x83 },
+ { CCI_REG8(0x3150), 0x03 },
+ { CCI_REG8(0x317e), 0x00 },
+ { CCI_REG8(0x32b8), 0x50 },
+ { CCI_REG8(0x32b9), 0x10 },
+ { CCI_REG8(0x32ba), 0x00 },
+ { CCI_REG8(0x32bb), 0x04 },
+ { CCI_REG8(0x32c8), 0x50 },
+ { CCI_REG8(0x32c9), 0x10 },
+ { CCI_REG8(0x32ca), 0x00 },
+ { CCI_REG8(0x32cb), 0x04 },
+ { CCI_REG8(0x332c), 0xd3 },
+ { CCI_REG8(0x332d), 0x10 },
+ { CCI_REG8(0x332e), 0x0d },
+ { CCI_REG8(0x3358), 0x06 },
+ { CCI_REG8(0x3359), 0xe1 },
+ { CCI_REG8(0x335a), 0x11 },
+ { CCI_REG8(0x3360), 0x1e },
+ { CCI_REG8(0x3361), 0x61 },
+ { CCI_REG8(0x3362), 0x10 },
+ { CCI_REG8(0x33b0), 0x50 },
+ { CCI_REG8(0x33b2), 0x1a },
+ { CCI_REG8(0x33b3), 0x04 },
+};
+
+static const struct cci_reg_sequence imx290_global_init_settings_462[] = {
+ { CCI_REG8(0x300f), 0x00 },
+ { CCI_REG8(0x3010), 0x21 },
+ { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x3016), 0x09 },
{ CCI_REG8(0x3070), 0x02 },
{ CCI_REG8(0x3071), 0x11 },
@@ -328,6 +375,7 @@ static const struct cci_reg_sequence xclk_regs[][IMX290_NUM_CLK_REGS] = {
};
static const struct cci_reg_sequence imx290_global_init_settings_327[] = {
+ { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x309e), 0x4A },
{ CCI_REG8(0x309f), 0x4A },
{ CCI_REG8(0x313b), 0x61 },
@@ -876,14 +924,10 @@ static int imx290_ctrl_init(struct imx290 *imx290)
* up to 72.0dB (240) add further digital gain. Limit the range to
* analog gain only, support for digital gain can be added separately
* if needed.
- *
- * The IMX327 and IMX462 are largely compatible with the IMX290, but
- * have an analog gain range of 0.0dB to 29.4dB and 42dB of digital
- * gain. When support for those sensors gets added to the driver, the
- * gain control should be adjusted accordingly.
*/
v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
- V4L2_CID_ANALOGUE_GAIN, 0, 100, 1, 0);
+ V4L2_CID_ANALOGUE_GAIN, 0,
+ imx290->model->max_analog_gain, 1, 0);
/*
* Correct range will be determined through imx290_ctrl_update setting
@@ -1441,20 +1485,37 @@ static const struct imx290_model_info imx290_models[] = {
.colour_variant = IMX290_VARIANT_COLOUR,
.init_regs = imx290_global_init_settings_290,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_290),
+ .max_analog_gain = 100,
.name = "imx290",
},
[IMX290_MODEL_IMX290LLR] = {
.colour_variant = IMX290_VARIANT_MONO,
.init_regs = imx290_global_init_settings_290,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_290),
+ .max_analog_gain = 100,
.name = "imx290",
},
[IMX290_MODEL_IMX327LQR] = {
.colour_variant = IMX290_VARIANT_COLOUR,
.init_regs = imx290_global_init_settings_327,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_327),
+ .max_analog_gain = 98,
.name = "imx327",
},
+ [IMX290_MODEL_IMX462LQR] = {
+ .colour_variant = IMX290_VARIANT_COLOUR,
+ .init_regs = imx290_global_init_settings_462,
+ .init_regs_num = ARRAY_SIZE(imx290_global_init_settings_462),
+ .max_analog_gain = 98,
+ .name = "imx462",
+ },
+ [IMX290_MODEL_IMX462LLR] = {
+ .colour_variant = IMX290_VARIANT_MONO,
+ .init_regs = imx290_global_init_settings_462,
+ .init_regs_num = ARRAY_SIZE(imx290_global_init_settings_462),
+ .max_analog_gain = 98,
+ .name = "imx462",
+ },
};
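[Editor's note] The new max_analog_gain limits match the 0.3 dB-per-step encoding implied by the gain comment above (240 steps = 72.0 dB): 100 steps caps the IMX290 at 30.0 dB of analog gain, while 98 steps caps the IMX327/IMX462 at the 29.4 dB the removed comment documented. The arithmetic:

#include <stdio.h>

int main(void)
{
	/* gain register counts 0.3 dB steps (see driver comment above) */
	printf("imx290 max:     %.1f dB\n", 100 * 0.3);	/* 30.0 */
	printf("imx327/462 max: %.1f dB\n",  98 * 0.3);	/* 29.4 */
	return 0;
}
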
static int imx290_parse_dt(struct imx290 *imx290)
@@ -1653,6 +1714,12 @@ static const struct of_device_id imx290_of_match[] = {
}, {
.compatible = "sony,imx327lqr",
.data = &imx290_models[IMX290_MODEL_IMX327LQR],
+ }, {
+ .compatible = "sony,imx462lqr",
+ .data = &imx290_models[IMX290_MODEL_IMX462LQR],
+ }, {
+ .compatible = "sony,imx462llr",
+ .data = &imx290_models[IMX290_MODEL_IMX462LLR],
},
{ /* sentinel */ },
};
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 83149fa729c4..f3bec16b527c 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -954,6 +954,8 @@ static int imx296_identify_model(struct imx296 *sensor)
return ret;
}
+ usleep_range(2000, 5000);
+
ret = imx296_read(sensor, IMX296_SENSOR_INFO);
if (ret < 0) {
dev_err(sensor->dev, "failed to read sensor information (%d)\n",
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index 0bfe3046fcc8..c74097a59c42 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -547,7 +547,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
lpfr = imx412->vblank + imx412->cur_mode->height;
- dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
+ dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u\n",
exposure, gain, lpfr);
ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
@@ -594,7 +594,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_VBLANK:
imx412->vblank = imx412->vblank_ctrl->val;
- dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u",
+ dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u\n",
imx412->vblank,
imx412->vblank + imx412->cur_mode->height);
@@ -613,7 +613,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
exposure = ctrl->val;
analog_gain = imx412->again_ctrl->val;
- dev_dbg(imx412->dev, "Received exp %u, analog gain %u",
+ dev_dbg(imx412->dev, "Received exp %u, analog gain %u\n",
exposure, analog_gain);
ret = imx412_update_exp_gain(imx412, exposure, analog_gain);
@@ -622,7 +622,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
- dev_err(imx412->dev, "Invalid control %d", ctrl->id);
+ dev_err(imx412->dev, "Invalid control %d\n", ctrl->id);
ret = -EINVAL;
}
@@ -803,14 +803,14 @@ static int imx412_start_streaming(struct imx412 *imx412)
ret = imx412_write_regs(imx412, reg_list->regs,
reg_list->num_of_regs);
if (ret) {
- dev_err(imx412->dev, "fail to write initial registers");
+ dev_err(imx412->dev, "fail to write initial registers\n");
return ret;
}
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx412->sd.ctrl_handler);
if (ret) {
- dev_err(imx412->dev, "fail to setup handler");
+ dev_err(imx412->dev, "fail to setup handler\n");
return ret;
}
@@ -821,7 +821,7 @@ static int imx412_start_streaming(struct imx412 *imx412)
ret = imx412_write_reg(imx412, IMX412_REG_MODE_SELECT,
1, IMX412_MODE_STREAMING);
if (ret) {
- dev_err(imx412->dev, "fail to start streaming");
+ dev_err(imx412->dev, "fail to start streaming\n");
return ret;
}
@@ -895,7 +895,7 @@ static int imx412_detect(struct imx412 *imx412)
return ret;
if (val != IMX412_ID) {
- dev_err(imx412->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx412->dev, "chip id mismatch: %x!=%x\n",
IMX412_ID, val);
return -ENXIO;
}
@@ -927,7 +927,7 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(imx412->reset_gpio)) {
- dev_err(imx412->dev, "failed to get reset gpio %ld",
+ dev_err(imx412->dev, "failed to get reset gpio %ld\n",
PTR_ERR(imx412->reset_gpio));
return PTR_ERR(imx412->reset_gpio);
}
@@ -935,13 +935,13 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
/* Get sensor input clock */
imx412->inclk = devm_clk_get(imx412->dev, NULL);
if (IS_ERR(imx412->inclk)) {
- dev_err(imx412->dev, "could not get inclk");
+ dev_err(imx412->dev, "could not get inclk\n");
return PTR_ERR(imx412->inclk);
}
rate = clk_get_rate(imx412->inclk);
if (rate != IMX412_INCLK_RATE) {
- dev_err(imx412->dev, "inclk frequency mismatch");
+ dev_err(imx412->dev, "inclk frequency mismatch\n");
return -EINVAL;
}
@@ -966,14 +966,14 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX412_NUM_DATA_LANES) {
dev_err(imx412->dev,
- "number of CSI2 data lanes %d is not supported",
+ "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto done_endpoint_free;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(imx412->dev, "no link frequencies defined");
+ dev_err(imx412->dev, "no link frequencies defined\n");
ret = -EINVAL;
goto done_endpoint_free;
}
@@ -1034,7 +1034,7 @@ static int imx412_power_on(struct device *dev)
ret = clk_prepare_enable(imx412->inclk);
if (ret) {
- dev_err(imx412->dev, "fail to enable inclk");
+ dev_err(imx412->dev, "fail to enable inclk\n");
goto error_reset;
}
@@ -1145,7 +1145,7 @@ static int imx412_init_controls(struct imx412 *imx412)
imx412->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctrl_hdlr->error) {
- dev_err(imx412->dev, "control init failed: %d",
+ dev_err(imx412->dev, "control init failed: %d\n",
ctrl_hdlr->error);
v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
@@ -1183,7 +1183,7 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_parse_hw_config(imx412);
if (ret) {
- dev_err(imx412->dev, "HW configuration is not supported");
+ dev_err(imx412->dev, "HW configuration is not supported\n");
return ret;
}
@@ -1191,14 +1191,14 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_power_on(imx412->dev);
if (ret) {
- dev_err(imx412->dev, "failed to power-on the sensor");
+ dev_err(imx412->dev, "failed to power-on the sensor\n");
goto error_mutex_destroy;
}
/* Check module identity */
ret = imx412_detect(imx412);
if (ret) {
- dev_err(imx412->dev, "failed to find sensor: %d", ret);
+ dev_err(imx412->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
@@ -1208,7 +1208,7 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_init_controls(imx412);
if (ret) {
- dev_err(imx412->dev, "failed to init controls: %d", ret);
+ dev_err(imx412->dev, "failed to init controls: %d\n", ret);
goto error_power_off;
}
@@ -1222,14 +1222,14 @@ static int imx412_probe(struct i2c_client *client)
imx412->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx412->sd.entity, 1, &imx412->pad);
if (ret) {
- dev_err(imx412->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx412->dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&imx412->sd);
if (ret < 0) {
dev_err(imx412->dev,
- "failed to register async subdev: %d", ret);
+ "failed to register async subdev: %d\n", ret);
goto error_media_entity;
}
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index c484b753a718..9a5d118b87b0 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -11,6 +11,7 @@
#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -76,6 +77,14 @@
/* OTP registers from sensor */
#define OV2740_REG_OTP_CUSTOMER 0x7010
+static const char * const ov2740_supply_name[] = {
+ "AVDD",
+ "DOVDD",
+ "DVDD",
+};
+
+#define OV2740_NUM_SUPPLIES ARRAY_SIZE(ov2740_supply_name)
+
struct nvm_data {
struct nvmem_device *nvmem;
struct regmap *regmap;
@@ -523,9 +532,11 @@ struct ov2740 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
- /* GPIOs, clocks */
+ /* GPIOs, clocks, regulators */
struct gpio_desc *reset_gpio;
+ struct gpio_desc *powerdown_gpio;
struct clk *clk;
+ struct regulator_bulk_data supplies[OV2740_NUM_SUPPLIES];
/* Current mode */
const struct ov2740_mode *cur_mode;
@@ -644,6 +655,8 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return -ENXIO;
}
+ dev_dbg(&client->dev, "chip id: %x\n", val);
+
ov2740->identified = true;
return 0;
@@ -753,15 +766,17 @@ static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
static int ov2740_init_controls(struct ov2740 *ov2740)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
const struct ov2740_mode *cur_mode;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
+ struct v4l2_fwnode_device_properties props;
int size;
int ret;
ctrl_hdlr = &ov2740->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
if (ret)
return ret;
@@ -811,6 +826,13 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
0, 0, ov2740_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov2740_ctrl_ops, &props);
+
if (ctrl_hdlr->error) {
v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
@@ -1295,7 +1317,9 @@ static int ov2740_suspend(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
+ gpiod_set_value_cansleep(ov2740->powerdown_gpio, 1);
clk_disable_unprepare(ov2740->clk);
+ regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
return 0;
}
@@ -1305,10 +1329,17 @@ static int ov2740_resume(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
int ret;
- ret = clk_prepare_enable(ov2740->clk);
+ ret = regulator_bulk_enable(OV2740_NUM_SUPPLIES, ov2740->supplies);
if (ret)
return ret;
+ ret = clk_prepare_enable(ov2740->clk);
+ if (ret) {
+ regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov2740->powerdown_gpio, 0);
gpiod_set_value_cansleep(ov2740->reset_gpio, 0);
msleep(20);
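[Editor's note] ov2740_resume() now follows the usual sensor power-up order (rails, then clock, then release powerdown/reset), with ov2740_suspend() as its mirror image, and the new error path backs the regulators out if the clock fails. A hedged sketch of that sequence with illustrative names:

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

#define DEMO_NUM_SUPPLIES 3

struct demo_sensor {
	struct regulator_bulk_data supplies[DEMO_NUM_SUPPLIES];
	struct gpio_desc *powerdown_gpio, *reset_gpio;
	struct clk *clk;
};

static int demo_sensor_power_on(struct demo_sensor *s)
{
	int ret;

	ret = regulator_bulk_enable(DEMO_NUM_SUPPLIES, s->supplies);
	if (ret)
		return ret;

	ret = clk_prepare_enable(s->clk);
	if (ret) {
		/* unwind the rails if the clock cannot start */
		regulator_bulk_disable(DEMO_NUM_SUPPLIES, s->supplies);
		return ret;
	}

	gpiod_set_value_cansleep(s->powerdown_gpio, 0);
	gpiod_set_value_cansleep(s->reset_gpio, 0);
	msleep(20);	/* settle before the first I2C access */
	return 0;
}
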
@@ -1320,7 +1351,7 @@ static int ov2740_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ov2740 *ov2740;
bool full_power;
- int ret;
+ int i, ret;
ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
if (!ov2740)
@@ -1337,9 +1368,17 @@ static int ov2740_probe(struct i2c_client *client)
if (IS_ERR(ov2740->reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
"failed to get reset GPIO\n");
- } else if (ov2740->reset_gpio) {
+ }
+
+ ov2740->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2740->powerdown_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(ov2740->powerdown_gpio),
+ "failed to get powerdown GPIO\n");
+ }
+
+ if (ov2740->reset_gpio || ov2740->powerdown_gpio) {
/*
- * Ensure reset is asserted for at least 20 ms before
+ * Ensure reset/powerdown is asserted for at least 20 ms before
* ov2740_resume() deasserts it.
*/
msleep(20);
@@ -1350,6 +1389,13 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
+ for (i = 0; i < OV2740_NUM_SUPPLIES; i++)
+ ov2740->supplies[i].supply = ov2740_supply_name[i];
+
+ ret = devm_regulator_bulk_get(dev, OV2740_NUM_SUPPLIES, ov2740->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
/* ACPI does not always clear the reset GPIO / enable the clock */
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index da5cb5f45a4f..0dae0438aa80 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -1982,6 +1982,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
light_freq = 50;
} else {
/* 60Hz */
+ light_freq = 60;
}
}
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 9f52af6f047f..87e5d7ce5a47 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -40,7 +40,7 @@
/* Exposure control */
#define OV9282_REG_EXPOSURE 0x3500
#define OV9282_EXPOSURE_MIN 1
-#define OV9282_EXPOSURE_OFFSET 12
+#define OV9282_EXPOSURE_OFFSET 25
#define OV9282_EXPOSURE_STEP 1
#define OV9282_EXPOSURE_DEFAULT 0x0282
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index ff8058568240..2ef97be4dc54 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -123,23 +123,6 @@ static int flexcop_dma_remap(struct flexcop_device *fc,
return 0;
}
-int flexcop_dma_control_size_irq(struct flexcop_device *fc,
- flexcop_dma_index_t no,
- int onoff)
-{
- flexcop_ibi_value v = fc->read_ibi_reg(fc, ctrl_208);
-
- if (no & FC_DMA_1)
- v.ctrl_208.DMA1_IRQ_Enable_sig = onoff;
-
- if (no & FC_DMA_2)
- v.ctrl_208.DMA2_IRQ_Enable_sig = onoff;
-
- fc->write_ibi_reg(fc, ctrl_208, v);
- return 0;
-}
-EXPORT_SYMBOL(flexcop_dma_control_size_irq);
-
int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
flexcop_dma_index_t no,
int onoff)
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index c85eb8d25837..485a6cbeb15a 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -305,21 +305,6 @@ int cx18_gpio_register(struct cx18 *cx, u32 hw)
return v4l2_device_register_subdev(&cx->v4l2_dev, sd);
}
-void cx18_reset_ir_gpio(void *data)
-{
- struct cx18 *cx = to_cx18(data);
-
- if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0)
- return;
-
- CX18_DEBUG_INFO("Resetting IR microcontroller\n");
-
- v4l2_subdev_call(&cx->sd_resetctrl,
- core, reset, CX18_GPIO_RESET_Z8F0811);
-}
-EXPORT_SYMBOL(cx18_reset_ir_gpio);
-/* This symbol is exported for use by lirc_pvr150 for the IR-blaster */
-
/* Xceive tuner reset function */
int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value)
{
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 0fa4c7ad2286..8d5797dea7f5 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -17,5 +17,4 @@ enum cx18_gpio_reset_type {
CX18_GPIO_RESET_XC2028 = 2,
};
-void cx18_reset_ir_gpio(void *data);
int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value);
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index a04a1d33fadb..b9f2c14d62b4 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -190,8 +190,7 @@ static int __cx88_ir_start(void *priv)
ir = core->ir;
if (ir->polling) {
- hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ir->timer.function = cx88_ir_work;
+ hrtimer_setup(&ir->timer, cx88_ir_work, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&ir->timer,
ktime_set(0, ir->polling * 1000000),
HRTIMER_MODE_REL);
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 1bf249f446a9..1cb745855600 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
@@ -107,7 +108,6 @@ static const char * const ipu_vcm_types[] = {
"lc898212axb",
};
-#if IS_ENABLED(CONFIG_ACPI)
/*
* Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
* instead of device and driver match to probe IVSC device.
@@ -127,11 +127,11 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
struct acpi_device *consumer, *ivsc_adev;
- acpi_handle handle = acpi_device_handle(adev);
+ acpi_handle handle = acpi_device_handle(ACPI_PTR(adev));
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if exist */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
- if (consumer->handle == handle) {
+ if (ACPI_PTR(consumer->handle) == handle) {
acpi_dev_put(consumer);
return ivsc_adev;
}
@@ -139,12 +139,6 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
return NULL;
}
-#else
-static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
-{
- return NULL;
-}
-#endif
static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
@@ -259,12 +253,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
{
enum v4l2_fwnode_orientation orientation;
struct acpi_pld_info *pld = NULL;
- acpi_status status = AE_ERROR;
-#if IS_ENABLED(CONFIG_ACPI)
- status = acpi_get_physical_device_location(adev->handle, &pld);
-#endif
- if (ACPI_FAILURE(status)) {
+ if (!acpi_get_physical_device_location(ACPI_PTR(adev->handle), &pld)) {
dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
@@ -498,9 +488,7 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
if (sensor->csi_dev) {
const char *device_hid = "";
-#if IS_ENABLED(CONFIG_ACPI)
device_hid = acpi_device_hid(sensor->ivsc_adev);
-#endif
snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
device_hid, sensor->link);
@@ -671,11 +659,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
struct acpi_device *adev = NULL;
int ret;
-#if IS_ENABLED(CONFIG_ACPI)
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
-#else
- while (true) {
-#endif
if (!ACPI_PTR(adev->status.enabled))
continue;
@@ -768,15 +752,10 @@ static int ipu_bridge_ivsc_is_ready(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
-#if IS_ENABLED(CONFIG_ACPI)
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
-#else
- while (true) {
- sensor_adev = NULL;
-#endif
if (!ACPI_PTR(sensor_adev->status.enabled))
continue;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
index e898902e83f3..d8db5aa5d528 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -847,10 +847,10 @@ int ipu6_buttress_init(struct ipu6_device *isp)
INIT_LIST_HEAD(&b->constraints);
isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
- dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
- isp->secure_mode ? "secure" : "non-secure",
- readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
- readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
+ dev_dbg(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
+ isp->secure_mode ? "secure" : "non-secure",
+ readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
+ readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
index 8b8142bcb2d5..b7013f6524ec 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-cpd.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
@@ -275,7 +275,7 @@ static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp,
return -EINVAL;
}
- dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+ dev_dbg(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len,
moduledata_size - mod_hdr->hdr_len,
moduledata_size);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 77f9c7319868..8df1d83a74b5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -1133,6 +1133,7 @@ static int isys_probe(struct auxiliary_device *auxdev,
free_fw_msg_bufs:
free_fw_msg_bufs(isys);
out_remove_pkg_dir_shared_buffer:
+ cpu_latency_qos_remove_request(&isys->pm_qos);
if (!isp->secure_mode)
ipu6_cpd_free_pkg_dir(adev);
remove_shared_buffer:
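The one-line fix above restores the rule that every step taken on the probe path has a
mirror on the error path, in reverse order. A schematic sketch of the pairing;
probe_step(), later_step() and struct isys_like are hypothetical names:

    /*
     * cpu_latency_qos_add_request() on the way in must be paired with
     * cpu_latency_qos_remove_request() on every way out.
     */
    static int probe_step(struct isys_like *isys)
    {
    	int ret;

    	cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);

    	ret = later_step(isys);
    	if (ret)
    		goto out_remove_qos;

    	return 0;

    out_remove_qos:
    	cpu_latency_qos_remove_request(&isys->pm_qos);
    	return ret;
    }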
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index bc63dc81bcae..f90ffc4dad52 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -40,7 +40,9 @@
#include "mgb4_trigger.h"
#include "mgb4_core.h"
-#define MGB4_USER_IRQS 16
+#define MGB4_USER_IRQS 16
+#define MGB4_MGB4_BAR_ID 0
+#define MGB4_XDMA_BAR_ID 1
#define DIGITEQ_VID 0x1ed8
#define T100_DID 0x0101
@@ -123,7 +125,7 @@ static const struct hwmon_chip_info temp_chip_info = {
};
#endif
-static int match_i2c_adap(struct device *dev, void *data)
+static int match_i2c_adap(struct device *dev, const void *data)
{
return i2c_verify_adapter(dev) ? 1 : 0;
}
@@ -139,7 +141,7 @@ static struct i2c_adapter *get_i2c_adap(struct platform_device *pdev)
return dev ? to_i2c_adapter(dev) : NULL;
}
-static int match_spi_adap(struct device *dev, void *data)
+static int match_spi_adap(struct device *dev, const void *data)
{
return to_spi_device(dev) ? 1 : 0;
}
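The const change above makes the callbacks fit the driver core's device_match_t
prototype, so they can be passed straight to the lookup helpers. A sketch assuming the
current device_find_child() signature; adap_of() is an illustrative wrapper:

    static int match_i2c_adap(struct device *dev, const void *data)
    {
    	return i2c_verify_adapter(dev) ? 1 : 0;
    }

    static struct i2c_adapter *adap_of(struct device *parent)
    {
    	struct device *dev = device_find_child(parent, NULL, match_i2c_adap);

    	return dev ? to_i2c_adapter(dev) : NULL;
    }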
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index 9aec62514c0b..e86742d7b6c4 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -18,9 +18,6 @@
#define MGB4_VIN_DEVICES 2
#define MGB4_VOUT_DEVICES 2
-#define MGB4_MGB4_BAR_ID 0
-#define MGB4_XDMA_BAR_ID 1
-
#define MGB4_IS_GMSL(mgbdev) \
((mgbdev)->module_version >> 4 == 2)
#define MGB4_IS_FPDL3(mgbdev) \
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_in.c b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
index 0ba66a2cf145..9626fa59e3d3 100644
--- a/drivers/media/pci/mgb4/mgb4_sysfs_in.c
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
@@ -333,7 +333,7 @@ static ssize_t hsync_width_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
@@ -344,7 +344,7 @@ static ssize_t vsync_width_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
@@ -355,7 +355,7 @@ static ssize_t hback_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
@@ -366,7 +366,7 @@ static ssize_t hfront_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
@@ -377,7 +377,7 @@ static ssize_t vback_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
@@ -388,7 +388,7 @@ static ssize_t vfront_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 3f171c624b40..434eaf0440e2 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -143,8 +143,8 @@ static int get_timings(struct mgb4_vin_dev *vindev,
u32 status = mgb4_read_reg(video, regs->status);
u32 pclk = mgb4_read_reg(video, regs->pclk);
- u32 signal = mgb4_read_reg(video, regs->signal);
- u32 signal2 = mgb4_read_reg(video, regs->signal2);
+ u32 hsync = mgb4_read_reg(video, regs->hsync);
+ u32 vsync = mgb4_read_reg(video, regs->vsync);
u32 resolution = mgb4_read_reg(video, regs->resolution);
if (!(status & (1U << 2)))
@@ -161,12 +161,12 @@ static int get_timings(struct mgb4_vin_dev *vindev,
if (status & (1U << 13))
timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
timings->bt.pixelclock = pclk * 1000;
- timings->bt.hsync = (signal & 0x00FF0000) >> 16;
- timings->bt.vsync = (signal2 & 0x00FF0000) >> 16;
- timings->bt.hbackporch = (signal & 0x0000FF00) >> 8;
- timings->bt.hfrontporch = signal & 0x000000FF;
- timings->bt.vbackporch = (signal2 & 0x0000FF00) >> 8;
- timings->bt.vfrontporch = signal2 & 0x000000FF;
+ timings->bt.hsync = (hsync & 0x00FF0000) >> 16;
+ timings->bt.vsync = (vsync & 0x00FF0000) >> 16;
+ timings->bt.hbackporch = (hsync & 0x0000FF00) >> 8;
+ timings->bt.hfrontporch = hsync & 0x000000FF;
+ timings->bt.vbackporch = (vsync & 0x0000FF00) >> 8;
+ timings->bt.vfrontporch = vsync & 0x000000FF;
return 0;
}
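The rename makes the packed layout explicit: each timing register carries the sync
width in bits 23:16, the back porch in bits 15:8 and the front porch in bits 7:0, for
the horizontal (hsync) and vertical (vsync) registers alike. Small decode helpers,
purely illustrative and derived from the masks above:

    static inline u32 sync_width(u32 reg)	{ return (reg & 0x00FF0000) >> 16; }
    static inline u32 back_porch(u32 reg)	{ return (reg & 0x0000FF00) >> 8; }
    static inline u32 front_porch(u32 reg)	{ return reg & 0x000000FF; }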
@@ -864,9 +864,9 @@ static void create_debugfs(struct mgb4_vin_dev *vindev)
vindev->regs[5].name = "PCLK_FREQUENCY";
vindev->regs[5].offset = vindev->config->regs.pclk;
vindev->regs[6].name = "VIDEO_PARAMS_1";
- vindev->regs[6].offset = vindev->config->regs.signal;
+ vindev->regs[6].offset = vindev->config->regs.hsync;
vindev->regs[7].name = "VIDEO_PARAMS_2";
- vindev->regs[7].offset = vindev->config->regs.signal2;
+ vindev->regs[7].offset = vindev->config->regs.vsync;
vindev->regs[8].name = "PADDING_PIXELS";
vindev->regs[8].offset = vindev->config->regs.padding;
if (has_timeperframe(video)) {
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
index 8fd10c0a5554..2a2c829914ce 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.h
+++ b/drivers/media/pci/mgb4/mgb4_vin.h
@@ -22,8 +22,8 @@ struct mgb4_vin_regs {
u32 frame_period;
u32 sync;
u32 pclk;
- u32 signal;
- u32 signal2;
+ u32 hsync;
+ u32 vsync;
u32 padding;
u32 timer;
};
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 6b2791e29de1..14c5725bd4d8 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -24,10 +24,6 @@
#include "mgb4_cmt.h"
#include "mgb4_vout.h"
-#define DEFAULT_WIDTH 1280
-#define DEFAULT_HEIGHT 640
-#define DEFAULT_PERIOD (MGB4_HW_FREQ / 60)
-
ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
ATTRIBUTE_GROUPS(mgb4_gmsl_out);
@@ -180,7 +176,10 @@ static void stop_streaming(struct vb2_queue *vq)
xdma_disable_user_irq(mgbdev->xdev, irq);
cancel_work_sync(&voutdev->dma_work);
+
mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0x2, 0x0);
+ mgb4_write_reg(&mgbdev->video, voutdev->config->regs.padding, 0);
+
return_all_buffers(voutdev, VB2_BUF_STATE_ERROR);
}
@@ -196,6 +195,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
int rv;
u32 addr;
+ mgb4_write_reg(video, config->regs.padding, voutdev->padding);
mgb4_mask_reg(video, config->regs.config, 0x2, 0x2);
addr = mgb4_read_reg(video, config->regs.address);
@@ -359,7 +359,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
* pixelsize)) / pixelsize;
- mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
return 0;
}
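The hunk above moves the hardware write out of s_fmt: the ioctl now only caches the
computed padding, the register is programmed when streaming starts and reset when it
stops. Schematically, as fragments of the three call sites (for orientation only):

    /* vidioc_s_fmt(): compute and cache, no hardware access */
    voutdev->padding = (bytesperline - width * pixelsize) / pixelsize;

    /* start_streaming(): apply the cached value */
    mgb4_write_reg(video, config->regs.padding, voutdev->padding);

    /* stop_streaming(): restore the default */
    mgb4_write_reg(&mgbdev->video, voutdev->config->regs.padding, 0);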
@@ -661,11 +660,10 @@ static void fpga_init(struct mgb4_vout_dev *voutdev)
const struct mgb4_vout_regs *regs = &voutdev->config->regs;
mgb4_write_reg(video, regs->config, 0x00000011);
- mgb4_write_reg(video, regs->resolution,
- (DEFAULT_WIDTH << 16) | DEFAULT_HEIGHT);
+ mgb4_write_reg(video, regs->resolution, (1280 << 16) | 640);
mgb4_write_reg(video, regs->hsync, 0x00283232);
mgb4_write_reg(video, regs->vsync, 0x40141F1E);
- mgb4_write_reg(video, regs->frame_limit, DEFAULT_PERIOD);
+ mgb4_write_reg(video, regs->frame_limit, MGB4_HW_FREQ / 60);
mgb4_write_reg(video, regs->padding, 0x00000000);
voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 61150 >> 1) << 1;
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index a6738baab688..ac958a5fca78 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -77,9 +77,7 @@ static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
/* TODO: NTSC SPECIFIC */
/* Init and establish defaults */
params->samplesperline = 1440;
- params->numberoflines = 12;
params->numberoflines = 18;
- params->pitch = 1600;
params->pitch = 1440;
params->numpagetables = 2 +
((params->numberoflines * params->pitch) / PAGE_SIZE);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 1a9e2bccc413..6ec1480a6d18 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -362,7 +362,7 @@ static ssize_t sdram_offsets_show(struct device *dev,
}
static ssize_t sdram_show(struct file *file, struct kobject *kobj,
- struct bin_attribute *a, char *buf,
+ const struct bin_attribute *a, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -432,7 +432,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
sysfs_attr_init(&sdram_attr->attr);
sdram_attr->attr.name = "sdram";
sdram_attr->attr.mode = 0440;
- sdram_attr->read = sdram_show;
+ sdram_attr->read_new = sdram_show;
sdram_attr->size = solo_dev->sdram_size;
if (device_create_bin_file(dev, sdram_attr)) {
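The switch to read_new is part of the sysfs constification: the new callback slot
takes a const struct bin_attribute. A minimal sketch of a dynamically set up binary
attribute under that assumption; blob_show() and the attribute name are illustrative:

    static ssize_t blob_show(struct file *file, struct kobject *kobj,
    			     const struct bin_attribute *a, char *buf,
    			     loff_t off, size_t count)
    {
    	/* copy up to @count bytes starting at @off into @buf */
    	return 0;
    }

    static int add_blob(struct device *dev, struct bin_attribute *attr,
    		    size_t size)
    {
    	sysfs_attr_init(&attr->attr);
    	attr->attr.name = "blob";
    	attr->attr.mode = 0440;
    	attr->read_new = blob_show;	/* const-correct slot */
    	attr->size = size;
    	return device_create_bin_file(dev, attr);
    }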
diff --git a/drivers/media/platform/broadcom/bcm2835-unicam.c b/drivers/media/platform/broadcom/bcm2835-unicam.c
index 3aed0e493c81..f10064107d54 100644
--- a/drivers/media/platform/broadcom/bcm2835-unicam.c
+++ b/drivers/media/platform/broadcom/bcm2835-unicam.c
@@ -199,6 +199,7 @@ struct unicam_device {
/* subdevice async notifier */
struct v4l2_async_notifier notifier;
unsigned int sequence;
+ bool frame_started;
/* Sensor node */
struct {
@@ -546,7 +547,8 @@ unicam_find_format_by_fourcc(u32 fourcc, u32 pad)
}
for (i = 0; i < num_formats; ++i) {
- if (formats[i].fourcc == fourcc)
+ if (formats[i].fourcc == fourcc ||
+ formats[i].unpacked_fourcc == fourcc)
return &formats[i];
}
@@ -638,7 +640,14 @@ static inline void unicam_reg_write_field(struct unicam_device *unicam, u32 offs
static void unicam_wr_dma_addr(struct unicam_node *node,
struct unicam_buffer *buf)
{
- dma_addr_t endaddr = buf->dma_addr + buf->size;
+ /*
+ * Due to a HW bug causing buffer overruns in circular buffer mode under
+ * certain (not yet fully known) conditions, the dummy buffer allocation
+ * is set to a single page size, but the hardware gets programmed with
+ * a buffer size of 0.
+ */
+ dma_addr_t endaddr = buf->dma_addr +
+ (buf != &node->dummy_buf ? buf->size : 0);
if (node->id == UNICAM_IMAGE_NODE) {
unicam_reg_write(node->dev, UNICAM_IBSA0, buf->dma_addr);
@@ -742,6 +751,8 @@ static irqreturn_t unicam_isr(int irq, void *dev)
* buffer forever.
*/
if (fe) {
+ bool inc_seq = unicam->frame_started;
+
/*
* Ensure we have swapped buffers already as we can't
* stop the peripheral. If no buffer is available, use a
@@ -761,11 +772,24 @@ static irqreturn_t unicam_isr(int irq, void *dev)
* + FS + LS). In this case, we cannot signal the buffer
* as complete, as the HW will reuse that buffer.
*/
- if (node->cur_frm && node->cur_frm != node->next_frm)
+ if (node->cur_frm && node->cur_frm != node->next_frm) {
unicam_process_buffer_complete(node, sequence);
+ inc_seq = true;
+ }
node->cur_frm = node->next_frm;
}
- unicam->sequence++;
+
+ /*
+ * Only increment the sequence number if a FS has already occurred, or
+ * in the FE + FS case caught in the FE handler above. This keeps the
+ * sequence number in step with the frames generated by the sensor,
+ * rather than the frames dequeued to userland.
+ */
+ if (inc_seq) {
+ unicam->sequence++;
+ unicam->frame_started = false;
+ }
}
if (ista & UNICAM_FSI) {
@@ -795,6 +819,7 @@ static irqreturn_t unicam_isr(int irq, void *dev)
}
unicam_queue_event_sof(unicam);
+ unicam->frame_started = true;
}
/*
@@ -816,11 +841,6 @@ static irqreturn_t unicam_isr(int irq, void *dev)
}
}
- if (unicam_reg_read(unicam, UNICAM_ICTL) & UNICAM_FCM) {
- /* Switch out of trigger mode if selected */
- unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
- unicam_reg_write_field(unicam, UNICAM_ICTL, 0, UNICAM_FCM);
- }
return IRQ_HANDLED;
}
@@ -984,8 +1004,7 @@ static void unicam_start_rx(struct unicam_device *unicam,
unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_DDL);
- /* Always start in trigger frame capture mode (UNICAM_FCM set) */
- val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_FCM | UNICAM_IBOB;
+ val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_IBOB;
line_int_freq = max(fmt->height >> 2, 128);
unicam_set_field(&val, line_int_freq, UNICAM_LCIE_MASK);
unicam_reg_write(unicam, UNICAM_ICTL, val);
@@ -1413,6 +1432,7 @@ static int unicam_sd_enable_streams(struct v4l2_subdev *sd,
if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE))
unicam_start_metadata(unicam);
+ unicam->frame_started = false;
unicam_start_rx(unicam, state);
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index 6b294a2d6717..8479dc9c9a8f 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -269,9 +269,9 @@ static int wave5_vpu_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
- hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- dev->hrtimer.function = &wave5_vpu_timer_callback;
- dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+ hrtimer_setup(&dev->hrtimer, &wave5_vpu_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
ret = PTR_ERR(dev->worker);
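Both conversions above collapse a two-step init into one call: hrtimer_setup() folds
the callback assignment into the timer init, and kthread_run_worker() creates and
starts the worker in one go. A sketch of the polling fallback in isolation; poll_cb(),
start_polling() and the 5 ms period are illustrative:

    static enum hrtimer_restart poll_cb(struct hrtimer *t)
    {
    	/* kick the IRQ-emulation work here */
    	hrtimer_forward_now(t, ms_to_ktime(5));
    	return HRTIMER_RESTART;
    }

    static int start_polling(struct vpu_device *dev)
    {
    	hrtimer_setup(&dev->hrtimer, poll_cb, CLOCK_MONOTONIC,
    		      HRTIMER_MODE_REL_PINNED);

    	dev->worker = kthread_run_worker(0, "vpu_irq_thread");
    	if (IS_ERR(dev->worker))
    		return PTR_ERR(dev->worker);

    	hrtimer_start(&dev->hrtimer, ms_to_ktime(5), HRTIMER_MODE_REL_PINNED);
    	return 0;
    }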
diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
index 9ec01228f907..b8360d37000a 100644
--- a/drivers/media/platform/marvell/mcam-core.c
+++ b/drivers/media/platform/marvell/mcam-core.c
@@ -935,7 +935,12 @@ static int mclk_enable(struct clk_hw *hw)
ret = pm_runtime_resume_and_get(cam->dev);
if (ret < 0)
return ret;
- clk_enable(cam->clk[0]);
+ ret = clk_enable(cam->clk[0]);
+ if (ret) {
+ pm_runtime_put(cam->dev);
+ return ret;
+ }
+
mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
mcam_ctlr_power_up(cam);
diff --git a/drivers/media/platform/marvell/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
index 3fd4fc1b9c48..d3da7ebb4a2b 100644
--- a/drivers/media/platform/marvell/mmp-driver.c
+++ b/drivers/media/platform/marvell/mmp-driver.c
@@ -232,12 +232,22 @@ static int mmpcam_probe(struct platform_device *pdev)
mcam_init_clk(mcam);
/*
+ * Register with V4L.
+ */
+
+ ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ /*
* Create a match of the sensor against its OF node.
*/
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
NULL);
- if (!ep)
- return -ENODEV;
+ if (!ep) {
+ ret = -ENODEV;
+ goto out_v4l2_device_unregister;
+ }
v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
@@ -246,7 +256,7 @@ static int mmpcam_probe(struct platform_device *pdev)
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
- goto out;
+ goto out_v4l2_device_unregister;
}
/*
@@ -254,7 +264,7 @@ static int mmpcam_probe(struct platform_device *pdev)
*/
ret = mccic_register(mcam);
if (ret)
- goto out;
+ goto out_v4l2_device_unregister;
/*
* Add OF clock provider.
@@ -283,6 +293,8 @@ static int mmpcam_probe(struct platform_device *pdev)
return 0;
out:
mccic_shutdown(mcam);
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&mcam->v4l2_dev);
return ret;
}
@@ -293,6 +305,7 @@ static void mmpcam_remove(struct platform_device *pdev)
struct mcam_camera *mcam = &cam->mcam;
mccic_shutdown(mcam);
+ v4l2_device_unregister(&mcam->v4l2_dev);
pm_runtime_force_suspend(mcam->dev);
}
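Registering the v4l2_device first gives every later failure a single unwind point, and
remove() now mirrors probe exactly. The shape of the fix, with later_setup() standing
in for the notifier and mccic steps (probe_shape() and mcam's scope are illustrative):

    static int probe_shape(struct platform_device *pdev)
    {
    	int ret;

    	ret = v4l2_device_register(&pdev->dev, &mcam->v4l2_dev);
    	if (ret)
    		return ret;

    	ret = later_setup(mcam);	/* notifier, mccic_register(), ... */
    	if (ret)
    		goto out_v4l2_device_unregister;

    	return 0;

    out_v4l2_device_unregister:
    	v4l2_device_unregister(&mcam->v4l2_dev);
    	return ret;
    }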
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index ea2ea119dd2a..e5ccf673e152 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -114,19 +114,15 @@ static struct img_config *__get_config_offset(struct mdp_dev *mdp,
if (pp_idx >= mdp->mdp_data->pp_used)
goto err_param;
- if (CFG_CHECK(MT8183, p_id))
+ if (CFG_CHECK(MT8183, p_id)) {
cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
- else if (CFG_CHECK(MT8195, p_id))
- cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
- else
- goto err_param;
-
- if (CFG_CHECK(MT8183, p_id))
cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
- else if (CFG_CHECK(MT8195, p_id))
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);
- else
+ } else {
goto err_param;
+ }
if ((long)cfg_n - (long)mdp->vpu.config > bound) {
dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound);
@@ -325,8 +321,7 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
/* Enable mux settings */
for (index = 0; index < ctrl->num_sets; index++) {
set = &ctrl->sets[index];
- cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
- set->value, 0xFFFFFFFF);
+ cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, set->value);
}
/* Config sub-frame information */
for (index = (num_comp - 1); index >= 0; index--) {
@@ -381,8 +376,7 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
/* Disable mux settings */
for (index = 0; index < ctrl->num_sets; index++) {
set = &ctrl->sets[index];
- cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
- 0, 0xFFFFFFFF);
+ cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, 0);
}
return 0;
@@ -471,43 +465,6 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
return 0;
}
-static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
- size_t size)
-{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base)
- return -ENOMEM;
-
- pkt->buf_size = size;
- pkt->cl = (void *)client;
-
- dev = client->chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mdp_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
-{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
- pkt->va_base = NULL;
-}
-
static void mdp_auto_release_work(struct work_struct *work)
{
struct mdp_cmdq_cmd *cmd;
@@ -538,7 +495,7 @@ static void mdp_auto_release_work(struct work_struct *work)
wake_up(&mdp->callback_wq);
}
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt);
kfree(cmd->comps);
cmd->comps = NULL;
kfree(cmd);
@@ -578,7 +535,7 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
if (refcount_dec_and_test(&mdp->job_count))
wake_up(&mdp->callback_wq);
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt);
kfree(cmd->comps);
cmd->comps = NULL;
kfree(cmd);
@@ -607,20 +564,13 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
goto err_uninit;
}
- if (CFG_CHECK(MT8183, p_id))
- num_comp = CFG_GET(MT8183, config, num_components);
- else if (CFG_CHECK(MT8195, p_id))
- num_comp = CFG_GET(MT8195, config, num_components);
- else
- goto err_uninit;
-
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto err_uninit;
}
- ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
+ ret = cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
if (ret)
goto err_free_cmd;
@@ -632,6 +582,7 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
ret = -EINVAL;
goto err_destroy_pkt;
}
+
comps = kcalloc(num_comp, sizeof(*comps), GFP_KERNEL);
if (!comps) {
ret = -ENOMEM;
@@ -676,7 +627,8 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
dev_err(dev, "mdp_path_config error %d\n", pp_idx);
goto err_free_path;
}
- cmdq_pkt_finalize(&cmd->pkt);
+ cmdq_pkt_eoc(&cmd->pkt);
+ cmdq_pkt_jump_rel(&cmd->pkt, CMDQ_INST_SIZE, mdp->cmdq_shift_pa[pp_idx]);
for (i = 0; i < num_comp; i++) {
s32 inner_id = MDP_COMP_NONE;
@@ -699,6 +651,7 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
cmd->comps = comps;
cmd->num_comps = num_comp;
cmd->mdp_ctx = param->mdp_ctx;
+ cmd->pp_idx = pp_idx;
kfree(path);
return cmd;
@@ -710,7 +663,7 @@ err_free_path:
err_free_comps:
kfree(comps);
err_destroy_pkt:
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[pp_idx], &cmd->pkt);
err_free_cmd:
kfree(cmd);
err_uninit:
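With the driver-local helpers gone, the packet lifecycle belongs entirely to the
mailbox driver: create against a client, append writes, terminate with an explicit EOC
plus relative jump (what cmdq_pkt_finalize() used to bundle), and destroy with the
same client. A condensed sketch, assuming the prototypes used in the hunks above;
pkt_lifecycle() and the register values are illustrative:

    static int pkt_lifecycle(struct cmdq_client *clt, struct mdp_cmdq_cmd *cmd,
    			     u8 subsys, u16 reg, u8 shift_pa)
    {
    	int ret = cmdq_pkt_create(clt, &cmd->pkt, SZ_16K);

    	if (ret)
    		return ret;

    	cmdq_pkt_write(&cmd->pkt, subsys, reg, 0x1);		   /* full word */
    	cmdq_pkt_write_mask(&cmd->pkt, subsys, reg, 0x0, BIT(0)); /* masked */

    	cmdq_pkt_eoc(&cmd->pkt);
    	cmdq_pkt_jump_rel(&cmd->pkt, CMDQ_INST_SIZE, shift_pa);

    	/* ...send via the mailbox, then in the completion path: */
    	cmdq_pkt_destroy(clt, &cmd->pkt);
    	return 0;
    }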
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
index 53a30ad7e0b0..935ae9825728 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -35,6 +35,7 @@ struct mdp_cmdq_cmd {
struct mdp_comp *comps;
void *mdp_ctx;
u8 num_comps;
+ u8 pp_idx;
};
struct mdp_dev;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 8f62fb167156..683c066ed975 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -72,14 +72,14 @@ static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
/* Disable RSZ1 */
if (ctx->comp->inner_id == rdma0 && prz1)
- MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
- 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, prz1->reg_base,
+ PRZ_ENABLE, 0x0, BIT(0));
}
/* Reset RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
return 0;
}
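The pattern repeated through the rest of this file: writes whose old mask was
0xFFFFFFFF drop the argument and become MM_REG_WRITE(), everything else becomes
MM_REG_WRITE_MASK(), and likewise MM_REG_POLL()/MM_REG_POLL_MASK(). A hypothetical
reduction of the two macros, for orientation only; the real definitions live in the
mdp3 headers:

    /* Hypothetical shape of the split; not the driver's actual macros. */
    #define MM_REG_WRITE(cmd, id, base, ofst, val) \
    	cmdq_pkt_write(&(cmd)->pkt, id, (base) + (ofst), val)

    #define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask) \
    	cmdq_pkt_write_mask(&(cmd)->pkt, id, (base) + (ofst), val, mask)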
@@ -98,26 +98,25 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
if (block10bit)
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
else
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
}
/* Setup smi control */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
- (7 << 4) + //burst type to 8
- (1 << 16), //enable pre-ultra
- 0x00030071);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
+ (7 << 4) + //burst type to 8
+ (1 << 16), //enable pre-ultra
+ 0x00030071);
/* Setup source frame info */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.src_ctrl);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
- 0x03C8FE0F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg, 0x03C8FE0F);
if (mdp_cfg)
if (mdp_cfg->rdma_support_10bit && en_ufo) {
@@ -126,17 +125,15 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_y);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_UFO_DEC_LENGTH_BASE_Y, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_c);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_UFO_DEC_LENGTH_BASE_C, reg);
/* Set 10bit source frame pitch */
if (block10bit) {
@@ -144,9 +141,9 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd_in_pxl);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
+ reg, 0x001FFFFF);
}
}
@@ -157,128 +154,121 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8195, ctx->param, rdma.control);
rdma_con_mask = 0x1130;
}
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
- rdma_con_mask);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_CON, reg, rdma_con_mask);
/* Setup source buffer base */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg);
/* Setup source buffer end */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2, reg);
/* Setup source frame pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
+ reg, 0x001FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.sf_bkgd);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
+ reg, 0x001FFFFF);
/* Setup color transform */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.transform);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
- reg, 0x0F110000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
+ reg, 0x0F110000);
if (!mdp_cfg || !mdp_cfg->rdma_esl_setting)
goto rdma_config_done;
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
- reg, 0x0FFF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
+ reg, 0x0FFF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
- reg, 0x0F7F007F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
+ reg, 0x0F7F007F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
- reg, 0x0F3F003F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
+ reg, 0x0F3F003F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con3);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
- reg, 0x0F3F003F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
+ reg, 0x0F3F003F);
rdma_config_done:
return 0;
@@ -297,15 +287,14 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
u32 reg = 0;
/* Enable RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
/* Set Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0, reg);
/* Set 10bit UFO mode */
if (mdp_cfg) {
@@ -315,8 +304,7 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset_0_p);
MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_SRC_OFFSET_0_P,
- reg, 0xFFFFFFFF);
+ MDP_RDMA_SRC_OFFSET_0_P, reg);
}
}
@@ -325,40 +313,38 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1, reg);
/* Set V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2, reg);
/* Set source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
+ 0x1FFF1FFF);
/* Set target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
- reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
+ reg, 0x1FFF1FFF);
/* Set crop offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
- reg, 0x003F001F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
+ reg, 0x003F001F);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
@@ -369,8 +355,8 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
}
if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
if ((csf_r - csf_l + 1) > 320)
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
return 0;
}
@@ -393,7 +379,7 @@ static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
/* Disable RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
return 0;
}
@@ -411,10 +397,10 @@ static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
/* Enable RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
if (CFG_CHECK(MT8195, p_id)) {
struct device *dev;
@@ -437,7 +423,7 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
u32 reg = 0;
if (mdp_cfg && mdp_cfg->rsz_etc_control)
- MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0);
if (CFG_CHECK(MT8183, p_id))
bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);
@@ -446,7 +432,7 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
if (bypass) {
/* Disable RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
return 0;
}
@@ -454,29 +440,27 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.control1);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
- 0x03FFFDF3);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, reg, 0x03FFFDF3);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.control2);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
- 0x0FFFC290);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x0FFFC290);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_x);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
- reg, 0x007FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP, reg,
+ 0x007FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_y);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
- reg, 0x007FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP, reg,
+ 0x007FFFFF);
return 0;
}
@@ -495,15 +479,13 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].control2);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
- 0x00003800);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x00003800);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
@@ -514,60 +496,56 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
}
if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
if ((csf_r - csf_l + 1) <= 16)
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
- BIT(27), BIT(27));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1,
+ BIT(27), BIT(27));
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left_subpix);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top_subpix);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left_subpix);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg);
if (CFG_CHECK(MT8195, p_id)) {
struct device *dev;
@@ -596,19 +574,19 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].merge_cfg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_0, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_0, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_4, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_4, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_24, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_24, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_25, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_25, reg);
/* Bypass mode */
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_12, BIT(0), 0xFFFFFFFF);
+ MDP_MERGE_CFG_12, BIT(0));
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_ENABLE, BIT(0), 0xFFFFFFFF);
+ MDP_MERGE_ENABLE, BIT(0));
}
rsz_subfrm_done:
@@ -634,8 +612,8 @@ static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
}
if ((csf_r - csf_l + 1) <= 16)
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
- BIT(27));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
+ BIT(27));
}
return 0;
@@ -655,15 +633,15 @@ static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
/* Reset setting */
if (CFG_CHECK(MT8195, p_id))
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
return 0;
}
@@ -681,39 +659,36 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg);
if (mdp_cfg && mdp_cfg->wrot_support_10bit) {
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.scan_10bit);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SCAN_10BIT,
- reg, 0x0000000F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SCAN_10BIT,
+ reg, 0x0000000F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.pending_zero);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_PENDING_ZERO,
- reg, 0x04000000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_PENDING_ZERO,
+ reg, 0x04000000);
}
if (CFG_CHECK(MT8195, p_id)) {
reg = CFG_COMP(MT8195, ctx->param, wrot.bit_number);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL_2,
- reg, 0x00000007);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL_2,
+ reg, 0x00000007);
}
/* Write frame related registers */
@@ -721,14 +696,13 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.control);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.control);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
- 0xF131510F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL, reg, 0xF131510F);
/* Write pre-ultra threshold */
if (CFG_CHECK(MT8195, p_id)) {
reg = CFG_COMP(MT8195, ctx->param, wrot.pre_ultra);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
- 0x00FFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
+ 0x00FFFFFF);
}
/* Write frame Y pitch */
@@ -736,37 +710,34 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
- 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE, reg, 0x0000FFFF);
/* Write frame UV pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
- 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_C, reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
- 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_V, reg, 0xFFFF);
/* Write matrix control */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.mat_ctrl);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);
/* Set the fixed ALPHA as 0xFF */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
- 0xFF000000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
+ 0xFF000000);
/* Set VIDO_EOL_SEL */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
/* Set VIDO_FIFO_TEST */
if (CFG_CHECK(MT8183, p_id))
@@ -775,8 +746,8 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8195, ctx->param, wrot.fifo_test);
if (reg != 0)
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
- reg, 0xFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_FIFO_TEST, reg,
+ 0xFFF);
/* Filter enable */
if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
@@ -784,13 +755,13 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.filter);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
- reg, 0x77);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
+ 0x77);
/* Turn off WROT DMA DCM */
if (CFG_CHECK(MT8195, p_id))
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN,
- (0x1 << 23) + (0x1 << 20), 0x900000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN,
+ (0x1 << 23) + (0x1 << 20), 0x900000);
}
return 0;
@@ -808,57 +779,52 @@ static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR, reg, 0x0FFFFFFF);
/* Write U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_C, reg, 0x0FFFFFFF);
/* Write V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_V, reg,
+ 0x0FFFFFFF);
/* Write source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_IN_SIZE, reg, 0x1FFF1FFF);
/* Write target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_TAR_SIZE, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CROP_OFST, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].main_buf);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
- reg, 0x1FFF7F00);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
+ 0x1FFF7F00);
/* Enable WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
return 0;
}
@@ -881,11 +847,11 @@ static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
- 0x77);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
+ 0x77);
/* Disable WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
return 0;
}
@@ -904,9 +870,9 @@ static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
return 0;
}
@@ -918,40 +884,35 @@ static int config_wdma_frame(struct mdp_comp_ctx *ctx,
u8 subsys_id = ctx->comp->subsys_id;
u32 reg = 0;
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050);
/* Setup frame information */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, reg,
- 0x0F01B8F0);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CFG, reg, 0x0F01B8F0);
/* Setup frame base address */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg);
/* Setup Y pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
- reg, 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE, reg,
+ 0x0000FFFF);
/* Setup UV pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
- reg, 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_UV_PITCH, reg,
+ 0x0000FFFF);
/* Set the fixed ALPHA as 0xFF */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
- 0x800000FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
+ 0x800000FF);
return 0;
}
@@ -966,36 +927,33 @@ static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
/* Write Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_SRC_SIZE, reg, 0x3FFF3FFF);
/* Write target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg, 0x3FFF3FFF);
/* Write clip offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_COORD, reg, 0x3FFF3FFF);
/* Enable WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
return 0;
}
@@ -1007,7 +965,7 @@ static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
/* Disable WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
return 0;
}
@@ -1033,19 +991,17 @@ static int reset_luma_hist(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
/* Reset histogram */
for (i = 0; i <= hist_num; i++)
- MM_REG_WRITE_MASK(cmd, subsys_id, base,
- (MDP_LUMA_HIST_INIT + (i << 2)),
- 0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ (MDP_LUMA_HIST_INIT + (i << 2)), 0);
if (mdp_cfg->tdshp_constrain)
MM_REG_WRITE(cmd, subsys_id, base,
- MDP_DC_TWO_D_W1_RESULT_INIT, 0, 0xFFFFFFFF);
+ MDP_DC_TWO_D_W1_RESULT_INIT, 0);
if (mdp_cfg->tdshp_contour)
for (i = 0; i < hist_num; i++)
- MM_REG_WRITE_MASK(cmd, subsys_id, base,
- (MDP_CONTOUR_HIST_INIT + (i << 2)),
- 0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ (MDP_CONTOUR_HIST_INIT + (i << 2)), 0);
return 0;
}
@@ -1055,9 +1011,9 @@ static int init_tdshp(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
/* Enable FIFO */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));
return reset_luma_hist(ctx, cmd);
}
@@ -1072,7 +1028,7 @@ static int config_tdshp_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.cfg);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));
return 0;
}
@@ -1086,26 +1042,24 @@ static int config_tdshp_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE,
- reg, MDP_TDSHP_INPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET,
- reg, 0x00FF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET, reg,
+ 0x00FF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE,
- reg, MDP_TDSHP_OUTPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg);
return 0;
}
@@ -1122,21 +1076,19 @@ static int init_color(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_START, 0x1, BIT(1) | BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_WIN_X_MAIN, 0xFFFF0000, 0xFFFFFFFF);
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000, 0xFFFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_START, 0x1,
+ BIT(1) | BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_X_MAIN, 0xFFFF0000);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000);
/* Reset color matrix */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));
/* Enable interrupt */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);
return 0;
}
@@ -1151,8 +1103,7 @@ static int config_color_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.start);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START,
- reg, MDP_COLOR_START_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START, reg);
return 0;
}
@@ -1166,13 +1117,13 @@ static int config_color_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_hsize);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
- reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
+ reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_vsize);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
- reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
+ reg, 0x00003FFF);
return 0;
}
@@ -1190,9 +1141,9 @@ static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* CCORR enable */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
/* Relay mode */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
return 0;
}
@@ -1214,8 +1165,8 @@ static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
hsize = csf_r - csf_l + 1;
vsize = csf_b - csf_t + 1;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
- (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_SIZE,
+ (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
return 0;
}
@@ -1231,7 +1182,7 @@ static int init_aal(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u16 subsys_id = ctx->comp->subsys_id;
/* Always set MDP_AAL enable to 1 */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));
return 0;
}
@@ -1246,11 +1197,11 @@ static int config_aal_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.cfg_main);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.cfg);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));
return 0;
}
@@ -1264,18 +1215,16 @@ static int config_aal_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE,
- reg, MDP_AAL_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET,
- reg, 0x00FF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET, reg,
+ 0x00FF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE,
- reg, MDP_AAL_OUTPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE, reg);
return 0;
}
@@ -1293,7 +1242,7 @@ static int init_hdr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u16 subsys_id = ctx->comp->subsys_id;
/* Always set MDP_HDR enable to 1 */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));
return 0;
}
@@ -1308,11 +1257,11 @@ static int config_hdr_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.top);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.relay);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));
return 0;
}
@@ -1326,37 +1275,36 @@ static int config_hdr_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].win_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS,
- reg, MDP_HDR_TILE_POS_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hdr_top);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));
/* Enable histogram */
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_addr);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));
return 0;
}
@@ -1373,8 +1321,8 @@ static int init_fg(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));
return 0;
}
@@ -1389,11 +1337,11 @@ static int config_fg_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.ctrl_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.ck_en);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);
return 0;
}
@@ -1407,11 +1355,11 @@ static int config_fg_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg);
return 0;
}
@@ -1428,14 +1376,11 @@ static int init_ovl(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN,
- BIT(0), MDP_OVL_EN_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN, BIT(0));
/* Set to relay mode */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON,
- BIT(9), MDP_OVL_SRC_CON_MASK);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON,
- BIT(0), MDP_OVL_DP_CON_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, BIT(9));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON, BIT(0));
return 0;
}
@@ -1450,11 +1395,11 @@ static int config_ovl_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.L0_con);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.src_con);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));
return 0;
}
@@ -1468,14 +1413,12 @@ static int config_ovl_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].L0_src_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE,
- reg, MDP_OVL_L0_SRC_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE, reg);
/* Setup output size */
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].roi_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE,
- reg, MDP_OVL_ROI_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE, reg);
return 0;
}
@@ -1492,13 +1435,10 @@ static int init_pad(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON,
- BIT(1), MDP_PAD_CON_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON, BIT(1));
/* Reset */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE,
- 0, MDP_PAD_W_SIZE_MASK);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE,
- 0, MDP_PAD_H_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE, 0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE, 0);
return 0;
}
@@ -1512,8 +1452,7 @@ static int config_pad_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, pad.subfrms[index].pic_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE,
- reg, MDP_PAD_PIC_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE, reg);
return 0;
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
index 3e5d2da1c807..681906c16419 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
@@ -9,18 +9,18 @@
#include "mtk-mdp3-cmdq.h"
-#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask, ...) \
- cmdq_pkt_write_mask(&((cmd)->pkt), id, \
- (base) + (ofst), (val), (mask), ##__VA_ARGS__)
-
-#define MM_REG_WRITE(cmd, id, base, ofst, val, mask, ...) \
+#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask) \
do { \
typeof(mask) (m) = (mask); \
- MM_REG_WRITE_MASK(cmd, id, base, ofst, val, \
+ cmdq_pkt_write_mask(&((cmd)->pkt), id, (base) + (ofst), \
+ (val), \
(((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
- (0xffffffff) : (m), ##__VA_ARGS__); \
+ (0xffffffff) : (m)); \
} while (0)
+#define MM_REG_WRITE(cmd, id, base, ofst, val) \
+ cmdq_pkt_write(&((cmd)->pkt), id, (base) + (ofst), (val))
+
#define MM_REG_WAIT(cmd, evt) \
do { \
typeof(cmd) (c) = (cmd); \
@@ -49,20 +49,17 @@ do { \
cmdq_pkt_set_event(&((c)->pkt), (e)); \
} while (0)
-#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask, ...) \
+#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask) \
do { \
typeof(_mask) (_m) = (_mask); \
cmdq_pkt_poll_mask(&((cmd)->pkt), id, \
- (base) + (ofst), (val), (_m), ##__VA_ARGS__); \
+ (base) + (ofst), (val), \
+ (((_m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (_m)); \
} while (0)
-#define MM_REG_POLL(cmd, id, base, ofst, val, mask, ...) \
-do { \
- typeof(mask) (m) = (mask); \
- MM_REG_POLL_MASK((cmd), id, base, ofst, val, \
- (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
- (0xffffffff) : (m), ##__VA_ARGS__); \
-} while (0)
+#define MM_REG_POLL(cmd, id, base, ofst, val) \
+ cmdq_pkt_poll(&((cmd)->pkt), id, (base) + (ofst), (val))
enum mtk_mdp_comp_id {
MDP_COMP_NONE = -1, /* Invalid engine */
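A minimal usage sketch of the reworked helpers (illustrative values; WDMA_DST_ADDR and WDMA_EN are register offsets from the MDP3 headers, and WDMA_EN_MASK is assumed to exist for the token-pasted ofst##_MASK lookup): whole-register writes now drop the mask argument, while partial updates keep it via MM_REG_WRITE_MASK, which still widens a mask equal to the register's full documented mask to 0xffffffff.

	/* Illustrative only; 'cmd', 'subsys_id' and 'base' as used throughout this file. */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg);           /* full 32-bit write */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0)); /* update bit 0 only */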
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 5e94ff0d0756..f571f561f070 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -312,6 +312,8 @@ static int mdp_probe(struct platform_device *pdev)
ret = PTR_ERR(mdp->cmdq_clt[i]);
goto err_mbox_destroy;
}
+
+ mdp->cmdq_shift_pa[i] = cmdq_get_shift_pa(mdp->cmdq_clt[i]->chan);
}
init_waitqueue_head(&mdp->callback_wq);
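cmdq_get_shift_pa() reports how far the GCE hardware expects physical addresses to be right-shifted on that mailbox channel, so caching one value per pipe-pair avoids querying the channel on every job. A hedged sketch of the conversion this enables (the helper below is hypothetical, not part of this change):

	/* Hypothetical: pack a DMA address into the form the GCE consumes. */
	static u32 mdp_gce_addr(struct mdp_dev *mdp, int pp_id, dma_addr_t pa)
	{
		return pa >> mdp->cmdq_shift_pa[pp_id];
	}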
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
index 430251f63754..05cade1d098e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -126,6 +126,7 @@ struct mdp_dev {
u32 id_count;
struct ida mdp_ida;
struct cmdq_client *cmdq_clt[MDP_PP_MAX];
+ u8 cmdq_shift_pa[MDP_PP_MAX];
wait_queue_head_t callback_wq;
struct v4l2_device v4l2_dev;
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 4f5d75645b2b..024cd8ee1709 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -1665,9 +1665,9 @@ static int npcm_video_ece_init(struct npcm_video *video)
dev_info(dev, "Support HEXTILE pixel format\n");
ece_pdev = of_find_device_by_node(ece_node);
- if (IS_ERR(ece_pdev)) {
+ if (!ece_pdev) {
dev_err(dev, "Failed to find ECE device\n");
- return PTR_ERR(ece_pdev);
+ return -ENODEV;
}
of_node_put(ece_node);
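of_find_device_by_node() returns NULL on failure, never an ERR_PTR(), so the old IS_ERR()/PTR_ERR() pair could never trigger: IS_ERR(NULL) is false, and a failed lookup would sail past the check only to dereference NULL later. The corrected pattern, sketched in isolation:

	struct platform_device *pdev = of_find_device_by_node(node);

	if (!pdev)
		return -ENODEV;		/* NULL signals lookup failure */
	/* ... use pdev; the lookup took a device reference ... */
	put_device(&pdev->dev);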
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 7f5fe551179b..1221b309a916 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -2677,11 +2677,12 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg)
int i;
for (i = 0; i < jpeg->num_domains; i++) {
- if (jpeg->pd_dev[i] && !pm_runtime_suspended(jpeg->pd_dev[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]) &&
+ !pm_runtime_suspended(jpeg->pd_dev[i]))
pm_runtime_force_suspend(jpeg->pd_dev[i]);
- if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_link[i]))
device_link_del(jpeg->pd_link[i]);
- if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]))
dev_pm_domain_detach(jpeg->pd_dev[i], true);
jpeg->pd_dev[i] = NULL;
jpeg->pd_link[i] = NULL;
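Each pd_dev/pd_link slot can hold NULL (never attached), an ERR_PTR() (attach failed), or a live pointer, and the previous checks each rejected only one of the two bad states. IS_ERR_OR_NULL() covers both; its definition in <linux/err.h> is effectively:

	/* True for NULL and for ERR_PTR() values alike. */
	static inline bool IS_ERR_OR_NULL(const void *ptr)
	{
		return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
	}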
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index aaf58063677c..1e79b1211b60 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -307,6 +307,19 @@ static const struct mxc_isi_plat_data mxc_imx8mp_data = {
.has_36bit_dma = true,
};
+static const struct mxc_isi_plat_data mxc_imx8ulp_data = {
+ .model = MXC_ISI_IMX8ULP,
+ .num_ports = 1,
+ .num_channels = 1,
+ .reg_offset = 0x0,
+ .ier_reg = &mxc_imx8_isi_ier_v2,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+ .clks = mxc_imx8mn_clks,
+ .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
+ .buf_active_reverse = true,
+ .has_36bit_dma = false,
+};
+
static const struct mxc_isi_plat_data mxc_imx93_data = {
.model = MXC_ISI_IMX93,
.num_ports = 1,
@@ -528,6 +541,7 @@ static void mxc_isi_remove(struct platform_device *pdev)
static const struct of_device_id mxc_isi_of_match[] = {
{ .compatible = "fsl,imx8mn-isi", .data = &mxc_imx8mn_data },
{ .compatible = "fsl,imx8mp-isi", .data = &mxc_imx8mp_data },
+ { .compatible = "fsl,imx8ulp-isi", .data = &mxc_imx8ulp_data },
{ .compatible = "fsl,imx93-isi", .data = &mxc_imx93_data },
{ /* sentinel */ },
};
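For context, the per-SoC mxc_isi_plat_data hung off each compatible entry is conventionally fetched in probe with of_device_get_match_data(); a sketch under that assumption (the probe body is illustrative, not this driver's actual code):

	static int mxc_isi_probe(struct platform_device *pdev)
	{
		const struct mxc_isi_plat_data *pdata;

		pdata = of_device_get_match_data(&pdev->dev);
		if (!pdata)
			return -EINVAL;
		/* ... set up pdata->num_channels channels, pdata->clks, ... */
		return 0;
	}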
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 2810ebe9b5f7..9c7fe9e5f941 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -158,6 +158,7 @@ struct mxc_gasket_ops {
enum model {
MXC_ISI_IMX8MN,
MXC_ISI_IMX8MP,
+ MXC_ISI_IMX8ULP,
MXC_ISI_IMX93,
};
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
index c0ba34ea82fd..8654150728a8 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
@@ -861,6 +861,7 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
const struct mxc_isi_format_info *info,
const struct v4l2_pix_format_mplane *pix)
{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb2);
unsigned int i;
for (i = 0; i < info->mem_planes; i++) {
@@ -875,6 +876,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
vb2_set_plane_payload(vb2, i, size);
}
+ v4l2_buf->field = pix->field;
+
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index df7e93a5a4f6..f341f7b7fd8a 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -505,9 +505,9 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u32 val;
switch (csiphy->camss->res->version) {
- case CAMSS_845:
- r = &lane_regs_sdm845[0][0];
- array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
+ case CAMSS_7280:
+ r = &lane_regs_sm8250[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
break;
case CAMSS_8250:
r = &lane_regs_sm8250[0][0];
@@ -517,6 +517,10 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
r = &lane_regs_sc8280xp[0][0];
array_size = ARRAY_SIZE(lane_regs_sc8280xp[0]);
break;
+ case CAMSS_845:
+ r = &lane_regs_sdm845[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
+ break;
default:
WARN(1, "unknown cspi version\n");
return;
@@ -557,9 +561,10 @@ static bool csiphy_is_gen2(u32 version)
bool ret = false;
switch (version) {
- case CAMSS_845:
+ case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
ret = true;
break;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 5af2b382a843..3791c2d8a6cf 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -103,6 +103,11 @@ const struct csiphy_formats csiphy_formats_8x96 = {
.formats = formats_8x96
};
+const struct csiphy_formats csiphy_formats_sc7280 = {
+ .nformats = ARRAY_SIZE(formats_sdm845),
+ .formats = formats_sdm845
+};
+
const struct csiphy_formats csiphy_formats_sdm845 = {
.nformats = ARRAY_SIZE(formats_sdm845),
.formats = formats_sdm845
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index eebc1ff1cfab..90cc3f976643 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -26,6 +26,12 @@ struct csiphy_lane {
u8 pol;
};
+/**
+ * struct csiphy_lanes_cfg - CSIPHY lanes configuration
+ * @num_data: number of data lanes
+ * @data: data lanes configuration
+ * @clk: clock lane configuration (only for D-PHY)
+ */
struct csiphy_lanes_cfg {
int num_data;
struct csiphy_lane *data;
@@ -111,6 +117,7 @@ void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
extern const struct csiphy_formats csiphy_formats_8x16;
extern const struct csiphy_formats csiphy_formats_8x96;
+extern const struct csiphy_formats csiphy_formats_sc7280;
extern const struct csiphy_formats csiphy_formats_sdm845;
extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 80a62ba11295..95f6a1ac7eaf 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -334,11 +334,12 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
break;
- case CAMSS_8x96:
case CAMSS_660:
- case CAMSS_845:
+ case CAMSS_7280:
+ case CAMSS_8x96:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -1693,9 +1694,10 @@ static int vfe_bpl_align(struct vfe_device *vfe)
int ret = 8;
switch (vfe->camss->res->version) {
- case CAMSS_845:
+ case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
ret = 16;
break;
default:
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 9fb31f4c18ad..a85e9df0f301 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1266,6 +1266,310 @@ static const struct resources_icc icc_res_sm8250[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_7280[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+};
+
+static const struct camss_subdev_resources csid_res_7280[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe2_csid", "vfe2_cphy_rx", "vfe2" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe_lite0_csid", "vfe_lite0_cphy_rx", "vfe_lite0" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 }
+ },
+
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe_lite1_csid", "vfe_lite1_cphy_rx", "vfe_lite1" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 }
+ },
+
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_7280[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe0",
+ "vfe0_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe1",
+ "vfe1_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe2",
+ "vfe2_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe2" },
+ .interrupt = { "vfe2" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .hw_ops = &vfe_ops_170,
+ .has_pd = true,
+ .pd_name = "ife2",
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 (lite) */
+ {
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
+ "vfe_lite0", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 },
+ { 0 } },
+
+ .regulators = {},
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 (lite) */
+ {
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
+ "vfe_lite1", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 },
+ { 0 } },
+
+ .regulators = {},
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_sc7280[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
/* CSIPHY0 */
{
@@ -1995,6 +2299,24 @@ static int camss_init_subdevices(struct camss *camss)
/*
* camss_link_entities - Register subdev nodes and create links
+ * camss_link_err - print an error when link creation fails
+ * @src_name: name for source of the link
+ * @sink_name: name for sink of the link
+ */
+inline void camss_link_err(struct camss *camss,
+ const char *src_name,
+ const char *sink_name,
+ int ret)
+{
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ src_name,
+ sink_name,
+ ret);
+}
+
+/*
+ * camss_link_entities - Register subdev nodes and create links
* @camss: CAMSS device
*
* Return 0 on success or a negative error code on failure
@@ -2012,11 +2334,10 @@ static int camss_link_entities(struct camss *camss)
MSM_CSID_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->csiphy[i].subdev.entity.name,
- camss->csid[j].subdev.entity.name,
- ret);
+ camss_link_err(camss,
+ camss->csiphy[i].subdev.entity.name,
+ camss->csid[j].subdev.entity.name,
+ ret);
return ret;
}
}
@@ -2031,11 +2352,10 @@ static int camss_link_entities(struct camss *camss)
MSM_ISPIF_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->csid[i].subdev.entity.name,
- camss->ispif->line[j].subdev.entity.name,
- ret);
+ camss_link_err(camss,
+ camss->csid[i].subdev.entity.name,
+ camss->ispif->line[j].subdev.entity.name,
+ ret);
return ret;
}
}
@@ -2053,11 +2373,9 @@ static int camss_link_entities(struct camss *camss)
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- ispif->entity.name,
- vfe->entity.name,
- ret);
+ camss_link_err(camss, ispif->entity.name,
+ vfe->entity.name,
+ ret);
return ret;
}
}
@@ -2074,11 +2392,9 @@ static int camss_link_entities(struct camss *camss)
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- csid->entity.name,
- vfe->entity.name,
- ret);
+ camss_link_err(camss, csid->entity.name,
+ vfe->entity.name,
+ ret);
return ret;
}
}
@@ -2227,9 +2543,9 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
input, MSM_CSIPHY_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- sensor->name, input->name, ret);
+ camss_link_err(camss, sensor->name,
+ input->name,
+ ret);
return ret;
}
}
@@ -2622,14 +2938,29 @@ static const struct camss_resources sc8280xp_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sc7280_resources = {
+ .version = CAMSS_7280,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_7280,
+ .csid_res = csid_res_7280,
+ .vfe_res = vfe_res_7280,
+ .icc_res = icc_res_sc7280,
+ .icc_path_num = ARRAY_SIZE(icc_res_sc7280),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_7280),
+ .csid_num = ARRAY_SIZE(csid_res_7280),
+ .vfe_num = ARRAY_SIZE(vfe_res_7280),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
{ .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+ { .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
+ { .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
- { .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 9da7f48f5dd7..9a046eea334f 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -77,13 +77,14 @@ enum pm_domain {
};
enum camss_version {
+ CAMSS_660,
+ CAMSS_7280,
CAMSS_8x16,
CAMSS_8x53,
CAMSS_8x96,
- CAMSS_660,
- CAMSS_845,
CAMSS_8250,
CAMSS_8280XP,
+ CAMSS_845,
};
enum icc_count {
diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig
index bfd50e8f3421..bc2e410b29cb 100644
--- a/drivers/media/platform/qcom/venus/Kconfig
+++ b/drivers/media/platform/qcom/venus/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_QCOM_VENUS
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV && QCOM_SMEM
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select OF_DYNAMIC if ARCH_QCOM
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 2d27c5167246..77d48578ecd2 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -286,6 +286,89 @@ static irqreturn_t venus_isr_thread(int irq, void *dev_id)
return ret;
}
+#if defined(CONFIG_OF_DYNAMIC)
+static int venus_add_video_core(struct venus_core *core, const char *node_name,
+ const char *compat)
+{
+ struct of_changeset *ocs = core->ocs;
+ struct device *dev = core->dev;
+ struct device_node *np, *enp;
+ int ret;
+
+ if (!node_name)
+ return 0;
+
+ enp = of_find_node_by_name(dev->of_node, node_name);
+ if (enp) {
+ of_node_put(enp);
+ return 0;
+ }
+
+ np = of_changeset_create_node(ocs, dev->of_node, node_name);
+ if (!np) {
+ dev_err(dev, "Unable to create new node\n");
+ return -ENODEV;
+ }
+
+ ret = of_changeset_add_prop_string(ocs, np, "compatible", compat);
+ if (ret)
+ dev_err(dev, "unable to add %s\n", compat);
+
+ of_node_put(np);
+
+ return ret;
+}
+
+static int venus_add_dynamic_nodes(struct venus_core *core)
+{
+ struct device *dev = core->dev;
+ int ret;
+
+ core->ocs = kmalloc(sizeof(*core->ocs), GFP_KERNEL);
+ if (!core->ocs)
+ return -ENOMEM;
+
+ of_changeset_init(core->ocs);
+
+ ret = venus_add_video_core(core, core->res->dec_nodename, "venus-decoder");
+ if (ret)
+ goto err;
+
+ ret = venus_add_video_core(core, core->res->enc_nodename, "venus-encoder");
+ if (ret)
+ goto err;
+
+ ret = of_changeset_apply(core->ocs);
+ if (ret) {
+ dev_err(dev, "applying changeset fail ret %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ of_changeset_destroy(core->ocs);
+ kfree(core->ocs);
+ core->ocs = NULL;
+ return ret;
+}
+
+static void venus_remove_dynamic_nodes(struct venus_core *core)
+{
+ if (core->ocs) {
+ of_changeset_revert(core->ocs);
+ of_changeset_destroy(core->ocs);
+ kfree(core->ocs);
+ }
+}
+#else
+static int venus_add_dynamic_nodes(struct venus_core *core)
+{
+ return 0;
+}
+
+static void venus_remove_dynamic_nodes(struct venus_core *core) {}
+#endif
+
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
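venus_add_dynamic_nodes() and venus_remove_dynamic_nodes() follow the standard OF changeset contract: initialize, stage entries, apply them as one transaction, and on teardown revert before destroying. Condensed, with error handling elided:

	struct of_changeset ocs;

	of_changeset_init(&ocs);
	/* stage: of_changeset_create_node(), of_changeset_add_prop_string(), ... */
	of_changeset_apply(&ocs);	/* staged changes become visible in the live tree */
	/* ... later ... */
	of_changeset_revert(&ocs);	/* undo the applied changes */
	of_changeset_destroy(&ocs);	/* free the staged entries */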
@@ -365,9 +448,15 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
+ if (core->res->dec_nodename || core->res->enc_nodename) {
+ ret = venus_add_dynamic_nodes(core);
+ if (ret)
+ goto err_runtime_disable;
+ }
+
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
- goto err_runtime_disable;
+ goto err_remove_dynamic_nodes;
ret = venus_firmware_init(core);
if (ret)
@@ -411,6 +500,8 @@ err_firmware_deinit:
venus_firmware_deinit(core);
err_of_depopulate:
of_platform_depopulate(dev);
+err_remove_dynamic_nodes:
+ venus_remove_dynamic_nodes(core);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
@@ -443,6 +534,8 @@ static void venus_remove(struct platform_device *pdev)
venus_firmware_deinit(core);
+ venus_remove_dynamic_nodes(core);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -506,18 +599,14 @@ err_cpucfg_path:
void venus_close_common(struct venus_inst *inst)
{
/*
- * First, remove the inst from the ->instances list, so that
- * to_instance() will return NULL.
- */
- hfi_session_destroy(inst);
- /*
- * Second, make sure we don't have IRQ/IRQ-thread currently running
+ * Make sure we don't have IRQ/IRQ-thread currently running
* or pending execution, which would race with the inst destruction.
*/
synchronize_irq(inst->core->irq);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
+ hfi_session_destroy(inst);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
@@ -582,6 +671,8 @@ static const struct venus_resources msm8916_res = {
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
.fwname = "qcom/venus-1.8/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl msm8996_freq_table[] = {
@@ -791,6 +882,8 @@ static const struct venus_resources sdm845_res_v2 = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.2/venus.mbn",
+ .dec_nodename = "video-core0",
+ .enc_nodename = "video-core1",
};
static const struct freq_tbl sc7180_freq_table[] = {
@@ -839,6 +932,8 @@ static const struct venus_resources sc7180_res = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.4/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl sm8250_freq_table[] = {
@@ -894,6 +989,8 @@ static const struct venus_resources sm8250_res = {
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.fwname = "qcom/vpu-1.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl sc7280_freq_table[] = {
@@ -956,6 +1053,8 @@ static const struct venus_resources sc7280_res = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/vpu-2.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct of_device_id venus_dt_match[] = {
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 44f1c3bc4186..abeeafa86697 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -90,6 +90,8 @@ struct venus_resources {
u32 cp_nonpixel_start;
u32 cp_nonpixel_size;
const char *fwname;
+ const char *enc_nodename;
+ const char *dec_nodename;
};
enum venus_fmt {
@@ -169,6 +171,7 @@ struct venus_format {
* @root: debugfs root directory
* @venus_ver: the venus firmware version
* @dump_core: a flag indicating that a core dump is required
+ * @ocs: OF changeset pointer
*/
struct venus_core {
void __iomem *base;
@@ -231,6 +234,7 @@ struct venus_core {
u32 rev;
} venus_ver;
unsigned long dump_core;
+ struct of_changeset *ocs;
};
struct vdec_controls {
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index e00aedb41d16..675e6fd1e9fa 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -138,29 +138,6 @@ int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
return core->ops->core_trigger_ssr(core, type);
}
-int hfi_core_ping(struct venus_core *core)
-{
- int ret;
-
- mutex_lock(&core->lock);
-
- ret = core->ops->core_ping(core, 0xbeef);
- if (ret)
- goto unlock;
-
- ret = wait_for_completion_timeout(&core->done, TIMEOUT);
- if (!ret) {
- ret = -ETIMEDOUT;
- goto unlock;
- }
- ret = 0;
- if (core->error != HFI_ERR_NONE)
- ret = -ENODEV;
-unlock:
- mutex_unlock(&core->lock);
- return ret;
-}
-
static int wait_session_msg(struct venus_inst *inst)
{
int ret;
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index f25d412d6553..0338841d5992 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -108,7 +108,6 @@ struct hfi_inst_ops {
struct hfi_ops {
int (*core_init)(struct venus_core *core);
int (*core_deinit)(struct venus_core *core);
- int (*core_ping)(struct venus_core *core, u32 cookie);
int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type);
int (*session_init)(struct venus_inst *inst, u32 session_type,
@@ -152,7 +151,6 @@ int hfi_core_deinit(struct venus_core *core, bool blocking);
int hfi_core_suspend(struct venus_core *core);
int hfi_core_resume(struct venus_core *core, bool force);
int hfi_core_trigger_ssr(struct venus_core *core, u32 type);
-int hfi_core_ping(struct venus_core *core);
int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops);
void hfi_session_destroy(struct venus_inst *inst);
int hfi_session_init(struct venus_inst *inst, u32 pixfmt);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index f9437b6412b9..a9167867063c 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1178,16 +1178,6 @@ static int venus_core_deinit(struct venus_core *core)
return 0;
}
-static int venus_core_ping(struct venus_core *core, u32 cookie)
-{
- struct venus_hfi_device *hdev = to_hfi_priv(core);
- struct hfi_sys_ping_pkt pkt;
-
- pkt_sys_ping(&pkt, cookie);
-
- return venus_iface_cmdq_write(hdev, &pkt, false);
-}
-
static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
@@ -1639,7 +1629,6 @@ static int venus_suspend(struct venus_core *core)
static const struct hfi_ops venus_hfi_ops = {
.core_init = venus_core_init,
.core_deinit = venus_core_deinit,
- .core_ping = venus_core_ping,
.core_trigger_ssr = venus_core_trigger_ssr,
.session_init = venus_session_init,
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 98c22b9f9372..9f82882b77bc 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -1697,10 +1697,6 @@ static int vdec_open(struct file *file)
if (ret)
goto err_free;
- ret = hfi_session_create(inst, &vdec_hfi_ops);
- if (ret)
- goto err_ctrl_deinit;
-
vdec_inst_init(inst);
ida_init(&inst->dpb_ids);
@@ -1712,15 +1708,19 @@ static int vdec_open(struct file *file)
inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
- goto err_session_destroy;
+ goto err_ctrl_deinit;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
- goto err_m2m_release;
+ goto err_m2m_dev_release;
}
+ ret = hfi_session_create(inst, &vdec_hfi_ops);
+ if (ret)
+ goto err_m2m_ctx_release;
+
v4l2_fh_init(&inst->fh, core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
@@ -1730,10 +1730,10 @@ static int vdec_open(struct file *file)
return 0;
-err_m2m_release:
+err_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+err_m2m_dev_release:
v4l2_m2m_release(inst->m2m_dev);
-err_session_destroy:
- hfi_session_destroy(inst);
err_ctrl_deinit:
v4l2_ctrl_handler_free(&inst->ctrl_handler);
err_free:
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index c1c543535aaf..c7f8e37dba9b 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1492,10 +1492,6 @@ static int venc_open(struct file *file)
if (ret)
goto err_free;
- ret = hfi_session_create(inst, &venc_hfi_ops);
- if (ret)
- goto err_ctrl_deinit;
-
venc_inst_init(inst);
/*
@@ -1505,15 +1501,19 @@ static int venc_open(struct file *file)
inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
- goto err_session_destroy;
+ goto err_ctrl_deinit;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
- goto err_m2m_release;
+ goto err_m2m_dev_release;
}
+ ret = hfi_session_create(inst, &venc_hfi_ops);
+ if (ret)
+ goto err_m2m_ctx_release;
+
v4l2_fh_init(&inst->fh, core->vdev_enc);
inst->fh.ctrl_handler = &inst->ctrl_handler;
@@ -1523,10 +1523,10 @@ static int venc_open(struct file *file)
return 0;
-err_m2m_release:
+err_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+err_m2m_dev_release:
v4l2_m2m_release(inst->m2m_dev);
-err_session_destroy:
- hfi_session_destroy(inst);
err_ctrl_deinit:
v4l2_ctrl_handler_free(&inst->ctrl_handler);
err_free:
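Both vdec_open() and venc_open() now create the HFI session last, so the unwind labels once again undo the init steps in strict reverse order and the dedicated err_session_destroy label disappears. The generic shape of the pattern (names are placeholders):

	a = init_a();	if (IS_ERR(a)) return PTR_ERR(a);
	b = init_b();	if (IS_ERR(b)) { ret = PTR_ERR(b); goto err_a; }
	c = init_c();	if (IS_ERR(c)) { ret = PTR_ERR(c); goto err_b; }
	return 0;
err_b:
	destroy_b(b);
err_a:
	destroy_a(a);
	return ret;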
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 27ffdd28cbf7..0a53dd47d7bf 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -183,17 +183,19 @@ struct rcar_csi2;
#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n) (0x23840 + ((n) * 2)) /* n = 0 - 11 */
#define V4H_CORE_DIG_RW_COMMON_REG(n) (0x23880 + ((n) * 2)) /* n = 0 - 15 */
#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n) (0x239e0 + ((n) * 2)) /* n = 0 - 3 */
-#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
/* V4H C-PHY */
#define V4H_CORE_DIG_RW_TRIO0_REG(n) (0x22100 + ((n) * 2)) /* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO1_REG(n) (0x22500 + ((n) * 2)) /* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO2_REG(n) (0x22900 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_0_RW_CFG_0_REG 0x2a000
#define V4H_CORE_DIG_CLANE_0_RW_LP_0_REG 0x2a080
#define V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(n) (0x2a100 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG 0x2a480
#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n) (0x2a500 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG 0x2a800
#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG 0x2a880
#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n) (0x2a900 + ((n) * 2)) /* n = 0 - 6 */
@@ -672,6 +674,21 @@ static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
return NULL;
}
+struct rcsi2_cphy_line_order {
+ enum v4l2_mbus_csi2_cphy_line_orders_type order;
+ u16 cfg;
+ u16 ctrl29;
+};
+
+static const struct rcsi2_cphy_line_order rcsi2_cphy_line_orders[] = {
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC, .cfg = 0x0, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB, .cfg = 0xa, .ctrl29 = 0x1 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC, .cfg = 0xc, .ctrl29 = 0x1 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA, .cfg = 0x5, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB, .cfg = 0x3, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA, .cfg = 0x9, .ctrl29 = 0x1 }
+};
+
enum rcar_csi2_pads {
RCAR_CSI2_SINK,
RCAR_CSI2_SOURCE_VC0,
@@ -722,6 +739,7 @@ struct rcar_csi2 {
bool cphy;
unsigned short lanes;
unsigned char lane_swap[4];
+ enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[3];
};
static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
@@ -754,11 +772,24 @@ static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
iowrite32(data, priv->base + reg);
}
+static u16 rcsi2_read16(struct rcar_csi2 *priv, unsigned int reg)
+{
+ return ioread16(priv->base + reg);
+}
+
static void rcsi2_write16(struct rcar_csi2 *priv, unsigned int reg, u16 data)
{
iowrite16(data, priv->base + reg);
}
+static void rcsi2_modify16(struct rcar_csi2 *priv, unsigned int reg, u16 data, u16 mask)
+{
+ u16 val;
+
+ val = rcsi2_read16(priv, reg) & ~mask;
+ rcsi2_write16(priv, reg, val | data);
+}
+
static int rcsi2_phtw_write(struct rcar_csi2 *priv, u8 data, u8 code)
{
unsigned int timeout;
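rcsi2_modify16(), added above, is a plain 16-bit read-modify-write: the bits in 'mask' are cleared and 'data' is then OR'ed in unchanged, i.e. new = (old & ~mask) | data, so callers are responsible for positioning 'data' within the intended field. Open-coded it is simply:

	u16 old = ioread16(priv->base + reg);

	iowrite16((old & ~mask) | data, priv->base + reg);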
@@ -1112,6 +1143,26 @@ static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv,
return 0;
}
+static void rsci2_set_line_order(struct rcar_csi2 *priv,
+ enum v4l2_mbus_csi2_cphy_line_orders_type order,
+ unsigned int cfgreg, unsigned int ctrlreg)
+{
+ const struct rcsi2_cphy_line_order *info = NULL;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(rcsi2_cphy_line_orders); i++) {
+ if (rcsi2_cphy_line_orders[i].order == order) {
+ info = &rcsi2_cphy_line_orders[i];
+ break;
+ }
+ }
+
+ if (!info)
+ return;
+
+ rcsi2_modify16(priv, cfgreg, info->cfg, 0x000f);
+ rcsi2_modify16(priv, ctrlreg, info->ctrl29, 0x0100);
+}
+
static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match)
{
unsigned int timeout;
@@ -1189,12 +1240,18 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(1), conf->trio1);
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(1), conf->trio1);
- /*
- * Configure pin-swap.
- * TODO: This registers is not documented yet, the values should depend
- * on the 'clock-lanes' and 'data-lanes' devicetree properties.
- */
- rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG, 0xf5);
+ /* Configure data line order. */
+ rsci2_set_line_order(priv, priv->line_orders[0],
+ V4H_CORE_DIG_CLANE_0_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9));
+ rsci2_set_line_order(priv, priv->line_orders[1],
+ V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(9));
+ rsci2_set_line_order(priv, priv->line_orders[2],
+ V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(9));
+
+ /* TODO: This register is not documented. */
rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000);
/* Leave Shutdown mode */
@@ -1349,15 +1406,15 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
static const struct phtw_value step2[] = {
{ .data = 0x00, .code = 0x00 },
{ .data = 0x80, .code = 0xe0 },
- { .data = 0x01, .code = 0xe1 },
+ { .data = 0x31, .code = 0xe1 },
{ .data = 0x06, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x08, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x0a, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x0c, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x01, .code = 0x00 },
{ .data = 0x31, .code = 0xaa },
{ .data = 0x05, .code = 0x00 },
@@ -1370,6 +1427,11 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
{ .data = 0x05, .code = 0x09 },
};
+ static const struct phtw_value step3[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x06, .code = 0xab },
+ };
+
if (priv->info->hsfreqrange) {
ret = rcsi2_set_phypll(priv, mbps);
if (ret)
@@ -1400,7 +1462,7 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
return ret;
}
- return ret;
+ return rcsi2_phtw_write_array(priv, step3, ARRAY_SIZE(step3));
}
static int rcsi2_start_receiver_v4m(struct rcar_csi2 *priv,
@@ -1732,6 +1794,9 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
}
}
+ for (i = 0; i < ARRAY_SIZE(priv->line_orders); i++)
+ priv->line_orders[i] = vep->bus.mipi_csi2.line_orders[i];
+
return 0;
}
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index 17a1af507a27..cd69c8a686d3 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -558,7 +558,7 @@ static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count
goto assert_aresetn;
}
- /* Allocate scratch buffer. */
+ /* Allocate scratch buffer */
cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
&cru->scratch_phys, GFP_KERNEL);
if (!cru->scratch) {
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 8a48e9d91f96..4396348811c8 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index 11c3d7234757..bf55beec0fac 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
index e8917e5630a4..cc6bd7f5b030 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.h
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
#ifndef __RGA_HW_H__
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 1739ac0c8e92..3dccab5fa4a1 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 8105bb2efe57..530e12de73c4 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
#ifndef __RGA_H__
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 02339cd94486..6dcefd144d5a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -35,8 +35,6 @@
#define RKISP1_SP_DEV_NAME RKISP1_DRIVER_NAME "_selfpath"
#define RKISP1_MP_DEV_NAME RKISP1_DRIVER_NAME "_mainpath"
-#define RKISP1_MIN_BUFFERS_NEEDED 3
-
enum rkisp1_plane {
RKISP1_PLANE_Y = 0,
RKISP1_PLANE_CB = 1,
@@ -1561,7 +1559,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
q->ops = &rkisp1_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
- q->min_queued_buffers = RKISP1_MIN_BUFFERS_NEEDED;
+ q->min_queued_buffers = 1;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
q->dev = cap->rkisp1->dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 0100b9c3edbe..dc65a7924f8a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -228,6 +228,9 @@ static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
break;
}
+ if (ret)
+ break;
+
/* Parse the endpoint and validate the bus type. */
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (ret) {
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
index 7a48fad1df16..ac67a04e5eeb 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
@@ -12,137 +12,6 @@
#include "fimc-is-errno.h"
-const char *fimc_is_param_strerr(unsigned int error)
-{
- switch (error) {
- case ERROR_COMMON_CMD:
- return "ERROR_COMMON_CMD: Invalid Command";
- case ERROR_COMMON_PARAMETER:
- return "ERROR_COMMON_PARAMETER: Invalid Parameter";
- case ERROR_COMMON_SETFILE_LOAD:
- return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
- case ERROR_COMMON_SETFILE_ADJUST:
- return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
- case ERROR_COMMON_SETFILE_INDEX:
- return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
- case ERROR_COMMON_INPUT_PATH:
- return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
- case ERROR_COMMON_INPUT_INIT:
- return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
- case ERROR_COMMON_OUTPUT_PATH:
- return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
- case ERROR_COMMON_OUTPUT_INIT:
- return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
- case ERROR_CONTROL_BYPASS:
- return "ERROR_CONTROL_BYPASS";
- case ERROR_OTF_INPUT_FORMAT:
- return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
- case ERROR_OTF_INPUT_WIDTH:
- return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
- case ERROR_OTF_INPUT_HEIGHT:
- return "ERROR_OTF_INPUT_HEIGHT: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_OTF_INPUT_BIT_WIDTH:
- return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_INPUT_WIDTH:
- return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
- case ERROR_DMA_INPUT_HEIGHT:
- return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
- case ERROR_DMA_INPUT_FORMAT:
- return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
- case ERROR_DMA_INPUT_BIT_WIDTH:
- return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_INPUT_ORDER:
- return "ERROR_DMA_INPUT_ORDER: Invalid order(DRC: YYCbCr,YCbYCr,FD:NO,YYCbCr,YCbYCr,CbCr,CrCb)";
- case ERROR_DMA_INPUT_PLANE:
- return "ERROR_DMA_INPUT_PLANE: Invalid plane (DRC: 3, FD: 1, 2, 3)";
- case ERROR_OTF_OUTPUT_WIDTH:
- return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
- case ERROR_OTF_OUTPUT_HEIGHT:
- return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
- case ERROR_OTF_OUTPUT_FORMAT:
- return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
- case ERROR_OTF_OUTPUT_BIT_WIDTH:
- return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_OUTPUT_WIDTH:
- return "ERROR_DMA_OUTPUT_WIDTH";
- case ERROR_DMA_OUTPUT_HEIGHT:
- return "ERROR_DMA_OUTPUT_HEIGHT";
- case ERROR_DMA_OUTPUT_FORMAT:
- return "ERROR_DMA_OUTPUT_FORMAT";
- case ERROR_DMA_OUTPUT_BIT_WIDTH:
- return "ERROR_DMA_OUTPUT_BIT_WIDTH";
- case ERROR_DMA_OUTPUT_PLANE:
- return "ERROR_DMA_OUTPUT_PLANE";
- case ERROR_DMA_OUTPUT_ORDER:
- return "ERROR_DMA_OUTPUT_ORDER";
-
- /* Sensor Error(100~199) */
- case ERROR_SENSOR_I2C_FAIL:
- return "ERROR_SENSOR_I2C_FAIL";
- case ERROR_SENSOR_INVALID_FRAMERATE:
- return "ERROR_SENSOR_INVALID_FRAMERATE";
- case ERROR_SENSOR_INVALID_EXPOSURETIME:
- return "ERROR_SENSOR_INVALID_EXPOSURETIME";
- case ERROR_SENSOR_INVALID_SIZE:
- return "ERROR_SENSOR_INVALID_SIZE";
- case ERROR_SENSOR_INVALID_SETTING:
- return "ERROR_SENSOR_INVALID_SETTING";
- case ERROR_SENSOR_ACTUATOR_INIT_FAIL:
- return "ERROR_SENSOR_ACTUATOR_INIT_FAIL";
- case ERROR_SENSOR_INVALID_AF_POS:
- return "ERROR_SENSOR_INVALID_AF_POS";
- case ERROR_SENSOR_UNSUPPORT_FUNC:
- return "ERROR_SENSOR_UNSUPPORT_FUNC";
- case ERROR_SENSOR_UNSUPPORT_PERI:
- return "ERROR_SENSOR_UNSUPPORT_PERI";
- case ERROR_SENSOR_UNSUPPORT_AF:
- return "ERROR_SENSOR_UNSUPPORT_AF";
-
- /* ISP Error (200~299) */
- case ERROR_ISP_AF_BUSY:
- return "ERROR_ISP_AF_BUSY";
- case ERROR_ISP_AF_INVALID_COMMAND:
- return "ERROR_ISP_AF_INVALID_COMMAND";
- case ERROR_ISP_AF_INVALID_MODE:
- return "ERROR_ISP_AF_INVALID_MODE";
-
- /* DRC Error (300~399) */
- /* FD Error (400~499) */
- case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
- return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
- case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
- return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
- case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
- return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
- case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
- return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID\n";
- case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
- return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
- case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
- return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
- case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
- return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
- case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
- return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
- case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
- return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
- case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
- return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
- case ERROR_FD_CONFIG_ORIENTATION_STATE:
- return "ERROR_FD_CONFIG_ORIENTATION_STATE";
- case ERROR_FD_CONFIG_ORIENTATION_INVALID:
- return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
- case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
- return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
- case ERROR_FD_RESULT:
- return "ERROR_FD_RESULT";
- case ERROR_FD_MODE:
- return "ERROR_FD_MODE";
- default:
- return "Unknown";
- }
-}
-
const char *fimc_is_strerr(unsigned int error)
{
error &= ~IS_ERROR_TIME_OUT_FLAG;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
index 809e117331c0..fa8204ffec7b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
@@ -240,6 +240,5 @@ enum fimc_is_error {
};
const char *fimc_is_strerr(unsigned int error);
-const char *fimc_is_param_strerr(unsigned int error);
#endif /* FIMC_IS_ERR_H_ */
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
index 9c816ae3b3e5..443362da8cc8 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
@@ -204,15 +204,6 @@ int __is_hw_update_params(struct fimc_is *is)
return ret;
}
-void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
-{
- struct isp_param *isp;
-
- isp = &is->config[is->config_index].isp;
- mf->width = isp->otf_input.width;
- mf->height = isp->otf_input.height;
-}
-
void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
{
unsigned int index = is->config_index;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
index 206904674927..10ad02f36fed 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
@@ -994,7 +994,6 @@ void fimc_is_set_initial_params(struct fimc_is *is);
unsigned int __get_pending_param_count(struct fimc_is *is);
int __is_hw_update_params(struct fimc_is *is);
-void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
void __is_set_sensor(struct fimc_is *is, int fps);
void __is_set_isp_aa_ae(struct fimc_is *is);
diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index 63f3eecdd7e6..452880b5350c 100644
--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
@@ -940,13 +940,19 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
state->supplies);
goto unlock;
}
- clk_enable(state->clock[CSIS_CLK_GATE]);
+ ret = clk_enable(state->clock[CSIS_CLK_GATE]);
+ if (ret) {
+ phy_power_off(state->phy);
+ regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ goto unlock;
+ }
}
if (state->flags & ST_STREAMING)
s5pcsis_start_stream(state);
state->flags &= ~ST_SUSPENDED;
- unlock:
+unlock:
mutex_unlock(&state->lock);
return ret ? -EAGAIN : 0;
}
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
index de6e8f151849..221e3c447f36 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-core.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
@@ -527,10 +527,19 @@ static void s3c_camif_remove(struct platform_device *pdev)
static int s3c_camif_runtime_resume(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(camif->clock[CLK_GATE]);
+ if (ret)
+ return ret;
- clk_enable(camif->clock[CLK_GATE]);
/* null op on s3c244x */
- clk_enable(camif->clock[CLK_CAM]);
+ ret = clk_enable(camif->clock[CLK_CAM]);
+ if (ret) {
+ clk_disable(camif->clock[CLK_GATE]);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 2fe3c9228ac5..5f80931f056d 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -774,8 +774,10 @@ static int s5p_mfc_open(struct file *file)
int ret = 0;
mfc_debug_enter();
- if (mutex_lock_interruptible(&dev->mfc_mutex))
- return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&dev->mfc_mutex)) {
+ ret = -ERESTARTSYS;
+ goto err_enter;
+ }
dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
/* Allocate memory for context */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -946,6 +948,7 @@ err_no_ctx:
err_alloc:
dev->num_inst--;
mutex_unlock(&dev->mfc_mutex);
+err_enter:
mfc_debug_leave();
return ret;
}
diff --git a/drivers/media/platform/st/stm32/Kconfig b/drivers/media/platform/st/stm32/Kconfig
index 9df9a2a17728..f12e67bcc9bc 100644
--- a/drivers/media/platform/st/stm32/Kconfig
+++ b/drivers/media/platform/st/stm32/Kconfig
@@ -1,6 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-only
# V4L drivers
+config VIDEO_STM32_CSI
+ tristate "STM32 Camera Serial Interface (CSI) support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_STM32 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This module makes the STM32 Camera Serial Interface (CSI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-csi.
+
config VIDEO_STM32_DCMI
tristate "STM32 Digital Camera Memory Interface (DCMI) support"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/st/stm32/Makefile b/drivers/media/platform/st/stm32/Makefile
index 7ed8297b9b19..9ae57897f030 100644
--- a/drivers/media/platform/st/stm32/Makefile
+++ b/drivers/media/platform/st/stm32/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STM32_CSI) += stm32-csi.o
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp/
stm32-dma2d-objs := dma2d/dma2d.o dma2d/dma2d-hw.o
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
new file mode 100644
index 000000000000..48941aae8c9b
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Camera Serial Interface
+ *
+ * Copyright (C) STMicroelectronics SA 2024
+ * Author: Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define STM32_CSI_CR 0x0000
+#define STM32_CSI_CR_CSIEN BIT(0)
+#define STM32_CSI_CR_VCXSTART(x) BIT(2 + ((x) * 4))
+#define STM32_CSI_CR_VCXSTOP(x) BIT(3 + ((x) * 4))
+#define STM32_CSI_PCR 0x0004
+#define STM32_CSI_PCR_DL1EN BIT(3)
+#define STM32_CSI_PCR_DL0EN BIT(2)
+#define STM32_CSI_PCR_CLEN BIT(1)
+#define STM32_CSI_PCR_PWRDOWN BIT(0)
+#define STM32_CSI_VCXCFGR1(x) ((((x) + 1) * 0x0010) + 0x0)
+#define STM32_CSI_VCXCFGR1_ALLDT BIT(0)
+#define STM32_CSI_VCXCFGR1_DT0EN BIT(1)
+#define STM32_CSI_VCXCFGR1_DT1EN BIT(2)
+#define STM32_CSI_VCXCFGR1_CDTFT_SHIFT 8
+#define STM32_CSI_VCXCFGR1_DT0_SHIFT 16
+#define STM32_CSI_VCXCFGR1_DT0FT_SHIFT 24
+#define STM32_CSI_VCXCFGR2(x) ((((x) + 1) * 0x0010) + 0x4)
+#define STM32_CSI_VCXCFGR2_DT1_SHIFT 0
+#define STM32_CSI_VCXCFGR2_DT1FT_SHIFT 8
+#define STM32_CSI_INPUT_BPP8 2
+#define STM32_CSI_INPUT_BPP10 3
+#define STM32_CSI_INPUT_BPP12 4
+#define STM32_CSI_INPUT_BPP14 5
+#define STM32_CSI_LMCFGR 0x0070
+#define STM32_CSI_LMCFGR_LANENB_SHIFT 8
+#define STM32_CSI_LMCFGR_DLMAP_SHIFT 16
+#define STM32_CSI_IER0 0x0080
+#define STM32_CSI_IER1 0x0084
+#define STM32_CSI_SR0 0x0090
+#define STM32_CSI_SR0_SYNCERRF BIT(30)
+#define STM32_CSI_SR0_SPKTERRF BIT(28)
+#define STM32_CSI_SR0_IDERRF BIT(27)
+#define STM32_CSI_SR0_CECCERRF BIT(26)
+#define STM32_CSI_SR0_ECCERRF BIT(25)
+#define STM32_CSI_SR0_CRCERRF BIT(24)
+#define STM32_CSI_SR0_CCFIFOFF BIT(21)
+#define STM32_CSI_SR0_VCXSTATEF(x) BIT(17 + (x))
+#define STM32_CSI_SR1 0x0094
+#define STM32_CSI_SR1_ECTRLDL1F BIT(12)
+#define STM32_CSI_SR1_ESYNCESCDL1F BIT(11)
+#define STM32_CSI_SR1_EESCDL1F BIT(10)
+#define STM32_CSI_SR1_ESOTSYNCDL1F BIT(9)
+#define STM32_CSI_SR1_ESOTDL1F BIT(8)
+#define STM32_CSI_SR1_ECTRLDL0F BIT(4)
+#define STM32_CSI_SR1_ESYNCESCDL0F BIT(3)
+#define STM32_CSI_SR1_EESCDL0F BIT(2)
+#define STM32_CSI_SR1_ESOTSYNCDL0F BIT(1)
+#define STM32_CSI_SR1_ESOTDL0F BIT(0)
+#define STM32_CSI_FCR0 0x0100
+#define STM32_CSI_FCR1 0x0104
+#define STM32_CSI_SPDFR 0x0110
+#define STM32_CSI_DT_MASK 0x3f
+#define STM32_CSI_VC_MASK 0x03
+#define STM32_CSI_ERR1 0x0114
+#define STM32_CSI_ERR1_IDVCERR_SHIFT 22
+#define STM32_CSI_ERR1_IDDTERR_SHIFT 16
+#define STM32_CSI_ERR1_CECCVCERR_SHIFT 14
+#define STM32_CSI_ERR1_CECCDTERR_SHIFT 8
+#define STM32_CSI_ERR1_CRCVCERR_SHIFT 6
+#define STM32_CSI_ERR1_CRCDTERR_SHIFT 0
+#define STM32_CSI_ERR2 0x0118
+#define STM32_CSI_ERR2_SYNCVCERR_SHIFT 18
+#define STM32_CSI_ERR2_SPKTVCERR_SHIFT 6
+#define STM32_CSI_ERR2_SPKTDTERR_SHIFT 0
+#define STM32_CSI_PRCR 0x1000
+#define STM32_CSI_PRCR_PEN BIT(1)
+#define STM32_CSI_PMCR 0x1004
+#define STM32_CSI_PFCR 0x1008
+#define STM32_CSI_PFCR_CCFR_MASK GENMASK(5, 0)
+#define STM32_CSI_PFCR_CCFR_SHIFT 0
+#define STM32_CSI_PFCR_HSFR_MASK GENMASK(14, 8)
+#define STM32_CSI_PFCR_HSFR_SHIFT 8
+#define STM32_CSI_PFCR_DLD BIT(16)
+#define STM32_CSI_PTCR0 0x1010
+#define STM32_CSI_PTCR0_TCKEN BIT(0)
+#define STM32_CSI_PTCR1 0x1014
+#define STM32_CSI_PTCR1_TWM BIT(16)
+#define STM32_CSI_PTCR1_TDI_MASK GENMASK(7, 0)
+#define STM32_CSI_PTCR1_TDI_SHIFT 0
+#define STM32_CSI_PTSR 0x1018
+
+#define STM32_CSI_LANES_MAX 2
+
+#define STM32_CSI_SR0_ERRORS (STM32_CSI_SR0_SYNCERRF | STM32_CSI_SR0_SPKTERRF |\
+ STM32_CSI_SR0_IDERRF | STM32_CSI_SR0_CECCERRF |\
+ STM32_CSI_SR0_ECCERRF | STM32_CSI_SR0_CRCERRF |\
+ STM32_CSI_SR0_CCFIFOFF)
+#define STM32_CSI_SR1_DL0_ERRORS (STM32_CSI_SR1_ECTRLDL0F | STM32_CSI_SR1_ESYNCESCDL0F |\
+ STM32_CSI_SR1_EESCDL0F | STM32_CSI_SR1_ESOTSYNCDL0F |\
+ STM32_CSI_SR1_ESOTDL0F)
+#define STM32_CSI_SR1_DL1_ERRORS (STM32_CSI_SR1_ECTRLDL1F | STM32_CSI_SR1_ESYNCESCDL1F |\
+ STM32_CSI_SR1_EESCDL1F | STM32_CSI_SR1_ESOTSYNCDL1F |\
+ STM32_CSI_SR1_ESOTDL1F)
+#define STM32_CSI_SR1_ERRORS (STM32_CSI_SR1_DL0_ERRORS | STM32_CSI_SR1_DL1_ERRORS)
+
+enum stm32_csi_pads {
+ STM32_CSI_PAD_SINK,
+ STM32_CSI_PAD_SOURCE,
+ STM32_CSI_PAD_MAX,
+};
+
+struct stm32_csi_event {
+ u32 mask;
+ const char * const name;
+};
+
+static const struct stm32_csi_event stm32_csi_events_sr0[] = {
+ {STM32_CSI_SR0_SYNCERRF, "Synchronization error"},
+ {STM32_CSI_SR0_SPKTERRF, "Short packet error"},
+ {STM32_CSI_SR0_IDERRF, "Data type ID error"},
+ {STM32_CSI_SR0_CECCERRF, "Corrected ECC error"},
+ {STM32_CSI_SR0_ECCERRF, "ECC error"},
+ {STM32_CSI_SR0_CRCERRF, "CRC error"},
+ {STM32_CSI_SR0_CCFIFOFF, "Clk changer FIFO full error"},
+};
+
+#define STM32_CSI_NUM_SR0_EVENTS ARRAY_SIZE(stm32_csi_events_sr0)
+
+static const struct stm32_csi_event stm32_csi_events_sr1[] = {
+ {STM32_CSI_SR1_ECTRLDL1F, "L1: D-PHY control error"},
+ {STM32_CSI_SR1_ESYNCESCDL1F,
+ "L1: D-PHY low power data transmission synchro error"},
+ {STM32_CSI_SR1_EESCDL1F, "L1: D-PHY escape entry error"},
+ {STM32_CSI_SR1_ESOTSYNCDL1F,
+ "L1: Start of transmission synchro error"},
+ {STM32_CSI_SR1_ESOTDL1F, "L1: Start of transmission error"},
+ {STM32_CSI_SR1_ECTRLDL0F, "L0: D-PHY control error"},
+ {STM32_CSI_SR1_ESYNCESCDL0F,
+ "L0: D-PHY low power data transmission synchro error"},
+ {STM32_CSI_SR1_EESCDL0F, "L0: D-PHY escape entry error"},
+ {STM32_CSI_SR1_ESOTSYNCDL0F,
+ "L0: Start of transmission synchro error"},
+ {STM32_CSI_SR1_ESOTDL0F, "L0: Start of transmission error"},
+};
+
+#define STM32_CSI_NUM_SR1_EVENTS ARRAY_SIZE(stm32_csi_events_sr1)
+
+enum stm32_csi_clk {
+ STM32_CSI_CLK_PCLK,
+ STM32_CSI_CLK_TXESC,
+ STM32_CSI_CLK_CSI2PHY,
+ STM32_CSI_CLK_NB,
+};
+
+static const char * const stm32_csi_clks_id[] = {
+ "pclk",
+ "txesc",
+ "csi2phy",
+};
+
+struct stm32_csi_dev {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk_bulk_data clks[STM32_CSI_CLK_NB];
+ struct regulator_bulk_data supplies[2];
+
+ u8 lanes[STM32_CSI_LANES_MAX];
+ u8 num_lanes;
+
+ /*
+ * spinlock slock protects the srX_counters tables, which are
+ * accessed from both log_status and interrupt context
+ */
+ spinlock_t slock;
+
+ u32 sr0_counters[STM32_CSI_NUM_SR0_EVENTS];
+ u32 sr1_counters[STM32_CSI_NUM_SR1_EVENTS];
+
+ struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
+ struct media_pad pads[STM32_CSI_PAD_MAX];
+
+ /* Remote source */
+ struct v4l2_subdev *s_subdev;
+ u32 s_subdev_pad_nb;
+};
+
+struct stm32_csi_fmts {
+ u32 code;
+ u32 datatype;
+ u32 input_fmt;
+ u8 bpp;
+};
+
+#define FMT_MBUS_DT_DTFMT_BPP(mbus, dt, input, bitsperpixel) \
+ { \
+ .code = MEDIA_BUS_FMT_##mbus, \
+ .datatype = MIPI_CSI2_DT_##dt, \
+ .input_fmt = STM32_CSI_INPUT_##input, \
+ .bpp = bitsperpixel, \
+ }
+static const struct stm32_csi_fmts stm32_csi_formats[] = {
+ /* YUV 422 8 bit */
+ FMT_MBUS_DT_DTFMT_BPP(UYVY8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(YUYV8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(YVYU8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(VYUY8_1X16, YUV422_8B, BPP8, 8),
+
+ /* Raw Bayer */
+ /* 8 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SBGGR8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB8_1X8, RAW8, BPP8, 8),
+ /* 10 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SBGGR10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB10_1X10, RAW10, BPP10, 10),
+ /* 12 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SBGGR12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB12_1X12, RAW12, BPP12, 12),
+ /* 14 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SBGGR14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB14_1X14, RAW14, BPP14, 14),
+
+ /* RGB 565 */
+ FMT_MBUS_DT_DTFMT_BPP(RGB565_1X16, RGB565, BPP8, 8),
+
+ /* JPEG (datatype isn't used) */
+ FMT_MBUS_DT_DTFMT_BPP(JPEG_1X8, NULL, BPP8, 8),
+};
+
+struct stm32_csi_mbps_phy_reg {
+ unsigned int mbps;
+ unsigned int hsfreqrange;
+ unsigned int osc_freq_target;
+};
+
+/*
+ * Table describing the configuration of the PHY depending on the
+ * intended bit rate. From Table 5-8 "Frequency Ranges and Defaults"
+ * of the Synopsys DWC MIPI D-PHY databook.
+ */
+static const struct stm32_csi_mbps_phy_reg snps_stm32mp25[] = {
+ { .mbps = 80, .hsfreqrange = 0x00, .osc_freq_target = 460 },
+ { .mbps = 90, .hsfreqrange = 0x10, .osc_freq_target = 460 },
+ { .mbps = 100, .hsfreqrange = 0x20, .osc_freq_target = 460 },
+ { .mbps = 110, .hsfreqrange = 0x30, .osc_freq_target = 460 },
+ { .mbps = 120, .hsfreqrange = 0x01, .osc_freq_target = 460 },
+ { .mbps = 130, .hsfreqrange = 0x11, .osc_freq_target = 460 },
+ { .mbps = 140, .hsfreqrange = 0x21, .osc_freq_target = 460 },
+ { .mbps = 150, .hsfreqrange = 0x31, .osc_freq_target = 460 },
+ { .mbps = 160, .hsfreqrange = 0x02, .osc_freq_target = 460 },
+ { .mbps = 170, .hsfreqrange = 0x12, .osc_freq_target = 460 },
+ { .mbps = 180, .hsfreqrange = 0x22, .osc_freq_target = 460 },
+ { .mbps = 190, .hsfreqrange = 0x32, .osc_freq_target = 460 },
+ { .mbps = 205, .hsfreqrange = 0x03, .osc_freq_target = 460 },
+ { .mbps = 220, .hsfreqrange = 0x13, .osc_freq_target = 460 },
+ { .mbps = 235, .hsfreqrange = 0x23, .osc_freq_target = 460 },
+ { .mbps = 250, .hsfreqrange = 0x33, .osc_freq_target = 460 },
+ { .mbps = 275, .hsfreqrange = 0x04, .osc_freq_target = 460 },
+ { .mbps = 300, .hsfreqrange = 0x14, .osc_freq_target = 460 },
+ { .mbps = 325, .hsfreqrange = 0x25, .osc_freq_target = 460 },
+ { .mbps = 350, .hsfreqrange = 0x35, .osc_freq_target = 460 },
+ { .mbps = 400, .hsfreqrange = 0x05, .osc_freq_target = 460 },
+ { .mbps = 450, .hsfreqrange = 0x16, .osc_freq_target = 460 },
+ { .mbps = 500, .hsfreqrange = 0x26, .osc_freq_target = 460 },
+ { .mbps = 550, .hsfreqrange = 0x37, .osc_freq_target = 460 },
+ { .mbps = 600, .hsfreqrange = 0x07, .osc_freq_target = 460 },
+ { .mbps = 650, .hsfreqrange = 0x18, .osc_freq_target = 460 },
+ { .mbps = 700, .hsfreqrange = 0x28, .osc_freq_target = 460 },
+ { .mbps = 750, .hsfreqrange = 0x39, .osc_freq_target = 460 },
+ { .mbps = 800, .hsfreqrange = 0x09, .osc_freq_target = 460 },
+ { .mbps = 850, .hsfreqrange = 0x19, .osc_freq_target = 460 },
+ { .mbps = 900, .hsfreqrange = 0x29, .osc_freq_target = 460 },
+ { .mbps = 950, .hsfreqrange = 0x3a, .osc_freq_target = 460 },
+ { .mbps = 1000, .hsfreqrange = 0x0a, .osc_freq_target = 460 },
+ { .mbps = 1050, .hsfreqrange = 0x1a, .osc_freq_target = 460 },
+ { .mbps = 1100, .hsfreqrange = 0x2a, .osc_freq_target = 460 },
+ { .mbps = 1150, .hsfreqrange = 0x3b, .osc_freq_target = 460 },
+ { .mbps = 1200, .hsfreqrange = 0x0b, .osc_freq_target = 460 },
+ { .mbps = 1250, .hsfreqrange = 0x1b, .osc_freq_target = 460 },
+ { .mbps = 1300, .hsfreqrange = 0x2b, .osc_freq_target = 460 },
+ { .mbps = 1350, .hsfreqrange = 0x3c, .osc_freq_target = 460 },
+ { .mbps = 1400, .hsfreqrange = 0x0c, .osc_freq_target = 460 },
+ { .mbps = 1450, .hsfreqrange = 0x1c, .osc_freq_target = 460 },
+ { .mbps = 1500, .hsfreqrange = 0x2c, .osc_freq_target = 460 },
+ { .mbps = 1550, .hsfreqrange = 0x3d, .osc_freq_target = 285 },
+ { .mbps = 1600, .hsfreqrange = 0x0d, .osc_freq_target = 295 },
+ { .mbps = 1650, .hsfreqrange = 0x1d, .osc_freq_target = 304 },
+ { .mbps = 1700, .hsfreqrange = 0x2e, .osc_freq_target = 313 },
+ { .mbps = 1750, .hsfreqrange = 0x3e, .osc_freq_target = 322 },
+ { .mbps = 1800, .hsfreqrange = 0x0e, .osc_freq_target = 331 },
+ { .mbps = 1850, .hsfreqrange = 0x1e, .osc_freq_target = 341 },
+ { .mbps = 1900, .hsfreqrange = 0x2f, .osc_freq_target = 350 },
+ { .mbps = 1950, .hsfreqrange = 0x3f, .osc_freq_target = 359 },
+ { .mbps = 2000, .hsfreqrange = 0x0f, .osc_freq_target = 368 },
+ { .mbps = 2050, .hsfreqrange = 0x40, .osc_freq_target = 377 },
+ { .mbps = 2100, .hsfreqrange = 0x41, .osc_freq_target = 387 },
+ { .mbps = 2150, .hsfreqrange = 0x42, .osc_freq_target = 396 },
+ { .mbps = 2200, .hsfreqrange = 0x43, .osc_freq_target = 405 },
+ { .mbps = 2250, .hsfreqrange = 0x44, .osc_freq_target = 414 },
+ { .mbps = 2300, .hsfreqrange = 0x45, .osc_freq_target = 423 },
+ { .mbps = 2350, .hsfreqrange = 0x46, .osc_freq_target = 432 },
+ { .mbps = 2400, .hsfreqrange = 0x47, .osc_freq_target = 442 },
+ { .mbps = 2450, .hsfreqrange = 0x48, .osc_freq_target = 451 },
+ { .mbps = 2500, .hsfreqrange = 0x49, .osc_freq_target = 460 },
+ { /* sentinel */ }
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_REC709,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
+static const struct stm32_csi_fmts *stm32_csi_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(stm32_csi_formats); i++)
+ if (stm32_csi_formats[i].code == code)
+ return &stm32_csi_formats[i];
+
+ return NULL;
+}
+
+static inline struct stm32_csi_dev *to_csidev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct stm32_csi_dev, sd);
+}
+
+static int stm32_csi_setup_lane_merger(struct stm32_csi_dev *csidev)
+{
+ u32 lmcfgr = 0;
+ int i;
+
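+ /* Each logical lane maps to a 4-bit field of LMCFGR, starting at DLMAP_SHIFT */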
+ for (i = 0; i < csidev->num_lanes; i++) {
+ if (!csidev->lanes[i] || csidev->lanes[i] > STM32_CSI_LANES_MAX) {
+ dev_err(csidev->dev, "Invalid lane id (%d)\n", csidev->lanes[i]);
+ return -EINVAL;
+ }
+ lmcfgr |= (csidev->lanes[i] << ((i * 4) + STM32_CSI_LMCFGR_DLMAP_SHIFT));
+ }
+
+ lmcfgr |= (csidev->num_lanes << STM32_CSI_LMCFGR_LANENB_SHIFT);
+
+ writel_relaxed(lmcfgr, csidev->base + STM32_CSI_LMCFGR);
+
+ return 0;
+}
+
+static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
+ u32 addr, u32 val)
+{
+ /* Based on the sequence described in section 5.2.3.2 of the DesignWare document */
+ /* For writing the 4-bit testcode MSBs */
+ /* Set testen to high */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Place 0x00 in testdin */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /*
+ * Set testclk to low (with the falling edge on testclk, the testdin
+ * signal content is latched internally)
+ */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR1);
+
+ /* Place the 8-bit word corresponding to the testcode MSBs in testdin */
+ writel_relaxed(((addr >> 8) & STM32_CSI_PTCR1_TDI_MASK) << STM32_CSI_PTCR1_TDI_SHIFT,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* For writing the 8-bit testcode LSBs */
+ /* Set testclk to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to high */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Place the 8-bit word test data in testdin */
+ writel_relaxed((addr & STM32_CSI_PTCR1_TDI_MASK) <<
+ STM32_CSI_PTCR1_TDI_SHIFT | STM32_CSI_PTCR1_TWM,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /*
+ * Set testclk to low (with the falling edge on testclk, the testdin
+ * signal content is latched internally)
+ */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR1);
+
+ /* For writing the data */
+ /* Place the 8-bit word corresponding to the page offset in testdin */
+ writel_relaxed((val & STM32_CSI_PTCR1_TDI_MASK) << STM32_CSI_PTCR1_TDI_SHIFT,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high (test data is programmed internally) */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Finish by setting testclk to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+}
+
+static int stm32_csi_start(struct stm32_csi_dev *csidev,
+ struct v4l2_subdev_state *state)
+{
+ const struct stm32_csi_mbps_phy_reg *phy_regs;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ const struct stm32_csi_fmts *fmt;
+ unsigned long phy_clk_frate;
+ unsigned int mbps;
+ u32 lanes_ie = 0;
+ u32 lanes_en = 0;
+ s64 link_freq;
+ int ret;
+ u32 ccfr;
+
+ dev_dbg(csidev->dev, "Starting the CSI2\n");
+
+ /* Get the bpp value on pad0 (input of CSI) */
+ sink_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SINK);
+ fmt = stm32_csi_code_to_fmt(sink_fmt->code);
+
+ /* Get the remote sensor link frequency */
+ if (!csidev->s_subdev)
+ return -EIO;
+
+ link_freq = v4l2_get_link_freq(csidev->s_subdev->ctrl_handler,
+ fmt->bpp, 2 * csidev->num_lanes);
+ if (link_freq < 0)
+ return link_freq;
+
+ /* mbps is expressed in Mbps, hence link_freq * 2 / 1000000, i.e. link_freq / 500000 */
+ mbps = div_s64(link_freq, 500000);
+ dev_dbg(csidev->dev, "Computed Mbps: %u\n", mbps);
+
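+ /* Pick the first PHY table entry whose rate is at least the computed one */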
+ for (phy_regs = snps_stm32mp25; phy_regs->mbps != 0; phy_regs++)
+ if (phy_regs->mbps >= mbps)
+ break;
+
+ if (!phy_regs->mbps) {
+ dev_err(csidev->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ dev_dbg(csidev->dev, "PHY settings: (%u Mbps, %u HS FRange, %u OSC Freq)\n",
+ phy_regs->mbps, phy_regs->hsfreqrange,
+ phy_regs->osc_freq_target);
+
+ /* Prepare lanes related configuration bits */
+ lanes_ie |= STM32_CSI_SR1_DL0_ERRORS;
+ lanes_en |= STM32_CSI_PCR_DL0EN;
+ if (csidev->num_lanes == 2) {
+ lanes_ie |= STM32_CSI_SR1_DL1_ERRORS;
+ lanes_en |= STM32_CSI_PCR_DL1EN;
+ }
+
+ ret = pm_runtime_resume_and_get(csidev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Retrieve CSI2PHY clock rate to compute CCFR value */
+ phy_clk_frate = clk_get_rate(csidev->clks[STM32_CSI_CLK_CSI2PHY].clk);
+ if (!phy_clk_frate) {
+ pm_runtime_put(csidev->dev);
+ dev_err(csidev->dev, "CSI2PHY clock rate invalid (0)\n");
+ return -EINVAL;
+ }
+
+ ret = stm32_csi_setup_lane_merger(csidev);
+ if (ret) {
+ pm_runtime_put(csidev->dev);
+ return ret;
+ }
+
+ /* Enable the CSI */
+ writel_relaxed(STM32_CSI_CR_CSIEN, csidev->base + STM32_CSI_CR);
+
+ /* Enable some global CSI related interrupts - bits are same as SR0 */
+ writel_relaxed(STM32_CSI_SR0_ERRORS, csidev->base + STM32_CSI_IER0);
+
+ /* Enable lanes related error interrupts */
+ writel_relaxed(lanes_ie, csidev->base + STM32_CSI_IER1);
+
+ /* Initialization of the D-PHY */
+ /* Stop the D-PHY */
+ writel_relaxed(0, csidev->base + STM32_CSI_PRCR);
+
+ /* Keep the D-PHY in power down state */
+ writel_relaxed(0, csidev->base + STM32_CSI_PCR);
+
+ /* Enable the testclr clock for at least 15 ns */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+ udelay(1);
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set the cfgclkfreqrange (CCFR) and hsfreqrange */
+ phy_clk_frate /= 1000000;
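+ /* cfgclkfreqrange[5:0] = (cfg clock frequency in MHz - 17) * 4, per the DWC PHY databook */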
+ ccfr = (phy_clk_frate - 17) * 4;
+ writel_relaxed((ccfr << STM32_CSI_PFCR_CCFR_SHIFT) |
+ (phy_regs->hsfreqrange << STM32_CSI_PFCR_HSFR_SHIFT),
+ csidev->base + STM32_CSI_PFCR);
+
+ /* set reg @08 deskew_polarity_rw 1'b1 */
+ stm32_csi_phy_reg_write(csidev, 0x08, 0x38);
+
+ /* set reg @0xE4 counter_for_des_en_config_if_rx 0x10 + DLL prog EN */
+ /* This is because 13 <= cfgclkfreqrange[5:0] <= 38 */
+ stm32_csi_phy_reg_write(csidev, 0xe4, 0x11);
+
+ /* set reg @0xe2 & reg @0xe3 value DLL target oscillation freq */
+ /* Based on the table on page 77, osc_freq_target */
+ stm32_csi_phy_reg_write(csidev, 0xe2, phy_regs->osc_freq_target & 0xFF);
+ stm32_csi_phy_reg_write(csidev, 0xe3, (phy_regs->osc_freq_target >> 8) & 0x0F);
+
+ writel_relaxed(STM32_CSI_PFCR_DLD | readl_relaxed(csidev->base + STM32_CSI_PFCR),
+ csidev->base + STM32_CSI_PFCR);
+
+ /* Enable Lanes */
+ writel_relaxed(lanes_en | STM32_CSI_PCR_CLEN, csidev->base + STM32_CSI_PCR);
+ writel_relaxed(lanes_en | STM32_CSI_PCR_CLEN | STM32_CSI_PCR_PWRDOWN,
+ csidev->base + STM32_CSI_PCR);
+
+ writel_relaxed(STM32_CSI_PRCR_PEN, csidev->base + STM32_CSI_PRCR);
+
+ /* Remove the force */
+ writel_relaxed(0, csidev->base + STM32_CSI_PMCR);
+
+ return 0;
+}
+
+static void stm32_csi_stop(struct stm32_csi_dev *csidev)
+{
+ dev_dbg(csidev->dev, "Stopping the CSI2\n");
+
+ /* Disable the D-PHY */
+ writel_relaxed(0, csidev->base + STM32_CSI_PCR);
+
+ /* Disable ITs */
+ writel_relaxed(0, csidev->base + STM32_CSI_IER0);
+ writel_relaxed(0, csidev->base + STM32_CSI_IER1);
+
+ /* Disable the CSI */
+ writel_relaxed(0, csidev->base + STM32_CSI_CR);
+
+ pm_runtime_put(csidev->dev);
+}
+
+static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
+ struct v4l2_subdev_state *state, u32 vc)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt;
+ const struct stm32_csi_fmts *fmt;
+ u32 cfgr1 = 0;
+ int ret = 0;
+ u32 status;
+
+ mbus_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
+ fmt = stm32_csi_code_to_fmt(mbus_fmt->code);
+
+ /* If the mbus code is JPEG, don't enable filtering */
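+ /* ALLDT mode accepts all incoming data types on the virtual channel */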
+ if (mbus_fmt->code == MEDIA_BUS_FMT_JPEG_1X8) {
+ cfgr1 |= STM32_CSI_VCXCFGR1_ALLDT;
+ cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_CDTFT_SHIFT;
+ dev_dbg(csidev->dev, "VC%d: enable AllDT mode\n", vc);
+ } else {
+ cfgr1 |= fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
+ cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_DT0FT_SHIFT;
+ cfgr1 |= STM32_CSI_VCXCFGR1_DT0EN;
+ dev_dbg(csidev->dev, "VC%d: enable DT0(0x%x)/DT0FT(0x%x)\n",
+ vc, fmt->datatype, fmt->input_fmt);
+ }
+ writel_relaxed(cfgr1, csidev->base + STM32_CSI_VCXCFGR1(vc));
+
+ /* Enable processing of the virtual-channel and wait for its status */
+ writel_relaxed(STM32_CSI_CR_VCXSTART(vc) | STM32_CSI_CR_CSIEN,
+ csidev->base + STM32_CSI_CR);
+
+ ret = readl_relaxed_poll_timeout(csidev->base + STM32_CSI_SR0,
+ status,
+ status & STM32_CSI_SR0_VCXSTATEF(vc),
+ 1000, 1000000);
+ if (ret) {
+ dev_err(csidev->dev, "failed to start VC(%d)\n", vc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_csi_stop_vc(struct stm32_csi_dev *csidev, u32 vc)
+{
+ int ret = 0;
+ u32 status;
+
+ /* Stop the Virtual Channel */
+ writel_relaxed(STM32_CSI_CR_VCXSTOP(vc) | STM32_CSI_CR_CSIEN,
+ csidev->base + STM32_CSI_CR);
+
+ ret = readl_relaxed_poll_timeout(csidev->base + STM32_CSI_SR0,
+ status,
+ !(status & STM32_CSI_SR0_VCXSTATEF(vc)),
+ 1000, 1000000);
+ if (ret) {
+ dev_err(csidev->dev, "failed to stop VC(%d)\n", vc);
+ return ret;
+ }
+
+ /* Disable all DTs */
+ writel_relaxed(0, csidev->base + STM32_CSI_VCXCFGR1(vc));
+ writel_relaxed(0, csidev->base + STM32_CSI_VCXCFGR2(vc));
+
+ return 0;
+}
+
+static int stm32_csi_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int ret;
+
+ ret = v4l2_subdev_disable_streams(csidev->s_subdev,
+ csidev->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret)
+ return ret;
+
+ /* Stop the VC0 */
+ ret = stm32_csi_stop_vc(csidev, 0);
+ if (ret)
+ dev_err(csidev->dev, "Failed to stop VC0\n");
+
+ stm32_csi_stop(csidev);
+
+ return 0;
+}
+
+static int stm32_csi_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int ret;
+
+ ret = stm32_csi_start(csidev, state);
+ if (ret)
+ return ret;
+
+ /* Configure & start the VC0 */
+ ret = stm32_csi_start_vc(csidev, state, 0);
+ if (ret) {
+ dev_err(csidev->dev, "Failed to start VC0\n");
+ stm32_csi_stop(csidev);
+ return ret;
+ }
+
+ ret = v4l2_subdev_enable_streams(csidev->s_subdev,
+ csidev->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret) {
+ stm32_csi_stop_vc(csidev, 0);
+ stm32_csi_stop(csidev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++)
+ *v4l2_subdev_state_get_format(state, i) = fmt_default;
+
+ return 0;
+}
+
+static int stm32_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(stm32_csi_formats))
+ return -EINVAL;
+
+ code->code = stm32_csi_formats[code->index].code;
+ return 0;
+}
+
+static int stm32_csi_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+ const struct stm32_csi_fmts *fmt;
+
+ fmt = stm32_csi_code_to_fmt(format->format.code);
+ if (!fmt) {
+ dev_dbg(csidev->dev, "Unsupported code %d, use default\n",
+ format->format.code);
+ format->format.code = fmt_default.code;
+ }
+
+ framefmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SINK);
+
+ if (format->pad == STM32_CSI_PAD_SOURCE)
+ format->format = *framefmt;
+ else
+ *framefmt = format->format;
+
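+ /* The source pad mirrors the sink pad format and cannot be set independently */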
+ framefmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
+ *framefmt = format->format;
+
+ return 0;
+}
+
+static int stm32_csi_log_status(struct v4l2_subdev *sd)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&csidev->slock, flags);
+
+ for (i = 0; i < STM32_CSI_NUM_SR0_EVENTS; i++) {
+ if (csidev->sr0_counters[i])
+ dev_info(csidev->dev, "%s events: %d\n",
+ stm32_csi_events_sr0[i].name,
+ csidev->sr0_counters[i]);
+ }
+
+ for (i = 0; i < STM32_CSI_NUM_SR1_EVENTS; i++) {
+ if (csidev->sr1_counters[i])
+ dev_info(csidev->dev, "%s events: %d\n",
+ stm32_csi_events_sr1[i].name,
+ csidev->sr1_counters[i]);
+ }
+
+ spin_unlock_irqrestore(&csidev->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops stm32_csi_core_ops = {
+ .log_status = stm32_csi_log_status,
+};
+
+static const struct v4l2_subdev_video_ops stm32_csi_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops stm32_csi_pad_ops = {
+ .enum_mbus_code = stm32_csi_enum_mbus_code,
+ .set_fmt = stm32_csi_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enable_streams = stm32_csi_enable_streams,
+ .disable_streams = stm32_csi_disable_streams,
+};
+
+static const struct v4l2_subdev_ops stm32_csi_subdev_ops = {
+ .core = &stm32_csi_core_ops,
+ .pad = &stm32_csi_pad_ops,
+ .video = &stm32_csi_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops stm32_csi_subdev_internal_ops = {
+ .init_state = stm32_csi_init_state,
+};
+
+static int stm32_csi_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *s_subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct v4l2_subdev *sd = notifier->sd;
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int remote_pad;
+
+ remote_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
+ s_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (remote_pad < 0) {
+ dev_err(csidev->dev, "Couldn't find output pad for subdev %s\n",
+ s_subdev->name);
+ return remote_pad;
+ }
+
+ csidev->s_subdev = s_subdev;
+ csidev->s_subdev_pad_nb = remote_pad;
+
+ return media_create_pad_link(&csidev->s_subdev->entity,
+ remote_pad, &csidev->sd.entity,
+ STM32_CSI_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations stm32_csi_notifier_ops = {
+ .bound = stm32_csi_async_bound,
+};
+
+static irqreturn_t stm32_csi_irq_thread(int irq, void *arg)
+{
+ struct stm32_csi_dev *csidev = arg;
+ unsigned long flags;
+ u32 sr0, sr1;
+ int i;
+
+ sr0 = readl_relaxed(csidev->base + STM32_CSI_SR0);
+ sr1 = readl_relaxed(csidev->base + STM32_CSI_SR1);
+
+ /* Clear interrupt */
+ writel_relaxed(sr0 & STM32_CSI_SR0_ERRORS,
+ csidev->base + STM32_CSI_FCR0);
+ writel_relaxed(sr1 & STM32_CSI_SR1_ERRORS,
+ csidev->base + STM32_CSI_FCR1);
+
+ spin_lock_irqsave(&csidev->slock, flags);
+
+ for (i = 0; i < STM32_CSI_NUM_SR0_EVENTS; i++)
+ if (sr0 & stm32_csi_events_sr0[i].mask)
+ csidev->sr0_counters[i]++;
+
+ for (i = 0; i < STM32_CSI_NUM_SR1_EVENTS; i++)
+ if (sr1 & stm32_csi_events_sr1[i].mask)
+ csidev->sr1_counters[i]++;
+
+ spin_unlock_irqrestore(&csidev->slock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_csi_get_resources(struct stm32_csi_dev *csidev,
+ struct platform_device *pdev)
+{
+ int irq, ret, i;
+
+ csidev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(csidev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csidev->base),
+ "Failed to ioremap resource\n");
+
+ for (i = 0; i < STM32_CSI_CLK_NB; i++)
+ csidev->clks[i].id = stm32_csi_clks_id[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, STM32_CSI_CLK_NB,
+ csidev->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't get clks\n");
+
+ csidev->supplies[0].supply = "vdd";
+ csidev->supplies[1].supply = "vdda18";
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request regulator vdd\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ stm32_csi_irq_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), csidev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Unable to request irq");
+
+ return 0;
+}
+
+static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ /* Get bus characteristics from devicetree */
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csidev->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep) {
+ dev_err(csidev->dev, "Could not find the endpoint\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(csidev->dev, "Could not parse v4l2 endpoint\n");
+ return ret;
+ }
+
+ csidev->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (csidev->num_lanes > STM32_CSI_LANES_MAX) {
+ dev_err(csidev->dev, "Unsupported number of data-lanes: %d\n",
+ csidev->num_lanes);
+ return -EINVAL;
+ }
+
+ memcpy(csidev->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
+ sizeof(csidev->lanes));
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(csidev->dev), NULL);
+ if (!ep) {
+ dev_err(csidev->dev, "Failed to get next endpoint\n");
+ return -EINVAL;
+ }
+
+ v4l2_async_subdev_nf_init(&csidev->notifier, &csidev->sd);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csidev->notifier, ep,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ dev_err(csidev->dev, "Failed to add fwnode remote subdev\n");
+ return PTR_ERR(asd);
+ }
+
+ csidev->notifier.ops = &stm32_csi_notifier_ops;
+
+ ret = v4l2_async_nf_register(&csidev->notifier);
+ if (ret) {
+ dev_err(csidev->dev, "Failed to register notifier\n");
+ v4l2_async_nf_cleanup(&csidev->notifier);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int stm32_csi_probe(struct platform_device *pdev)
+{
+ struct stm32_csi_dev *csidev;
+ struct reset_control *rstc;
+ int ret;
+
+ csidev = devm_kzalloc(&pdev->dev, sizeof(*csidev), GFP_KERNEL);
+ if (!csidev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, csidev);
+ csidev->dev = &pdev->dev;
+
+ spin_lock_init(&csidev->slock);
+
+ ret = stm32_csi_get_resources(csidev, pdev);
+ if (ret)
+ goto err_free_priv;
+
+ ret = stm32_csi_parse_dt(csidev);
+ if (ret)
+ goto err_free_priv;
+
+ csidev->sd.owner = THIS_MODULE;
+ csidev->sd.dev = &pdev->dev;
+ csidev->sd.internal_ops = &stm32_csi_subdev_internal_ops;
+ v4l2_subdev_init(&csidev->sd, &stm32_csi_subdev_ops);
+ v4l2_set_subdevdata(&csidev->sd, &pdev->dev);
+ snprintf(csidev->sd.name, sizeof(csidev->sd.name), "%s",
+ dev_name(&pdev->dev));
+
+ /* Create our media pads */
+ csidev->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csidev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csidev->pads[STM32_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csidev->pads[STM32_CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csidev->sd.entity, STM32_CSI_PAD_MAX,
+ csidev->pads);
+ if (ret)
+ goto err_cleanup;
+
+ ret = v4l2_subdev_init_finalize(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = v4l2_async_register_subdev(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ /* Reset device */
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
+ goto err_cleanup;
+ }
+
+ ret = reset_control_assert(rstc);
+ if (ret) {
+ ret = dev_err_probe(&pdev->dev, ret,
+ "Failed to assert the reset line\n");
+ goto err_cleanup;
+ }
+
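+ /* Hold the reset line asserted briefly before releasing it */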
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ ret = dev_err_probe(&pdev->dev, ret,
+ "Failed to deassert the reset line\n");
+ goto err_cleanup;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev,
+ "Probed CSI with %u lanes\n", csidev->num_lanes);
+
+ return 0;
+
+err_cleanup:
+ v4l2_async_nf_cleanup(&csidev->notifier);
+err_free_priv:
+ return ret;
+}
+
+static void stm32_csi_remove(struct platform_device *pdev)
+{
+ struct stm32_csi_dev *csidev = platform_get_drvdata(pdev);
+
+ v4l2_async_unregister_subdev(&csidev->sd);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int stm32_csi_runtime_suspend(struct device *dev)
+{
+ struct stm32_csi_dev *csidev = dev_get_drvdata(dev);
+ int ret;
+
+ clk_bulk_disable_unprepare(STM32_CSI_CLK_NB, csidev->clks);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret < 0)
+ dev_err(dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int stm32_csi_runtime_resume(struct device *dev)
+{
+ struct stm32_csi_dev *csidev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret)
+ goto error_out;
+
+ ret = clk_bulk_prepare_enable(STM32_CSI_CLK_NB, csidev->clks);
+ if (ret)
+ goto error_disable_supplies;
+
+ return 0;
+
+error_disable_supplies:
+ ret = regulator_bulk_disable(ARRAY_SIZE(csidev->supplies), csidev->supplies);
+ if (ret < 0)
+ dev_err(dev, "cannot disable regulators %d\n", ret);
+error_out:
+ dev_err(csidev->dev, "Failed to resume: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id stm32_csi_of_table[] = {
+ { .compatible = "st,stm32mp25-csi", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_csi_of_table);
+
+static const struct dev_pm_ops stm32_csi_pm_ops = {
+ RUNTIME_PM_OPS(stm32_csi_runtime_suspend,
+ stm32_csi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_csi_driver = {
+ .driver = {
+ .name = "stm32-csi",
+ .of_match_table = stm32_csi_of_table,
+ .pm = pm_ptr(&stm32_csi_pm_ops),
+ },
+ .probe = stm32_csi_probe,
+ .remove = stm32_csi_remove,
+};
+
+module_platform_driver(stm32_csi_driver);
+
+MODULE_AUTHOR("Alain Volmat <alain.volmat@foss.st.com>");
+MODULE_DESCRIPTION("STM32 CSI controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
index 8920d9388a21..159105fb40b8 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-stm32-dcmipp-y := dcmipp-core.o dcmipp-common.o dcmipp-parallel.o dcmipp-byteproc.o dcmipp-bytecap.o
+stm32-dcmipp-y := dcmipp-core.o dcmipp-common.o dcmipp-input.o dcmipp-byteproc.o dcmipp-bytecap.o
obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp.o
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
index 7edd49bfe7e5..1c1b6b48918e 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
@@ -56,15 +56,32 @@ struct dcmipp_bytecap_pix_map {
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
+ PIXMAP_MBUS_PFMT(RGB565_1X16, RGB565),
PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
+ PIXMAP_MBUS_PFMT(YUYV8_1X16, YUYV),
PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
+ PIXMAP_MBUS_PFMT(YVYU8_1X16, YVYU),
PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
+ PIXMAP_MBUS_PFMT(UYVY8_1X16, UYVY),
PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
+ PIXMAP_MBUS_PFMT(VYUY8_1X16, VYUY),
PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
+ PIXMAP_MBUS_PFMT(SBGGR10_1X10, SBGGR10),
+ PIXMAP_MBUS_PFMT(SGBRG10_1X10, SGBRG10),
+ PIXMAP_MBUS_PFMT(SGRBG10_1X10, SGRBG10),
+ PIXMAP_MBUS_PFMT(SRGGB10_1X10, SRGGB10),
+ PIXMAP_MBUS_PFMT(SBGGR12_1X12, SBGGR12),
+ PIXMAP_MBUS_PFMT(SGBRG12_1X12, SGBRG12),
+ PIXMAP_MBUS_PFMT(SGRBG12_1X12, SGRBG12),
+ PIXMAP_MBUS_PFMT(SRGGB12_1X12, SRGGB12),
+ PIXMAP_MBUS_PFMT(SBGGR14_1X14, SBGGR14),
+ PIXMAP_MBUS_PFMT(SGBRG14_1X14, SGBRG14),
+ PIXMAP_MBUS_PFMT(SGRBG14_1X14, SGRBG14),
+ PIXMAP_MBUS_PFMT(SRGGB14_1X14, SRGGB14),
PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};
@@ -112,6 +129,7 @@ struct dcmipp_bytecap_device {
u32 sequence;
struct media_pipeline pipe;
struct v4l2_subdev *s_subdev;
+ u32 s_subdev_pad_nb;
enum dcmipp_state state;
@@ -250,34 +268,34 @@ static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
{
const struct dcmipp_bytecap_pix_map *vpix;
unsigned int index = f->index;
- unsigned int i;
+ unsigned int i, prev_pixelformat = 0;
- if (f->mbus_code) {
- /*
- * If a media bus code is specified, only enumerate formats
- * compatible with it.
- */
- for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
- vpix = &dcmipp_bytecap_pix_map_list[i];
- if (vpix->code != f->mbus_code)
- continue;
+ /*
+ * List all formats (or only those matching f->mbus_code), taking
+ * care to skip duplicated entries, which are adjacent in the list
+ * (due to support of both parallel & CSI 16-bit formats)
+ */
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ vpix = &dcmipp_bytecap_pix_map_list[i];
+ /* Skip formats not matching requested mbus code */
+ if (f->mbus_code && vpix->code != f->mbus_code)
+ continue;
- if (index == 0)
- break;
+ /* Skip duplicated pixelformat */
+ if (vpix->pixelformat == prev_pixelformat)
+ continue;
- index--;
- }
+ prev_pixelformat = vpix->pixelformat;
- if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
- return -EINVAL;
- } else {
- /* Otherwise, enumerate all formats. */
- if (f->index >= ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
- return -EINVAL;
+ if (index == 0)
+ break;
- vpix = &dcmipp_bytecap_pix_map_list[f->index];
+ index--;
}
+ if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
+ return -EINVAL;
+
f->pixelformat = vpix->pixelformat;
return 0;
@@ -337,33 +355,6 @@ static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
};
-static int dcmipp_pipeline_s_stream(struct dcmipp_bytecap_device *vcap,
- int state)
-{
- struct media_pad *pad;
- int ret;
-
- /*
- * Get source subdev - since link is IMMUTABLE, pointer is cached
- * within the dcmipp_bytecap_device structure
- */
- if (!vcap->s_subdev) {
- pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
- vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
- }
-
- ret = v4l2_subdev_call(vcap->s_subdev, video, s_stream, state);
- if (ret < 0) {
- dev_err(vcap->dev, "failed to %s streaming (%d)\n",
- state ? "start" : "stop", ret);
- return ret;
- }
-
- return 0;
-}
-
static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
struct dcmipp_buf *buf)
{
@@ -395,11 +386,24 @@ static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
struct media_entity *entity = &vcap->vdev.entity;
struct dcmipp_buf *buf;
+ struct media_pad *pad;
int ret;
vcap->sequence = 0;
memset(&vcap->count, 0, sizeof(vcap->count));
+ /*
+ * Get source subdev - since link is IMMUTABLE, pointer is cached
+ * within the dcmipp_bytecap_device structure
+ */
+ if (!vcap->s_subdev) {
+ pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+ vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
+ vcap->s_subdev_pad_nb = pad->index;
+ }
+
ret = pm_runtime_resume_and_get(vcap->dev);
if (ret < 0) {
dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
@@ -414,7 +418,8 @@ static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
goto err_pm_put;
}
- ret = dcmipp_pipeline_s_stream(vcap, 1);
+ ret = v4l2_subdev_enable_streams(vcap->s_subdev,
+ vcap->s_subdev_pad_nb, BIT_ULL(0));
if (ret)
goto err_media_pipeline_stop;
@@ -482,7 +487,10 @@ static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
int ret;
u32 status;
- dcmipp_pipeline_s_stream(vcap, 0);
+ ret = v4l2_subdev_disable_streams(vcap->s_subdev,
+ vcap->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret)
+ dev_warn(vcap->dev, "Failed to disable stream\n");
/* Stop the media pipeline */
media_pipeline_stop(vcap->vdev.entity.pads);
@@ -810,8 +818,7 @@ static int dcmipp_bytecap_link_validate(struct media_link *link)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = link->source->index,
};
- const struct dcmipp_bytecap_pix_map *vpix;
- int ret;
+ int ret, i;
ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
if (ret < 0)
@@ -825,10 +832,17 @@ static int dcmipp_bytecap_link_validate(struct media_link *link)
return -EINVAL;
}
- vpix = dcmipp_bytecap_pix_map_by_pixelformat(vcap->format.pixelformat);
- if (source_fmt.format.code != vpix->code) {
- dev_err(vcap->dev, "Wrong mbus_code 0x%x, (0x%x expected)\n",
- vpix->code, source_fmt.format.code);
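+ /* Accept any (pixelformat, mbus code) pair present in the pixmap list */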
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ if (dcmipp_bytecap_pix_map_list[i].pixelformat ==
+ vcap->format.pixelformat &&
+ dcmipp_bytecap_pix_map_list[i].code ==
+ source_fmt.format.code)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list)) {
+ dev_err(vcap->dev, "mbus code 0x%x do not match capture device format (0x%x)\n",
+ vcap->format.pixelformat, source_fmt.format.code);
return -EINVAL;
}
@@ -887,7 +901,7 @@ struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
q->dev = dev;
/* DCMIPP requires 16 bytes aligned buffers */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Failed to set DMA mask\n");
goto err_mutex_destroy;
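A quick illustration of the streams API the bytecap hunks above switch to:
v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams() take the
remote source pad index plus a 64-bit stream mask, and BIT_ULL(0) selects
stream 0 only. The sketch below shows the resulting start path under that
assumption; sketch_start() is a hypothetical name, the fields are the ones
cached in the hunks above.

#include <linux/bits.h>
#include <media/v4l2-subdev.h>

/* Minimal sketch: enable stream 0 on the cached source subdev/pad */
static int sketch_start(struct dcmipp_bytecap_device *vcap)
{
	int ret;

	ret = v4l2_subdev_enable_streams(vcap->s_subdev,
					 vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret < 0)
		return ret;

	/*
	 * ... start DMA; on a later failure, balance with
	 * v4l2_subdev_disable_streams() on the same pad/mask ...
	 */
	return 0;
}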
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
index 5a361ad6b023..3c742a546441 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
@@ -48,15 +48,32 @@ struct dcmipp_byteproc_pix_map {
}
static const struct dcmipp_byteproc_pix_map dcmipp_byteproc_pix_map_list[] = {
PIXMAP_MBUS_BPP(RGB565_2X8_LE, 2),
+ PIXMAP_MBUS_BPP(RGB565_1X16, 2),
PIXMAP_MBUS_BPP(YUYV8_2X8, 2),
+ PIXMAP_MBUS_BPP(YUYV8_1X16, 2),
PIXMAP_MBUS_BPP(YVYU8_2X8, 2),
+ PIXMAP_MBUS_BPP(YVYU8_1X16, 2),
PIXMAP_MBUS_BPP(UYVY8_2X8, 2),
+ PIXMAP_MBUS_BPP(UYVY8_1X16, 2),
PIXMAP_MBUS_BPP(VYUY8_2X8, 2),
+ PIXMAP_MBUS_BPP(VYUY8_1X16, 2),
PIXMAP_MBUS_BPP(Y8_1X8, 1),
PIXMAP_MBUS_BPP(SBGGR8_1X8, 1),
PIXMAP_MBUS_BPP(SGBRG8_1X8, 1),
PIXMAP_MBUS_BPP(SGRBG8_1X8, 1),
PIXMAP_MBUS_BPP(SRGGB8_1X8, 1),
+ PIXMAP_MBUS_BPP(SBGGR10_1X10, 2),
+ PIXMAP_MBUS_BPP(SGBRG10_1X10, 2),
+ PIXMAP_MBUS_BPP(SGRBG10_1X10, 2),
+ PIXMAP_MBUS_BPP(SRGGB10_1X10, 2),
+ PIXMAP_MBUS_BPP(SBGGR12_1X12, 2),
+ PIXMAP_MBUS_BPP(SGBRG12_1X12, 2),
+ PIXMAP_MBUS_BPP(SGRBG12_1X12, 2),
+ PIXMAP_MBUS_BPP(SRGGB12_1X12, 2),
+ PIXMAP_MBUS_BPP(SBGGR14_1X14, 2),
+ PIXMAP_MBUS_BPP(SGBRG14_1X14, 2),
+ PIXMAP_MBUS_BPP(SGRBG14_1X14, 2),
+ PIXMAP_MBUS_BPP(SRGGB14_1X14, 2),
PIXMAP_MBUS_BPP(JPEG_1X8, 1),
};
@@ -78,7 +95,6 @@ struct dcmipp_byteproc_device {
struct v4l2_subdev sd;
struct device *dev;
void __iomem *regs;
- bool streaming;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -239,11 +255,10 @@ static int dcmipp_byteproc_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
struct v4l2_rect *crop, *compose;
- if (byteproc->streaming)
+ if (v4l2_subdev_is_streaming(sd))
return -EBUSY;
mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
@@ -382,30 +397,19 @@ static int dcmipp_byteproc_set_selection(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_pad_ops dcmipp_byteproc_pad_ops = {
- .enum_mbus_code = dcmipp_byteproc_enum_mbus_code,
- .enum_frame_size = dcmipp_byteproc_enum_frame_size,
- .get_fmt = v4l2_subdev_get_fmt,
- .set_fmt = dcmipp_byteproc_set_fmt,
- .get_selection = dcmipp_byteproc_get_selection,
- .set_selection = dcmipp_byteproc_set_selection,
-};
-
static int dcmipp_byteproc_configure_scale_crop
- (struct dcmipp_byteproc_device *byteproc)
+ (struct dcmipp_byteproc_device *byteproc,
+ struct v4l2_subdev_state *state)
{
const struct dcmipp_byteproc_pix_map *vpix;
- struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *sink_fmt;
u32 hprediv, vprediv;
struct v4l2_rect *compose, *crop;
u32 val = 0;
- state = v4l2_subdev_lock_and_get_active_state(&byteproc->sd);
sink_fmt = v4l2_subdev_state_get_format(state, 0);
compose = v4l2_subdev_state_get_compose(state, 0);
crop = v4l2_subdev_state_get_crop(state, 1);
- v4l2_subdev_unlock_state(state);
/* find output format bpp */
vpix = dcmipp_byteproc_pix_map_by_code(sink_fmt->code);
@@ -460,48 +464,73 @@ static int dcmipp_byteproc_configure_scale_crop
return 0;
}
-static int dcmipp_byteproc_s_stream(struct v4l2_subdev *sd, int enable)
+static int dcmipp_byteproc_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
{
struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
struct v4l2_subdev *s_subdev;
- struct media_pad *pad;
- int ret = 0;
+ struct media_pad *s_pad;
+ int ret;
/* Get source subdev */
- pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
return -EINVAL;
- s_subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- if (enable) {
- ret = dcmipp_byteproc_configure_scale_crop(byteproc);
- if (ret)
- return ret;
-
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(byteproc->dev,
- "failed to start source subdev streaming (%d)\n",
- ret);
- return ret;
- }
- } else {
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(byteproc->dev,
- "failed to stop source subdev streaming (%d)\n",
- ret);
- return ret;
- }
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = dcmipp_byteproc_configure_scale_crop(byteproc, state);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_enable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
}
- byteproc->streaming = enable;
+ return 0;
+}
+
+static int dcmipp_byteproc_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = v4l2_subdev_disable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
+ }
return 0;
}
+static const struct v4l2_subdev_pad_ops dcmipp_byteproc_pad_ops = {
+ .enum_mbus_code = dcmipp_byteproc_enum_mbus_code,
+ .enum_frame_size = dcmipp_byteproc_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_byteproc_set_fmt,
+ .get_selection = dcmipp_byteproc_get_selection,
+ .set_selection = dcmipp_byteproc_set_selection,
+ .enable_streams = dcmipp_byteproc_enable_streams,
+ .disable_streams = dcmipp_byteproc_disable_streams,
+};
+
static const struct v4l2_subdev_video_ops dcmipp_byteproc_video_ops = {
- .s_stream = dcmipp_byteproc_s_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_ops dcmipp_byteproc_ops = {
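One point worth making explicit about the refactoring above: the subdev core
invokes .enable_streams with the active state already locked, so a helper
called from there must receive that state as a parameter rather than calling
v4l2_subdev_lock_and_get_active_state() again on the same subdev. A minimal
sketch of the callback shape, with hypothetical names:

#include <linux/errno.h>
#include <media/v4l2-subdev.h>

static int sketch_enable_streams(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state,
				 u32 pad, u64 streams_mask)
{
	/* state is already locked by the caller; read formats directly */
	struct v4l2_mbus_framefmt *sink_fmt =
		v4l2_subdev_state_get_format(state, 0);

	/* program the hardware from sink_fmt->code/width/height here */
	return sink_fmt ? 0 : -EINVAL;
}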
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
index 7a7cf43baf24..fe5f97233f5e 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
@@ -199,11 +199,11 @@ static inline void __reg_clear(struct device *dev, void __iomem *base, u32 reg,
}
/* DCMIPP subdev init / release entry points */
-struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
+struct dcmipp_ent_device *dcmipp_inp_ent_init(struct device *dev,
const char *entity_name,
struct v4l2_device *v4l2_dev,
void __iomem *regs);
-void dcmipp_par_ent_release(struct dcmipp_ent_device *ved);
+void dcmipp_inp_ent_release(struct dcmipp_ent_device *ved);
struct dcmipp_ent_device *
dcmipp_byteproc_ent_init(struct device *dev, const char *entity_name,
struct v4l2_device *v4l2_dev, void __iomem *regs);
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index 3806f7c6e2fe..71acf539e1f3 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -40,6 +40,7 @@ struct dcmipp_device {
/* Hardware resources */
void __iomem *regs;
+ struct clk *mclk;
struct clk *kclk;
/* The pipeline configuration */
@@ -87,6 +88,7 @@ struct dcmipp_pipeline_config {
size_t num_ents;
const struct dcmipp_ent_link *links;
size_t num_links;
+ u32 hw_revision;
};
/* --------------------------------------------------------------------------
@@ -95,9 +97,9 @@ struct dcmipp_pipeline_config {
static const struct dcmipp_ent_config stm32mp13_ent_config[] = {
{
- .name = "dcmipp_parallel",
- .init = dcmipp_par_ent_init,
- .release = dcmipp_par_ent_release,
+ .name = "dcmipp_input",
+ .init = dcmipp_inp_ent_init,
+ .release = dcmipp_inp_ent_release,
},
{
.name = "dcmipp_dump_postproc",
@@ -111,22 +113,58 @@ static const struct dcmipp_ent_config stm32mp13_ent_config[] = {
},
};
-#define ID_PARALLEL 0
+#define ID_INPUT 0
#define ID_DUMP_BYTEPROC 1
#define ID_DUMP_CAPTURE 2
static const struct dcmipp_ent_link stm32mp13_ent_links[] = {
- DCMIPP_ENT_LINK(ID_PARALLEL, 1, ID_DUMP_BYTEPROC, 0,
+ DCMIPP_ENT_LINK(ID_INPUT, 1, ID_DUMP_BYTEPROC, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
+#define DCMIPP_STM32MP13_VERR 0x10
static const struct dcmipp_pipeline_config stm32mp13_pipe_cfg = {
.ents = stm32mp13_ent_config,
.num_ents = ARRAY_SIZE(stm32mp13_ent_config),
.links = stm32mp13_ent_links,
- .num_links = ARRAY_SIZE(stm32mp13_ent_links)
+ .num_links = ARRAY_SIZE(stm32mp13_ent_links),
+ .hw_revision = DCMIPP_STM32MP13_VERR
+};
+
+static const struct dcmipp_ent_config stm32mp25_ent_config[] = {
+ {
+ .name = "dcmipp_input",
+ .init = dcmipp_inp_ent_init,
+ .release = dcmipp_inp_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_postproc",
+ .init = dcmipp_byteproc_ent_init,
+ .release = dcmipp_byteproc_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_capture",
+ .init = dcmipp_bytecap_ent_init,
+ .release = dcmipp_bytecap_ent_release,
+ },
+};
+
+static const struct dcmipp_ent_link stm32mp25_ent_links[] = {
+ DCMIPP_ENT_LINK(ID_INPUT, 1, ID_DUMP_BYTEPROC, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
+
+#define DCMIPP_STM32MP25_VERR 0x30
+static const struct dcmipp_pipeline_config stm32mp25_pipe_cfg = {
+ .ents = stm32mp25_ent_config,
+ .num_ents = ARRAY_SIZE(stm32mp25_ent_config),
+ .links = stm32mp25_ent_links,
+ .num_links = ARRAY_SIZE(stm32mp25_ent_links),
+ .hw_revision = DCMIPP_STM32MP25_VERR
};
#define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
@@ -209,6 +247,7 @@ err_init_entity:
static const struct of_device_id dcmipp_of_match[] = {
{ .compatible = "st,stm32mp13-dcmipp", .data = &stm32mp13_pipe_cfg },
+ { .compatible = "st,stm32mp25-dcmipp", .data = &stm32mp25_pipe_cfg },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, dcmipp_of_match);
@@ -258,13 +297,22 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
{
struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
unsigned int ret;
- int src_pad;
+ int src_pad, i;
struct dcmipp_ent_device *sink;
- struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_PARALLEL };
+ struct v4l2_fwnode_endpoint vep = { 0 };
struct fwnode_handle *ep;
+ enum v4l2_mbus_type supported_types[] = {
+ V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI2_DPHY
+ };
+ int supported_types_nb = ARRAY_SIZE(supported_types);
dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
+ /* Only MP25 supports CSI input */
+ if (!of_device_is_compatible(dcmipp->dev->of_node,
+ "st,stm32mp25-dcmipp"))
+ supported_types_nb--;
+
/*
* Link this sub-device to DCMIPP, it could be
* a parallel camera sensor or a CSI-2 to parallel bridge
@@ -281,21 +329,23 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -ENODEV;
}
- /* Check for parallel bus-type first, then bt656 */
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (ret) {
- vep.bus_type = V4L2_MBUS_BT656;
+ /* Check for supported MBUS type */
+ for (i = 0; i < supported_types_nb; i++) {
+ vep.bus_type = supported_types[i];
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (ret) {
- dev_err(dcmipp->dev, "Could not parse the endpoint\n");
- fwnode_handle_put(ep);
- return ret;
- }
+ if (!ret)
+ break;
}
fwnode_handle_put(ep);
- if (vep.bus.parallel.bus_width == 0) {
+ if (ret) {
+ dev_err(dcmipp->dev, "Could not parse the endpoint\n");
+ return ret;
+ }
+
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY &&
+ vep.bus.parallel.bus_width == 0) {
dev_err(dcmipp->dev, "Invalid parallel interface bus-width\n");
return -ENODEV;
}
@@ -308,11 +358,13 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -ENODEV;
}
- /* Parallel input device detected, connect it to parallel subdev */
- sink = dcmipp->entity[ID_PARALLEL];
- sink->bus.flags = vep.bus.parallel.flags;
- sink->bus.bus_width = vep.bus.parallel.bus_width;
- sink->bus.data_shift = vep.bus.parallel.data_shift;
+ /* Connect input device to the dcmipp_input subdev */
+ sink = dcmipp->entity[ID_INPUT];
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ sink->bus.flags = vep.bus.parallel.flags;
+ sink->bus.bus_width = vep.bus.parallel.bus_width;
+ sink->bus.data_shift = vep.bus.parallel.data_shift;
+ }
sink->bus_type = vep.bus_type;
ret = media_create_pad_link(&subdev->entity, src_pad, sink->ent, 0,
MEDIA_LNK_FL_IMMUTABLE |
@@ -411,7 +463,7 @@ static int dcmipp_graph_init(struct dcmipp_device *dcmipp)
static int dcmipp_probe(struct platform_device *pdev)
{
struct dcmipp_device *dcmipp;
- struct clk *kclk;
+ struct clk *kclk, *mclk;
const struct dcmipp_pipeline_config *pipe_cfg;
struct reset_control *rstc;
int irq;
@@ -471,12 +523,20 @@ static int dcmipp_probe(struct platform_device *pdev)
return ret;
}
- kclk = devm_clk_get(&pdev->dev, NULL);
+ kclk = devm_clk_get(&pdev->dev, "kclk");
if (IS_ERR(kclk))
return dev_err_probe(&pdev->dev, PTR_ERR(kclk),
"Unable to get kclk\n");
dcmipp->kclk = kclk;
+ if (!of_device_is_compatible(pdev->dev.of_node, "st,stm32mp13-dcmipp")) {
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
+ "Unable to get mclk\n");
+ dcmipp->mclk = mclk;
+ }
+
dcmipp->entity = devm_kcalloc(&pdev->dev, dcmipp->pipe_cfg->num_ents,
sizeof(*dcmipp->entity), GFP_KERNEL);
if (!dcmipp->entity)
@@ -496,6 +556,7 @@ static int dcmipp_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(dcmipp->mdev.model, DCMIPP_MDEV_MODEL_NAME,
sizeof(dcmipp->mdev.model));
+ dcmipp->mdev.hw_revision = pipe_cfg->hw_revision;
dcmipp->mdev.dev = &pdev->dev;
media_device_init(&dcmipp->mdev);
@@ -538,6 +599,7 @@ static int dcmipp_runtime_suspend(struct device *dev)
struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
clk_disable_unprepare(dcmipp->kclk);
+ clk_disable_unprepare(dcmipp->mclk);
return 0;
}
@@ -547,9 +609,17 @@ static int dcmipp_runtime_resume(struct device *dev)
struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
int ret;
+ ret = clk_prepare_enable(dcmipp->mclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to prepare_enable mclk\n", __func__);
+ return ret;
+ }
+
ret = clk_prepare_enable(dcmipp->kclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(dcmipp->mclk);
dev_err(dev, "%s: Failed to prepare_enable kclk\n", __func__);
+ }
return ret;
}
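A detail that keeps the runtime-PM hooks above branch-free: on stm32mp13
dcmipp->mclk is simply left NULL, and the common clock framework treats a
NULL clk as a dummy clock, so clk_prepare_enable(NULL) returns 0 and
clk_disable_unprepare(NULL) is a no-op. A self-contained sketch of that
pattern (struct and function names are hypothetical):

#include <linux/clk.h>

struct sketch_priv {
	struct clk *kclk;
	struct clk *mclk;	/* left NULL on variants without it */
};

static int sketch_resume(struct sketch_priv *p)
{
	int ret;

	ret = clk_prepare_enable(p->mclk);	/* NULL clk: returns 0 */
	if (ret)
		return ret;

	ret = clk_prepare_enable(p->kclk);
	if (ret)
		clk_disable_unprepare(p->mclk);	/* NULL clk: no-op */

	return ret;
}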
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c
new file mode 100644
index 000000000000..7e5311b67d7e
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/v4l2-mediabus.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_PRCR 0x104
+#define DCMIPP_PRCR_FORMAT_SHIFT 16
+#define DCMIPP_PRCR_FORMAT_YUV422 0x1e
+#define DCMIPP_PRCR_FORMAT_RGB565 0x22
+#define DCMIPP_PRCR_FORMAT_RAW8 0x2a
+#define DCMIPP_PRCR_FORMAT_RAW10 0x2b
+#define DCMIPP_PRCR_FORMAT_RAW12 0x2c
+#define DCMIPP_PRCR_FORMAT_RAW14 0x2d
+#define DCMIPP_PRCR_FORMAT_G8 0x4a
+#define DCMIPP_PRCR_FORMAT_BYTE_STREAM 0x5a
+#define DCMIPP_PRCR_ESS BIT(4)
+#define DCMIPP_PRCR_PCKPOL BIT(5)
+#define DCMIPP_PRCR_HSPOL BIT(6)
+#define DCMIPP_PRCR_VSPOL BIT(7)
+#define DCMIPP_PRCR_ENABLE BIT(14)
+#define DCMIPP_PRCR_SWAPCYCLES BIT(25)
+
+#define DCMIPP_PRESCR 0x108
+#define DCMIPP_PRESUR 0x10c
+
+#define DCMIPP_CMCR 0x204
+#define DCMIPP_CMCR_INSEL BIT(0)
+
+#define DCMIPP_P0FSCR 0x404
+#define DCMIPP_P0FSCR_DTMODE_MASK GENMASK(17, 16)
+#define DCMIPP_P0FSCR_DTMODE_SHIFT 16
+#define DCMIPP_P0FSCR_DTMODE_DTIDA 0x00
+#define DCMIPP_P0FSCR_DTMODE_ALLDT 0x03
+#define DCMIPP_P0FSCR_DTIDA_MASK GENMASK(5, 0)
+#define DCMIPP_P0FSCR_DTIDA_SHIFT 0
+
+#define IS_SINK(pad) (!(pad))
+#define IS_SRC(pad) ((pad))
+
+struct dcmipp_inp_pix_map {
+ unsigned int code_sink;
+ unsigned int code_src;
+ /* Parallel related information */
+ u8 prcr_format;
+ u8 prcr_swapcycles;
+ /* CSI related information */
+ unsigned int dt;
+};
+
+#define PIXMAP_SINK_SRC_PRCR_SWAP(sink, src, prcr, swap, data_type) \
+ { \
+ .code_sink = MEDIA_BUS_FMT_##sink, \
+ .code_src = MEDIA_BUS_FMT_##src, \
+ .prcr_format = DCMIPP_PRCR_FORMAT_##prcr, \
+ .prcr_swapcycles = swap, \
+ .dt = data_type, \
+ }
+static const struct dcmipp_inp_pix_map dcmipp_inp_pix_map_list[] = {
+ /* RGB565 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_LE, RGB565_2X8_LE, RGB565, 1, MIPI_CSI2_DT_RGB565),
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_BE, RGB565_2X8_LE, RGB565, 0, MIPI_CSI2_DT_RGB565),
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_1X16, RGB565_1X16, RGB565, 0, MIPI_CSI2_DT_RGB565),
+ /* YUV422 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, YUYV8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_1X16, YUYV8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, UYVY8_2X8, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, UYVY8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_1X16, UYVY8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, YUYV8_2X8, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_2X8, YVYU8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_1X16, YVYU8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_2X8, VYUY8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_1X16, VYUY8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ /* GREY */
+ PIXMAP_SINK_SRC_PRCR_SWAP(Y8_1X8, Y8_1X8, G8, 0, MIPI_CSI2_DT_RAW8),
+ /* Raw Bayer */
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR8_1X8, SBGGR8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG8_1X8, SGBRG8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG8_1X8, SGRBG8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB8_1X8, SRGGB8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR10_1X10, SBGGR10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG10_1X10, SGBRG10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG10_1X10, SGRBG10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB10_1X10, SRGGB10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR12_1X12, SBGGR12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG12_1X12, SGBRG12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG12_1X12, SGRBG12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB12_1X12, SRGGB12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR14_1X14, SBGGR14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG14_1X14, SGBRG14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG14_1X14, SGRBG14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB14_1X14, SRGGB14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ /* JPEG */
+ PIXMAP_SINK_SRC_PRCR_SWAP(JPEG_1X8, JPEG_1X8, BYTE_STREAM, 0, 0),
+};
+
+/*
+ * Search through the pix_map table, skipping consecutive entries with the
+ * same code.
+ */
+static inline const struct dcmipp_inp_pix_map *dcmipp_inp_pix_map_by_index
+ (unsigned int index,
+ unsigned int pad)
+{
+ unsigned int i = 0;
+ u32 prev_code = 0, cur_code;
+
+ while (i < ARRAY_SIZE(dcmipp_inp_pix_map_list)) {
+ if (IS_SRC(pad))
+ cur_code = dcmipp_inp_pix_map_list[i].code_src;
+ else
+ cur_code = dcmipp_inp_pix_map_list[i].code_sink;
+
+ if (cur_code == prev_code) {
+ i++;
+ continue;
+ }
+ prev_code = cur_code;
+
+ if (index == 0)
+ break;
+ i++;
+ index--;
+ }
+
+ if (i >= ARRAY_SIZE(dcmipp_inp_pix_map_list))
+ return NULL;
+
+ return &dcmipp_inp_pix_map_list[i];
+}
+
+static inline const struct dcmipp_inp_pix_map *dcmipp_inp_pix_map_by_code
+ (u32 code_sink, u32 code_src)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dcmipp_inp_pix_map_list); i++) {
+ if ((dcmipp_inp_pix_map_list[i].code_sink == code_sink &&
+ dcmipp_inp_pix_map_list[i].code_src == code_src) ||
+ (dcmipp_inp_pix_map_list[i].code_sink == code_src &&
+ dcmipp_inp_pix_map_list[i].code_src == code_sink) ||
+ (dcmipp_inp_pix_map_list[i].code_sink == code_sink &&
+ code_src == 0) ||
+ (code_sink == 0 &&
+ dcmipp_inp_pix_map_list[i].code_src == code_src))
+ return &dcmipp_inp_pix_map_list[i];
+ }
+ return NULL;
+}
+
+struct dcmipp_inp_device {
+ struct dcmipp_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ void __iomem *regs;
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = DCMIPP_FMT_WIDTH_DEFAULT,
+ .height = DCMIPP_FMT_HEIGHT_DEFAULT,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = DCMIPP_COLORSPACE_DEFAULT,
+ .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
+ .quantization = DCMIPP_QUANTIZATION_DEFAULT,
+ .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
+};
+
+static int dcmipp_inp_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_state_get_format(sd_state, i);
+ *mf = fmt_default;
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct dcmipp_inp_pix_map *vpix =
+ dcmipp_inp_pix_map_by_index(code->index, code->pad);
+
+ if (!vpix)
+ return -EINVAL;
+
+ code->code = IS_SRC(code->pad) ? vpix->code_src : vpix->code_sink;
+
+ return 0;
+}
+
+static int dcmipp_inp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table */
+ vpix = dcmipp_inp_pix_map_by_code(IS_SINK(fse->pad) ? fse->code : 0,
+ IS_SRC(fse->pad) ? fse->code : 0);
+ if (!vpix)
+ return -EINVAL;
+
+ fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
+ fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
+ fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
+ fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static void dcmipp_inp_adjust_fmt(struct dcmipp_inp_device *inp,
+ struct v4l2_mbus_framefmt *fmt, __u32 pad)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+
+ /* Only accept code in the pix map table */
+ vpix = dcmipp_inp_pix_map_by_code(IS_SINK(pad) ? fmt->code : 0,
+ IS_SRC(pad) ? fmt->code : 0);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
+ /* Exclude JPEG if BT656 bus is selected */
+ if (vpix && vpix->code_sink == MEDIA_BUS_FMT_JPEG_1X8 &&
+ inp->ved.bus_type == V4L2_MBUS_BT656)
+ fmt->code = fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
+ DCMIPP_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
+ DCMIPP_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ dcmipp_colorimetry_clamp(fmt);
+}
+
+static int dcmipp_inp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct dcmipp_inp_device *inp = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (v4l2_subdev_is_streaming(sd))
+ return -EBUSY;
+
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+
+ /* Set the new format */
+ dcmipp_inp_adjust_fmt(inp, &fmt->format, fmt->pad);
+
+ dev_dbg(inp->dev, "%s: format update: old:%dx%d (0x%x, %d, %d, %d, %d) new:%dx%d (0x%x, %d, %d, %d, %d)\n",
+ inp->sd.name,
+ /* old */
+ mf->width, mf->height, mf->code,
+ mf->colorspace, mf->quantization,
+ mf->xfer_func, mf->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *mf = fmt->format;
+
+ /* When setting the sink format, report that format on the src pad */
+ if (IS_SINK(fmt->pad)) {
+ mf = v4l2_subdev_state_get_format(sd_state, 1);
+ *mf = fmt->format;
+ dcmipp_inp_adjust_fmt(inp, mf, 1);
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_configure_parallel(struct dcmipp_inp_device *inp,
+ struct v4l2_subdev_state *state)
+{
+ u32 val = 0;
+ const struct dcmipp_inp_pix_map *vpix;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ /* Set vertical synchronization polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= DCMIPP_PRCR_PCKPOL;
+
+ /*
+ * BT656 embedded synchronisation bus mode.
+ *
+ * Default SAV/EAV mode is supported here with default codes
+ * SAV=0xff000080 & EAV=0xff00009d.
+ * With DCMIPP this means LSC=SAV=0x80 & LEC=EAV=0x9d.
+ */
+ if (inp->ved.bus_type == V4L2_MBUS_BT656) {
+ val |= DCMIPP_PRCR_ESS;
+
+ /* Unmask all codes */
+ reg_write(inp, DCMIPP_PRESUR, 0xffffffff);/* FEC:LEC:LSC:FSC */
+
+ /* Trig on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
+ reg_write(inp, DCMIPP_PRESCR, 0xff9d80ff);/* FEC:LEC:LSC:FSC */
+ }
+
+ /* Set format */
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ src_fmt = v4l2_subdev_state_get_format(state, 1);
+
+ vpix = dcmipp_inp_pix_map_by_code(sink_fmt->code, src_fmt->code);
+ if (!vpix) {
+ dev_err(inp->dev, "Invalid sink/src format configuration\n");
+ return -EINVAL;
+ }
+
+ val |= vpix->prcr_format << DCMIPP_PRCR_FORMAT_SHIFT;
+
+ /* swap cycles */
+ if (vpix->prcr_swapcycles)
+ val |= DCMIPP_PRCR_SWAPCYCLES;
+
+ reg_write(inp, DCMIPP_PRCR, val);
+
+ /* Select the DCMIPP parallel interface */
+ reg_write(inp, DCMIPP_CMCR, 0);
+
+ /* Enable parallel interface */
+ reg_set(inp, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+
+ return 0;
+}
+
+static int dcmipp_inp_configure_csi(struct dcmipp_inp_device *inp,
+ struct v4l2_subdev_state *state)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ /* Get format information */
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ src_fmt = v4l2_subdev_state_get_format(state, 1);
+
+ vpix = dcmipp_inp_pix_map_by_code(sink_fmt->code, src_fmt->code);
+ if (!vpix) {
+ dev_err(inp->dev, "Invalid sink/src format configuration\n");
+ return -EINVAL;
+ }
+
+ /* Apply configuration on each input pipe */
+ reg_clear(inp, DCMIPP_P0FSCR,
+ DCMIPP_P0FSCR_DTMODE_MASK | DCMIPP_P0FSCR_DTIDA_MASK);
+
+	/*
+	 * In case of JPEG the DT is unknown, so allow all data types.
+	 * TODO - for the time being this checks dt == 0, which also lets
+	 * other unknown data types through.
+	 */
+ if (!vpix->dt)
+ reg_set(inp, DCMIPP_P0FSCR,
+ DCMIPP_P0FSCR_DTMODE_ALLDT << DCMIPP_P0FSCR_DTMODE_SHIFT);
+ else
+ reg_set(inp, DCMIPP_P0FSCR,
+ vpix->dt << DCMIPP_P0FSCR_DTIDA_SHIFT |
+ DCMIPP_P0FSCR_DTMODE_DTIDA);
+
+ /* Select the DCMIPP CSI interface */
+ reg_write(inp, DCMIPP_CMCR, DCMIPP_CMCR_INSEL);
+
+ return 0;
+}
+
+static int dcmipp_inp_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret = 0;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ if (inp->ved.bus_type == V4L2_MBUS_PARALLEL ||
+ inp->ved.bus_type == V4L2_MBUS_BT656)
+ ret = dcmipp_inp_configure_parallel(inp, state);
+ else if (inp->ved.bus_type == V4L2_MBUS_CSI2_DPHY)
+ ret = dcmipp_inp_configure_csi(inp, state);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_enable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(inp->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = v4l2_subdev_disable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(inp->dev,
+ "failed to stop source subdev streaming (%d)\n", ret);
+ return ret;
+ }
+
+ if (inp->ved.bus_type == V4L2_MBUS_PARALLEL ||
+ inp->ved.bus_type == V4L2_MBUS_BT656) {
+ /* Disable parallel interface */
+ reg_clear(inp, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops dcmipp_inp_pad_ops = {
+ .enum_mbus_code = dcmipp_inp_enum_mbus_code,
+ .enum_frame_size = dcmipp_inp_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_inp_set_fmt,
+ .enable_streams = dcmipp_inp_enable_streams,
+ .disable_streams = dcmipp_inp_disable_streams,
+};
+
+static const struct v4l2_subdev_video_ops dcmipp_inp_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_ops dcmipp_inp_ops = {
+ .pad = &dcmipp_inp_pad_ops,
+ .video = &dcmipp_inp_video_ops,
+};
+
+static void dcmipp_inp_release(struct v4l2_subdev *sd)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+
+ kfree(inp);
+}
+
+static const struct v4l2_subdev_internal_ops dcmipp_inp_int_ops = {
+ .init_state = dcmipp_inp_init_state,
+ .release = dcmipp_inp_release,
+};
+
+void dcmipp_inp_ent_release(struct dcmipp_ent_device *ved)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(ved, struct dcmipp_inp_device, ved);
+
+ dcmipp_ent_sd_unregister(ved, &inp->sd);
+}
+
+struct dcmipp_ent_device *dcmipp_inp_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs)
+{
+ struct dcmipp_inp_device *inp;
+ const unsigned long pads_flag[] = {
+ MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
+ };
+ int ret;
+
+ /* Allocate the inp struct */
+ inp = kzalloc(sizeof(*inp), GFP_KERNEL);
+ if (!inp)
+ return ERR_PTR(-ENOMEM);
+
+ inp->regs = regs;
+
+ /* Initialize ved and sd */
+ ret = dcmipp_ent_sd_register(&inp->ved, &inp->sd, v4l2_dev,
+ entity_name, MEDIA_ENT_F_VID_IF_BRIDGE,
+ ARRAY_SIZE(pads_flag), pads_flag,
+ &dcmipp_inp_int_ops, &dcmipp_inp_ops,
+ NULL, NULL);
+ if (ret) {
+ kfree(inp);
+ return ERR_PTR(ret);
+ }
+
+ inp->dev = dev;
+
+ return &inp->ved;
+}
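A note on the lookup helper introduced above: dcmipp_inp_pix_map_by_code()
treats a zero code as a wildcard on either side, which lets callers such as
enum_frame_size and adjust_fmt validate a single pad's code without knowing
the code on the opposite pad. A short usage sketch (the wrapper name is
hypothetical):

#include <linux/errno.h>

/* Sketch: validate a media bus code on the sink pad only (src wildcarded) */
static int sketch_validate_sink_code(u32 code)
{
	const struct dcmipp_inp_pix_map *vpix;

	vpix = dcmipp_inp_pix_map_by_code(code, 0);

	return vpix ? 0 : -EINVAL;
}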
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
deleted file mode 100644
index 62c5c3331cfe..000000000000
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for STM32 Digital Camera Memory Interface Pixel Processor
- *
- * Copyright (C) STMicroelectronics SA 2023
- * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
- * Alain Volmat <alain.volmat@foss.st.com>
- * for STMicroelectronics.
- */
-
-#include <linux/v4l2-mediabus.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-subdev.h>
-
-#include "dcmipp-common.h"
-
-#define DCMIPP_PRCR 0x104
-#define DCMIPP_PRCR_FORMAT_SHIFT 16
-#define DCMIPP_PRCR_FORMAT_YUV422 0x1e
-#define DCMIPP_PRCR_FORMAT_RGB565 0x22
-#define DCMIPP_PRCR_FORMAT_RAW8 0x2a
-#define DCMIPP_PRCR_FORMAT_G8 0x4a
-#define DCMIPP_PRCR_FORMAT_BYTE_STREAM 0x5a
-#define DCMIPP_PRCR_ESS BIT(4)
-#define DCMIPP_PRCR_PCKPOL BIT(5)
-#define DCMIPP_PRCR_HSPOL BIT(6)
-#define DCMIPP_PRCR_VSPOL BIT(7)
-#define DCMIPP_PRCR_ENABLE BIT(14)
-#define DCMIPP_PRCR_SWAPCYCLES BIT(25)
-
-#define DCMIPP_PRESCR 0x108
-#define DCMIPP_PRESUR 0x10c
-
-#define IS_SINK(pad) (!(pad))
-#define IS_SRC(pad) ((pad))
-
-struct dcmipp_par_pix_map {
- unsigned int code_sink;
- unsigned int code_src;
- u8 prcr_format;
- u8 prcr_swapcycles;
-};
-
-#define PIXMAP_SINK_SRC_PRCR_SWAP(sink, src, prcr, swap) \
- { \
- .code_sink = MEDIA_BUS_FMT_##sink, \
- .code_src = MEDIA_BUS_FMT_##src, \
- .prcr_format = DCMIPP_PRCR_FORMAT_##prcr, \
- .prcr_swapcycles = swap, \
- }
-static const struct dcmipp_par_pix_map dcmipp_par_pix_map_list[] = {
- /* RGB565 */
- PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_LE, RGB565_2X8_LE, RGB565, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_BE, RGB565_2X8_LE, RGB565, 0),
- /* YUV422 */
- PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, YUYV8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, UYVY8_2X8, YUV422, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, UYVY8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, YUYV8_2X8, YUV422, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_2X8, YVYU8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_2X8, VYUY8_2X8, YUV422, 1),
- /* GREY */
- PIXMAP_SINK_SRC_PRCR_SWAP(Y8_1X8, Y8_1X8, G8, 0),
- /* Raw Bayer */
- PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR8_1X8, SBGGR8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG8_1X8, SGBRG8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG8_1X8, SGRBG8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB8_1X8, SRGGB8_1X8, RAW8, 0),
- /* JPEG */
- PIXMAP_SINK_SRC_PRCR_SWAP(JPEG_1X8, JPEG_1X8, BYTE_STREAM, 0),
-};
-
-/*
- * Search through the pix_map table, skipping two consecutive entry with the
- * same code
- */
-static inline const struct dcmipp_par_pix_map *dcmipp_par_pix_map_by_index
- (unsigned int index,
- unsigned int pad)
-{
- unsigned int i = 0;
- u32 prev_code = 0, cur_code;
-
- while (i < ARRAY_SIZE(dcmipp_par_pix_map_list)) {
- if (IS_SRC(pad))
- cur_code = dcmipp_par_pix_map_list[i].code_src;
- else
- cur_code = dcmipp_par_pix_map_list[i].code_sink;
-
- if (cur_code == prev_code) {
- i++;
- continue;
- }
- prev_code = cur_code;
-
- if (index == 0)
- break;
- i++;
- index--;
- }
-
- if (i >= ARRAY_SIZE(dcmipp_par_pix_map_list))
- return NULL;
-
- return &dcmipp_par_pix_map_list[i];
-}
-
-static inline const struct dcmipp_par_pix_map *dcmipp_par_pix_map_by_code
- (u32 code_sink, u32 code_src)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(dcmipp_par_pix_map_list); i++) {
- if ((dcmipp_par_pix_map_list[i].code_sink == code_sink &&
- dcmipp_par_pix_map_list[i].code_src == code_src) ||
- (dcmipp_par_pix_map_list[i].code_sink == code_src &&
- dcmipp_par_pix_map_list[i].code_src == code_sink) ||
- (dcmipp_par_pix_map_list[i].code_sink == code_sink &&
- code_src == 0) ||
- (code_sink == 0 &&
- dcmipp_par_pix_map_list[i].code_src == code_src))
- return &dcmipp_par_pix_map_list[i];
- }
- return NULL;
-}
-
-struct dcmipp_par_device {
- struct dcmipp_ent_device ved;
- struct v4l2_subdev sd;
- struct device *dev;
- void __iomem *regs;
- bool streaming;
-};
-
-static const struct v4l2_mbus_framefmt fmt_default = {
- .width = DCMIPP_FMT_WIDTH_DEFAULT,
- .height = DCMIPP_FMT_HEIGHT_DEFAULT,
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .field = V4L2_FIELD_NONE,
- .colorspace = DCMIPP_COLORSPACE_DEFAULT,
- .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
- .quantization = DCMIPP_QUANTIZATION_DEFAULT,
- .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
-};
-
-static int dcmipp_par_init_state(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
-{
- unsigned int i;
-
- for (i = 0; i < sd->entity.num_pads; i++) {
- struct v4l2_mbus_framefmt *mf;
-
- mf = v4l2_subdev_state_get_format(sd_state, i);
- *mf = fmt_default;
- }
-
- return 0;
-}
-
-static int dcmipp_par_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- const struct dcmipp_par_pix_map *vpix =
- dcmipp_par_pix_map_by_index(code->index, code->pad);
-
- if (!vpix)
- return -EINVAL;
-
- code->code = IS_SRC(code->pad) ? vpix->code_src : vpix->code_sink;
-
- return 0;
-}
-
-static int dcmipp_par_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- const struct dcmipp_par_pix_map *vpix;
-
- if (fse->index)
- return -EINVAL;
-
- /* Only accept code in the pix map table */
- vpix = dcmipp_par_pix_map_by_code(IS_SINK(fse->pad) ? fse->code : 0,
- IS_SRC(fse->pad) ? fse->code : 0);
- if (!vpix)
- return -EINVAL;
-
- fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
- fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
- fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
- fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
-
- return 0;
-}
-
-static void dcmipp_par_adjust_fmt(struct dcmipp_par_device *par,
- struct v4l2_mbus_framefmt *fmt, __u32 pad)
-{
- const struct dcmipp_par_pix_map *vpix;
-
- /* Only accept code in the pix map table */
- vpix = dcmipp_par_pix_map_by_code(IS_SINK(pad) ? fmt->code : 0,
- IS_SRC(pad) ? fmt->code : 0);
- if (!vpix)
- fmt->code = fmt_default.code;
-
- /* Exclude JPEG if BT656 bus is selected */
- if (vpix && vpix->code_sink == MEDIA_BUS_FMT_JPEG_1X8 &&
- par->ved.bus_type == V4L2_MBUS_BT656)
- fmt->code = fmt_default.code;
-
- fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
- DCMIPP_FRAME_MAX_WIDTH) & ~1;
- fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
- DCMIPP_FRAME_MAX_HEIGHT) & ~1;
-
- if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
- fmt->field = fmt_default.field;
-
- dcmipp_colorimetry_clamp(fmt);
-}
-
-static int dcmipp_par_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct dcmipp_par_device *par = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *mf;
-
- if (par->streaming)
- return -EBUSY;
-
- mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
-
- /* Set the new format */
- dcmipp_par_adjust_fmt(par, &fmt->format, fmt->pad);
-
- dev_dbg(par->dev, "%s: format update: old:%dx%d (0x%x, %d, %d, %d, %d) new:%dx%d (0x%x, %d, %d, %d, %d)\n",
- par->sd.name,
- /* old */
- mf->width, mf->height, mf->code,
- mf->colorspace, mf->quantization,
- mf->xfer_func, mf->ycbcr_enc,
- /* new */
- fmt->format.width, fmt->format.height, fmt->format.code,
- fmt->format.colorspace, fmt->format.quantization,
- fmt->format.xfer_func, fmt->format.ycbcr_enc);
-
- *mf = fmt->format;
-
- /* When setting the sink format, report that format on the src pad */
- if (IS_SINK(fmt->pad)) {
- mf = v4l2_subdev_state_get_format(sd_state, 1);
- *mf = fmt->format;
- dcmipp_par_adjust_fmt(par, mf, 1);
- }
-
- return 0;
-}
-
-static const struct v4l2_subdev_pad_ops dcmipp_par_pad_ops = {
- .enum_mbus_code = dcmipp_par_enum_mbus_code,
- .enum_frame_size = dcmipp_par_enum_frame_size,
- .get_fmt = v4l2_subdev_get_fmt,
- .set_fmt = dcmipp_par_set_fmt,
-};
-
-static int dcmipp_par_configure(struct dcmipp_par_device *par)
-{
- u32 val = 0;
- const struct dcmipp_par_pix_map *vpix;
- struct v4l2_subdev_state *state;
- struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_mbus_framefmt *src_fmt;
-
- /* Set vertical synchronization polarity */
- if (par->ved.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
- val |= DCMIPP_PRCR_VSPOL;
-
- /* Set horizontal synchronization polarity */
- if (par->ved.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- val |= DCMIPP_PRCR_HSPOL;
-
- /* Set pixel clock polarity */
- if (par->ved.bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- val |= DCMIPP_PRCR_PCKPOL;
-
- /*
- * BT656 embedded synchronisation bus mode.
- *
- * Default SAV/EAV mode is supported here with default codes
- * SAV=0xff000080 & EAV=0xff00009d.
- * With DCMIPP this means LSC=SAV=0x80 & LEC=EAV=0x9d.
- */
- if (par->ved.bus_type == V4L2_MBUS_BT656) {
- val |= DCMIPP_PRCR_ESS;
-
- /* Unmask all codes */
- reg_write(par, DCMIPP_PRESUR, 0xffffffff);/* FEC:LEC:LSC:FSC */
-
- /* Trig on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
- reg_write(par, DCMIPP_PRESCR, 0xff9d80ff);/* FEC:LEC:LSC:FSC */
- }
-
- /* Set format */
- state = v4l2_subdev_lock_and_get_active_state(&par->sd);
- sink_fmt = v4l2_subdev_state_get_format(state, 0);
- src_fmt = v4l2_subdev_state_get_format(state, 1);
- v4l2_subdev_unlock_state(state);
-
- vpix = dcmipp_par_pix_map_by_code(sink_fmt->code, src_fmt->code);
- if (!vpix) {
- dev_err(par->dev, "Invalid sink/src format configuration\n");
- return -EINVAL;
- }
-
- val |= vpix->prcr_format << DCMIPP_PRCR_FORMAT_SHIFT;
-
- /* swap cycles */
- if (vpix->prcr_swapcycles)
- val |= DCMIPP_PRCR_SWAPCYCLES;
-
- reg_write(par, DCMIPP_PRCR, val);
-
- return 0;
-}
-
-static int dcmipp_par_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct dcmipp_par_device *par =
- container_of(sd, struct dcmipp_par_device, sd);
- struct v4l2_subdev *s_subdev;
- struct media_pad *pad;
- int ret = 0;
-
- /* Get source subdev */
- pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
- s_subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- if (enable) {
- ret = dcmipp_par_configure(par);
- if (ret)
- return ret;
-
- /* Enable parallel interface */
- reg_set(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
-
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(par->dev,
- "failed to start source subdev streaming (%d)\n",
- ret);
- return ret;
- }
- } else {
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(par->dev,
- "failed to stop source subdev streaming (%d)\n",
- ret);
- return ret;
- }
-
- /* Disable parallel interface */
- reg_clear(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
- }
-
- par->streaming = enable;
-
- return ret;
-}
-
-static const struct v4l2_subdev_video_ops dcmipp_par_video_ops = {
- .s_stream = dcmipp_par_s_stream,
-};
-
-static const struct v4l2_subdev_ops dcmipp_par_ops = {
- .pad = &dcmipp_par_pad_ops,
- .video = &dcmipp_par_video_ops,
-};
-
-static void dcmipp_par_release(struct v4l2_subdev *sd)
-{
- struct dcmipp_par_device *par =
- container_of(sd, struct dcmipp_par_device, sd);
-
- kfree(par);
-}
-
-static const struct v4l2_subdev_internal_ops dcmipp_par_int_ops = {
- .init_state = dcmipp_par_init_state,
- .release = dcmipp_par_release,
-};
-
-void dcmipp_par_ent_release(struct dcmipp_ent_device *ved)
-{
- struct dcmipp_par_device *par =
- container_of(ved, struct dcmipp_par_device, ved);
-
- dcmipp_ent_sd_unregister(ved, &par->sd);
-}
-
-struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
- const char *entity_name,
- struct v4l2_device *v4l2_dev,
- void __iomem *regs)
-{
- struct dcmipp_par_device *par;
- const unsigned long pads_flag[] = {
- MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
- };
- int ret;
-
- /* Allocate the par struct */
- par = kzalloc(sizeof(*par), GFP_KERNEL);
- if (!par)
- return ERR_PTR(-ENOMEM);
-
- par->regs = regs;
-
- /* Initialize ved and sd */
- ret = dcmipp_ent_sd_register(&par->ved, &par->sd, v4l2_dev,
- entity_name, MEDIA_ENT_F_VID_IF_BRIDGE,
- ARRAY_SIZE(pads_flag), pads_flag,
- &dcmipp_par_int_ops, &dcmipp_par_ops,
- NULL, NULL);
- if (ret) {
- kfree(par);
- return ERR_PTR(ret);
- }
-
- par->dev = dev;
-
- return &par->ved;
-}
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 811260dc3c77..edc217eed293 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -227,6 +227,7 @@ struct hantro_dev {
* @src_fmt: V4L2 pixel format of active source format.
* @vpu_dst_fmt: Descriptor of active destination format.
* @dst_fmt: V4L2 pixel format of active destination format.
+ * @ref_fmt:	V4L2 pixel format of the reference frames.
*
* @ctrl_handler: Control handler used to register controls.
* @jpeg_quality: User-specified JPEG compression quality.
@@ -255,6 +256,7 @@ struct hantro_ctx {
struct v4l2_pix_format_mplane src_fmt;
const struct hantro_fmt *vpu_dst_fmt;
struct v4l2_pix_format_mplane dst_fmt;
+ struct v4l2_pix_format_mplane ref_fmt;
struct v4l2_ctrl_handler ctrl_handler;
int jpeg_quality;
@@ -332,12 +334,19 @@ struct hantro_vp9_decoded_buffer_info {
u32 bit_depth : 4;
};
+struct hantro_av1_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ size_t chroma_offset;
+ size_t mv_offset;
+};
+
struct hantro_decoded_buffer {
/* Must be the first field in this struct. */
struct v4l2_m2m_buffer base;
union {
struct hantro_vp9_decoded_buffer_info vp9;
+ struct hantro_av1_decoded_buffer_info av1;
};
};
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index 5c1d799d8618..aae0b562fabb 100644
--- a/drivers/media/platform/verisilicon/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -47,7 +47,7 @@ irqreturn_t hantro_g2_irq(int irq, void *dev_id)
size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx)
{
- return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+ return ctx->ref_fmt.plane_fmt[0].bytesperline * ctx->ref_fmt.height;
}
size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx)
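The one-line hantro_g2_chroma_offset() change above deserves a gloss: in the
semi-planar layouts the G2 core writes, the chroma plane starts immediately
after the padded luma plane, whose size is stride * height. Deriving the
offset from ref_fmt.plane_fmt[0].bytesperline therefore accounts for line
padding that width * height * bit_depth / 8 misses. As a sketch:

#include <linux/types.h>
#include <linux/videodev2.h>

/* Sketch: padded luma plane size == chroma plane offset (NV12/NV15-style) */
static size_t sketch_chroma_offset(const struct v4l2_pix_format_mplane *fmt)
{
	return (size_t)fmt->plane_fmt[0].bytesperline * fmt->height;
}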
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 232c93eea7ee..c435a393e0cb 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -194,35 +194,25 @@ void hantro_postproc_free(struct hantro_ctx *ctx)
static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx)
{
- struct v4l2_pix_format_mplane pix_mp;
- const struct hantro_fmt *fmt;
unsigned int buf_size;
- /* this should always pick native format */
- fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
- if (!fmt)
- return 0;
-
- v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
- ctx->src_fmt.height);
-
- buf_size = pix_mp.plane_fmt[0].sizeimage;
+ buf_size = ctx->ref_fmt.plane_fmt[0].sizeimage;
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
- buf_size += hantro_h264_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_h264_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
- buf_size += hantro_vp9_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_vp9_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) {
- buf_size += hantro_hevc_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_hevc_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
if (ctx->hevc_dec.use_compression)
- buf_size += hantro_hevc_compressed_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_hevc_compressed_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
}
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
- buf_size += hantro_av1_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_av1_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
return buf_size;
}
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 2513adfbd825..2bce940a5822 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -126,6 +126,24 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
return NULL;
}
+static int
+hantro_set_reference_frames_format(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *fmt;
+ int dst_bit_depth = hantro_get_format_depth(ctx->vpu_dst_fmt->fourcc);
+
+ fmt = hantro_get_default_fmt(ctx, false, dst_bit_depth, HANTRO_AUTO_POSTPROC);
+ if (!fmt)
+ return -EINVAL;
+
+ ctx->ref_fmt.width = ctx->src_fmt.width;
+ ctx->ref_fmt.height = ctx->src_fmt.height;
+
+ v4l2_apply_frmsize_constraints(&ctx->ref_fmt.width, &ctx->ref_fmt.height, &fmt->frmsize);
+ return v4l2_fill_pixfmt_mp(&ctx->ref_fmt, fmt->fourcc,
+ ctx->ref_fmt.width, ctx->ref_fmt.height);
+}
+
const struct hantro_fmt *
hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
int bit_depth, bool need_postproc)
@@ -595,6 +613,9 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->dst_fmt = *pix_mp;
+ ret = hantro_set_reference_frames_format(ctx);
+ if (ret)
+ return ret;
/*
* Current raw format might have become invalid with newly
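For context on hantro_set_reference_frames_format() above:
v4l2_apply_frmsize_constraints() first rounds the resolution to the
hardware's step sizes, then v4l2_fill_pixfmt_mp() fills in bytesperline and
sizeimage for the chosen fourcc at that resolution. A standalone sketch of
the same computation (the helper name and the NV12 fourcc are illustrative
only):

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

/* Sketch: compute the buffer size needed for one reference frame */
static u32 sketch_ref_sizeimage(u32 width, u32 height)
{
	struct v4l2_pix_format_mplane ref = { 0 };

	if (v4l2_fill_pixfmt_mp(&ref, V4L2_PIX_FMT_NV12, width, height))
		return 0;

	return ref.plane_fmt[0].sizeimage;
}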
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
index f850d8bddef6..35799da534ed 100644
--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -187,23 +187,23 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
- .step_width = TILE_MB_DIM,
+ .step_width = 8,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
- .step_height = TILE_MB_DIM,
+ .step_height = 32,
},
},
{
- .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .fourcc = V4L2_PIX_FMT_NV15_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
- .step_width = TILE_MB_DIM,
+ .step_width = 8,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
- .step_height = TILE_MB_DIM,
+ .step_height = 32,
},
},
{
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
index e54f5fac325b..69b5d9e12926 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -686,8 +686,6 @@ rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
struct hantro_dev *vpu = ctx->dev;
struct hantro_decoded_buffer *dst;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
- size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
- size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
int cur_width = frame->frame_width_minus_1 + 1;
int cur_height = frame->frame_height_minus_1 + 1;
int scale_width =
@@ -744,8 +742,8 @@ rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
dst = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
- chroma_addr = luma_addr + cr_offset;
- mv_addr = luma_addr + mv_offset;
+ chroma_addr = luma_addr + dst->av1.chroma_offset;
+ mv_addr = luma_addr + dst->av1.mv_offset;
hantro_write_addr(vpu, AV1_REFERENCE_Y(ref), luma_addr);
hantro_write_addr(vpu, AV1_REFERENCE_CB(ref), chroma_addr);
@@ -2089,6 +2087,9 @@ rockchip_vpu981_av1_dec_set_output_buffer(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ dst->av1.chroma_offset = cr_offset;
+ dst->av1.mv_offset = mv_offset;
+
hantro_write_addr(vpu, AV1_TILE_OUT_LU, luma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_CH, chroma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_MV, mv_addr);
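The rockchip AV1 hunks above fix a subtle lifetime issue: the chroma and
motion-vector offsets used to be recomputed from the current context when a
buffer was later consumed as a reference, which goes wrong if the format
changed in between. Storing the offsets in the decoded buffer at output time
makes the layout travel with the buffer. A condensed sketch of the pattern,
using the names from the hunks above:

/* At output-buffer setup: record the layout the frame is written with */
dst->av1.chroma_offset = cr_offset;
dst->av1.mv_offset = mv_offset;

/* Later, when the same buffer is bound as a reference frame */
chroma_addr = luma_addr + dst->av1.chroma_offset;
mv_addr = luma_addr + dst->av1.mv_offset;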
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index d52eccdc7eb9..72776d08046a 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -221,10 +221,6 @@ config USB_RAREMONO
source "drivers/media/radio/si470x/Kconfig"
source "drivers/media/radio/si4713/Kconfig"
-# TI's ST based wl128x FM radio
-
-source "drivers/media/radio/wl128x/Kconfig"
-
#
# ISA drivers configuration
#
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index cfb6af7d3bc3..1ff46f3a6ed3 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
-obj-$(CONFIG_RADIO_WL128X) += wl128x/
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
deleted file mode 100644
index 3e7713872e3f..000000000000
--- a/drivers/media/radio/wl128x/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# TI's wl128x FM driver based on TI's ST driver.
-#
-config RADIO_WL128X
- tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_DEV && RFKILL && TTY && TI_ST
- depends on GPIOLIB || COMPILE_TEST
- help
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/userspace-api/media/index.rst>.
diff --git a/drivers/media/radio/wl128x/Makefile b/drivers/media/radio/wl128x/Makefile
deleted file mode 100644
index 4396ca416cfa..000000000000
--- a/drivers/media/radio/wl128x/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for TI's shared transport driver based wl128x
-# FM radio.
-#
-obj-$(CONFIG_RADIO_WL128X) += fm_drv.o
-fm_drv-objs := fmdrv_common.o fmdrv_rx.o fmdrv_tx.o fmdrv_v4l2.o
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
deleted file mode 100644
index 03117a41dbd4..000000000000
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * Common header for all FM driver sub-modules.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FM_DRV_H
-#define _FM_DRV_H
-
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-
-#define FM_DRV_VERSION "0.1.1"
-#define FM_DRV_NAME "ti_fmdrv"
-#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
-#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
-
-/* Flag info */
-#define FM_INTTASK_RUNNING 0
-#define FM_INTTASK_SCHEDULE_PENDING 1
-#define FM_FW_DW_INPROGRESS 2
-#define FM_CORE_READY 3
-#define FM_CORE_TRANSPORT_READY 4
-#define FM_AF_SWITCH_INPROGRESS 5
-#define FM_CORE_TX_XMITING 6
-
-#define FM_TUNE_COMPLETE 0x1
-#define FM_BAND_LIMIT 0x2
-
-#define FM_DRV_TX_TIMEOUT (5*HZ) /* 5 seconds */
-#define FM_DRV_RX_SEEK_TIMEOUT (20*HZ) /* 20 seconds */
-
-#define fmerr(format, ...) \
- printk(KERN_ERR "fmdrv: " format, ## __VA_ARGS__)
-#define fmwarn(format, ...) \
- printk(KERN_WARNING "fmdrv: " format, ##__VA_ARGS__)
-#ifdef DEBUG
-#define fmdbg(format, ...) \
- printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
-#else /* DEBUG */
-#define fmdbg(format, ...) do {} while(0)
-#endif
-enum {
- FM_MODE_OFF,
- FM_MODE_TX,
- FM_MODE_RX,
- FM_MODE_ENTRY_MAX
-};
-
-#define FM_RX_RDS_INFO_FIELD_MAX 8 /* 4 Group * 2 Bytes */
-
-/* RX RDS data format */
-struct fm_rdsdata_format {
- union {
- struct {
- u8 buff[FM_RX_RDS_INFO_FIELD_MAX];
- } groupdatabuff;
- struct {
- u16 pidata;
- u8 blk_b[2];
- u8 blk_c[2];
- u8 blk_d[2];
- } groupgeneral;
- struct {
- u16 pidata;
- u8 blk_b[2];
- u8 af[2];
- u8 ps[2];
- } group0A;
- struct {
- u16 pi[2];
- u8 blk_b[2];
- u8 ps[2];
- } group0B;
- } data;
-};
-
-/* FM region (Europe/US, Japan) info */
-struct region_info {
- u32 chanl_space;
- u32 bot_freq;
- u32 top_freq;
- u8 fm_band;
-};
-struct fmdev;
-typedef void (*int_handler_prototype) (struct fmdev *);
-
-/* FM Interrupt processing related info */
-struct fm_irq {
- u8 stage;
- u16 flag; /* FM interrupt flag */
- u16 mask; /* FM interrupt mask */
- /* Interrupt process timeout handler */
- struct timer_list timer;
- u8 retry;
- int_handler_prototype *handlers;
-};
-
-/* RDS info */
-struct fm_rds {
- u8 flag; /* RX RDS on/off status */
- u8 last_blk_idx; /* Last received RDS block */
-
- /* RDS buffer */
- wait_queue_head_t read_queue;
- u32 buf_size; /* Size is always multiple of 3 */
- u32 wr_idx;
- u32 rd_idx;
- u8 *buff;
-};
-
-#define FM_RDS_MAX_AF_LIST 25
-
-/*
- * Current RX channel Alternate Frequency (AF) cache.
- * This info is used to switch to another frequency
- * when the current channel's signal strength falls below the RSSI threshold.
- */
-struct tuned_station_info {
- u16 picode;
- u32 af_cache[FM_RDS_MAX_AF_LIST];
- u8 afcache_size;
- u8 af_list_max;
-};
-
-/* FM RX mode info */
-struct fm_rx {
- struct region_info region; /* Current selected band */
- u32 freq; /* Current RX frequency */
- u8 mute_mode; /* Current mute mode */
- u8 deemphasis_mode; /* Current deemphasis mode */
- /* RF dependent soft mute mode */
- u8 rf_depend_mute;
- u16 volume; /* Current volume level */
- u16 rssi_threshold; /* Current RSSI threshold level */
- /* Holds the index of the current AF jump */
- u8 afjump_idx;
- /* Will hold the frequency before the jump */
- u32 freq_before_jump;
- u8 rds_mode; /* RDS operation mode (RDS/RDBS) */
- u8 af_mode; /* Alternate frequency on/off */
- struct tuned_station_info stat_info;
- struct fm_rds rds;
-};
-
-#define FMTX_RDS_TXT_STR_SIZE 25
-/*
- * FM TX RDS data
- *
- * @text_type: whether the text that follows is PS or RT
- * @text: radio text string, either PS or RT
- * @af_freq: alternate frequency for TX
- * TODO: to be declared in application
- */
-struct tx_rds {
- u8 text_type;
- u8 text[FMTX_RDS_TXT_STR_SIZE];
- u8 flag;
- u32 af_freq;
-};
-/*
- * FM TX global data
- *
- * @pwr_lvl: transmission power level, from mixer control
- * @xmit_state: transmission state, updated locally upon start/stop
- * @audio_io: I2S/analog
- * @tx_frq: transmission frequency
- */
-struct fmtx_data {
- u8 pwr_lvl;
- u8 xmit_state;
- u8 audio_io;
- u8 region;
- u16 aud_mode;
- u32 preemph;
- u32 tx_frq;
- struct tx_rds rds;
-};
-
-/* FM driver operation structure */
-struct fmdev {
- struct video_device *radio_dev; /* V4L2 video device pointer */
- struct v4l2_device v4l2_dev; /* V4L2 top level struct */
- struct snd_card *card; /* Card which holds FM mixer controls */
- u16 asci_id;
- spinlock_t rds_buff_lock; /* To protect access to RDS buffer */
- spinlock_t resp_skb_lock; /* To protect access to received SKB */
-
- long flag; /* FM driver state machine info */
- int streg_cbdata; /* status of ST registration */
-
- struct sk_buff_head rx_q; /* RX queue */
- struct work_struct rx_bh_work; /* RX BH Work */
-
- struct sk_buff_head tx_q; /* TX queue */
- struct work_struct tx_bh_work; /* TX BH Work */
- unsigned long last_tx_jiffies; /* Timestamp of last pkt sent */
- atomic_t tx_cnt; /* Number of packets can send at a time */
-
- struct sk_buff *resp_skb; /* Response from the chip */
- /* Main task completion handler */
- struct completion maintask_comp;
- /* Opcode of last command sent to the chip */
- u8 pre_op;
- /* Handler used for wakeup when response packet is received */
- struct completion *resp_comp;
- struct fm_irq irq_info;
- u8 curr_fmmode; /* Current FM chip mode (TX, RX, OFF) */
- struct fm_rx rx; /* FM receiver info */
- struct fmtx_data tx_data;
-
- /* V4L2 ctrl framework handler*/
- struct v4l2_ctrl_handler ctrl_handler;
-
- /* For core assisted locking */
- struct mutex mutex;
-};
-#endif
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
deleted file mode 100644
index 4d032436691c..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ /dev/null
@@ -1,1676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * This sub-module of FM driver is common for FM RX and TX
- * functionality. This module is responsible for:
- * 1) Forming group of Channel-8 commands to perform particular
- * functionality (eg., frequency set require more than
- * one Channel-8 command to be sent to the chip).
- * 2) Sending each Channel-8 command to the chip and reading
- * response back over Shared Transport.
- * 3) Managing TX and RX queues and BH works.
- * 4) Handling FM Interrupt packet and taking appropriate action.
- * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
- * firmware files based on mode selection)
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/nospec.h>
-#include <linux/jiffies.h>
-
-#include "fmdrv.h"
-#include "fmdrv_v4l2.h"
-#include "fmdrv_common.h"
-#include <linux/ti_wilink_st.h>
-#include "fmdrv_rx.h"
-#include "fmdrv_tx.h"
-
-/* Region info */
-static struct region_info region_configs[] = {
- /* Europe/US */
- {
- .chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
- .bot_freq = 87500, /* 87.5 MHz */
- .top_freq = 108000, /* 108 MHz */
- .fm_band = 0,
- },
- /* Japan */
- {
- .chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
- .bot_freq = 76000, /* 76 MHz */
- .top_freq = 90000, /* 90 MHz */
- .fm_band = 1,
- },
-};
-
-/* Band selection */
-static u8 default_radio_region; /* Europe/US */
-module_param(default_radio_region, byte, 0);
-MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
-
-/* RDS buffer blocks */
-static u32 default_rds_buf = 300;
-module_param(default_rds_buf, uint, 0444);
-MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
-
-/* Radio Nr */
-static u32 radio_nr = -1;
-module_param(radio_nr, int, 0444);
-MODULE_PARM_DESC(radio_nr, "Radio Nr");
-
-/* FM irq handlers forward declaration */
-static void fm_irq_send_flag_getcmd(struct fmdev *);
-static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
-static void fm_irq_handle_hw_malfunction(struct fmdev *);
-static void fm_irq_handle_rds_start(struct fmdev *);
-static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
-static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
-static void fm_irq_handle_rds_finish(struct fmdev *);
-static void fm_irq_handle_tune_op_ended(struct fmdev *);
-static void fm_irq_handle_power_enb(struct fmdev *);
-static void fm_irq_handle_low_rssi_start(struct fmdev *);
-static void fm_irq_afjump_set_pi(struct fmdev *);
-static void fm_irq_handle_set_pi_resp(struct fmdev *);
-static void fm_irq_afjump_set_pimask(struct fmdev *);
-static void fm_irq_handle_set_pimask_resp(struct fmdev *);
-static void fm_irq_afjump_setfreq(struct fmdev *);
-static void fm_irq_handle_setfreq_resp(struct fmdev *);
-static void fm_irq_afjump_enableint(struct fmdev *);
-static void fm_irq_afjump_enableint_resp(struct fmdev *);
-static void fm_irq_start_afjump(struct fmdev *);
-static void fm_irq_handle_start_afjump_resp(struct fmdev *);
-static void fm_irq_afjump_rd_freq(struct fmdev *);
-static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
-static void fm_irq_handle_low_rssi_finish(struct fmdev *);
-static void fm_irq_send_intmsk_cmd(struct fmdev *);
-static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);
-
-/*
- * When FM common module receives interrupt packet, following handlers
- * will be executed one after another to service the interrupt(s)
- */
-enum fmc_irq_handler_index {
- FM_SEND_FLAG_GETCMD_IDX,
- FM_HANDLE_FLAG_GETCMD_RESP_IDX,
-
- /* HW malfunction irq handler */
- FM_HW_MAL_FUNC_IDX,
-
- /* RDS threshold reached irq handler */
- FM_RDS_START_IDX,
- FM_RDS_SEND_RDS_GETCMD_IDX,
- FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
- FM_RDS_FINISH_IDX,
-
- /* Tune operation ended irq handler */
- FM_HW_TUNE_OP_ENDED_IDX,
-
- /* TX power enable irq handler */
- FM_HW_POWER_ENB_IDX,
-
- /* Low RSSI irq handler */
- FM_LOW_RSSI_START_IDX,
- FM_AF_JUMP_SETPI_IDX,
- FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
- FM_AF_JUMP_SETPI_MASK_IDX,
- FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
- FM_AF_JUMP_SET_AF_FREQ_IDX,
- FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
- FM_AF_JUMP_ENABLE_INT_IDX,
- FM_AF_JUMP_ENABLE_INT_RESP_IDX,
- FM_AF_JUMP_START_AFJUMP_IDX,
- FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
- FM_AF_JUMP_RD_FREQ_IDX,
- FM_AF_JUMP_RD_FREQ_RESP_IDX,
- FM_LOW_RSSI_FINISH_IDX,
-
- /* Interrupt process post action */
- FM_SEND_INTMSK_CMD_IDX,
- FM_HANDLE_INTMSK_CMD_RESP_IDX,
-};
-
-/* FM interrupt handler table */
-static int_handler_prototype int_handler_table[] = {
- fm_irq_send_flag_getcmd,
- fm_irq_handle_flag_getcmd_resp,
- fm_irq_handle_hw_malfunction,
- fm_irq_handle_rds_start, /* RDS threshold reached irq handler */
- fm_irq_send_rdsdata_getcmd,
- fm_irq_handle_rdsdata_getcmd_resp,
- fm_irq_handle_rds_finish,
- fm_irq_handle_tune_op_ended,
- fm_irq_handle_power_enb, /* TX power enable irq handler */
- fm_irq_handle_low_rssi_start,
- fm_irq_afjump_set_pi,
- fm_irq_handle_set_pi_resp,
- fm_irq_afjump_set_pimask,
- fm_irq_handle_set_pimask_resp,
- fm_irq_afjump_setfreq,
- fm_irq_handle_setfreq_resp,
- fm_irq_afjump_enableint,
- fm_irq_afjump_enableint_resp,
- fm_irq_start_afjump,
- fm_irq_handle_start_afjump_resp,
- fm_irq_afjump_rd_freq,
- fm_irq_afjump_rd_freq_resp,
- fm_irq_handle_low_rssi_finish,
- fm_irq_send_intmsk_cmd, /* Interrupt process post action */
- fm_irq_handle_intmsk_cmd_resp
-};
-
-static long (*g_st_write) (struct sk_buff *skb);
-static struct completion wait_for_fmdrv_reg_comp;
-
-static inline void fm_irq_call(struct fmdev *fmdev)
-{
- fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
-}
-
-/* Continue next function in interrupt handler table */
-static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
-{
- fmdev->irq_info.stage = stage;
- fm_irq_call(fmdev);
-}
-
-static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
-{
- fmdev->irq_info.stage = stage;
- mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
-}
-
-#ifdef FM_DUMP_TXRX_PKT
- /* To dump outgoing FM Channel-8 packets */
-inline void dump_tx_skb_data(struct sk_buff *skb)
-{
- int len, len_org;
- u8 index;
- struct fm_cmd_msg_hdr *cmd_hdr;
-
- cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
- printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
- fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
- cmd_hdr->len, cmd_hdr->op,
- cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);
-
- len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
- if (len_org > 0) {
- printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
- len = min(len_org, 14);
- for (index = 0; index < len; index++)
- printk(KERN_CONT "%x ",
- skb->data[FM_CMD_MSG_HDR_SIZE + index]);
- printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
- }
- printk(KERN_CONT "\n");
-}
-
- /* To dump incoming FM Channel-8 packets */
-inline void dump_rx_skb_data(struct sk_buff *skb)
-{
- int len, len_org;
- u8 index;
- struct fm_event_msg_hdr *evt_hdr;
-
- evt_hdr = (struct fm_event_msg_hdr *)skb->data;
- printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
- evt_hdr->header, evt_hdr->len,
- evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
- (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
-
- len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
- if (len_org > 0) {
- printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
- len = min(len_org, 14);
- for (index = 0; index < len; index++)
- printk(KERN_CONT "%x ",
- skb->data[FM_EVT_MSG_HDR_SIZE + index]);
- printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
- }
- printk(KERN_CONT "\n");
-}
-#endif
-
-void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
-{
- fmdev->rx.region = region_configs[region_to_set];
-}
-
-/*
- * FM common sub-module will queue this bh work whenever it receives
- * FM packet from ST driver.
- */
-static void recv_bh_work(struct work_struct *t)
-{
- struct fmdev *fmdev;
- struct fm_irq *irq_info;
- struct fm_event_msg_hdr *evt_hdr;
- struct sk_buff *skb;
- u8 num_fm_hci_cmds;
- unsigned long flags;
-
- fmdev = from_work(fmdev, t, rx_bh_work);
- irq_info = &fmdev->irq_info;
- /* Process all packets in the RX queue */
- while ((skb = skb_dequeue(&fmdev->rx_q))) {
- if (skb->len < sizeof(struct fm_event_msg_hdr)) {
- fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
- skb,
- skb->len, sizeof(struct fm_event_msg_hdr));
- kfree_skb(skb);
- continue;
- }
-
- evt_hdr = (void *)skb->data;
- num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;
-
- /* FM interrupt packet? */
- if (evt_hdr->op == FM_INTERRUPT) {
- /* FM interrupt handler started already? */
- if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
- set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
- if (irq_info->stage != 0) {
- fmerr("Inval stage resetting to zero\n");
- irq_info->stage = 0;
- }
-
- /*
- * Execute first function in interrupt handler
- * table.
- */
- irq_info->handlers[irq_info->stage](fmdev);
- } else {
- set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
- }
- kfree_skb(skb);
- }
- /* Anyone waiting for this with completion handler? */
- else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- fmdev->resp_skb = skb;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
- complete(fmdev->resp_comp);
-
- fmdev->resp_comp = NULL;
- atomic_set(&fmdev->tx_cnt, 1);
- }
- /* Is this for interrupt handler? */
- else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
- if (fmdev->resp_skb != NULL)
- fmerr("Response SKB ptr not NULL\n");
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- fmdev->resp_skb = skb;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- /* Execute interrupt handler where state index points */
- irq_info->handlers[irq_info->stage](fmdev);
-
- kfree_skb(skb);
- atomic_set(&fmdev->tx_cnt, 1);
- } else {
- fmerr("Nobody claimed SKB(%p), purging\n", skb);
- kfree_skb(skb);
- }
-
- /*
- * Check flow control field. If Num_FM_HCI_Commands field is
- * not zero, queue FM TX bh work.
- */
- if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
- if (!skb_queue_empty(&fmdev->tx_q))
- queue_work(system_bh_wq, &fmdev->tx_bh_work);
- }
-}
-
-/* FM send_bh_work: scheduled when an FM packet has to be sent to the chip */
-static void send_bh_work(struct work_struct *t)
-{
- struct fmdev *fmdev;
- struct sk_buff *skb;
- int len;
-
- fmdev = from_work(fmdev, t, tx_bh_work);
-
- if (!atomic_read(&fmdev->tx_cnt))
- return;
-
- /* Check whether a timeout occurred for the last transmitted packet */
- if (time_is_before_jiffies(fmdev->last_tx_jiffies + FM_DRV_TX_TIMEOUT)) {
- fmerr("TX timeout occurred\n");
- atomic_set(&fmdev->tx_cnt, 1);
- }
-
- /* Send queued FM TX packets */
- skb = skb_dequeue(&fmdev->tx_q);
- if (!skb)
- return;
-
- atomic_dec(&fmdev->tx_cnt);
- fmdev->pre_op = fm_cb(skb)->fm_op;
-
- if (fmdev->resp_comp != NULL)
- fmerr("Response completion handler is not NULL\n");
-
- fmdev->resp_comp = fm_cb(skb)->completion;
-
- /* Write FM packet to ST driver */
- len = g_st_write(skb);
- if (len < 0) {
- kfree_skb(skb);
- fmdev->resp_comp = NULL;
- fmerr("TX bh work failed to send skb(%p)\n", skb);
- atomic_set(&fmdev->tx_cnt, 1);
- } else {
- fmdev->last_tx_jiffies = jiffies;
- }
-}
-
-/*
- * Queues FM Channel-8 packet to FM TX queue and schedules FM TX bh work for
- * transmission
- */
-static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
- int payload_len, struct completion *wait_completion)
-{
- struct sk_buff *skb;
- struct fm_cmd_msg_hdr *hdr;
- int size;
-
- if (fm_op >= FM_INTERRUPT) {
- fmerr("Invalid fm opcode - %d\n", fm_op);
- return -EINVAL;
- }
- if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
- fmerr("Payload data is NULL during fw download\n");
- return -EINVAL;
- }
- if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
- size =
- FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
- else
- size = payload_len;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb) {
- fmerr("No memory to create new SKB\n");
- return -ENOMEM;
- }
- /*
- * Don't fill FM header info for the commands which come from
- * FM firmware file.
- */
- if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
- test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
- /* Fill command header info */
- hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
- hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER; /* 0x08 */
-
- /* 3 (fm_opcode,rd_wr,dlen) + payload len) */
- hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;
-
- /* FM opcode */
- hdr->op = fm_op;
-
- /* read/write type */
- hdr->rd_wr = type;
- hdr->dlen = payload_len;
- fm_cb(skb)->fm_op = fm_op;
-
- /*
- * If firmware download has finished and the command is
- * not a read command then payload is != NULL - a write
- * command with u16 payload - convert to be16
- */
- if (payload != NULL)
- *(__be16 *)payload = cpu_to_be16(*(u16 *)payload);
-
- } else if (payload != NULL) {
- fm_cb(skb)->fm_op = *((u8 *)payload + 2);
- }
- if (payload != NULL)
- skb_put_data(skb, payload, payload_len);
-
- fm_cb(skb)->completion = wait_completion;
- skb_queue_tail(&fmdev->tx_q, skb);
- queue_work(system_bh_wq, &fmdev->tx_bh_work);
-
- return 0;
-}
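For reference, the Channel-8 framing this function produces for a simple
register read, restated as standalone C. The layout mirrors struct
fm_cmd_msg_hdr in fmdrv_common.h below; the standalone types and the helper
name are illustrative only:

	#include <stdint.h>
	#include <string.h>

	struct cmd_hdr {
		uint8_t hdr;	/* FM_PKT_LOGICAL_CHAN_NUMBER, 0x08 */
		uint8_t len;	/* bytes that follow: op + rd_wr + dlen, plus payload if any */
		uint8_t op;	/* FM opcode, e.g. FLAG_GET (3) */
		uint8_t rd_wr;	/* REG_RD (0x1) or REG_WR (0x0) */
		uint8_t dlen;	/* payload (or expected response) length */
	} __attribute__((packed));

	/* Build a FLAG_GET read request for the 2-byte flag register */
	static size_t build_flag_get(uint8_t *buf)
	{
		struct cmd_hdr h = {
			.hdr = 0x08,
			.len = 3,	/* no payload bytes carried on a read */
			.op = 3,
			.rd_wr = 0x1,
			.dlen = 2,
		};

		memcpy(buf, &h, sizeof(h));
		return sizeof(h);
	}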
-
-/* Sends FM Channel-8 command to the chip and waits for the response */
-int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
- unsigned int payload_len, void *response, int *response_len)
-{
- struct sk_buff *skb;
- struct fm_event_msg_hdr *evt_hdr;
- unsigned long flags;
- int ret;
-
- init_completion(&fmdev->maintask_comp);
- ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
- &fmdev->maintask_comp);
- if (ret)
- return ret;
-
- if (!wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT)) {
- fmerr("Timeout(%d sec),didn't get regcompletion signal from RX bh work\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- if (!fmdev->resp_skb) {
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
- fmerr("Response SKB is missing\n");
- return -EFAULT;
- }
- skb = fmdev->resp_skb;
- fmdev->resp_skb = NULL;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- evt_hdr = (void *)skb->data;
- if (evt_hdr->status != 0) {
- fmerr("Received event pkt status(%d) is not zero\n",
- evt_hdr->status);
- kfree_skb(skb);
- return -EIO;
- }
- /* Send response data to caller */
- if (response != NULL && response_len != NULL && evt_hdr->dlen &&
- evt_hdr->dlen <= payload_len) {
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(response, skb->data, evt_hdr->dlen);
- *response_len = evt_hdr->dlen;
- } else if (response_len != NULL && evt_hdr->dlen == 0) {
- *response_len = 0;
- }
- kfree_skb(skb);
-
- return 0;
-}
-
-/* --- Helper functions used in FM interrupt handlers ---*/
-static inline int check_cmdresp_status(struct fmdev *fmdev,
- struct sk_buff **skb)
-{
- struct fm_event_msg_hdr *fm_evt_hdr;
- unsigned long flags;
-
- del_timer(&fmdev->irq_info.timer);
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- *skb = fmdev->resp_skb;
- fmdev->resp_skb = NULL;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- fm_evt_hdr = (void *)(*skb)->data;
- if (fm_evt_hdr->status != 0) {
- fmerr("irq: opcode %x response status is not zero Initiating irq recovery process\n",
- fm_evt_hdr->op);
-
- mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
- return -1;
- }
-
- return 0;
-}
-
-static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
-{
- struct sk_buff *skb;
-
- if (!check_cmdresp_status(fmdev, &skb))
- fm_irq_call_stage(fmdev, stage);
-}
-
-/*
- * Interrupt process timeout handler.
- * One of the irq handlers did not get a proper response from the chip, so
- * take recovery action here. FM interrupts are disabled at the beginning of
- * interrupt processing. Therefore reset the stage index to re-enable the
- * default interrupts, so that the next interrupt is processed as usual.
- */
-static void int_timeout_handler(struct timer_list *t)
-{
- struct fmdev *fmdev;
- struct fm_irq *fmirq;
-
- fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
- fmdev = from_timer(fmdev, t, irq_info.timer);
- fmirq = &fmdev->irq_info;
- fmirq->retry++;
-
- if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
- /* Stop recovery action (interrupt reenable process) and
- * reset stage index & retry count values */
- fmirq->stage = 0;
- fmirq->retry = 0;
- fmerr("Recovery action failed duringirq processing, max retry reached\n");
- return;
- }
- fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
-}
-
-/* --------- FM interrupt handlers ------------*/
-static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
-{
- u16 flag;
-
- /* Send FLAG_GET command to learn the source of the interrupt */
- if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
- fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
-}
-
-static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- struct fm_event_msg_hdr *fm_evt_hdr;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- fm_evt_hdr = (void *)skb->data;
- if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
- return;
-
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);
-
- fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
- fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);
-
- /* Continue next function in interrupt handler table */
- fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
-}
-
-static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
- fmerr("irq: HW MAL int received - do nothing\n");
-
- /* Continue next function in interrupt handler table */
- fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
-}
-
-static void fm_irq_handle_rds_start(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
- fmdbg("irq: rds threshold reached\n");
- fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
- } else {
- /* Continue next function in interrupt handler table */
- fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
- }
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
-{
- /* Send the command to read RDS data from the chip */
- if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
- (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
- fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
-}
-
-/* Keeps track of current RX channel AF (Alternate Frequency) */
-static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
-{
- struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
- u8 reg_idx = fmdev->rx.region.fm_band;
- u8 index;
- u32 freq;
-
- /* First AF indicates the number of AFs that follow. Reset the list */
- if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
- fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
- fmdev->rx.stat_info.afcache_size = 0;
- fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
- return;
- }
-
- if (af < FM_RDS_MIN_AF)
- return;
- if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
- return;
- if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
- return;
-
- freq = fmdev->rx.region.bot_freq + (af * 100);
- if (freq == fmdev->rx.freq) {
- fmdbg("Current freq(%d) is matching with received AF(%d)\n",
- fmdev->rx.freq, freq);
- return;
- }
- /* Do check in AF cache */
- for (index = 0; index < stat_info->afcache_size; index++) {
- if (stat_info->af_cache[index] == freq)
- break;
- }
- /* Reached the limit of the list - ignore the next AF */
- if (index == stat_info->af_list_max) {
- fmdbg("AF cache is full\n");
- return;
- }
- /*
- * If we reached the end of the list then this AF is not
- * in the list - add it.
- */
- if (index == stat_info->afcache_size) {
- fmdbg("Storing AF %d to cache index %d\n", freq, index);
- stat_info->af_cache[index] = freq;
- stat_info->afcache_size++;
- }
-}
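The mapping used above, restated: AF codes count 100 kHz steps up from the
band's bottom frequency (87.5 MHz for Europe/US, kept in kHz units here), so
the valid codes 1..204 cover 87.6 to 107.9 MHz. A standalone sketch:

	/* e.g. af = 46 -> 87500 + 4600 = 92100 kHz (92.1 MHz) */
	static unsigned int af_code_to_khz(unsigned int bot_freq_khz,
					   unsigned int af)
	{
		return bot_freq_khz + af * 100;
	}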
-
-/*
- * Converts RDS buffer data from big endian format
- * to little endian format.
- */
-static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
- struct fm_rdsdata_format *rds_format)
-{
- u8 index = 0;
- u8 *rds_buff;
-
- /*
- * Since in Orca the 2 RDS Data bytes are in little endian and
- * in Dolphin they are in big endian, the parsing of the RDS data
- * is chip dependent
- */
- if (fmdev->asci_id != 0x6350) {
- rds_buff = &rds_format->data.groupdatabuff.buff[0];
- while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
- swap(rds_buff[index], rds_buff[index + 1]);
- index += 2;
- }
- }
-}
-
-static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- struct fm_rdsdata_format rds_fmt;
- struct fm_rds *rds = &fmdev->rx.rds;
- unsigned long group_idx, flags;
- u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
- u8 type, blk_idx, idx;
- u16 cur_picode;
- u32 rds_len;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- /* Skip header info */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- rds_data = skb->data;
- rds_len = skb->len;
-
- /* Parse the RDS data */
- while (rds_len >= FM_RDS_BLK_SIZE) {
- meta_data = rds_data[2];
- /* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
- type = (meta_data & 0x07);
-
- /* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
- blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
- fmdbg("Block index:%d(%s)\n", blk_idx,
- (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");
-
- if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
- break;
-
- if (blk_idx > FM_RDS_BLK_IDX_D) {
- fmdbg("Block sequence mismatch\n");
- rds->last_blk_idx = -1;
- break;
- }
-
- /* Skip checkword (control) byte and copy only data byte */
- idx = array_index_nospec(blk_idx * (FM_RDS_BLK_SIZE - 1),
- FM_RX_RDS_INFO_FIELD_MAX - (FM_RDS_BLK_SIZE - 1));
-
- memcpy(&rds_fmt.data.groupdatabuff.buff[idx], rds_data,
- FM_RDS_BLK_SIZE - 1);
-
- rds->last_blk_idx = blk_idx;
-
- /* If completed a whole group then handle it */
- if (blk_idx == FM_RDS_BLK_IDX_D) {
- fmdbg("Good block received\n");
- fm_rdsparse_swapbytes(fmdev, &rds_fmt);
-
- /*
- * Extract PI code and store in local cache.
- * We need this during AF switch processing.
- */
- cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
- if (fmdev->rx.stat_info.picode != cur_picode)
- fmdev->rx.stat_info.picode = cur_picode;
-
- fmdbg("picode:%d\n", cur_picode);
-
- group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
- fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
- (group_idx % 2) ? "B" : "A");
-
- group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
- if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
- fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
- fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
- }
- }
- rds_len -= FM_RDS_BLK_SIZE;
- rds_data += FM_RDS_BLK_SIZE;
- }
-
- /* Copy raw rds data to internal rds buffer */
- rds_data = skb->data;
- rds_len = skb->len;
-
- spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
- while (rds_len > 0) {
- /*
- * Fill RDS buffer as per V4L2 specification.
- * Store control byte
- */
- type = (rds_data[2] & 0x07);
- blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
- tmpbuf[2] = blk_idx; /* Offset name */
- tmpbuf[2] |= blk_idx << 3; /* Received offset */
-
- /* Store data byte */
- tmpbuf[0] = rds_data[0];
- tmpbuf[1] = rds_data[1];
-
- memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
- rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;
-
- /* Check for overflow & start over */
- if (rds->wr_idx == rds->rd_idx) {
- fmdbg("RDS buffer overflow\n");
- rds->wr_idx = 0;
- rds->rd_idx = 0;
- break;
- }
- rds_len -= FM_RDS_BLK_SIZE;
- rds_data += FM_RDS_BLK_SIZE;
- }
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
-
- /* Wakeup read queue */
- if (rds->wr_idx != rds->rd_idx)
- wake_up_interruptible(&rds->read_queue);
-
- fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
-}
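The block-type to buffer-index transform used twice above, spelled out:
received types 0..5 are A, B, C, C', D, E, and C' shares a slot with C, so
the sequence collapses to indices 0..4. A one-line restatement:

	/* FM_RDS_BLOCK_* (0..5) -> FM_RDS_BLK_IDX_* (0..4) */
	static unsigned int rds_blk_index(unsigned int type)
	{
		return type <= 2 /* FM_RDS_BLOCK_C */ ? type : type - 1;
	}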
-
-static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
-{
- fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
-}
-
-static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
- fmdev->irq_info.mask) {
- fmdbg("irq: tune ended/bandlimit reached\n");
- if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
- fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
- } else {
- complete(&fmdev->maintask_comp);
- fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
- }
- } else {
- fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
- }
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_handle_power_enb(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
- fmdbg("irq: Power Enabled/Disabled\n");
- complete(&fmdev->maintask_comp);
- }
-
- fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
-}
-
-static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
-{
- if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
- (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
- (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
- (fmdev->rx.stat_info.afcache_size != 0)) {
- fmdbg("irq: rssi level has fallen below threshold level\n");
-
- /* Disable further low RSSI interrupts */
- fmdev->irq_info.mask &= ~FM_LEV_EVENT;
-
- fmdev->rx.afjump_idx = 0;
- fmdev->rx.freq_before_jump = fmdev->rx.freq;
- fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
- } else {
- /* Continue next function in interrupt handler table */
- fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
- }
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Set PI code - must be updated if the AF list is not empty */
- payload = fmdev->rx.stat_info.picode;
- if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
-}
-
-static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
-}
-
-/*
- * Set PI mask.
- * 0xFFFF = Enable PI code matching
- * 0x0000 = Disable PI code matching
- */
-static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
-{
- u16 payload;
-
- payload = 0x0000;
- if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
-}
-
-static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
-}
-
-static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
-{
- u16 frq_index;
- u16 payload;
-
- fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
- frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
- fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- payload = frq_index;
- if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
-}
-
-static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
-}
-
-static void fm_irq_afjump_enableint(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Enable FR (tuning operation ended) interrupt */
- payload = FM_FR_EVENT;
- if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
-}
-
-static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
-}
-
-static void fm_irq_start_afjump(struct fmdev *fmdev)
-{
- u16 payload;
-
- payload = FM_TUNER_AF_JUMP_MODE;
- if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
-}
-
-static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
- set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
- clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
-}
-
-static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
-{
- u16 payload;
-
- if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
-}
-
-static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- u16 read_freq;
- u32 curr_freq, jumped_freq;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(&read_freq, skb->data, sizeof(read_freq));
- read_freq = be16_to_cpu((__force __be16)read_freq);
- curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);
-
- jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];
-
- /* If the frequency was changed the jump succeeded */
- if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
- fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
- fmdev->rx.freq = curr_freq;
- fm_rx_reset_rds_cache(fmdev);
-
- /* AF feature is on, enable low level RSSI interrupt */
- if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
-
- fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
- } else { /* jump to the next freq in the AF list */
- fmdev->rx.afjump_idx++;
-
- /* If we reached the end of the list - stop searching */
- if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
- fmdbg("AF switch processing failed\n");
- fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
- } else { /* AF List is not over - try next one */
-
- fmdbg("Trying next freq in AF cache\n");
- fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
- }
- }
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
-{
- fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
-}
-
-static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Re-enable FM interrupts */
- payload = fmdev->irq_info.mask;
-
- if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
-}
-
-static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
- /*
- * This is last function in interrupt table to be executed.
- * So, reset stage index to 0.
- */
- fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
-
- /* Start processing any pending interrupt */
- if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
- fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
- else
- clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
-}
-
-/* Returns availability of RDS data in internal buffer */
-int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
- struct poll_table_struct *pts)
-{
- poll_wait(file, &fmdev->rx.rds.read_queue, pts);
- if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
- return 0;
-
- return -EAGAIN;
-}
-
-/* Copies RDS data from internal buffer to user buffer */
-int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
- u8 __user *buf, size_t count)
-{
- u32 block_count;
- u8 tmpbuf[FM_RDS_BLK_SIZE];
- unsigned long flags;
- int ret;
-
- if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
- ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
- (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
- if (ret)
- return -EINTR;
- }
-
- /* Calculate block count from byte count */
- count /= FM_RDS_BLK_SIZE;
- block_count = 0;
- ret = 0;
-
- while (block_count < count) {
- spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
-
- if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
- break;
- }
- memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
- FM_RDS_BLK_SIZE);
- fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
- if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
- fmdev->rx.rds.rd_idx = 0;
-
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
-
- if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
- break;
-
- block_count++;
- buf += FM_RDS_BLK_SIZE;
- ret += FM_RDS_BLK_SIZE;
- }
- return ret;
-}
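On the user side of this path, read() on the radio node returns whole 3-byte
RDS records in the V4L2 layout filled in above (two data bytes, then the
status/offset byte). A consumer sketch; the /dev/radio0 name is an
assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char blk[3];
		int fd = open("/dev/radio0", O_RDONLY);

		if (fd < 0)
			return 1;
		while (read(fd, blk, sizeof(blk)) == sizeof(blk))
			printf("block %u: %02x %02x\n",
			       blk[2] & 0x07, blk[0], blk[1]);
		close(fd);
		return 0;
	}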
-
-int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_freq(fmdev, freq_to_set);
-
- case FM_MODE_TX:
- return fm_tx_set_freq(fmdev, freq_to_set);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
-{
- if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
- fmerr("RX frequency is not set\n");
- return -EPERM;
- }
- if (cur_tuned_frq == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- *cur_tuned_frq = fmdev->rx.freq;
- return 0;
-
- case FM_MODE_TX:
- *cur_tuned_frq = 0; /* TODO: change this later */
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_region(fmdev, region_to_set);
-
- case FM_MODE_TX:
- return fm_tx_set_region(fmdev, region_to_set);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_mute_mode(fmdev, mute_mode_toset);
-
- case FM_MODE_TX:
- return fm_tx_set_mute_mode(fmdev, mute_mode_toset);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_stereo_mono(fmdev, mode);
-
- case FM_MODE_TX:
- return fm_tx_set_stereo_mono(fmdev, mode);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_rds_mode(fmdev, rds_en_dis);
-
- case FM_MODE_TX:
- return fm_tx_set_rds_mode(fmdev, rds_en_dis);
-
- default:
- return -EINVAL;
- }
-}
-
-/* Sends power off command to the chip */
-static int fm_power_down(struct fmdev *fmdev)
-{
- u16 payload;
- int ret;
-
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmerr("FM core is not ready\n");
- return -EPERM;
- }
- if (fmdev->curr_fmmode == FM_MODE_OFF) {
- fmdbg("FM chip is already in OFF state\n");
- return 0;
- }
-
- payload = 0x0;
- ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return fmc_release(fmdev);
-}
-
-/* Reads init command from FM firmware file and loads to the chip */
-static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
-{
- const struct firmware *fw_entry;
- struct bts_header *fw_header;
- struct bts_action *action;
- struct bts_action_delay *delay;
- u8 *fw_data;
- int ret, fw_len;
-
- set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
-
- ret = request_firmware(&fw_entry, fw_name,
- &fmdev->radio_dev->dev);
- if (ret < 0) {
- fmerr("Unable to read firmware(%s) content\n", fw_name);
- return ret;
- }
- fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
-
- fw_data = (void *)fw_entry->data;
- fw_len = fw_entry->size;
-
- fw_header = (struct bts_header *)fw_data;
- if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
- fmerr("%s not a legal TI firmware file\n", fw_name);
- ret = -EINVAL;
- goto rel_fw;
- }
- fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);
-
- /* Skip file header info; we already verified it */
- fw_data += sizeof(struct bts_header);
- fw_len -= sizeof(struct bts_header);
-
- while (fw_data && fw_len > 0) {
- action = (struct bts_action *)fw_data;
-
- switch (action->type) {
- case ACTION_SEND_COMMAND: /* Send */
- ret = fmc_send_cmd(fmdev, 0, 0, action->data,
- action->size, NULL, NULL);
- if (ret)
- goto rel_fw;
-
- break;
-
- case ACTION_DELAY: /* Delay */
- delay = (struct bts_action_delay *)action->data;
- mdelay(delay->msec);
- break;
- }
-
- fw_data += (sizeof(struct bts_action) + (action->size));
- fw_len -= (sizeof(struct bts_action) + (action->size));
- }
- fmdbg("Transferred only %d of %d bytes of the firmware to chip\n",
- fw_entry->size - fw_len, fw_entry->size);
-rel_fw:
- release_firmware(fw_entry);
- clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
-
- return ret;
-}
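The firmware walk above steps through a BTS file: a header carrying the
magic checked against FM_FW_FILE_HEADER_MAGIC, then a stream of
type/size/data actions. A standalone sketch of that stream walk; the struct
layout mirrors ti_wilink_st.h but should be treated as illustrative:

	#include <stdint.h>

	struct bts_action {
		uint16_t type;		/* ACTION_SEND_COMMAND, ACTION_DELAY, ... */
		uint16_t size;		/* number of bytes in data[] */
		uint8_t data[];
	} __attribute__((packed));

	static void walk_actions(const uint8_t *p, long len)
	{
		while (len >= (long)sizeof(struct bts_action)) {
			const struct bts_action *a = (const void *)p;
			long step = sizeof(*a) + a->size;

			/* dispatch on a->type here, as the loop above does */
			p += step;
			len -= step;
		}
	}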
-
-/* Loads default RX configuration to the chip */
-static int load_default_rx_configuration(struct fmdev *fmdev)
-{
- int ret;
-
- ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
- if (ret < 0)
- return ret;
-
- return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
-}
-
-/* Does FM power on sequence */
-static int fm_power_up(struct fmdev *fmdev, u8 mode)
-{
- u16 payload;
- __be16 asic_id = 0, asic_ver = 0;
- int resp_len, ret;
- u8 fw_name[50];
-
- if (mode >= FM_MODE_ENTRY_MAX) {
- fmerr("Invalid firmware download option\n");
- return -EINVAL;
- }
-
- /*
- * Initialize FM common module. FM GPIO toggling is
- * taken care of in the Shared Transport driver.
- */
- ret = fmc_prepare(fmdev);
- if (ret < 0) {
- fmerr("Unable to prepare FM Common\n");
- return ret;
- }
-
- payload = FM_ENABLE;
- if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL))
- goto rel;
-
- /* Allow the chip to settle down in Channel-8 mode */
- msleep(20);
-
- if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
- sizeof(asic_id), &asic_id, &resp_len))
- goto rel;
-
- if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
- sizeof(asic_ver), &asic_ver, &resp_len))
- goto rel;
-
- fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- ret = fm_download_firmware(fmdev, fw_name);
- if (ret < 0) {
- fmdbg("Failed to download firmware file %s\n", fw_name);
- goto rel;
- }
- sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
- FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- ret = fm_download_firmware(fmdev, fw_name);
- if (ret < 0) {
- fmdbg("Failed to download firmware file %s\n", fw_name);
- goto rel;
- }
- return ret;
-rel:
- return fmc_release(fmdev);
-}
-
-/* Set FM Modes(TX, RX, OFF) */
-int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
-{
- int ret = 0;
-
- if (fm_mode >= FM_MODE_ENTRY_MAX) {
- fmerr("Invalid FM mode\n");
- return -EINVAL;
- }
- if (fmdev->curr_fmmode == fm_mode) {
- fmdbg("Already fm is in mode(%d)\n", fm_mode);
- return ret;
- }
-
- switch (fm_mode) {
- case FM_MODE_OFF: /* OFF Mode */
- ret = fm_power_down(fmdev);
- if (ret < 0) {
- fmerr("Failed to set OFF mode\n");
- return ret;
- }
- break;
-
- case FM_MODE_TX: /* TX Mode */
- case FM_MODE_RX: /* RX Mode */
- /* Power down before switching to TX or RX mode */
- if (fmdev->curr_fmmode != FM_MODE_OFF) {
- ret = fm_power_down(fmdev);
- if (ret < 0) {
- fmerr("Failed to set OFF mode\n");
- return ret;
- }
- msleep(30);
- }
- ret = fm_power_up(fmdev, fm_mode);
- if (ret < 0) {
- fmerr("Failed to load firmware\n");
- return ret;
- }
- }
- fmdev->curr_fmmode = fm_mode;
-
- /* Set default configuration */
- if (fmdev->curr_fmmode == FM_MODE_RX) {
- fmdbg("Loading default rx configuration..\n");
- ret = load_default_rx_configuration(fmdev);
- if (ret < 0)
- fmerr("Failed to load default values\n");
- }
-
- return ret;
-}
-
-/* Returns current FM mode (TX, RX, OFF) */
-int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
-{
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmerr("FM core is not ready\n");
- return -EPERM;
- }
- if (fmmode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *fmmode = fmdev->curr_fmmode;
- return 0;
-}
-
-/* Called by ST layer when FM packet is available */
-static long fm_st_receive(void *arg, struct sk_buff *skb)
-{
- struct fmdev *fmdev;
-
- fmdev = arg;
-
- if (skb == NULL) {
- fmerr("Invalid SKB received from ST\n");
- return -EFAULT;
- }
-
- if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
- fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
- return -EINVAL;
- }
-
- memcpy(skb_push(skb, 1), &skb->cb[0], 1);
- skb_queue_tail(&fmdev->rx_q, skb);
- queue_work(system_bh_wq, &fmdev->rx_bh_work);
-
- return 0;
-}
-
-/*
- * Called by ST layer to indicate protocol registration completion
- * status.
- */
-static void fm_st_reg_comp_cb(void *arg, int data)
-{
- struct fmdev *fmdev;
-
- fmdev = (struct fmdev *)arg;
- fmdev->streg_cbdata = data;
- complete(&wait_for_fmdrv_reg_comp);
-}
-
-/*
- * This function will be called from FM V4L2 open function.
- * Register with ST driver and initialize driver data.
- */
-int fmc_prepare(struct fmdev *fmdev)
-{
- static struct st_proto_s fm_st_proto;
- int ret;
-
- if (test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmdbg("FM Core is already up\n");
- return 0;
- }
-
- memset(&fm_st_proto, 0, sizeof(fm_st_proto));
- fm_st_proto.recv = fm_st_receive;
- fm_st_proto.match_packet = NULL;
- fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
- fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
- fm_st_proto.priv_data = fmdev;
- fm_st_proto.chnl_id = 0x08;
- fm_st_proto.max_frame_size = 0xff;
- fm_st_proto.hdr_len = 1;
- fm_st_proto.offset_len_in_hdr = 0;
- fm_st_proto.len_size = 1;
- fm_st_proto.reserve = 1;
-
- ret = st_register(&fm_st_proto);
- if (ret == -EINPROGRESS) {
- init_completion(&wait_for_fmdrv_reg_comp);
- fmdev->streg_cbdata = -EINPROGRESS;
- fmdbg("%s waiting for ST reg completion signal\n", __func__);
-
- if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
- FM_ST_REG_TIMEOUT)) {
- fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
- jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
- if (fmdev->streg_cbdata != 0) {
- fmerr("ST reg comp CB called with error status %d\n",
- fmdev->streg_cbdata);
- return -EAGAIN;
- }
-
- ret = 0;
- } else if (ret < 0) {
- fmerr("st_register failed %d\n", ret);
- return -EAGAIN;
- }
-
- if (fm_st_proto.write != NULL) {
- g_st_write = fm_st_proto.write;
- } else {
- fmerr("Failed to get ST write func pointer\n");
- ret = st_unregister(&fm_st_proto);
- if (ret < 0)
- fmerr("st_unregister failed %d\n", ret);
- return -EAGAIN;
- }
-
- spin_lock_init(&fmdev->rds_buff_lock);
- spin_lock_init(&fmdev->resp_skb_lock);
-
- /* Initialize TX queue and TX bh work */
- skb_queue_head_init(&fmdev->tx_q);
- INIT_WORK(&fmdev->tx_bh_work, send_bh_work);
-
- /* Initialize RX Queue and RX bh work */
- skb_queue_head_init(&fmdev->rx_q);
- INIT_WORK(&fmdev->rx_bh_work, recv_bh_work);
-
- fmdev->irq_info.stage = 0;
- atomic_set(&fmdev->tx_cnt, 1);
- fmdev->resp_comp = NULL;
-
- timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
- /* TODO: add FM_STIC_EVENT later */
- fmdev->irq_info.mask = FM_MAL_EVENT;
-
- /* Region info */
- fmdev->rx.region = region_configs[default_radio_region];
-
- fmdev->rx.mute_mode = FM_MUTE_OFF;
- fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- fmdev->rx.freq = FM_UNDEFINED_FREQ;
- fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
- fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
- fmdev->irq_info.retry = 0;
-
- fm_rx_reset_rds_cache(fmdev);
- init_waitqueue_head(&fmdev->rx.rds.read_queue);
-
- fm_rx_reset_station_info(fmdev);
- set_bit(FM_CORE_READY, &fmdev->flag);
-
- return ret;
-}
-
-/*
- * This function will be called from FM V4L2 release function.
- * Unregister from ST driver.
- */
-int fmc_release(struct fmdev *fmdev)
-{
- static struct st_proto_s fm_st_proto;
- int ret;
-
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmdbg("FM Core is already down\n");
- return 0;
- }
- /* Service pending read */
- wake_up_interruptible(&fmdev->rx.rds.read_queue);
-
- cancel_work_sync(&fmdev->tx_bh_work);
- cancel_work_sync(&fmdev->rx_bh_work);
-
- skb_queue_purge(&fmdev->tx_q);
- skb_queue_purge(&fmdev->rx_q);
-
- fmdev->resp_comp = NULL;
- fmdev->rx.freq = 0;
-
- memset(&fm_st_proto, 0, sizeof(fm_st_proto));
- fm_st_proto.chnl_id = 0x08;
-
- ret = st_unregister(&fm_st_proto);
-
- if (ret < 0)
- fmerr("Failed to de-register FM from ST %d\n", ret);
- else
- fmdbg("Successfully unregistered from ST\n");
-
- clear_bit(FM_CORE_READY, &fmdev->flag);
- return ret;
-}
-
-/*
- * Module init function. Ask FM V4L module to register video device.
- * Allocate memory for FM driver context and RX RDS buffer.
- */
-static int __init fm_drv_init(void)
-{
- struct fmdev *fmdev = NULL;
- int ret = -ENOMEM;
-
- fmdbg("FM driver version %s\n", FM_DRV_VERSION);
-
- fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
- if (!fmdev) {
- fmerr("Can't allocate operation structure memory\n");
- return ret;
- }
- fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
- fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
- if (!fmdev->rx.rds.buff) {
- fmerr("Can't allocate rds ring buffer\n");
- goto rel_dev;
- }
-
- ret = fm_v4l2_init_video_device(fmdev, radio_nr);
- if (ret < 0)
- goto rel_rdsbuf;
-
- fmdev->irq_info.handlers = int_handler_table;
- fmdev->curr_fmmode = FM_MODE_OFF;
- fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
- fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
- return ret;
-
-rel_rdsbuf:
- kfree(fmdev->rx.rds.buff);
-rel_dev:
- kfree(fmdev);
-
- return ret;
-}
-
-/* Module exit function. Ask FM V4L module to unregister video device */
-static void __exit fm_drv_exit(void)
-{
- struct fmdev *fmdev = NULL;
-
- fmdev = fm_v4l2_deinit_video_device();
- if (fmdev != NULL) {
- kfree(fmdev->rx.rds.buff);
- kfree(fmdev);
- }
-}
-
-module_init(fm_drv_init);
-module_exit(fm_drv_exit);
-
-/* ------------- Module Info ------------- */
-MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
-MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
-MODULE_VERSION(FM_DRV_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/wl128x/fmdrv_common.h b/drivers/media/radio/wl128x/fmdrv_common.h
deleted file mode 100644
index 6a287eadae75..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_common.h
+++ /dev/null
@@ -1,389 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM Common module header file
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_COMMON_H
-#define _FMDRV_COMMON_H
-
-#define FM_ST_REG_TIMEOUT msecs_to_jiffies(6000) /* 6 sec */
-#define FM_PKT_LOGICAL_CHAN_NUMBER 0x08 /* Logical channel 8 */
-
-#define REG_RD 0x1
-#define REG_WR 0x0
-
-struct fm_reg_table {
- u8 opcode;
- u8 type;
- u8 *name;
-};
-
-#define STEREO_GET 0
-#define RSSI_LVL_GET 1
-#define IF_COUNT_GET 2
-#define FLAG_GET 3
-#define RDS_SYNC_GET 4
-#define RDS_DATA_GET 5
-#define FREQ_SET 10
-#define AF_FREQ_SET 11
-#define MOST_MODE_SET 12
-#define MOST_BLEND_SET 13
-#define DEMPH_MODE_SET 14
-#define SEARCH_LVL_SET 15
-#define BAND_SET 16
-#define MUTE_STATUS_SET 17
-#define RDS_PAUSE_LVL_SET 18
-#define RDS_PAUSE_DUR_SET 19
-#define RDS_MEM_SET 20
-#define RDS_BLK_B_SET 21
-#define RDS_MSK_B_SET 22
-#define RDS_PI_MASK_SET 23
-#define RDS_PI_SET 24
-#define RDS_SYSTEM_SET 25
-#define INT_MASK_SET 26
-#define SEARCH_DIR_SET 27
-#define VOLUME_SET 28
-#define AUDIO_ENABLE_SET 29
-#define PCM_MODE_SET 30
-#define I2S_MODE_CONFIG_SET 31
-#define POWER_SET 32
-#define INTX_CONFIG_SET 33
-#define PULL_EN_SET 34
-#define HILO_SET 35
-#define SWITCH2FREF 36
-#define FREQ_DRIFT_REPORT 37
-
-#define PCE_GET 40
-#define FIRM_VER_GET 41
-#define ASIC_VER_GET 42
-#define ASIC_ID_GET 43
-#define MAN_ID_GET 44
-#define TUNER_MODE_SET 45
-#define STOP_SEARCH 46
-#define RDS_CNTRL_SET 47
-
-#define WRITE_HARDWARE_REG 100
-#define CODE_DOWNLOAD 101
-#define RESET 102
-
-#define FM_POWER_MODE 254
-#define FM_INTERRUPT 255
-
-/* Transmitter API */
-
-#define CHANL_SET 55
-#define CHANL_BW_SET 56
-#define REF_SET 57
-#define POWER_ENB_SET 90
-#define POWER_ATT_SET 58
-#define POWER_LEV_SET 59
-#define AUDIO_DEV_SET 60
-#define PILOT_DEV_SET 61
-#define RDS_DEV_SET 62
-#define TX_BAND_SET 65
-#define PUPD_SET 91
-#define AUDIO_IO_SET 63
-#define PREMPH_SET 64
-#define MONO_SET 66
-#define MUTE 92
-#define MPX_LMT_ENABLE 67
-#define PI_SET 93
-#define ECC_SET 69
-#define PTY 70
-#define AF 71
-#define DISPLAY_MODE 74
-#define RDS_REP_SET 77
-#define RDS_CONFIG_DATA_SET 98
-#define RDS_DATA_SET 99
-#define RDS_DATA_ENB 94
-#define TA_SET 78
-#define TP_SET 79
-#define DI_SET 80
-#define MS_SET 81
-#define PS_SCROLL_SPEED 82
-#define TX_AUDIO_LEVEL_TEST 96
-#define TX_AUDIO_LEVEL_TEST_THRESHOLD 73
-#define TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
-#define RX_ANTENNA_SELECT 87
-#define I2C_DEV_ADDR_SET 86
-#define REF_ERR_CALIB_PARAM_SET 88
-#define REF_ERR_CALIB_PERIODICITY_SET 89
-#define SOC_INT_TRIGGER 52
-#define SOC_AUDIO_PATH_SET 83
-#define SOC_PCMI_OVERRIDE 84
-#define SOC_I2S_OVERRIDE 85
-#define RSSI_BLOCK_SCAN_FREQ_SET 95
-#define RSSI_BLOCK_SCAN_START 97
-#define RSSI_BLOCK_SCAN_DATA_GET 5
-#define READ_FMANT_TUNE_VALUE 104
-
-/* SKB helpers */
-struct fm_skb_cb {
- __u8 fm_op;
- struct completion *completion;
-};
-
-#define fm_cb(skb) ((struct fm_skb_cb *)(skb->cb))
-
-/* FM Channel-8 command message format */
-struct fm_cmd_msg_hdr {
- __u8 hdr; /* Logical Channel-8 */
- __u8 len; /* Number of bytes follows */
- __u8 op; /* FM Opcode */
- __u8 rd_wr; /* Read/Write command */
- __u8 dlen; /* Length of payload */
-} __attribute__ ((packed));
-
-#define FM_CMD_MSG_HDR_SIZE 5 /* sizeof(struct fm_cmd_msg_hdr) */
-
-/* FM Channel-8 event message format */
-struct fm_event_msg_hdr {
- __u8 header; /* Logical Channel-8 */
- __u8 len; /* Number of bytes follows */
- __u8 status; /* Event status */
- __u8 num_fm_hci_cmds; /* Number of pkts the host allowed to send */
- __u8 op; /* FM Opcode */
- __u8 rd_wr; /* Read/Write command */
- __u8 dlen; /* Length of payload */
-} __attribute__ ((packed));
-
-#define FM_EVT_MSG_HDR_SIZE 7 /* sizeof(struct fm_event_msg_hdr) */
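The two manifest size constants can be checked against the packed layouts at
compile time. An illustrative C11 check (in-kernel code would use
static_assert() or BUILD_BUG_ON() instead):

	_Static_assert(sizeof(struct fm_cmd_msg_hdr) == FM_CMD_MSG_HDR_SIZE,
		       "command header must be 5 bytes on the wire");
	_Static_assert(sizeof(struct fm_event_msg_hdr) == FM_EVT_MSG_HDR_SIZE,
		       "event header must be 7 bytes on the wire");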
-
-/* TI's magic number in firmware file */
-#define FM_FW_FILE_HEADER_MAGIC 0x42535442
-
-#define FM_ENABLE 1
-#define FM_DISABLE 0
-
-/* FLAG_GET register bits */
-#define FM_FR_EVENT BIT(0)
-#define FM_BL_EVENT BIT(1)
-#define FM_RDS_EVENT BIT(2)
-#define FM_BBLK_EVENT BIT(3)
-#define FM_LSYNC_EVENT BIT(4)
-#define FM_LEV_EVENT BIT(5)
-#define FM_IFFR_EVENT BIT(6)
-#define FM_PI_EVENT BIT(7)
-#define FM_PD_EVENT BIT(8)
-#define FM_STIC_EVENT BIT(9)
-#define FM_MAL_EVENT BIT(10)
-#define FM_POW_ENB_EVENT BIT(11)
-
-/*
- * Firmware files of FM. ASIC ID and ASIC version will be appended to these
- * names later.
- */
-#define FM_FMC_FW_FILE_START ("fmc_ch8")
-#define FM_RX_FW_FILE_START ("fm_rx_ch8")
-#define FM_TX_FW_FILE_START ("fm_tx_ch8")
-
-#define FM_UNDEFINED_FREQ 0xFFFFFFFF
-
-/* Band types */
-#define FM_BAND_EUROPE_US 0
-#define FM_BAND_JAPAN 1
-
-/* Seek directions */
-#define FM_SEARCH_DIRECTION_DOWN 0
-#define FM_SEARCH_DIRECTION_UP 1
-
-/* Tuner modes */
-#define FM_TUNER_STOP_SEARCH_MODE 0
-#define FM_TUNER_PRESET_MODE 1
-#define FM_TUNER_AUTONOMOUS_SEARCH_MODE 2
-#define FM_TUNER_AF_JUMP_MODE 3
-
-/* Min and Max volume */
-#define FM_RX_VOLUME_MIN 0
-#define FM_RX_VOLUME_MAX 70
-
-/* Volume gain step */
-#define FM_RX_VOLUME_GAIN_STEP 0x370
-
-/* Mute modes */
-#define FM_MUTE_ON 0
-#define FM_MUTE_OFF 1
-#define FM_MUTE_ATTENUATE 2
-
-#define FM_RX_UNMUTE_MODE 0x00
-#define FM_RX_RF_DEP_MODE 0x01
-#define FM_RX_AC_MUTE_MODE 0x02
-#define FM_RX_HARD_MUTE_LEFT_MODE 0x04
-#define FM_RX_HARD_MUTE_RIGHT_MODE 0x08
-#define FM_RX_SOFT_MUTE_FORCE_MODE 0x10
-
-/* RF dependent mute mode */
-#define FM_RX_RF_DEPENDENT_MUTE_ON 1
-#define FM_RX_RF_DEPENDENT_MUTE_OFF 0
-
-/* RSSI threshold min and max */
-#define FM_RX_RSSI_THRESHOLD_MIN -128
-#define FM_RX_RSSI_THRESHOLD_MAX 127
-
-/* Stereo/Mono mode */
-#define FM_STEREO_MODE 0
-#define FM_MONO_MODE 1
-#define FM_STEREO_SOFT_BLEND 1
-
-/* FM RX De-emphasis filter modes */
-#define FM_RX_EMPHASIS_FILTER_50_USEC 0
-#define FM_RX_EMPHASIS_FILTER_75_USEC 1
-
-/* FM RDS modes */
-#define FM_RDS_DISABLE 0
-#define FM_RDS_ENABLE 1
-
-#define FM_NO_PI_CODE 0
-
-/* FM and RX RDS block enable/disable */
-#define FM_RX_PWR_SET_FM_ON_RDS_OFF 0x1
-#define FM_RX_PWR_SET_FM_AND_RDS_BLK_ON 0x3
-#define FM_RX_PWR_SET_FM_AND_RDS_BLK_OFF 0x0
-
-/* RX RDS */
-#define FM_RX_RDS_FLUSH_FIFO 0x1
-#define FM_RX_RDS_FIFO_THRESHOLD 64 /* tuples */
-#define FM_RDS_BLK_SIZE 3 /* 3 bytes */
-
-/* RDS block types */
-#define FM_RDS_BLOCK_A 0
-#define FM_RDS_BLOCK_B 1
-#define FM_RDS_BLOCK_C 2
-#define FM_RDS_BLOCK_Ctag 3
-#define FM_RDS_BLOCK_D 4
-#define FM_RDS_BLOCK_E 5
-
-#define FM_RDS_BLK_IDX_A 0
-#define FM_RDS_BLK_IDX_B 1
-#define FM_RDS_BLK_IDX_C 2
-#define FM_RDS_BLK_IDX_D 3
-#define FM_RDS_BLK_IDX_UNKNOWN 0xF0
-
-#define FM_RDS_STATUS_ERR_MASK 0x18
-
-/*
- * Represents an RDS group type & version.
- * There are 15 groups, each group has 2 versions: A and B.
- */
-#define FM_RDS_GROUP_TYPE_MASK_0A BIT(0)
-#define FM_RDS_GROUP_TYPE_MASK_0B BIT(1)
-#define FM_RDS_GROUP_TYPE_MASK_1A BIT(2)
-#define FM_RDS_GROUP_TYPE_MASK_1B BIT(3)
-#define FM_RDS_GROUP_TYPE_MASK_2A BIT(4)
-#define FM_RDS_GROUP_TYPE_MASK_2B BIT(5)
-#define FM_RDS_GROUP_TYPE_MASK_3A BIT(6)
-#define FM_RDS_GROUP_TYPE_MASK_3B BIT(7)
-#define FM_RDS_GROUP_TYPE_MASK_4A BIT(8)
-#define FM_RDS_GROUP_TYPE_MASK_4B BIT(9)
-#define FM_RDS_GROUP_TYPE_MASK_5A BIT(10)
-#define FM_RDS_GROUP_TYPE_MASK_5B BIT(11)
-#define FM_RDS_GROUP_TYPE_MASK_6A BIT(12)
-#define FM_RDS_GROUP_TYPE_MASK_6B BIT(13)
-#define FM_RDS_GROUP_TYPE_MASK_7A BIT(14)
-#define FM_RDS_GROUP_TYPE_MASK_7B BIT(15)
-#define FM_RDS_GROUP_TYPE_MASK_8A BIT(16)
-#define FM_RDS_GROUP_TYPE_MASK_8B BIT(17)
-#define FM_RDS_GROUP_TYPE_MASK_9A BIT(18)
-#define FM_RDS_GROUP_TYPE_MASK_9B BIT(19)
-#define FM_RDS_GROUP_TYPE_MASK_10A BIT(20)
-#define FM_RDS_GROUP_TYPE_MASK_10B BIT(21)
-#define FM_RDS_GROUP_TYPE_MASK_11A BIT(22)
-#define FM_RDS_GROUP_TYPE_MASK_11B BIT(23)
-#define FM_RDS_GROUP_TYPE_MASK_12A BIT(24)
-#define FM_RDS_GROUP_TYPE_MASK_12B BIT(25)
-#define FM_RDS_GROUP_TYPE_MASK_13A BIT(26)
-#define FM_RDS_GROUP_TYPE_MASK_13B BIT(27)
-#define FM_RDS_GROUP_TYPE_MASK_14A BIT(28)
-#define FM_RDS_GROUP_TYPE_MASK_14B BIT(29)
-#define FM_RDS_GROUP_TYPE_MASK_15A BIT(30)
-#define FM_RDS_GROUP_TYPE_MASK_15B BIT(31)
-
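The 32 masks above follow one rule: bit index = group * 2 + version, with version 0 for A and 1 for B. A hypothetical helper (a sketch only; the name is not from the driver) makes the layout explicit:

/* Sketch: group is 0-15, version is 0 (A) or 1 (B). E.g. group 2,
 * version A -> BIT(2 * 2 + 0) == BIT(4) == FM_RDS_GROUP_TYPE_MASK_2A. */
static inline u32 fm_rds_group_type_mask(unsigned int group, unsigned int version)
{
	return BIT(group * 2 + version);
}
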
-/* RX Alternate Frequency info */
-#define FM_RDS_MIN_AF 1
-#define FM_RDS_MAX_AF 204
-#define FM_RDS_MAX_AF_JAPAN 140
-#define FM_RDS_1_AF_FOLLOWS 225
-#define FM_RDS_25_AF_FOLLOWS 249
-
-/* RDS system type (RDS/RBDS) */
-#define FM_RDS_SYSTEM_RDS 0
-#define FM_RDS_SYSTEM_RBDS 1
-
-/* AF on/off */
-#define FM_RX_RDS_AF_SWITCH_MODE_ON 1
-#define FM_RX_RDS_AF_SWITCH_MODE_OFF 0
-
-/* Retry count when interrupt processing goes wrong */
-#define FM_IRQ_TIMEOUT_RETRY_MAX 5 /* 5 times */
-
-/* Audio IO set values */
-#define FM_RX_AUDIO_ENABLE_I2S 0x01
-#define FM_RX_AUDIO_ENABLE_ANALOG 0x02
-#define FM_RX_AUDIO_ENABLE_I2S_AND_ANALOG 0x03
-#define FM_RX_AUDIO_ENABLE_DISABLE 0x00
-
-/* HI/LO set values */
-#define FM_RX_IFFREQ_TO_HI_SIDE 0x0
-#define FM_RX_IFFREQ_TO_LO_SIDE 0x1
-#define FM_RX_IFFREQ_HILO_AUTOMATIC 0x2
-
-/*
- * Default RX mode configuration. The chip is configured
- * with these default values after loading the RX firmware.
- */
-#define FM_DEFAULT_RX_VOLUME 10
-#define FM_DEFAULT_RSSI_THRESHOLD 3
-
-/* Range for TX power level in units of dB/uV */
-#define FM_PWR_LVL_LOW 91
-#define FM_PWR_LVL_HIGH 122
-
-/* Chip specific default TX power level value */
-#define FM_PWR_LVL_DEF 4
-
-/* FM TX Pre-emphasis filter values */
-#define FM_TX_PREEMPH_OFF 1
-#define FM_TX_PREEMPH_50US 0
-#define FM_TX_PREEMPH_75US 2
-
-/* FM TX antenna impedance values */
-#define FM_TX_ANT_IMP_50 0
-#define FM_TX_ANT_IMP_200 1
-#define FM_TX_ANT_IMP_500 2
-
-/* Functions exported by FM common sub-module */
-int fmc_prepare(struct fmdev *);
-int fmc_release(struct fmdev *);
-
-void fmc_update_region_info(struct fmdev *, u8);
-int fmc_send_cmd(struct fmdev *, u8, u16,
- void *, unsigned int, void *, int *);
-int fmc_is_rds_data_available(struct fmdev *, struct file *,
- struct poll_table_struct *);
-int fmc_transfer_rds_from_internal_buff(struct fmdev *, struct file *,
- u8 __user *, size_t);
-
-int fmc_set_freq(struct fmdev *, u32);
-int fmc_set_mode(struct fmdev *, u8);
-int fmc_set_region(struct fmdev *, u8);
-int fmc_set_mute_mode(struct fmdev *, u8);
-int fmc_set_stereo_mono(struct fmdev *, u16);
-int fmc_set_rds_mode(struct fmdev *, u8);
-
-int fmc_get_freq(struct fmdev *, u32 *);
-int fmc_get_region(struct fmdev *, u8 *);
-int fmc_get_mode(struct fmdev *, u8 *);
-
-/*
- * channel spacing
- */
-#define FM_CHANNEL_SPACING_50KHZ 1
-#define FM_CHANNEL_SPACING_100KHZ 2
-#define FM_CHANNEL_SPACING_200KHZ 4
-#define FM_FREQ_MUL 50
-
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
deleted file mode 100644
index 419cf2e03bcf..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This sub-module of FM driver implements FM RX functionality.
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include "fmdrv.h"
-#include "fmdrv_common.h"
-#include "fmdrv_rx.h"
-
-void fm_rx_reset_rds_cache(struct fmdev *fmdev)
-{
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- fmdev->rx.rds.last_blk_idx = 0;
- fmdev->rx.rds.wr_idx = 0;
- fmdev->rx.rds.rd_idx = 0;
-
- if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
-}
-
-void fm_rx_reset_station_info(struct fmdev *fmdev)
-{
- fmdev->rx.stat_info.picode = FM_NO_PI_CODE;
- fmdev->rx.stat_info.afcache_size = 0;
- fmdev->rx.stat_info.af_list_max = 0;
-}
-
-int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
-{
- unsigned long timeleft;
- u16 payload, curr_frq, intr_flag;
- u32 curr_frq_in_khz;
- u32 resp_len;
- int ret;
-
- if (freq < fmdev->rx.region.bot_freq || freq > fmdev->rx.region.top_freq) {
- fmerr("Invalid frequency %d\n", freq);
- return -EINVAL;
- }
-
- /* Set audio enable */
- payload = FM_RX_AUDIO_ENABLE_I2S_AND_ANALOG;
-
- ret = fmc_send_cmd(fmdev, AUDIO_ENABLE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set hilo to automatic selection */
- payload = FM_RX_IFFREQ_HILO_AUTOMATIC;
- ret = fmc_send_cmd(fmdev, HILO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Calculate the frequency index and set it */
- payload = (freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts if we had */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable FR, BL interrupts */
- intr_flag = fmdev->irq_info.mask;
- fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Start tune */
- payload = FM_TUNER_PRESET_MODE;
- ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- goto exit;
-
- /* Wait for tune ended interrupt */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended int\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- ret = -ETIMEDOUT;
- goto exit;
- }
-
- /* Read freq back to confirm */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2, &curr_frq, &resp_len);
- if (ret < 0)
- goto exit;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
-
- if (curr_frq_in_khz != freq) {
- pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
- curr_frq_in_khz, freq);
- }
-
- /* Update local cache */
- fmdev->rx.freq = curr_frq_in_khz;
-exit:
- /* Re-enable default FM interrupts */
- fmdev->irq_info.mask = intr_flag;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Reset RDS cache and current station pointers */
- fm_rx_reset_rds_cache(fmdev);
- fm_rx_reset_station_info(fmdev);
-
- return ret;
-}
-
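For reference, the frequency handling in fm_rx_set_freq() reduces to a round trip between kHz and a channel index counted in FM_FREQ_MUL (50 kHz) steps from the band's bottom frequency. A minimal sketch with hypothetical helper names:

/* E.g. 98000 kHz in an 87500-108000 kHz band:
 * (98000 - 87500) / 50 = index 210, and 87500 + 210 * 50 = 98000 again. */
static u16 fm_khz_to_index(u32 freq_khz, u32 bot_freq_khz)
{
	return (freq_khz - bot_freq_khz) / FM_FREQ_MUL;
}

static u32 fm_index_to_khz(u16 idx, u32 bot_freq_khz)
{
	return bot_freq_khz + (u32)idx * FM_FREQ_MUL;
}
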
-static int fm_rx_set_channel_spacing(struct fmdev *fmdev, u32 spacing)
-{
- u16 payload;
- int ret;
-
- if (spacing > 0 && spacing <= 50000)
- spacing = FM_CHANNEL_SPACING_50KHZ;
- else if (spacing > 50000 && spacing <= 100000)
- spacing = FM_CHANNEL_SPACING_100KHZ;
- else
- spacing = FM_CHANNEL_SPACING_200KHZ;
-
- /* set channel spacing */
- payload = spacing;
- ret = fmc_send_cmd(fmdev, CHANL_BW_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.region.chanl_space = spacing * FM_FREQ_MUL;
-
- return ret;
-}
-
-int fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
- u32 wrap_around, u32 spacing)
-{
- u32 resp_len;
- u16 curr_frq, next_frq, last_frq;
- u16 payload, int_reason, intr_flag;
- u16 offset, space_idx;
- unsigned long timeleft;
- int ret;
-
- /* Set channel spacing */
- ret = fm_rx_set_channel_spacing(fmdev, spacing);
- if (ret < 0) {
- fmerr("Failed to set channel spacing\n");
- return ret;
- }
-
- /* Read the current frequency from chip */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL,
- sizeof(curr_frq), &curr_frq, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- last_frq = (fmdev->rx.region.top_freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- /* Check the offset in order to be aligned to the channel spacing */
- space_idx = fmdev->rx.region.chanl_space / FM_FREQ_MUL;
- offset = curr_frq % space_idx;
-
- next_frq = seek_upward ? curr_frq + space_idx /* Seek Up */ :
- curr_frq - space_idx /* Seek Down */;
-
- /*
- * Add or subtract offset in order to stay aligned to the channel
- * spacing.
- */
- if ((short)next_frq < 0)
- next_frq = last_frq - offset;
- else if (next_frq > last_frq)
- next_frq = 0 + offset;
-
-again:
- /* Set calculated next frequency to perform seek */
- payload = next_frq;
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set search direction (0:Seek Down, 1:Seek Up) */
- payload = (seek_upward ? FM_SEARCH_DIRECTION_UP : FM_SEARCH_DIRECTION_DOWN);
- ret = fmc_send_cmd(fmdev, SEARCH_DIR_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts if we had */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable FR, BL interrupts */
- intr_flag = fmdev->irq_info.mask;
- fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Start seek */
- payload = FM_TUNER_AUTONOMOUS_SEARCH_MODE;
- ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Wait for tune ended/band limit reached interrupt */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_RX_SEEK_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended int\n",
- jiffies_to_msecs(FM_DRV_RX_SEEK_TIMEOUT) / 1000);
- return -ENODATA;
- }
-
- int_reason = fmdev->irq_info.flag & (FM_TUNE_COMPLETE | FM_BAND_LIMIT);
-
- /* Re-enable default FM interrupts */
- fmdev->irq_info.mask = intr_flag;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- if (int_reason & FM_BL_EVENT) {
- if (wrap_around == 0) {
- fmdev->rx.freq = seek_upward ?
- fmdev->rx.region.top_freq :
- fmdev->rx.region.bot_freq;
- } else {
- fmdev->rx.freq = seek_upward ?
- fmdev->rx.region.bot_freq :
- fmdev->rx.region.top_freq;
- /* Calculate frequency index to write */
- next_frq = (fmdev->rx.freq -
- fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
- goto again;
- }
- } else {
- /* Read freq to know where the tune operation stopped */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2,
- &curr_frq, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- fmdev->rx.freq = (fmdev->rx.region.bot_freq +
- ((u32)curr_frq * FM_FREQ_MUL));
-
- }
- /* Reset RDS cache and current station pointers */
- fm_rx_reset_rds_cache(fmdev);
- fm_rx_reset_station_info(fmdev);
-
- return ret;
-}
-
-int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (vol_to_set > FM_RX_VOLUME_MAX) {
- fmerr("Volume is not within(%d-%d) range\n",
- FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX);
- return -EINVAL;
- }
- vol_to_set *= FM_RX_VOLUME_GAIN_STEP;
-
- payload = vol_to_set;
- ret = fmc_send_cmd(fmdev, VOLUME_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.volume = vol_to_set;
- return ret;
-}
-
-/* Get volume */
-int fm_rx_get_volume(struct fmdev *fmdev, u16 *curr_vol)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_vol == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_vol = fmdev->rx.volume / FM_RX_VOLUME_GAIN_STEP;
-
- return 0;
-}
-
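Note that the volume path caches the register value rather than the user value: fm_rx_set_volume() multiplies by FM_RX_VOLUME_GAIN_STEP (0x370) before writing and caching, and fm_rx_get_volume() divides it back out. A sketch of the round trip (hypothetical helper names):

/* User range is 0-70. E.g. 10 -> 10 * 0x370 = 0x2260 in the register,
 * and 0x2260 / 0x370 -> 10 on the way back. */
static u16 fm_vol_user_to_reg(u16 user_vol)
{
	return user_vol * FM_RX_VOLUME_GAIN_STEP;
}

static u16 fm_vol_reg_to_user(u16 reg_val)
{
	return reg_val / FM_RX_VOLUME_GAIN_STEP;
}
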
-/* To get current band's bottom and top frequency */
-int fm_rx_get_band_freq_range(struct fmdev *fmdev, u32 *bot_freq, u32 *top_freq)
-{
- if (bot_freq != NULL)
- *bot_freq = fmdev->rx.region.bot_freq;
-
- if (top_freq != NULL)
- *top_freq = fmdev->rx.region.top_freq;
-
- return 0;
-}
-
-/* Returns current band index (0-Europe/US; 1-Japan) */
-void fm_rx_get_region(struct fmdev *fmdev, u8 *region)
-{
- *region = fmdev->rx.region.fm_band;
-}
-
-/* Sets band (0-Europe/US; 1-Japan) */
-int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
-{
- u16 payload;
- u32 new_frq = 0;
- int ret;
-
- if (region_to_set != FM_BAND_EUROPE_US &&
- region_to_set != FM_BAND_JAPAN) {
- fmerr("Invalid band\n");
- return -EINVAL;
- }
-
- if (fmdev->rx.region.fm_band == region_to_set) {
- fmerr("Requested band is already configured\n");
- return 0;
- }
-
- /* Send cmd to set the band */
- payload = (u16)region_to_set;
- ret = fmc_send_cmd(fmdev, BAND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmc_update_region_info(fmdev, region_to_set);
-
- /* Check whether current RX frequency is within band boundary */
- if (fmdev->rx.freq < fmdev->rx.region.bot_freq)
- new_frq = fmdev->rx.region.bot_freq;
- else if (fmdev->rx.freq > fmdev->rx.region.top_freq)
- new_frq = fmdev->rx.region.top_freq;
-
- if (new_frq) {
- fmdbg("Current freq is not within band limit boundary,switching to %d KHz\n",
- new_frq);
- /* Current RX frequency is not in range. So, update it */
- ret = fm_rx_set_freq(fmdev, new_frq);
- }
-
- return ret;
-}
-
-/* Reads current mute mode (Mute Off/On/Attenuate)*/
-int fm_rx_get_mute_mode(struct fmdev *fmdev, u8 *curr_mute_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_mute_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_mute_mode = fmdev->rx.mute_mode;
-
- return 0;
-}
-
-static int fm_config_rx_mute_reg(struct fmdev *fmdev)
-{
- u16 payload, muteval;
- int ret;
-
- muteval = 0;
- switch (fmdev->rx.mute_mode) {
- case FM_MUTE_ON:
- muteval = FM_RX_AC_MUTE_MODE;
- break;
-
- case FM_MUTE_OFF:
- muteval = FM_RX_UNMUTE_MODE;
- break;
-
- case FM_MUTE_ATTENUATE:
- muteval = FM_RX_SOFT_MUTE_FORCE_MODE;
- break;
- }
- if (fmdev->rx.rf_depend_mute == FM_RX_RF_DEPENDENT_MUTE_ON)
- muteval |= FM_RX_RF_DEP_MODE;
- else
- muteval &= ~FM_RX_RF_DEP_MODE;
-
- payload = muteval;
- ret = fmc_send_cmd(fmdev, MUTE_STATUS_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Configures mute mode (Mute Off/On/Attenuate) */
-int fm_rx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- u8 org_state;
- int ret;
-
- if (fmdev->rx.mute_mode == mute_mode_toset)
- return 0;
-
- org_state = fmdev->rx.mute_mode;
- fmdev->rx.mute_mode = mute_mode_toset;
-
- ret = fm_config_rx_mute_reg(fmdev);
- if (ret < 0) {
- fmdev->rx.mute_mode = org_state;
- return ret;
- }
-
- return 0;
-}
-
-/* Gets RF dependent soft mute mode enable/disable status */
-int fm_rx_get_rfdepend_softmute(struct fmdev *fmdev, u8 *curr_mute_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_mute_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_mute_mode = fmdev->rx.rf_depend_mute;
-
- return 0;
-}
-
-/* Sets RF dependent soft mute mode */
-int fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
-{
- u8 org_state;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (rfdepend_mute != FM_RX_RF_DEPENDENT_MUTE_ON &&
- rfdepend_mute != FM_RX_RF_DEPENDENT_MUTE_OFF) {
- fmerr("Invalid RF dependent soft mute\n");
- return -EINVAL;
- }
- if (fmdev->rx.rf_depend_mute == rfdepend_mute)
- return 0;
-
- org_state = fmdev->rx.rf_depend_mute;
- fmdev->rx.rf_depend_mute = rfdepend_mute;
-
- ret = fm_config_rx_mute_reg(fmdev);
- if (ret < 0) {
- fmdev->rx.rf_depend_mute = org_state;
- return ret;
- }
-
- return 0;
-}
-
-/* Returns the signal strength level of current channel */
-int fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
-{
- __be16 curr_rssi_lel;
- u32 resp_len;
- int ret;
-
- if (rssilvl == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
- /* Read current RSSI level */
- ret = fmc_send_cmd(fmdev, RSSI_LVL_GET, REG_RD, NULL, 2,
- &curr_rssi_lel, &resp_len);
- if (ret < 0)
- return ret;
-
- *rssilvl = be16_to_cpu(curr_rssi_lel);
-
- return 0;
-}
-
-/*
- * Sets the signal strength level at which the
- * auto search process stops
- */
-int fm_rx_set_rssi_threshold(struct fmdev *fmdev, short rssi_lvl_toset)
-{
- u16 payload;
- int ret;
-
- if (rssi_lvl_toset < FM_RX_RSSI_THRESHOLD_MIN ||
- rssi_lvl_toset > FM_RX_RSSI_THRESHOLD_MAX) {
- fmerr("Invalid RSSI threshold level\n");
- return -EINVAL;
- }
- payload = (u16)rssi_lvl_toset;
- ret = fmc_send_cmd(fmdev, SEARCH_LVL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.rssi_threshold = rssi_lvl_toset;
-
- return 0;
-}
-
-/* Returns current RX RSSI threshold value */
-int fm_rx_get_rssi_threshold(struct fmdev *fmdev, short *curr_rssi_lvl)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_rssi_lvl == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_rssi_lvl = fmdev->rx.rssi_threshold;
-
- return 0;
-}
-
-/* Sets RX stereo/mono modes */
-int fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (mode != FM_STEREO_MODE && mode != FM_MONO_MODE) {
- fmerr("Invalid mode\n");
- return -EINVAL;
- }
-
- /* Set stereo/mono mode */
- payload = (u16)mode;
- ret = fmc_send_cmd(fmdev, MOST_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set stereo blending mode */
- payload = FM_STEREO_SOFT_BLEND;
- ret = fmc_send_cmd(fmdev, MOST_BLEND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Gets current RX stereo/mono mode */
-int fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
-{
- __be16 curr_mode;
- u32 resp_len;
- int ret;
-
- if (mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- ret = fmc_send_cmd(fmdev, MOST_MODE_SET, REG_RD, NULL, 2,
- &curr_mode, &resp_len);
- if (ret < 0)
- return ret;
-
- *mode = be16_to_cpu(curr_mode);
-
- return 0;
-}
-
-/* Choose RX de-emphasis filter mode (50us/75us) */
-int fm_rx_set_deemphasis_mode(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (mode != FM_RX_EMPHASIS_FILTER_50_USEC &&
- mode != FM_RX_EMPHASIS_FILTER_75_USEC) {
- fmerr("Invalid rx de-emphasis mode (%d)\n", mode);
- return -EINVAL;
- }
-
- payload = mode;
- ret = fmc_send_cmd(fmdev, DEMPH_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.deemphasis_mode = mode;
-
- return 0;
-}
-
-/* Gets current RX de-emphasis filter mode */
-int fm_rx_get_deemph_mode(struct fmdev *fmdev, u16 *curr_deemphasis_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_deemphasis_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_deemphasis_mode = fmdev->rx.deemphasis_mode;
-
- return 0;
-}
-
-/* Enable/Disable RX RDS */
-int fm_rx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- u16 payload;
- int ret;
-
- if (rds_en_dis != FM_RDS_ENABLE && rds_en_dis != FM_RDS_DISABLE) {
- fmerr("Invalid rds option\n");
- return -EINVAL;
- }
-
- if (rds_en_dis == FM_RDS_ENABLE
- && fmdev->rx.rds.flag == FM_RDS_DISABLE) {
- /* Turn on RX RDS and RDS circuit */
- payload = FM_RX_PWR_SET_FM_AND_RDS_BLK_ON;
- ret = fmc_send_cmd(fmdev, POWER_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Clear and reset RDS FIFO */
- payload = FM_RX_RDS_FLUSH_FIFO;
- ret = fmc_send_cmd(fmdev, RDS_CNTRL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts. */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2,
- NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set RDS FIFO threshold value */
- payload = FM_RX_RDS_FIFO_THRESHOLD;
- ret = fmc_send_cmd(fmdev, RDS_MEM_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable RDS interrupt */
- fmdev->irq_info.mask |= FM_RDS_EVENT;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0) {
- fmdev->irq_info.mask &= ~FM_RDS_EVENT;
- return ret;
- }
-
- /* Update our local flag */
- fmdev->rx.rds.flag = FM_RDS_ENABLE;
- } else if (rds_en_dis == FM_RDS_DISABLE
- && fmdev->rx.rds.flag == FM_RDS_ENABLE) {
- /* Turn off RX RDS */
- payload = FM_RX_PWR_SET_FM_ON_RDS_OFF;
- ret = fmc_send_cmd(fmdev, POWER_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Reset RDS pointers */
- fmdev->rx.rds.last_blk_idx = 0;
- fmdev->rx.rds.wr_idx = 0;
- fmdev->rx.rds.rd_idx = 0;
- fm_rx_reset_station_info(fmdev);
-
- /* Update RDS local cache */
- fmdev->irq_info.mask &= ~(FM_RDS_EVENT);
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- }
-
- return 0;
-}
-
-/* Returns current RX RDS enable/disable status */
-int fm_rx_get_rds_mode(struct fmdev *fmdev, u8 *curr_rds_en_dis)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_rds_en_dis == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_rds_en_dis = fmdev->rx.rds.flag;
-
- return 0;
-}
-
-/* Sets RDS operation mode (RDS/RBDS) */
-int fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (rds_mode != FM_RDS_SYSTEM_RDS && rds_mode != FM_RDS_SYSTEM_RBDS) {
- fmerr("Invalid rds mode\n");
- return -EINVAL;
- }
- /* Set RDS operation mode */
- payload = (u16)rds_mode;
- ret = fmc_send_cmd(fmdev, RDS_SYSTEM_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.rds_mode = rds_mode;
-
- return 0;
-}
-
-/* Configures Alternate Frequency switch mode */
-int fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (af_mode != FM_RX_RDS_AF_SWITCH_MODE_ON &&
- af_mode != FM_RX_RDS_AF_SWITCH_MODE_OFF) {
- fmerr("Invalid af mode\n");
- return -EINVAL;
- }
- /* Enable/disable low RSSI interrupt based on af_mode */
- if (af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
- else
- fmdev->irq_info.mask &= ~FM_LEV_EVENT;
-
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.af_mode = af_mode;
-
- return 0;
-}
-
-/* Returns Alternate Frequency switch status */
-int fm_rx_get_af_switch(struct fmdev *fmdev, u8 *af_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (af_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *af_mode = fmdev->rx.af_mode;
-
- return 0;
-}
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.h b/drivers/media/radio/wl128x/fmdrv_rx.h
deleted file mode 100644
index 2748e99662c3..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_rx.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM RX module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_RX_H
-#define _FMDRV_RX_H
-
-int fm_rx_set_freq(struct fmdev *, u32);
-int fm_rx_set_mute_mode(struct fmdev *, u8);
-int fm_rx_set_stereo_mono(struct fmdev *, u16);
-int fm_rx_set_rds_mode(struct fmdev *, u8);
-int fm_rx_set_rds_system(struct fmdev *, u8);
-int fm_rx_set_volume(struct fmdev *, u16);
-int fm_rx_set_rssi_threshold(struct fmdev *, short);
-int fm_rx_set_region(struct fmdev *, u8);
-int fm_rx_set_rfdepend_softmute(struct fmdev *, u8);
-int fm_rx_set_deemphasis_mode(struct fmdev *, u16);
-int fm_rx_set_af_switch(struct fmdev *, u8);
-
-void fm_rx_reset_rds_cache(struct fmdev *);
-void fm_rx_reset_station_info(struct fmdev *);
-
-int fm_rx_seek(struct fmdev *, u32, u32, u32);
-
-int fm_rx_get_rds_mode(struct fmdev *, u8 *);
-int fm_rx_get_mute_mode(struct fmdev *, u8 *);
-int fm_rx_get_volume(struct fmdev *, u16 *);
-int fm_rx_get_band_freq_range(struct fmdev *,
- u32 *, u32 *);
-int fm_rx_get_stereo_mono(struct fmdev *, u16 *);
-int fm_rx_get_rssi_level(struct fmdev *, u16 *);
-int fm_rx_get_rssi_threshold(struct fmdev *, short *);
-int fm_rx_get_rfdepend_softmute(struct fmdev *, u8 *);
-int fm_rx_get_deemph_mode(struct fmdev *, u16 *);
-int fm_rx_get_af_switch(struct fmdev *, u8 *);
-void fm_rx_get_region(struct fmdev *, u8 *);
-
-int fm_rx_set_chanl_spacing(struct fmdev *, u8);
-int fm_rx_get_chanl_spacing(struct fmdev *, u8 *);
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.c b/drivers/media/radio/wl128x/fmdrv_tx.c
deleted file mode 100644
index c589de02f4f5..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_tx.c
+++ /dev/null
@@ -1,413 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This sub-module of FM driver implements FM TX functionality.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#include <linux/delay.h>
-#include "fmdrv.h"
-#include "fmdrv_common.h"
-#include "fmdrv_tx.h"
-
-int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->tx_data.aud_mode == mode)
- return 0;
-
- fmdbg("stereo mode: %d\n", mode);
-
- /* Set Stereo/Mono mode */
- payload = (1 - mode);
- ret = fmc_send_cmd(fmdev, MONO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->tx_data.aud_mode = mode;
-
- return ret;
-}
-
-static int set_rds_text(struct fmdev *fmdev, u8 *rds_text)
-{
- u16 payload;
- int ret;
-
- ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text,
- strlen(rds_text), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Scroll mode */
- payload = (u16)0x1;
- ret = fmc_send_cmd(fmdev, DISPLAY_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int set_rds_data_mode(struct fmdev *fmdev, u8 mode)
-{
- u16 payload;
- int ret;
-
- /* Setting unique PI TODO: how unique? */
- payload = (u16)0xcafe;
- ret = fmc_send_cmd(fmdev, PI_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set decoder id */
- payload = (u16)0xa;
- ret = fmc_send_cmd(fmdev, DI_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: RDS_MODE_GET? */
- return 0;
-}
-
-static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
-{
- u16 payload;
- int ret;
-
- len |= type << 8;
- payload = len;
- ret = fmc_send_cmd(fmdev, RDS_CONFIG_DATA_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: LENGTH_GET? */
- return 0;
-}
-
-int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- u16 payload;
- int ret;
- u8 rds_text[] = "Zoom2\n";
-
- fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis,
- FM_RDS_ENABLE, FM_RDS_DISABLE);
-
- if (rds_en_dis == FM_RDS_ENABLE) {
- /* Set RDS length */
- set_rds_len(fmdev, 0, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
-
- /* Set RDS mode */
- set_rds_data_mode(fmdev, 0x0);
- }
-
- /* Send command to enable RDS */
- if (rds_en_dis == FM_RDS_ENABLE)
- payload = 0x01;
- else
- payload = 0x00;
-
- ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- if (rds_en_dis == FM_RDS_ENABLE) {
- /* Set RDS length */
- set_rds_len(fmdev, 0, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
- }
- fmdev->tx_data.rds.flag = rds_en_dis;
-
- return 0;
-}
-
-int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- fm_tx_set_rds_mode(fmdev, 0);
-
- /* Set RDS length */
- set_rds_len(fmdev, rds_type, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
-
- /* Set RDS mode */
- set_rds_data_mode(fmdev, 0x0);
-
- payload = 1;
- ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int fm_tx_set_af(struct fmdev *fmdev, u32 af)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- fmdbg("AF: %d\n", af);
-
- af = (af - 87500) / 100;
- payload = (u16)af;
- ret = fmc_send_cmd(fmdev, TA_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
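The (af - 87500) / 100 step in fm_tx_set_af() is the standard RDS alternate-frequency encoding: code N stands for 87500 + N * 100 kHz, so codes 1-204 (FM_RDS_MIN_AF to FM_RDS_MAX_AF) cover 87.6 to 107.9 MHz. A worked sketch (hypothetical helper name):

/* E.g. 98000 kHz -> (98000 - 87500) / 100 = AF code 105. */
static u16 fm_af_khz_to_code(u32 freq_khz)
{
	return (freq_khz - 87500) / 100;
}
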
-int fm_tx_set_region(struct fmdev *fmdev, u8 region)
-{
- u16 payload;
- int ret;
-
- if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) {
- fmerr("Invalid band\n");
- return -EINVAL;
- }
-
- /* Send command to set the band */
- payload = (u16)region;
- ret = fmc_send_cmd(fmdev, TX_BAND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- u16 payload;
- int ret;
-
- fmdbg("tx: mute mode %d\n", mute_mode_toset);
-
- payload = mute_mode_toset;
- ret = fmc_send_cmd(fmdev, MUTE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Set TX Audio I/O */
-static int set_audio_io(struct fmdev *fmdev)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload;
- int ret;
-
- /* Set Audio I/O Enable */
- payload = tx->audio_io;
- ret = fmc_send_cmd(fmdev, AUDIO_IO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: is audio set? */
- return 0;
-}
-
-/* Start TX Transmission */
-static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- unsigned long timeleft;
- u16 payload;
- int ret;
-
- /* Enable POWER_ENB interrupts */
- payload = FM_POW_ENB_EVENT;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set Power Enable */
- payload = new_xmit_state;
- ret = fmc_send_cmd(fmdev, POWER_ENB_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Wait for Power Enabled */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended interrupt\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
-
- set_bit(FM_CORE_TX_XMITING, &fmdev->flag);
- tx->xmit_state = new_xmit_state;
-
- return 0;
-}
-
-/* Set TX power level */
-int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
-{
- u16 payload;
- struct fmtx_data *tx = &fmdev->tx_data;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
- fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl);
-
- /* If the core isn't ready update global variable */
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- tx->pwr_lvl = new_pwr_lvl;
- return 0;
- }
-
- /* Set power level: the application specifies the power level in
- * units of dB/uV, whereas the range and step are specific to the FM
- * chip. For TI's WL chips, convert the application-specified power
- * level to the chip-specific value by subtracting it from 122
- * (FM_PWR_LVL_HIGH). Refer to the TI FM data sheet for details.
- */
-
- payload = (FM_PWR_LVL_HIGH - new_pwr_lvl);
- ret = fmc_send_cmd(fmdev, POWER_LEV_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: is the power level set? */
- tx->pwr_lvl = new_pwr_lvl;
-
- return 0;
-}
-
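To make the dB/uV conversion in fm_tx_set_pwr_lvl() concrete: the register takes the distance from the top of the 91-122 dB/uV range, so the maximum level writes 0. A sketch (hypothetical helper name):

/* E.g. 115 dB/uV -> FM_PWR_LVL_HIGH - 115 = 122 - 115 = 7,
 * and FM_PWR_LVL_HIGH (122 dB/uV) itself -> 0. */
static u16 fm_pwr_dbuv_to_reg(u8 dbuv)
{
	return FM_PWR_LVL_HIGH - dbuv;
}
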
-/*
- * Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us)
- * Convert V4L2 specified filter values to chip specific filter values.
- */
-int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- switch (preemphasis) {
- case V4L2_PREEMPHASIS_DISABLED:
- payload = FM_TX_PREEMPH_OFF;
- break;
- case V4L2_PREEMPHASIS_50_uS:
- payload = FM_TX_PREEMPH_50US;
- break;
- case V4L2_PREEMPHASIS_75_uS:
- payload = FM_TX_PREEMPH_75US;
- break;
- }
-
- ret = fmc_send_cmd(fmdev, PREMPH_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- tx->preemph = payload;
-
- return ret;
-}
-
-/* Get the TX tuning capacitor value. */
-int fm_tx_get_tune_cap_val(struct fmdev *fmdev)
-{
- u16 curr_val;
- u32 resp_len;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- ret = fmc_send_cmd(fmdev, READ_FMANT_TUNE_VALUE, REG_RD,
- NULL, sizeof(curr_val), &curr_val, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_val = be16_to_cpu((__force __be16)curr_val);
-
- return curr_val;
-}
-
-/* Set TX Frequency */
-int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload, chanl_index;
- int ret;
-
- if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) {
- enable_xmit(fmdev, 0);
- clear_bit(FM_CORE_TX_XMITING, &fmdev->flag);
- }
-
- /* Enable FR, BL interrupts */
- payload = (FM_FR_EVENT | FM_BL_EVENT);
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- tx->tx_frq = (unsigned long)freq_to_set;
- fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq);
-
- chanl_index = freq_to_set / 10;
-
- /* Set current tuner channel */
- payload = chanl_index;
- ret = fmc_send_cmd(fmdev, CHANL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl);
- fm_tx_set_preemph_filter(fmdev, tx->preemph);
-
- tx->audio_io = 0x01; /* I2S */
- set_audio_io(fmdev);
-
- enable_xmit(fmdev, 0x01); /* Enable transmission */
-
- tx->aud_mode = FM_STEREO_MODE;
- tx->rds.flag = FM_RDS_DISABLE;
-
- return 0;
-}
-
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.h b/drivers/media/radio/wl128x/fmdrv_tx.h
deleted file mode 100644
index aebdadf9e99b..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_tx.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM TX module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_TX_H
-#define _FMDRV_TX_H
-
-int fm_tx_set_freq(struct fmdev *, u32);
-int fm_tx_set_pwr_lvl(struct fmdev *, u8);
-int fm_tx_set_region(struct fmdev *, u8);
-int fm_tx_set_mute_mode(struct fmdev *, u8);
-int fm_tx_set_stereo_mono(struct fmdev *, u16);
-int fm_tx_set_rds_mode(struct fmdev *, u8);
-int fm_tx_set_radio_text(struct fmdev *, u8 *, u8);
-int fm_tx_set_af(struct fmdev *, u32);
-int fm_tx_set_preemph_filter(struct fmdev *, u32);
-int fm_tx_get_tune_cap_val(struct fmdev *);
-
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
deleted file mode 100644
index 1c146d14dbbd..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This file provides interfaces to V4L2 subsystem.
- *
- * This module registers with V4L2 subsystem as Radio
- * data system interface (/dev/radio). During the registration,
- * it will expose two sets of function pointers.
- *
- * 1) File operation related APIs (open, close, read, write, poll, etc.).
- * 2) A set of V4L2 IOCTL compliant APIs.
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include <linux/export.h>
-
-#include "fmdrv.h"
-#include "fmdrv_v4l2.h"
-#include "fmdrv_common.h"
-#include "fmdrv_rx.h"
-#include "fmdrv_tx.h"
-
-static struct video_device gradio_dev;
-static u8 radio_disconnected;
-
-/* -- V4L2 RADIO (/dev/radioX) device file operation interfaces --- */
-
-/* Read RX RDS data */
-static ssize_t fm_v4l2_fops_read(struct file *file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- u8 rds_mode;
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
-
- if (!radio_disconnected) {
- fmerr("FM device is already disconnected\n");
- return -EIO;
- }
-
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
-
- /* Turn on RDS mode if it is disabled */
- ret = fm_rx_get_rds_mode(fmdev, &rds_mode);
- if (ret < 0) {
- fmerr("Unable to read current rds mode\n");
- goto read_unlock;
- }
-
- if (rds_mode == FM_RDS_DISABLE) {
- ret = fmc_set_rds_mode(fmdev, FM_RDS_ENABLE);
- if (ret < 0) {
- fmerr("Failed to enable rds mode\n");
- goto read_unlock;
- }
- }
-
- /* Copy RDS data from internal buffer to user buffer */
- ret = fmc_transfer_rds_from_internal_buff(fmdev, file, buf, count);
-read_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-/* Write TX RDS data */
-static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- struct tx_rds rds;
- int ret;
- struct fmdev *fmdev;
-
- ret = copy_from_user(&rds, buf, sizeof(rds));
- rds.text[sizeof(rds.text) - 1] = '\0';
- fmdbg("(%d)type: %d, text %s, af %d\n",
- ret, rds.text_type, rds.text, rds.af_freq);
- if (ret)
- return -EFAULT;
-
- fmdev = video_drvdata(file);
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
- fm_tx_set_radio_text(fmdev, rds.text, rds.text_type);
- fm_tx_set_af(fmdev, rds.af_freq);
- mutex_unlock(&fmdev->mutex);
-
- return sizeof(rds);
-}
-
-static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
-{
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
- mutex_lock(&fmdev->mutex);
- ret = fmc_is_rds_data_available(fmdev, file, pts);
- mutex_unlock(&fmdev->mutex);
- if (ret < 0)
- return EPOLLIN | EPOLLRDNORM;
-
- return 0;
-}
-
-/*
- * Handle open request for "/dev/radioX" device.
- * Start with FM RX mode as default.
- */
-static int fm_v4l2_fops_open(struct file *file)
-{
- int ret;
- struct fmdev *fmdev = NULL;
-
- /* Don't allow multiple open */
- if (radio_disconnected) {
- fmerr("FM device is already opened\n");
- return -EBUSY;
- }
-
- fmdev = video_drvdata(file);
-
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
- ret = fmc_prepare(fmdev);
- if (ret < 0) {
- fmerr("Unable to prepare FM CORE\n");
- goto open_unlock;
- }
-
- fmdbg("Load FM RX firmware..\n");
-
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret < 0) {
- fmerr("Unable to load FM RX firmware\n");
- goto open_unlock;
- }
- radio_disconnected = 1;
-
-open_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-static int fm_v4l2_fops_release(struct file *file)
-{
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
- if (!radio_disconnected) {
- fmdbg("FM device is already closed\n");
- return 0;
- }
-
- mutex_lock(&fmdev->mutex);
- ret = fmc_set_mode(fmdev, FM_MODE_OFF);
- if (ret < 0) {
- fmerr("Unable to turn off the chip\n");
- goto release_unlock;
- }
-
- ret = fmc_release(fmdev);
- if (ret < 0) {
- fmerr("FM CORE release failed\n");
- goto release_unlock;
- }
- radio_disconnected = 0;
-
-release_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-/* V4L2 RADIO (/dev/radioX) device IOCTL interfaces */
-static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- strscpy(capability->driver, FM_DRV_NAME, sizeof(capability->driver));
- strscpy(capability->card, FM_DRV_CARD_SHORT_NAME,
- sizeof(capability->card));
- sprintf(capability->bus_info, "UART");
- return 0;
-}
-
-static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct fmdev *fmdev = container_of(ctrl->handler,
- struct fmdev, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = fm_tx_get_tune_cap_val(fmdev);
- break;
- default:
- fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
- break;
- }
-
- return 0;
-}
-
-static int fm_v4l2_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct fmdev *fmdev = container_of(ctrl->handler,
- struct fmdev, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME: /* set volume */
- return fm_rx_set_volume(fmdev, (u16)ctrl->val);
-
- case V4L2_CID_AUDIO_MUTE: /* set mute */
- return fmc_set_mute_mode(fmdev, (u8)ctrl->val);
-
- case V4L2_CID_TUNE_POWER_LEVEL:
- /* set TX power level - ext control */
- return fm_tx_set_pwr_lvl(fmdev, (u8)ctrl->val);
-
- case V4L2_CID_TUNE_PREEMPHASIS:
- return fm_tx_set_preemph_filter(fmdev, (u8) ctrl->val);
-
- default:
- return -EINVAL;
- }
-}
-
-static int fm_v4l2_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- memset(audio, 0, sizeof(*audio));
- strscpy(audio->name, "Radio", sizeof(audio->name));
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int fm_v4l2_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/* Get tuner attributes. If current mode is NOT RX, return error */
-static int fm_v4l2_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u32 bottom_freq;
- u32 top_freq;
- u16 stereo_mono_mode;
- u16 rssilvl;
- int ret;
-
- if (tuner->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- ret = fm_rx_get_band_freq_range(fmdev, &bottom_freq, &top_freq);
- if (ret != 0)
- return ret;
-
- ret = fm_rx_get_stereo_mono(fmdev, &stereo_mono_mode);
- if (ret != 0)
- return ret;
-
- ret = fm_rx_get_rssi_level(fmdev, &rssilvl);
- if (ret != 0)
- return ret;
-
- strscpy(tuner->name, "FM", sizeof(tuner->name));
- tuner->type = V4L2_TUNER_RADIO;
- /* Store rangelow and rangehigh freq in unit of 62.5 Hz */
- tuner->rangelow = bottom_freq * 16;
- tuner->rangehigh = top_freq * 16;
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO |
- ((fmdev->rx.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0);
- tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_LOW |
- V4L2_TUNER_CAP_HWSEEK_BOUNDED |
- V4L2_TUNER_CAP_HWSEEK_WRAP;
- tuner->audmode = (stereo_mono_mode ?
- V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO);
-
- /*
- * The actual RSSI value lies between -128 and +127.
- * Convert it to the 0 to 255 range by adding 128.
- */
- rssilvl += 128;
-
- /*
- * The returned signal strength value should be within 0 to 65535.
- * Scale to that range by multiplying by (65535/255) = 257.
- */
- tuner->signal = rssilvl * 257;
- tuner->afc = 0;
-
- return ret;
-}
-
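The two conversions at the end of fm_v4l2_vidioc_g_tuner() compose into a single scaling from the chip's signed RSSI to the V4L2 0-65535 signal range; a sketch (hypothetical helper name):

/* E.g. RSSI -8 -> -8 + 128 = 120, then 120 * 257 = 30840.
 * 257 == 65535 / 255, so the maximum RSSI of +127 maps to 65535 exactly. */
static u16 fm_rssi_to_v4l2_signal(s8 rssi)
{
	return (u16)((rssi + 128) * 257);
}
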
-/*
- * Set tuner attributes. If current mode is NOT RX, set to RX.
- * Currently, we set only audio mode (mono/stereo) and RDS state (on/off).
- * Should we set other tuner attributes, too?
- */
-static int fm_v4l2_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u16 aud_mode;
- u8 rds_mode;
- int ret;
-
- if (tuner->index != 0)
- return -EINVAL;
-
- aud_mode = (tuner->audmode == V4L2_TUNER_MODE_STEREO) ?
- FM_STEREO_MODE : FM_MONO_MODE;
- rds_mode = (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ?
- FM_RDS_ENABLE : FM_RDS_DISABLE;
-
- if (fmdev->curr_fmmode != FM_MODE_RX) {
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret < 0) {
- fmerr("Failed to set RX mode\n");
- return ret;
- }
- }
-
- ret = fmc_set_stereo_mono(fmdev, aud_mode);
- if (ret < 0) {
- fmerr("Failed to set RX stereo/mono mode\n");
- return ret;
- }
-
- ret = fmc_set_rds_mode(fmdev, rds_mode);
- if (ret < 0)
- fmerr("Failed to set RX RDS mode\n");
-
- return ret;
-}
-
-/* Get tuner or modulator radio frequency */
-static int fm_v4l2_vidioc_g_freq(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct fmdev *fmdev = video_drvdata(file);
- int ret;
-
- ret = fmc_get_freq(fmdev, &freq->frequency);
- if (ret < 0) {
- fmerr("Failed to get frequency\n");
- return ret;
- }
-
- /* Frequency unit of 62.5 Hz */
- freq->frequency = (u32) freq->frequency * 16;
-
- return 0;
-}
-
-/* Set tuner or modulator radio frequency */
-static int fm_v4l2_vidioc_s_freq(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct fmdev *fmdev = video_drvdata(file);
-
- /*
- * As V4L2_TUNER_CAP_LOW is set, the user sends the frequency
- * in units of 62.5 Hz.
- */
- return fmc_set_freq(fmdev, freq->frequency / 16);
-}
-
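Because V4L2_TUNER_CAP_LOW is set, frequencies cross the V4L2 boundary in 62.5 Hz units, i.e. 16 units per kHz; that is why g_frequency multiplies by 16 and s_frequency divides by 16. A sketch (hypothetical helper names):

/* E.g. 98000 kHz <-> 98000 * 16 = 1568000 V4L2 units. */
static u32 fm_khz_to_v4l2_units(u32 khz)
{
	return khz * 16;
}

static u32 fm_v4l2_units_to_khz(u32 units)
{
	return units / 16;
}
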
-/* Set hardware frequency seek. If current mode is NOT RX, set it to RX. */
-static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct fmdev *fmdev = video_drvdata(file);
- int ret;
-
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
- if (fmdev->curr_fmmode != FM_MODE_RX) {
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret != 0) {
- fmerr("Failed to set RX mode\n");
- return ret;
- }
- }
-
- ret = fm_rx_seek(fmdev, seek->seek_upward, seek->wrap_around,
- seek->spacing);
- if (ret < 0)
- fmerr("RX seek failed - %d\n", ret);
-
- return ret;
-}
-/* Get modulator attributes. If mode is not TX, return no attributes. */
-static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv,
- struct v4l2_modulator *mod)
-{
- struct fmdev *fmdev = video_drvdata(file);
-
- if (mod->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- mod->txsubchans = ((fmdev->tx_data.aud_mode == FM_STEREO_MODE) ?
- V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO) |
- ((fmdev->tx_data.rds.flag == FM_RDS_ENABLE) ?
- V4L2_TUNER_SUB_RDS : 0);
-
- mod->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_LOW;
-
- return 0;
-}
-
-/* Set modulator attributes. If mode is not TX, set to TX. */
-static int fm_v4l2_vidioc_s_modulator(struct file *file, void *priv,
- const struct v4l2_modulator *mod)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u8 rds_mode;
- u16 aud_mode;
- int ret;
-
- if (mod->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_TX) {
- ret = fmc_set_mode(fmdev, FM_MODE_TX);
- if (ret != 0) {
- fmerr("Failed to set TX mode\n");
- return ret;
- }
- }
-
- aud_mode = (mod->txsubchans & V4L2_TUNER_SUB_STEREO) ?
- FM_STEREO_MODE : FM_MONO_MODE;
- rds_mode = (mod->txsubchans & V4L2_TUNER_SUB_RDS) ?
- FM_RDS_ENABLE : FM_RDS_DISABLE;
- ret = fm_tx_set_stereo_mono(fmdev, aud_mode);
- if (ret < 0) {
- fmerr("Failed to set mono/stereo mode for TX\n");
- return ret;
- }
- ret = fm_tx_set_rds_mode(fmdev, rds_mode);
- if (ret < 0)
- fmerr("Failed to set rds mode for TX\n");
-
- return ret;
-}
-
-static const struct v4l2_file_operations fm_drv_fops = {
- .owner = THIS_MODULE,
- .read = fm_v4l2_fops_read,
- .write = fm_v4l2_fops_write,
- .poll = fm_v4l2_fops_poll,
- .unlocked_ioctl = video_ioctl2,
- .open = fm_v4l2_fops_open,
- .release = fm_v4l2_fops_release,
-};
-
-static const struct v4l2_ctrl_ops fm_ctrl_ops = {
- .s_ctrl = fm_v4l2_s_ctrl,
- .g_volatile_ctrl = fm_g_volatile_ctrl,
-};
-static const struct v4l2_ioctl_ops fm_drv_ioctl_ops = {
- .vidioc_querycap = fm_v4l2_vidioc_querycap,
- .vidioc_g_audio = fm_v4l2_vidioc_g_audio,
- .vidioc_s_audio = fm_v4l2_vidioc_s_audio,
- .vidioc_g_tuner = fm_v4l2_vidioc_g_tuner,
- .vidioc_s_tuner = fm_v4l2_vidioc_s_tuner,
- .vidioc_g_frequency = fm_v4l2_vidioc_g_freq,
- .vidioc_s_frequency = fm_v4l2_vidioc_s_freq,
- .vidioc_s_hw_freq_seek = fm_v4l2_vidioc_s_hw_freq_seek,
- .vidioc_g_modulator = fm_v4l2_vidioc_g_modulator,
- .vidioc_s_modulator = fm_v4l2_vidioc_s_modulator
-};
-
-/* V4L2 RADIO device parent structure */
-static const struct video_device fm_viddev_template = {
- .fops = &fm_drv_fops,
- .ioctl_ops = &fm_drv_ioctl_ops,
- .name = FM_DRV_NAME,
- .release = video_device_release_empty,
- /*
- * To ensure both the tuner and modulator ioctls are accessible we
- * set the vfl_dir to M2M to indicate this.
- *
- * It is not really a mem2mem device of course, but it can both receive
- * and transmit using the same radio device. It's the only radio driver
- * that does this and it should really be split in two radio devices,
- * but that would affect applications using this driver.
- */
- .vfl_dir = VFL_DIR_M2M,
- .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_MODULATOR | V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE,
-};
-
-int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
-{
- struct v4l2_ctrl *ctrl;
- int ret;
-
- strscpy(fmdev->v4l2_dev.name, FM_DRV_NAME,
- sizeof(fmdev->v4l2_dev.name));
- ret = v4l2_device_register(NULL, &fmdev->v4l2_dev);
- if (ret < 0)
- return ret;
-
- /* Init mutex for core locking */
- mutex_init(&fmdev->mutex);
-
- /* Setup FM driver's V4L2 properties */
- gradio_dev = fm_viddev_template;
-
- video_set_drvdata(&gradio_dev, fmdev);
-
- gradio_dev.lock = &fmdev->mutex;
- gradio_dev.v4l2_dev = &fmdev->v4l2_dev;
-
- /* Register with V4L2 subsystem as RADIO device */
- if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
- v4l2_device_unregister(&fmdev->v4l2_dev);
- fmerr("Could not register video device\n");
- return -ENOMEM;
- }
-
- fmdev->radio_dev = &gradio_dev;
-
- /* Register with the v4l2 ctrl handler framework */
- fmdev->radio_dev->ctrl_handler = &fmdev->ctrl_handler;
-
- ret = v4l2_ctrl_handler_init(&fmdev->ctrl_handler, 5);
- if (ret < 0) {
- fmerr("(fmdev): Can't init ctrl handler\n");
- v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
- video_unregister_device(fmdev->radio_dev);
- v4l2_device_unregister(&fmdev->v4l2_dev);
- return -EBUSY;
- }
-
- /*
- * Following controls are handled by V4L2 control framework.
- * Added in ascending ID order.
- */
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, FM_RX_VOLUME_MIN,
- FM_RX_VOLUME_MAX, 1, FM_RX_VOLUME_MAX);
-
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
-
- v4l2_ctrl_new_std_menu(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS,
- 0, V4L2_PREEMPHASIS_75_uS);
-
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, FM_PWR_LVL_LOW,
- FM_PWR_LVL_HIGH, 1, FM_PWR_LVL_HIGH);
-
- ctrl = v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0,
- 255, 1, 255);
-
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
-
- return 0;
-}
-
-void *fm_v4l2_deinit_video_device(void)
-{
- struct fmdev *fmdev;
-
-
- fmdev = video_get_drvdata(&gradio_dev);
-
- /* Unregister from the v4l2 ctrl handler framework */
- v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
-
- /* Unregister RADIO device from V4L2 subsystem */
- video_unregister_device(&gradio_dev);
-
- v4l2_device_unregister(&fmdev->v4l2_dev);
-
- return fmdev;
-}
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.h b/drivers/media/radio/wl128x/fmdrv_v4l2.h
deleted file mode 100644
index 963214e9d6f2..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * FM V4L2 module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_V4L2_H
-#define _FMDRV_V4L2_H
-
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-
-int fm_v4l2_init_video_device(struct fmdev *, int);
-void *fm_v4l2_deinit_video_device(void);
-
-#endif
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 276bf3c8a8cb..8af94246e591 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -194,8 +194,10 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
if (rc)
return rc;
- if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
+ if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) {
+ usb_kill_urb(ir->urb_out);
return -ETIMEDOUT;
+ }
return rc;
}
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index b02ded52f19e..3a526dea6532 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -37,7 +37,7 @@ static void imon_ir_data(struct imon *imon)
if (packet_no == 0xff)
return;
- dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
+ dev_dbg(imon->dev, "data: %8ph", imon->ir_buf);
/*
* Only the first 5 bytes contain IR data. Right shift so we move
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index cd7af4d88b7f..044767eb3a38 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -28,7 +28,6 @@
#include <linux/workqueue.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
-#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
#define DRIVER_VERSION "1.95"
@@ -658,8 +657,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
if (len == 2)
dev_dbg(dev, "Get hw/sw rev?");
else
- dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[offset + 2]);
+ dev_dbg(dev, "hw/sw rev %4ph",
+ &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index fe368aebbc13..84533fdd61aa 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -172,8 +172,7 @@ static int pwm_ir_probe(struct platform_device *pdev)
rcdev->tx_ir = pwm_ir_tx_sleep;
} else {
init_completion(&pwm_ir->tx_done);
- hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pwm_ir->timer.function = pwm_ir_timer;
+ hrtimer_setup(&pwm_ir->timer, pwm_ir_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rcdev->tx_ir = pwm_ir_tx_atomic;
}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index e1dd8adeba46..438483c62fac 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
mux_args.mux_buf_sz = mux_buf_sz;
- dvb->streaming = true;
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
if (!dvb->mux)
return -ENOMEM;
+
+ dvb->streaming = true;
vidtv_mux_start_thread(dvb->mux);
dev_dbg_ratelimited(dev, "Started streaming\n");
@@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
{
struct device *dev = &dvb->pdev->dev;
+ if (!dvb->streaming) {
+ dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
+ return 0;
+ }
+
dvb->streaming = false;
vidtv_mux_stop_thread(dvb->mux);
vidtv_mux_destroy(dvb->mux);
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 1006a2798eef..90d2ef067594 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -112,70 +112,6 @@ static int fc0013_sleep(struct dvb_frontend *fe)
return 0;
}
-int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
-{
- struct fc0013_priv *priv = fe->tuner_priv;
- int ret;
- u8 rc_cal;
- int val;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
-
- /* push rc_cal value, get rc_cal value */
- ret = fc0013_writereg(priv, 0x10, 0x00);
- if (ret)
- goto error_out;
-
- /* get rc_cal value */
- ret = fc0013_readreg(priv, 0x10, &rc_cal);
- if (ret)
- goto error_out;
-
- rc_cal &= 0x0f;
-
- val = (int)rc_cal + rc_val;
-
- /* forcing rc_cal */
- ret = fc0013_writereg(priv, 0x0d, 0x11);
- if (ret)
- goto error_out;
-
- /* modify rc_cal value */
- if (val > 15)
- ret = fc0013_writereg(priv, 0x10, 0x0f);
- else if (val < 0)
- ret = fc0013_writereg(priv, 0x10, 0x00);
- else
- ret = fc0013_writereg(priv, 0x10, (u8)val);
-
-error_out:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
-
- return ret;
-}
-EXPORT_SYMBOL(fc0013_rc_cal_add);
-
-int fc0013_rc_cal_reset(struct dvb_frontend *fe)
-{
- struct fc0013_priv *priv = fe->tuner_priv;
- int ret;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
-
- ret = fc0013_writereg(priv, 0x0d, 0x01);
- if (!ret)
- ret = fc0013_writereg(priv, 0x10, 0x00);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
-
- return ret;
-}
-EXPORT_SYMBOL(fc0013_rc_cal_reset);
-
static int fc0013_set_vhf_track(struct fc0013_priv *priv, u32 freq)
{
int ret;
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index 74ce5903f199..47ab36342ee8 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -16,8 +16,6 @@ extern struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
u8 i2c_address, int dual_master,
enum fc001x_xtal_freq xtal_freq);
-extern int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val);
-extern int fc0013_rc_cal_reset(struct dvb_frontend *fe);
#else
static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
@@ -28,15 +26,6 @@ static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
return NULL;
}
-static inline int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
-{
- return 0;
-}
-
-static inline int fc0013_rc_cal_reset(struct dvb_frontend *fe)
-{
- return 0;
-}
#endif
#endif
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 6139ef5d891d..1cfec76b72f3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -2704,7 +2704,6 @@ int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value)
dev->gpio_dir = value;
status = cx231xx_set_gpio_bit(dev, dev->gpio_dir,
dev->gpio_val);
- value = 0;
}
if (pin_value == 0)
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 0d2c42819d39..218f712f56b1 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -322,13 +322,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ /* demod access via firmware interface */
+ u32 reg;
+
if (msg[0].len < 3 || msg[1].len < 1) {
ret = -EOPNOTSUPP;
goto unlock;
}
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
+
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
@@ -385,13 +388,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ /* demod access via firmware interface */
+ u32 reg;
+
if (msg[0].len < 3) {
ret = -EOPNOTSUPP;
goto unlock;
}
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
+
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
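Both af9035 hunks fix the same construct, presumably to satisfy the kernel's -Wdeclaration-after-statement rule: the `u32 reg` declaration carried an initializer and sat after the length-check `if`, i.e. after a statement. The shape of the fix, sketched:

	u32 reg;			/* declare at the top of the block */

	if (msg[0].len < 3)
		return -EOPNOTSUPP;

	reg = msg[0].buf[0] << 16;	/* assign only after validation */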
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 8a34e6c0d6a6..f0537b741d13 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -373,6 +373,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *lme_int = adap_to_priv(adap);
struct usb_host_endpoint *ep;
+ int ret;
lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -390,11 +391,20 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
/* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+ if (!ep) {
+ usb_free_urb(lme_int->lme_urb);
+ return -ENODEV;
+ }
if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
- usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
+ ret = usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
+ if (ret) {
+ usb_free_urb(lme_int->lme_urb);
+ return ret;
+ }
+
info("INT Interrupt Service Started");
return 0;
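The added error handling follows the usual URB ownership rule: a URB that fails usb_submit_urb() was never handed off to the USB core, so the caller still holds the only reference and must release it with usb_free_urb(). The pattern in condensed form, with illustrative names:

	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;

	/* ... fill in the transfer ... */

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_free_urb(urb);	/* submission failed: still ours to free */
		return ret;
	}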
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 4fe26e82e3d1..4e58476d305e 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1579,6 +1579,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
+static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_fh *new_handle)
+{
+ lockdep_assert_held(&handle->chain->ctrl_mutex);
+
+ if (new_handle) {
+ if (ctrl->handle)
+ dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ "UVC non compliance: Setting an async control with a pending operation.");
+
+ if (new_handle == ctrl->handle)
+ return;
+
+ if (ctrl->handle) {
+ WARN_ON(!ctrl->handle->pending_async_ctrls);
+ if (ctrl->handle->pending_async_ctrls)
+ ctrl->handle->pending_async_ctrls--;
+ }
+
+ ctrl->handle = new_handle;
+ handle->pending_async_ctrls++;
+ return;
+ }
+
+ /* Cannot clear the handle for a control not owned by us. */
+ if (WARN_ON(ctrl->handle != handle))
+ return;
+
+ ctrl->handle = NULL;
+ if (WARN_ON(!handle->pending_async_ctrls))
+ return;
+ handle->pending_async_ctrls--;
+}
+
void uvc_ctrl_status_event(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const u8 *data)
{
@@ -1588,8 +1622,12 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
mutex_lock(&chain->ctrl_mutex);
+ /* Flush the control cache, the data might have changed. */
+ ctrl->loaded = 0;
+
handle = ctrl->handle;
- ctrl->handle = NULL;
+ if (handle)
+ uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
s32 value = __uvc_ctrl_get_value(mapping, data);
@@ -1640,10 +1678,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
struct uvc_device *dev = chain->dev;
struct uvc_ctrl_work *w = &dev->async_ctrl;
- if (list_empty(&ctrl->info.mappings)) {
- ctrl->handle = NULL;
+ if (list_empty(&ctrl->info.mappings))
return false;
- }
w->data = data;
w->urb = urb;
@@ -1673,13 +1709,13 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
{
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
- u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
unsigned int i;
unsigned int j;
for (i = 0; i < xctrls_count; ++i) {
- ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
/* Notification will be sent from an Interrupt event. */
continue;
@@ -1811,7 +1847,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
+ struct uvc_fh *handle,
+ struct uvc_entity *entity,
+ int rollback,
+ struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1859,6 +1898,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
*err_ctrl = ctrl;
return ret;
}
+
+ if (!rollback && handle &&
+ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ uvc_ctrl_set_handle(handle, ctrl, handle);
}
return 0;
@@ -1895,8 +1938,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
- &err_ctrl);
+ ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
+ rollback, &err_ctrl);
if (ret < 0) {
if (ctrls)
ctrls->error_idx =
@@ -1941,6 +1984,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
s32 max;
int ret;
+ lockdep_assert_held(&chain->ctrl_mutex);
+
if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
return -EACCES;
@@ -2046,9 +2091,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
- if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
- ctrl->handle = handle;
-
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
@@ -2377,7 +2419,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
+ ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2770,6 +2812,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
+{
+ struct uvc_entity *entity;
+
+ guard(mutex)(&handle->chain->ctrl_mutex);
+
+ if (!handle->pending_async_ctrls)
+ return;
+
+ list_for_each_entry(entity, &handle->chain->dev->entities, list) {
+ for (unsigned int i = 0; i < entity->ncontrols; ++i) {
+ if (entity->controls[i].handle != handle)
+ continue;
+ uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
+ }
+ }
+
+ WARN_ON(handle->pending_async_ctrls);
+}
+
/*
* Cleanup device controls.
*/
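The guard(mutex)(...) line in uvc_ctrl_cleanup_fh() is the scope-based lock guard from <linux/cleanup.h>: the mutex is acquired at the declaration and released automatically when the enclosing scope exits, so the early return above needs no explicit unlock. Rough open-coded equivalent, as a sketch:

	static void cleanup_fh_sketch(struct uvc_fh *handle)
	{
		mutex_lock(&handle->chain->ctrl_mutex);

		if (!handle->pending_async_ctrls) {
			mutex_unlock(&handle->chain->ctrl_mutex); /* implicit with guard() */
			return;
		}

		/* ... walk entities, release owned controls ... */

		mutex_unlock(&handle->chain->ctrl_mutex);	/* implicit with guard() */
	}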
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index b3c8411dc05c..deadbcea5e22 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -32,7 +32,7 @@
unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_hw_timestamps_param;
-unsigned int uvc_no_drop_param;
+unsigned int uvc_no_drop_param = 1;
static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
@@ -220,20 +220,127 @@ static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
* Descriptors parsing
*/
+static int uvc_parse_frame(struct uvc_device *dev,
+ struct uvc_streaming *streaming,
+ struct uvc_format *format, struct uvc_frame *frame,
+ u32 **intervals, u8 ftype, int width_multiplier,
+ const unsigned char *buffer, int buflen)
+{
+ struct usb_host_interface *alts = streaming->intf->cur_altsetting;
+ unsigned int maxIntervalIndex;
+ unsigned int interval;
+ unsigned int i, n;
+
+ if (ftype != UVC_VS_FRAME_FRAME_BASED)
+ n = buflen > 25 ? buffer[25] : 0;
+ else
+ n = buflen > 21 ? buffer[21] : 0;
+
+ n = n ? n : 3;
+
+ if (buflen < 26 + 4 * n) {
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FRAME error\n",
+ dev->udev->devnum, alts->desc.bInterfaceNumber);
+ return -EINVAL;
+ }
+
+ frame->bFrameIndex = buffer[3];
+ frame->bmCapabilities = buffer[4];
+ frame->wWidth = get_unaligned_le16(&buffer[5]) * width_multiplier;
+ frame->wHeight = get_unaligned_le16(&buffer[7]);
+ frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
+ frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
+ if (ftype != UVC_VS_FRAME_FRAME_BASED) {
+ frame->dwMaxVideoFrameBufferSize =
+ get_unaligned_le32(&buffer[17]);
+ frame->dwDefaultFrameInterval =
+ get_unaligned_le32(&buffer[21]);
+ frame->bFrameIntervalType = buffer[25];
+ } else {
+ frame->dwMaxVideoFrameBufferSize = 0;
+ frame->dwDefaultFrameInterval =
+ get_unaligned_le32(&buffer[17]);
+ frame->bFrameIntervalType = buffer[21];
+ }
+
+ /*
+ * Copy the frame intervals.
+ *
+ * Some bogus devices report dwMinFrameInterval equal to
+ * dwMaxFrameInterval and have dwFrameIntervalStep set to zero. Setting
+ * all null intervals to 1 fixes the problem and some other divisions
+ * by zero that could happen.
+ */
+ frame->dwFrameInterval = *intervals;
+
+ for (i = 0; i < n; ++i) {
+ interval = get_unaligned_le32(&buffer[26 + 4 * i]);
+ (*intervals)[i] = interval ? interval : 1;
+ }
+
+ /*
+ * Apply more fixes, quirks and workarounds to handle incorrect or
+ * broken descriptors.
+ */
+
+ /*
+ * Several UVC chipsets screw up dwMaxVideoFrameBufferSize completely.
+ * Observed behaviours range from setting the value to 1.1x the actual
+ * frame size to hardwiring the 16 low bits to 0. This results in
+ * higher than necessary memory usage as well as wrong image size
+ * information. For uncompressed formats this can be fixed by computing
+ * the value from the frame size.
+ */
+ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED))
+ frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth
+ * frame->wHeight / 8;
+
+ /*
+ * Clamp the default frame interval to the boundaries. A zero
+ * bFrameIntervalType value indicates a continuous frame interval
+ * range, with dwFrameInterval[0] storing the minimum value and
+ * dwFrameInterval[1] storing the maximum value.
+ */
+ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1;
+ frame->dwDefaultFrameInterval =
+ clamp(frame->dwDefaultFrameInterval,
+ frame->dwFrameInterval[0],
+ frame->dwFrameInterval[maxIntervalIndex]);
+
+ /*
+ * Some devices report frame intervals that are not functional. If the
+ * corresponding quirk is set, restrict operation to the first interval
+ * only.
+ */
+ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
+ frame->bFrameIntervalType = 1;
+ (*intervals)[0] = frame->dwDefaultFrameInterval;
+ }
+
+ uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
+ frame->wWidth, frame->wHeight,
+ 10000000 / frame->dwDefaultFrameInterval,
+ (100000000 / frame->dwDefaultFrameInterval) % 10);
+
+ *intervals += n;
+
+ return buffer[0];
+}
+
static int uvc_parse_format(struct uvc_device *dev,
struct uvc_streaming *streaming, struct uvc_format *format,
struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer,
int buflen)
{
- struct usb_interface *intf = streaming->intf;
- struct usb_host_interface *alts = intf->cur_altsetting;
+ struct usb_host_interface *alts = streaming->intf->cur_altsetting;
const struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
unsigned int width_multiplier = 1;
- unsigned int interval;
unsigned int i, n;
u8 ftype;
+ int ret;
format->type = buffer[2];
format->index = buffer[3];
@@ -371,111 +478,19 @@ static int uvc_parse_format(struct uvc_device *dev,
* Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
- while (ftype && buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
- buffer[2] == ftype) {
- unsigned int maxIntervalIndex;
-
- frame = &frames[format->nframes];
- if (ftype != UVC_VS_FRAME_FRAME_BASED)
- n = buflen > 25 ? buffer[25] : 0;
- else
- n = buflen > 21 ? buffer[21] : 0;
-
- n = n ? n : 3;
-
- if (buflen < 26 + 4*n) {
- uvc_dbg(dev, DESCR,
- "device %d videostreaming interface %d FRAME error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
- return -EINVAL;
- }
-
- frame->bFrameIndex = buffer[3];
- frame->bmCapabilities = buffer[4];
- frame->wWidth = get_unaligned_le16(&buffer[5])
- * width_multiplier;
- frame->wHeight = get_unaligned_le16(&buffer[7]);
- frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
- frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
- if (ftype != UVC_VS_FRAME_FRAME_BASED) {
- frame->dwMaxVideoFrameBufferSize =
- get_unaligned_le32(&buffer[17]);
- frame->dwDefaultFrameInterval =
- get_unaligned_le32(&buffer[21]);
- frame->bFrameIntervalType = buffer[25];
- } else {
- frame->dwMaxVideoFrameBufferSize = 0;
- frame->dwDefaultFrameInterval =
- get_unaligned_le32(&buffer[17]);
- frame->bFrameIntervalType = buffer[21];
- }
-
- /*
- * Copy the frame intervals.
- *
- * Some bogus devices report dwMinFrameInterval equal to
- * dwMaxFrameInterval and have dwFrameIntervalStep set to
- * zero. Setting all null intervals to 1 fixes the problem and
- * some other divisions by zero that could happen.
- */
- frame->dwFrameInterval = *intervals;
-
- for (i = 0; i < n; ++i) {
- interval = get_unaligned_le32(&buffer[26+4*i]);
- (*intervals)[i] = interval ? interval : 1;
- }
-
- /*
- * Apply more fixes, quirks and workarounds to handle incorrect
- * or broken descriptors.
- */
-
- /*
- * Several UVC chipsets screw up dwMaxVideoFrameBufferSize
- * completely. Observed behaviours range from setting the
- * value to 1.1x the actual frame size to hardwiring the
- * 16 low bits to 0. This results in a higher than necessary
- * memory usage as well as a wrong image size information. For
- * uncompressed formats this can be fixed by computing the
- * value from the frame size.
- */
- if (!(format->flags & UVC_FMT_FLAG_COMPRESSED))
- frame->dwMaxVideoFrameBufferSize = format->bpp
- * frame->wWidth * frame->wHeight / 8;
-
- /*
- * Clamp the default frame interval to the boundaries. A zero
- * bFrameIntervalType value indicates a continuous frame
- * interval range, with dwFrameInterval[0] storing the minimum
- * value and dwFrameInterval[1] storing the maximum value.
- */
- maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1;
- frame->dwDefaultFrameInterval =
- clamp(frame->dwDefaultFrameInterval,
- frame->dwFrameInterval[0],
- frame->dwFrameInterval[maxIntervalIndex]);
-
- /*
- * Some devices report frame intervals that are not functional.
- * If the corresponding quirk is set, restrict operation to the
- * first interval only.
- */
- if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
- frame->bFrameIntervalType = 1;
- (*intervals)[0] = frame->dwDefaultFrameInterval;
+ if (ftype) {
+ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ buffer[2] == ftype) {
+ frame = &frames[format->nframes];
+ ret = uvc_parse_frame(dev, streaming, format, frame,
+ intervals, ftype, width_multiplier,
+ buffer, buflen);
+ if (ret < 0)
+ return ret;
+ format->nframes++;
+ buflen -= ret;
+ buffer += ret;
}
-
- uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
- frame->wWidth, frame->wHeight,
- 10000000 / frame->dwDefaultFrameInterval,
- (100000000 / frame->dwDefaultFrameInterval) % 10);
-
- format->nframes++;
- *intervals += n;
-
- buflen -= buffer[0];
- buffer += buffer[0];
}
if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
@@ -775,27 +790,14 @@ static const u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
- u16 id, unsigned int num_pads,
- unsigned int extra_size)
+static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ unsigned int num_pads, unsigned int extra_size)
{
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
unsigned int i;
- /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
- if (id == 0) {
- dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
- return ERR_PTR(-EINVAL);
- }
-
- /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
- if (uvc_entity_by_id(dev, id)) {
- dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
- return ERR_PTR(-EINVAL);
- }
-
extra_size = roundup(extra_size, sizeof(*entity->pads));
if (num_pads)
num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
@@ -805,7 +807,7 @@ static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
entity->id = id;
entity->type = type;
@@ -917,10 +919,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
break;
}
- unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
- buffer[3], p + 1, 2 * n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+ p + 1, 2*n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1029,10 +1031,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
- buffer[3], 1, n + p);
- if (IS_ERR(term))
- return PTR_ERR(term);
+ term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+ 1, n + p);
+ if (term == NULL)
+ return -ENOMEM;
if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
@@ -1088,10 +1090,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return 0;
}
- term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
- buffer[3], 1, 0);
- if (IS_ERR(term))
- return PTR_ERR(term);
+ term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+ 1, 0);
+ if (term == NULL)
+ return -ENOMEM;
memcpy(term->baSourceID, &buffer[7], 1);
@@ -1110,10 +1112,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
- p + 1, 0);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->baSourceID, &buffer[5], p);
@@ -1133,9 +1134,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->baSourceID, &buffer[4], 1);
unit->processing.wMaxMultiplier =
@@ -1162,10 +1163,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
- p + 1, n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1295,20 +1295,19 @@ static int uvc_gpio_parse(struct uvc_device *dev)
struct gpio_desc *gpio_privacy;
int irq;
- gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
+ gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
GPIOD_IN);
if (IS_ERR_OR_NULL(gpio_privacy))
return PTR_ERR_OR_ZERO(gpio_privacy);
irq = gpiod_to_irq(gpio_privacy);
if (irq < 0)
- return dev_err_probe(&dev->udev->dev, irq,
+ return dev_err_probe(&dev->intf->dev, irq,
"No IRQ for privacy GPIO\n");
- unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
- UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (!unit)
+ return -ENOMEM;
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
@@ -1329,15 +1328,27 @@ static int uvc_gpio_parse(struct uvc_device *dev)
static int uvc_gpio_init_irq(struct uvc_device *dev)
{
struct uvc_entity *unit = dev->gpio_unit;
+ int ret;
if (!unit || unit->gpio.irq < 0)
return 0;
- return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
- uvc_gpio_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING,
- "uvc_privacy_gpio", dev);
+ ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "uvc_privacy_gpio", dev);
+
+ unit->gpio.initialized = !ret;
+
+ return ret;
+}
+
+static void uvc_gpio_deinit(struct uvc_device *dev)
+{
+ if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized)
+ return;
+
+ free_irq(dev->gpio_unit->gpio.irq, dev);
}
/* ------------------------------------------------------------------------
@@ -1934,6 +1945,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
{
struct uvc_streaming *stream;
+ uvc_gpio_deinit(dev);
+
list_for_each_entry(stream, &dev->streams, list) {
/* Nothing to do here, continue. */
if (!video_is_registered(&stream->vdev))
@@ -1995,7 +2008,7 @@ int uvc_register_video_device(struct uvc_device *dev,
int ret;
/* Initialize the video buffers queue. */
- ret = uvc_queue_init(queue, type, !uvc_no_drop_param);
+ ret = uvc_queue_init(queue, type);
if (ret)
return ret;
@@ -2424,8 +2437,25 @@ module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644);
MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
-module_param_named(nodrop, uvc_no_drop_param, uint, 0644);
+
+static int param_set_nodrop(const char *val, const struct kernel_param *kp)
+{
+ pr_warn_once("uvcvideo: "
+ DEPRECATED
+ "nodrop parameter will be eventually removed.\n");
+ return param_set_bool(val, kp);
+}
+
+static const struct kernel_param_ops param_ops_nodrop = {
+ .set = param_set_nodrop,
+ .get = param_get_uint,
+};
+
+param_check_uint(nodrop, &uvc_no_drop_param);
+module_param_cb(nodrop, &param_ops_nodrop, &uvc_no_drop_param, 0644);
+__MODULE_PARM_TYPE(nodrop, "uint");
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
+
module_param_named(quirks, uvc_quirks_param, uint, 0644);
MODULE_PARM_DESC(quirks, "Forced device quirks");
module_param_named(trace, uvc_dbg_param, uint, 0644);
@@ -2802,6 +2832,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0c45,
+ .idProduct = 0x6366,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* MT6227 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2830,6 +2869,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Kurokesu C1 PRO */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x16d0,
+ .idProduct = 0x0ed1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* Syntek (HP Spartan) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 26ee85657fc8..2ee142621042 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -208,8 +208,7 @@ static const struct vb2_ops uvc_meta_queue_qops = {
.stop_streaming = uvc_stop_streaming,
};
-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
- int drop_corrupted)
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
int ret;
@@ -239,7 +238,6 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
mutex_init(&queue->mutex);
spin_lock_init(&queue->irqlock);
INIT_LIST_HEAD(&queue->irqqueue);
- queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
return 0;
}
@@ -472,14 +470,15 @@ static void uvc_queue_buffer_complete(struct kref *ref)
struct vb2_buffer *vb = &buf->buf.vb2_buf;
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
+ if (buf->error && !uvc_no_drop_param) {
uvc_queue_buffer_requeue(queue, buf);
return;
}
buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
- vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
}
/*
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 06c867510c8f..ee01dce4b783 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -262,8 +262,6 @@ int uvc_status_init(struct uvc_device *dev)
if (ep == NULL)
return 0;
- uvc_input_init(dev);
-
dev->status = kzalloc(sizeof(*dev->status), GFP_KERNEL);
if (!dev->status)
return -ENOMEM;
@@ -271,6 +269,7 @@ int uvc_status_init(struct uvc_device *dev)
dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->int_urb) {
kfree(dev->status);
+ dev->status = NULL;
return -ENOMEM;
}
@@ -289,11 +288,16 @@ int uvc_status_init(struct uvc_device *dev)
dev->status, sizeof(*dev->status), uvc_status_complete,
dev, interval);
+ uvc_input_init(dev);
+
return 0;
}
void uvc_status_unregister(struct uvc_device *dev)
{
+ if (!dev->status)
+ return;
+
uvc_status_suspend(dev);
uvc_input_unregister(dev);
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 97c5407f6603..93c6cdb23881 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -26,6 +26,8 @@
#include "uvcvideo.h"
+static int uvc_acquire_privileges(struct uvc_fh *handle);
+
static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
struct uvc_control_mapping *map,
const struct uvc_xu_control_mapping *xmap)
@@ -361,9 +363,11 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
return ret;
}
-static int uvc_v4l2_get_format(struct uvc_streaming *stream,
- struct v4l2_format *fmt)
+static int uvc_ioctl_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format;
const struct uvc_frame *frame;
int ret = 0;
@@ -395,14 +399,20 @@ done:
return ret;
}
-static int uvc_v4l2_set_format(struct uvc_streaming *stream,
- struct v4l2_format *fmt)
+static int uvc_ioctl_s_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
const struct uvc_format *format;
const struct uvc_frame *frame;
int ret;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
if (fmt->type != stream->type)
return -EINVAL;
@@ -426,10 +436,12 @@ done:
return ret;
}
-static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
- struct v4l2_streamparm *parm)
+static int uvc_ioctl_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
u32 numerator, denominator;
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
if (parm->type != stream->type)
return -EINVAL;
@@ -461,9 +473,11 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
return 0;
}
-static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
- struct v4l2_streamparm *parm)
+static int uvc_ioctl_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
struct v4l2_fract timeperframe;
const struct uvc_format *format;
@@ -472,6 +486,10 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
unsigned int i;
int ret;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
if (parm->type != stream->type)
return -EINVAL;
@@ -573,6 +591,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
* - VIDIOC_S_INPUT
* - VIDIOC_S_PARM
* - VIDIOC_S_FMT
+ * - VIDIOC_CREATE_BUFS
* - VIDIOC_REQBUFS
*/
static int uvc_acquire_privileges(struct uvc_fh *handle)
@@ -652,6 +671,8 @@ static int uvc_v4l2_release(struct file *file)
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
+ uvc_ctrl_cleanup_fh(handle);
+
/* Only free resources if this is a privileged handle. */
if (uvc_has_privileges(handle))
uvc_queue_release(&stream->queue);
@@ -685,11 +706,13 @@ static int uvc_ioctl_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
+static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- const struct uvc_format *format;
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
enum v4l2_buf_type type = fmt->type;
+ const struct uvc_format *format;
u32 index = fmt->index;
if (fmt->type != stream->type || fmt->index >= stream->nformats)
@@ -707,82 +730,8 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
return 0;
}
-static int uvc_ioctl_enum_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_ioctl_enum_fmt(stream, fmt);
-}
-
-static int uvc_ioctl_enum_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_ioctl_enum_fmt(stream, fmt);
-}
-
-static int uvc_ioctl_g_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_format(stream, fmt);
-}
-
-static int uvc_ioctl_g_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_format(stream, fmt);
-}
-
-static int uvc_ioctl_s_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_format(stream, fmt);
-}
-
-static int uvc_ioctl_s_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_format(stream, fmt);
-}
-
-static int uvc_ioctl_try_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- struct uvc_streaming_control probe;
-
- return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
-}
-
-static int uvc_ioctl_try_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
+static int uvc_ioctl_try_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
struct uvc_fh *handle = fh;
struct uvc_streaming *stream = handle->stream;
@@ -1212,29 +1161,6 @@ static int uvc_ioctl_g_selection(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_parm(struct file *file, void *fh,
- struct v4l2_streamparm *parm)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_streamparm(stream, parm);
-}
-
-static int uvc_ioctl_s_parm(struct file *file, void *fh,
- struct v4l2_streamparm *parm)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_streamparm(stream, parm);
-}
-
static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -1543,15 +1469,17 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
#endif
const struct v4l2_ioctl_ops uvc_ioctl_ops = {
+ .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt,
+ .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt,
+ .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt,
+ .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt,
+ .vidioc_g_parm = uvc_ioctl_g_parm,
+ .vidioc_s_parm = uvc_ioctl_s_parm,
.vidioc_querycap = uvc_ioctl_querycap,
- .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt_vid_cap,
- .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt_vid_out,
- .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt_vid_cap,
- .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt_vid_out,
- .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt_vid_cap,
- .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt,
+ .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt,
+ .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt,
+ .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt,
.vidioc_reqbufs = uvc_ioctl_reqbufs,
.vidioc_querybuf = uvc_ioctl_querybuf,
.vidioc_qbuf = uvc_ioctl_qbuf,
@@ -1570,8 +1498,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
.vidioc_querymenu = uvc_ioctl_querymenu,
.vidioc_g_selection = uvc_ioctl_g_selection,
- .vidioc_g_parm = uvc_ioctl_g_parm,
- .vidioc_s_parm = uvc_ioctl_s_parm,
.vidioc_enum_framesizes = uvc_ioctl_enum_framesizes,
.vidioc_enum_frameintervals = uvc_ioctl_enum_frameintervals,
.vidioc_subscribe_event = uvc_ioctl_subscribe_event,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index e00f38dd07d9..e3567aeb0007 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/unaligned.h>
+#include <media/jpeg.h>
#include <media/v4l2-common.h>
#include "uvcvideo.h"
@@ -79,6 +80,27 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (likely(ret == size))
return 0;
+ /*
+ * Some devices return shorter USB control packets than expected if the
+ * returned value can fit in less bytes. Zero all the bytes that the
+ * device has not written.
+ *
+ * This quirk is applied to all controls, regardless of their data type.
+ * Most controls are little-endian integers, in which case the missing
+ * bytes become 0 MSBs. For other data types, a different heuristic
+ * could be implemented if a device is found needing it.
+ *
+ * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need
+ * to be excluded because its size is always 1.
+ */
+ if (ret > 0 && query != UVC_GET_INFO) {
+ memset(data + ret, 0, size - ret);
+ dev_warn_once(&dev->udev->dev,
+ "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
+ uvc_query_name(query), cs, unit, ret, size);
+ return 0;
+ }
+
if (ret != -EPIPE) {
dev_err(&dev->udev->dev,
"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
@@ -96,8 +118,12 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
error = *(u8 *)data;
*(u8 *)data = tmp;
- if (ret != 1)
+ if (ret != 1) {
+ dev_err_ratelimited(&dev->udev->dev,
+ "Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n",
+ uvc_query_name(query), cs, unit, ret);
return ret < 0 ? ret : -EPIPE;
+ }
uvc_dbg(dev, CONTROL, "Control error %u\n", error);
@@ -297,8 +323,9 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
goto out;
} else if (ret != size) {
dev_err(&stream->intf->dev,
- "Failed to query (%u) UVC %s control : %d (exp. %u).\n",
- query, probe ? "probe" : "commit", ret, size);
+ "Failed to query (%s) UVC %s control : %d (exp. %u).\n",
+ uvc_query_name(query), probe ? "probe" : "commit",
+ ret, size);
ret = (ret == -EPROTO) ? -EPROTO : -EIO;
goto out;
}
@@ -1116,6 +1143,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
static int uvc_video_decode_start(struct uvc_streaming *stream,
struct uvc_buffer *buf, const u8 *data, int len)
{
+ u8 header_len;
u8 fid;
/*
@@ -1129,6 +1157,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EINVAL;
}
+ header_len = data[0];
fid = data[1] & UVC_STREAM_FID;
/*
@@ -1210,9 +1239,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EAGAIN;
}
+ /*
+ * Some cameras, when running two parallel streams (one MJPEG alongside
+ * another non-MJPEG stream), are known to lose the EOF packet for a frame.
+ * We can detect the end of a frame by checking for a new SOI marker, as
+ * the SOI always lies on the packet boundary between two frames for
+ * these devices.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
+ (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
+ stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
+ const u8 *packet = data + header_len;
+
+ if (len >= header_len + 2 &&
+ packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
+ buf->bytesused != 0) {
+ buf->state = UVC_BUF_STATE_READY;
+ buf->error = 1;
+ stream->last_fid ^= UVC_STREAM_FID;
+ return -EAGAIN;
+ }
+ }
+
stream->last_fid = fid;
- return data[0];
+ return header_len;
}
static inline enum dma_data_direction uvc_stream_dir(
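The JPEG start-of-image marker is the byte pair ff d8 (JPEG_MARKER_SOI from <media/jpeg.h> being the d8 half), so for the quirked devices a lost EOF looks like this on the wire, sketched with the payload starting right after the UVC header:

	/*
	 *  ... | hdr | tail of frame N | hdr | ff d8 <frame N+1> | ...
	 *                                      ^^^^^
	 *        SOI at a packet boundary: frame N must have ended,
	 *        even though its EOF header bit never arrived.
	 */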
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 07f9921d83f2..5e388f05f3fc 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -76,6 +76,7 @@
#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
+#define UVC_QUIRK_MJPEG_NO_EOF 0x00020000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -234,6 +235,7 @@ struct uvc_entity {
u8 *bmControls;
struct gpio_desc *gpio_privacy;
int irq;
+ bool initialized;
} gpio;
};
@@ -316,7 +318,6 @@ struct uvc_buffer {
};
#define UVC_QUEUE_DISCONNECTED (1 << 0)
-#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
struct uvc_video_queue {
struct vb2_queue queue;
@@ -337,7 +338,11 @@ struct uvc_video_chain {
struct uvc_entity *processing; /* Processing unit */
struct uvc_entity *selector; /* Selector unit */
- struct mutex ctrl_mutex; /* Protects ctrl.info */
+ struct mutex ctrl_mutex; /*
+ * Protects ctrl.info,
+ * ctrl.handle and
+ * uvc_fh.pending_async_ctrls
+ */
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
@@ -612,6 +617,7 @@ struct uvc_fh {
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
enum uvc_handle_state state;
+ unsigned int pending_async_ctrls;
};
struct uvc_driver {
@@ -674,8 +680,7 @@ extern struct uvc_driver uvc_driver;
struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
- int drop_corrupted);
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type);
void uvc_queue_release(struct uvc_video_queue *queue);
int uvc_request_buffers(struct uvc_video_queue *queue,
struct v4l2_requestbuffers *rb);
@@ -797,6 +802,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
+
/* Utility functions */
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index f19c8adf2c61..cb153ce42c45 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -127,7 +127,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
{
struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
bool have_clk_lane = false, have_data_lanes = false,
- have_lane_polarities = false;
+ have_lane_polarities = false, have_line_orders = false;
unsigned int flags = 0, lanes_used = 0;
u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
u32 clock_lane = 0;
@@ -197,6 +197,17 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
have_lane_polarities = true;
}
+ rval = fwnode_property_count_u32(fwnode, "line-orders");
+ if (rval > 0) {
+ if (rval != num_data_lanes) {
+ pr_warn("invalid number of line-orders entries (need %u, got %u)\n",
+ num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ have_line_orders = true;
+ }
+
if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
clock_lane = v;
pr_debug("clock lane position %u\n", v);
@@ -250,6 +261,36 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
} else {
pr_debug("no lane polarities defined, assuming not inverted\n");
}
+
+ if (have_line_orders) {
+ fwnode_property_read_u32_array(fwnode,
+ "line-orders", array,
+ num_data_lanes);
+
+ for (i = 0; i < num_data_lanes; i++) {
+ static const char * const orders[] = {
+ "ABC", "ACB", "BAC", "BCA", "CAB", "CBA"
+ };
+
+ if (array[i] >= ARRAY_SIZE(orders)) {
+ pr_warn("lane %u invalid line-order assuming ABC (got %u)\n",
+ i, array[i]);
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+ continue;
+ }
+
+ bus->line_orders[i] = array[i];
+ pr_debug("lane %u line order %s", i,
+ orders[array[i]]);
+ }
+ } else {
+ for (i = 0; i < num_data_lanes; i++)
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+
+ pr_debug("no line orders defined, assuming ABC\n");
+ }
}
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 4bb91359e3a9..937d358697e1 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -329,7 +329,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
if (!(sink->flags & MEDIA_PAD_FL_SINK))
return -EINVAL;
- fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
+ fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) {
struct fwnode_handle *remote_ep;
int src_idx, sink_idx, ret;
struct media_pad *src;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 50eb9f49512b..53f1888cc84f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -358,17 +358,6 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
- enum gpmc_clk_domain cd)
-{
- return ticks * gpmc_get_clk_period(cs, cd) / 1000;
-}
-
-unsigned int gpmc_ticks_to_ns(unsigned int ticks)
-{
- return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
-}
-
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
return ticks * gpmc_get_fclk_period();
@@ -415,6 +404,13 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
}
#ifdef CONFIG_OMAP_GPMC_DEBUG
+
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
+{
+ return ticks * gpmc_get_clk_period(cs, cd) / 1000;
+}
+
/**
* get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
* @cs: Chip Select Region
@@ -1295,21 +1291,6 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
}
EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
-int gpmc_get_client_irq(unsigned int irq_config)
-{
- if (!gpmc_irq_domain) {
- pr_warn("%s called before GPMC IRQ domain available\n",
- __func__);
- return 0;
- }
-
- /* we restrict this to NAND IRQs only */
- if (irq_config >= GPMC_NR_NAND_IRQS)
- return 0;
-
- return irq_create_mapping(gpmc_irq_domain, irq_config);
-}
-
static int gpmc_irq_endis(unsigned long hwirq, bool endis)
{
u32 regval;
@@ -2245,26 +2226,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
goto err;
}
- if (of_node_name_eq(child, "nand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible NAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
- if (of_node_name_eq(child, "onenand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible OneNAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 7193f848d17e..9b7d30a21a5b 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -474,14 +474,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
ram_code = tegra_read_ram_code();
- for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
- np = of_find_node_by_name(np, "emc-tables")) {
+ for_each_child_of_node(dev->of_node, np) {
+ if (!of_node_name_eq(np, "emc-tables"))
+ continue;
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
struct device_node *lpddr2_np;
bool cfg_mismatches = false;
- lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ lpddr2_np = of_get_child_by_name(np, "lpddr2");
if (lpddr2_np) {
const struct lpddr2_info *info;
@@ -518,7 +519,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
}
if (cfg_mismatches) {
- of_node_put(np);
continue;
}
}
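The conversion drops the manual of_node_put() because for_each_child_of_node() both limits the walk to direct children (of_find_node_by_name() keeps searching depth-first from the given node) and handles the per-iteration reference counting itself. The iterator's contract, sketched:

	struct device_node *np;

	for_each_child_of_node(parent, np) {
		if (!of_node_name_eq(np, "emc-tables"))
			continue;	/* the iterator drops np's reference */
		/* breaking out instead would leave np's reference held */
	}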
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index d54dc3cfff73..c8b83c9edbd5 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -13,7 +13,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -69,39 +71,27 @@
#define ACR_SSTROBE_MASK BIT(31)
#define ASIZE_16BIT 1
-#define CONFIG_MASK (TA(TA_MAX) | \
- RHOLD(RHOLD_MAX) | \
- RSTROBE(RSTROBE_MAX) | \
- RSETUP(RSETUP_MAX) | \
- WHOLD(WHOLD_MAX) | \
- WSTROBE(WSTROBE_MAX) | \
- WSETUP(WSETUP_MAX) | \
- EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
- ASIZE_MAX)
+#define TIMINGS_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX))
+
+#define CONFIG_MASK (EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | ASIZE_MAX)
/**
- * struct aemif_cs_data: structure to hold cs parameters
+ * struct aemif_cs_data: structure to hold CS parameters
+ * @timings: timings configuration
* @cs: chip-select number
- * @wstrobe: write strobe width, ns
- * @rstrobe: read strobe width, ns
- * @wsetup: write setup width, ns
- * @whold: write hold width, ns
- * @rsetup: read setup width, ns
- * @rhold: read hold width, ns
- * @ta: minimum turn around time, ns
* @enable_ss: enable/disable select strobe mode
* @enable_ew: enable/disable extended wait mode
* @asize: width of the asynchronous device's data bus
*/
struct aemif_cs_data {
+ struct aemif_cs_timings timings;
u8 cs;
- u16 wstrobe;
- u16 rstrobe;
- u8 wsetup;
- u8 whold;
- u8 rsetup;
- u8 rhold;
- u8 ta;
u8 enable_ss;
u8 enable_ew;
u8 asize;
@@ -115,6 +105,7 @@ struct aemif_cs_data {
* @num_cs: number of assigned chip-selects
* @cs_offset: start number of cs nodes
* @cs_data: array of chip-select settings
+ * @config_cs_lock: lock used to access CS configuration
*/
struct aemif_device {
void __iomem *base;
@@ -123,20 +114,94 @@ struct aemif_device {
u8 num_cs;
int cs_offset;
struct aemif_cs_data cs_data[NUM_CS];
+ struct mutex config_cs_lock;
};
/**
+ * aemif_check_cs_timings() - Check the validity of a CS timing configuration.
+ * @timings: timings configuration
+ *
+ * Return: 0 if the timing configuration is valid, negative error number otherwise.
+ */
+int aemif_check_cs_timings(struct aemif_cs_timings *timings)
+{
+ if (timings->ta > TA_MAX)
+ return -EINVAL;
+
+ if (timings->rhold > RHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->rstrobe > RSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->rsetup > RSETUP_MAX)
+ return -EINVAL;
+
+ if (timings->whold > WHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->wstrobe > WSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->wsetup > WSETUP_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_check_cs_timings);
+
+/**
+ * aemif_set_cs_timings() - Set the timing configuration of a given chip select.
+ * @aemif: aemif device to configure
+ * @cs: index of the chip select to configure
+ * @timings: timings configuration to set
+ *
+ * Return: 0 on success, else negative errno.
+ */
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs,
+ struct aemif_cs_timings *timings)
+{
+ unsigned int offset;
+ u32 val, set;
+ int ret;
+
+ if (!timings || !aemif)
+ return -EINVAL;
+
+ if (cs > aemif->num_cs)
+ return -EINVAL;
+
+ ret = aemif_check_cs_timings(timings);
+ if (ret)
+ return ret;
+
+ set = TA(timings->ta) | RHOLD(timings->rhold) | RSTROBE(timings->rstrobe) |
+ RSETUP(timings->rsetup) | WHOLD(timings->whold) |
+ WSTROBE(timings->wstrobe) | WSETUP(timings->wsetup);
+
+ offset = A1CR_OFFSET + cs * 4;
+
+ mutex_lock(&aemif->config_cs_lock);
+ val = readl(aemif->base + offset);
+ val &= ~TIMINGS_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_set_cs_timings);
+
+/**
* aemif_calc_rate - calculate timing data.
* @pdev: platform device to calculate for
* @wanted: The cycle time needed in nanoseconds.
* @clk: The input clock rate in kHz.
- * @max: The maximum divider value that can be programmed.
*
- * On success, returns the calculated timing value minus 1 for easy
- * programming into AEMIF timing registers, else negative errno.
+ * Return: the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers.
*/
-static int aemif_calc_rate(struct platform_device *pdev, int wanted,
- unsigned long clk, int max)
+static u32 aemif_calc_rate(struct platform_device *pdev, int wanted, unsigned long clk)
{
int result;
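A hypothetical consumer of the two new exports, validating a timing set before programming it (function name and field values are illustrative, not taken from the patch):

	#include <linux/memory/ti-aemif.h>

	static int program_cs_sketch(struct aemif_device *aemif, u8 cs)
	{
		struct aemif_cs_timings t = {
			.ta = 1, .rhold = 1, .rstrobe = 5, .rsetup = 1,
			.whold = 1, .wstrobe = 5, .wsetup = 1,	/* clock cycles */
		};
		int ret;

		ret = aemif_check_cs_timings(&t);
		if (ret)
			return ret;

		return aemif_set_cs_timings(aemif, cs, &t);
	}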
@@ -149,10 +214,6 @@ static int aemif_calc_rate(struct platform_device *pdev, int wanted,
if (result < 0)
result = 0;
- /* ... But configuring tighter timings is not an option. */
- else if (result > max)
- result = -EINVAL;
-
return result;
}
@@ -174,48 +235,25 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
- unsigned long clk_rate = aemif->clk_rate;
unsigned offset;
u32 set, val;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
- ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
- rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
- rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
- rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
- whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
- wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
- wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
-
- if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
- whold < 0 || wstrobe < 0 || wsetup < 0) {
- dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
- __func__);
- return -EINVAL;
- }
-
- set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
- WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
-
- set |= (data->asize & ACR_ASIZE_MASK);
+ set = (data->asize & ACR_ASIZE_MASK);
if (data->enable_ew)
set |= ACR_EW_MASK;
if (data->enable_ss)
set |= ACR_SSTROBE_MASK;
+ mutex_lock(&aemif->config_cs_lock);
val = readl(aemif->base + offset);
val &= ~CONFIG_MASK;
val |= set;
writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
- return 0;
-}
-
-static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
-{
- return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+ return aemif_set_cs_timings(aemif, data->cs - aemif->cs_offset, &data->timings);
}
/**
@@ -231,19 +269,18 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- unsigned long clk_rate = aemif->clk_rate;
u32 val, offset;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
val = readl(aemif->base + offset);
- data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
- data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
- data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
- data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
- data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
- data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
- data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->timings.ta = TA_VAL(val);
+ data->timings.rhold = RHOLD_VAL(val);
+ data->timings.rstrobe = RSTROBE_VAL(val);
+ data->timings.rsetup = RSETUP_VAL(val);
+ data->timings.whold = WHOLD_VAL(val);
+ data->timings.wstrobe = WSTROBE_VAL(val);
+ data->timings.wsetup = WSETUP_VAL(val);
data->enable_ew = EW_VAL(val);
data->enable_ss = SSTROBE_VAL(val);
data->asize = val & ASIZE_MAX;
@@ -261,6 +298,7 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
struct device_node *np)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
+ unsigned long clk_rate = aemif->clk_rate;
struct aemif_cs_data *data;
u32 cs;
u32 val;
@@ -288,32 +326,33 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
/* override the values from device node */
if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
- data->ta = val;
+ data->timings.ta = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
- data->rhold = val;
+ data->timings.rhold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
- data->rstrobe = val;
+ data->timings.rstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
- data->rsetup = val;
+ data->timings.rsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
- data->whold = val;
+ data->timings.whold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
- data->wstrobe = val;
+ data->timings.wstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
- data->wsetup = val;
+ data->timings.wsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
if (val == 16)
data->asize = 1;
data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
- return 0;
+
+ return aemif_check_cs_timings(&data->timings);
}
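For reference, the nanosecond-to-cycle conversion that of_aemif_parse_abus_config() now performs up front can be sketched as follows. This is a minimal illustration only: it assumes clk_rate is in kHz, as the removed aemif_cycles_to_nsec() helper implies, and the function name is hypothetical.

/* Hypothetical sketch: invert aemif_cycles_to_nsec(), i.e.
 * ns = ((cycles + 1) * NSEC_PER_MSEC) / clk_rate_khz, rounding up
 * so the programmed timing is never shorter than requested. */
static int example_ns_to_cycles(u32 ns, unsigned long clk_rate_khz, int max)
{
	int cycles = DIV_ROUND_UP(ns * clk_rate_khz, NSEC_PER_MSEC) - 1;

	if (cycles < 0)
		cycles = 0;

	return cycles > max ? -EINVAL : cycles;
}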
static const struct of_device_id aemif_of_match[] = {
@@ -351,6 +390,7 @@ static int aemif_probe(struct platform_device *pdev)
if (IS_ERR(aemif->base))
return PTR_ERR(aemif->base);
+ mutex_init(&aemif->config_cs_lock);
if (np) {
/*
* For every controller device node, there is a cs device node
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index ae4e8b8e6eb7..043b9ec756ff 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -200,7 +200,7 @@ static int memstick_dummy_check(struct memstick_dev *card)
/**
* memstick_detect_change - schedule media detection on memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_detect_change(struct memstick_host *host)
{
@@ -210,13 +210,15 @@ EXPORT_SYMBOL(memstick_detect_change);
/**
* memstick_next_req - called by host driver to obtain next request to process
- * @host - host to use
- * @mrq - pointer to stick the request to
+ * @host: host to use
+ * @mrq: pointer to stick the request to
*
* Host calls this function from idle state (*mrq == NULL) or after finishing
* previous request (*mrq should point to it). If previous request was
- * unsuccessful, it is retried for predetermined number of times. Return value
- * of 0 means that new request was assigned to the host.
+ * unsuccessful, it is retried a predetermined number of times.
+ *
+ * Returns: 0 if a new request was assigned to the host, or a negative
+ * error code otherwise.
*/
int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq)
{
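As a hedged illustration of the calling convention documented above, a host driver's request-processing loop looks roughly like this (example_do_transfer() is a hypothetical helper):

static void example_host_process(struct memstick_host *msh)
{
	struct memstick_request *mrq = NULL;

	/* Start idle (*mrq == NULL); feed each finished request back in
	 * until a non-zero return signals the queue is drained. */
	while (!memstick_next_req(msh, &mrq))
		mrq->error = example_do_transfer(msh, mrq);
}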
@@ -242,7 +244,7 @@ EXPORT_SYMBOL(memstick_next_req);
/**
* memstick_new_req - notify the host that some requests are pending
- * @host - host to use
+ * @host: host to use
*/
void memstick_new_req(struct memstick_host *host)
{
@@ -256,9 +258,9 @@ EXPORT_SYMBOL(memstick_new_req);
/**
* memstick_init_req_sg - set request fields needed for bulk data transfer
- * @mrq - request to use
- * @tpc - memstick Transport Protocol Command
- * @sg - TPC argument
+ * @mrq: request to use
+ * @tpc: memstick Transport Protocol Command
+ * @sg: TPC argument
*/
void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
const struct scatterlist *sg)
@@ -281,10 +283,10 @@ EXPORT_SYMBOL(memstick_init_req_sg);
/**
* memstick_init_req - set request fields needed for short data transfer
- * @mrq - request to use
- * @tpc - memstick Transport Protocol Command
- * @buf - TPC argument buffer
- * @length - TPC argument size
+ * @mrq: request to use
+ * @tpc: memstick Transport Protocol Command
+ * @buf: TPC argument buffer
+ * @length: TPC argument size
*
* The intended use of this function (transfer of data items several bytes
* in size) allows us to just copy the value between request structure and
@@ -360,7 +362,9 @@ static int h_memstick_set_rw_addr(struct memstick_dev *card,
/**
* memstick_set_rw_addr - issue SET_RW_REG_ADDR request and wait for it to
* complete
- * @card - media device to use
+ * @card: media device to use
+ *
+ * Returns: error setting for the current request
*/
int memstick_set_rw_addr(struct memstick_dev *card)
{
@@ -487,6 +491,8 @@ out_power_off:
* memstick_alloc_host - allocate a memstick_host structure
* @extra: size of the user private data to allocate
* @dev: parent device of the host
+ *
+ * Returns: %NULL on failure or the allocated &memstick_host pointer on success
*/
struct memstick_host *memstick_alloc_host(unsigned int extra,
struct device *dev)
@@ -507,7 +513,9 @@ EXPORT_SYMBOL(memstick_alloc_host);
/**
* memstick_add_host - start request processing on memstick host
- * @host - host to use
+ * @host: host to use
+ *
+ * Returns: %0 on success or a negative error code on failure
*/
int memstick_add_host(struct memstick_host *host)
{
@@ -543,7 +551,7 @@ EXPORT_SYMBOL(memstick_add_host);
/**
* memstick_remove_host - stop request processing on memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_remove_host(struct memstick_host *host)
{
@@ -565,7 +573,7 @@ EXPORT_SYMBOL(memstick_remove_host);
/**
* memstick_free_host - free memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_free_host(struct memstick_host *host)
{
@@ -576,7 +584,7 @@ EXPORT_SYMBOL(memstick_free_host);
/**
* memstick_suspend_host - notify bus driver of host suspension
- * @host - host to use
+ * @host: host to use
*/
void memstick_suspend_host(struct memstick_host *host)
{
@@ -588,7 +596,7 @@ EXPORT_SYMBOL(memstick_suspend_host);
/**
* memstick_resume_host - notify bus driver of host resumption
- * @host - host to use
+ * @host: host to use
*/
void memstick_resume_host(struct memstick_host *host)
{
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 20a2466bec23..5b617c1f6789 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2094,8 +2094,7 @@ static int msb_init_disk(struct memstick_dev *card)
if (msb->disk_id < 0)
return msb->disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 13b317c56069..634d343b6bdb 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1139,8 +1139,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (disk_id < 0)
return disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index ee61b70aa677..8f587c0efd9d 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -96,7 +96,7 @@ static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
static int mptfc_target_alloc(struct scsi_target *starget);
-static int mptfc_slave_alloc(struct scsi_device *sdev);
+static int mptfc_sdev_init(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
@@ -113,10 +113,10 @@ static const struct scsi_host_template mptfc_driver_template = {
.info = mptscsih_info,
.queuecommand = mptfc_qcmd,
.target_alloc = mptfc_target_alloc,
- .slave_alloc = mptfc_slave_alloc,
- .slave_configure = mptscsih_slave_configure,
+ .sdev_init = mptfc_sdev_init,
+ .sdev_configure = mptscsih_sdev_configure,
.target_destroy = mptfc_target_destroy,
- .slave_destroy = mptscsih_slave_destroy,
+ .sdev_destroy = mptscsih_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = mptfc_abort,
@@ -503,7 +503,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
/*
* if already mapped, remap here. If not mapped,
* target_alloc will allocate vtarget and map,
- * slave_alloc will fill in vdevice from vtarget.
+ * sdev_init will fill in vdevice from vtarget.
*/
if (ri->starget) {
vtarget = ri->starget->hostdata;
@@ -631,7 +631,7 @@ mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device
* Init memory once per LUN.
*/
static int
-mptfc_slave_alloc(struct scsi_device *sdev)
+mptfc_sdev_init(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd;
VirtTarget *vtarget;
@@ -651,7 +651,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kmalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d0549a4daf76..185c08eab4ca 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1710,7 +1710,7 @@ mptsas_firmware_event_work(struct work_struct *work)
static int
-mptsas_slave_configure(struct scsi_device *sdev)
+mptsas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -1736,7 +1736,7 @@ mptsas_slave_configure(struct scsi_device *sdev)
mptsas_add_device_component_starget(ioc, scsi_target(sdev));
out:
- return mptscsih_slave_configure(sdev);
+ return mptscsih_sdev_configure(sdev, lim);
}
static int
@@ -1867,7 +1867,7 @@ mptsas_target_destroy(struct scsi_target *starget)
static int
-mptsas_slave_alloc(struct scsi_device *sdev)
+mptsas_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -1880,7 +1880,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kzalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
@@ -2005,10 +2005,10 @@ static const struct scsi_host_template mptsas_driver_template = {
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
- .slave_alloc = mptsas_slave_alloc,
- .slave_configure = mptsas_slave_configure,
+ .sdev_init = mptsas_sdev_init,
+ .sdev_configure = mptsas_sdev_configure,
.target_destroy = mptsas_target_destroy,
- .slave_destroy = mptscsih_slave_destroy,
+ .sdev_destroy = mptscsih_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = mptsas_eh_timed_out,
.eh_abort_handler = mptscsih_abort,
@@ -2834,10 +2834,10 @@ struct rep_manu_reply{
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c3f25cc33ff..a9604ba3c805 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1071,7 +1071,7 @@ EXPORT_SYMBOL(mptscsih_flush_running_cmds);
*
* Returns: None.
*
- * Called from slave_destroy.
+ * Called from sdev_destroy.
*/
static void
mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
@@ -2331,7 +2331,7 @@ EXPORT_SYMBOL(mptscsih_raid_id_to_num);
* Called if no device present or device being unloaded
*/
void
-mptscsih_slave_destroy(struct scsi_device *sdev)
+mptscsih_sdev_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -2399,7 +2399,7 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* Return non-zero if fails.
*/
int
-mptscsih_slave_configure(struct scsi_device *sdev)
+mptscsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *sh = sdev->host;
VirtTarget *vtarget;
@@ -3302,8 +3302,8 @@ EXPORT_SYMBOL(mptscsih_resume);
EXPORT_SYMBOL(mptscsih_show_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
-EXPORT_SYMBOL(mptscsih_slave_destroy);
-EXPORT_SYMBOL(mptscsih_slave_configure);
+EXPORT_SYMBOL(mptscsih_sdev_destroy);
+EXPORT_SYMBOL(mptscsih_sdev_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
EXPORT_SYMBOL(mptscsih_target_reset);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index e3d92c392673..ece451c575e1 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -116,8 +116,9 @@ extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
u8 id, u64 lun, int ctx2abort, ulong timeout);
-extern void mptscsih_slave_destroy(struct scsi_device *device);
-extern int mptscsih_slave_configure(struct scsi_device *device);
+extern void mptscsih_sdev_destroy(struct scsi_device *device);
+extern int mptscsih_sdev_configure(struct scsi_device *device,
+ struct queue_limits *lim);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_target_reset(struct scsi_cmnd * SCpnt);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 4184d0c70ac3..a3901fbfac4f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -713,7 +713,7 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
mptspi_read_parameters(sdev->sdev_target);
}
-static int mptspi_slave_alloc(struct scsi_device *sdev)
+static int mptspi_sdev_init(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget;
@@ -727,7 +727,7 @@ static int mptspi_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kmalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
@@ -746,7 +746,8 @@ static int mptspi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int mptspi_slave_configure(struct scsi_device *sdev)
+static int mptspi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
@@ -754,7 +755,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
mptspi_initTarget(hd, vtarget, sdev);
- ret = mptscsih_slave_configure(sdev);
+ ret = mptscsih_sdev_configure(sdev, lim);
if (ret)
return ret;
@@ -799,7 +800,7 @@ mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
return mptscsih_qcmd(SCpnt);
}
-static void mptspi_slave_destroy(struct scsi_device *sdev)
+static void mptspi_sdev_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
@@ -817,7 +818,7 @@ static void mptspi_slave_destroy(struct scsi_device *sdev)
mptspi_write_spi_device_pg1(starget, &pg1);
}
- mptscsih_slave_destroy(sdev);
+ mptscsih_sdev_destroy(sdev);
}
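The converted ->sdev_configure() callback receives a queue_limits pointer that can be adjusted before the device queue is set up; a minimal hedged sketch (the limit value is purely illustrative):

static int example_sdev_configure(struct scsi_device *sdev,
				  struct queue_limits *lim)
{
	/* Tune per-device limits here instead of poking the queue later. */
	lim->max_hw_sectors = 128;

	return 0;	/* 0 keeps the remaining defaults */
}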
static const struct scsi_host_template mptspi_driver_template = {
@@ -828,10 +829,10 @@ static const struct scsi_host_template mptspi_driver_template = {
.info = mptscsih_info,
.queuecommand = mptspi_qcmd,
.target_alloc = mptspi_target_alloc,
- .slave_alloc = mptspi_slave_alloc,
- .slave_configure = mptspi_slave_configure,
+ .sdev_init = mptspi_sdev_init,
+ .sdev_configure = mptspi_sdev_configure,
.target_destroy = mptspi_target_destroy,
- .slave_destroy = mptspi_slave_destroy,
+ .sdev_destroy = mptspi_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ae23b317a64e..6b0682af6e32 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2386,6 +2386,19 @@ config MFD_INTEL_M10_BMC_PMCI
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_QNAP_MCU
+ tristate "QNAP microcontroller unit core driver"
+ depends on SERIAL_DEV_BUS
+ select MFD_CORE
+ help
+ Select this to get support for the QNAP MCU device found in
+ several QNAP network attached storage products. It implements
+ additional functionality for the device, like fan and LED
+ control.
+
+ This driver implements the base serial protocol to talk to the
+ device and provides functions for the other parts to hook into.
+
config MFD_RSMU_I2C
tristate "Renesas Synchronization Management Unit with I2C"
depends on I2C && OF
@@ -2414,5 +2427,17 @@ config MFD_RSMU_SPI
Additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_UPBOARD_FPGA
+ tristate "Support for the AAeon UP board FPGA"
+ depends on (X86 && ACPI)
+ select MFD_CORE
+ help
+ Select this option to enable the AAEON UP and UP^2 onboard FPGA.
+ This is the core driver for this FPGA, which provides a pin
+ controller and an LED controller.
+
+ To compile this driver as a module, choose M here: the module will be
+ called upboard-fpga.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index e057d6d6faef..9220eaf7cf12 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -288,5 +288,9 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
+obj-$(CONFIG_MFD_QNAP_MCU) += qnap-mcu.o
+
obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o
obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o
+
+obj-$(CONFIG_MFD_UPBOARD_FPGA) += upboard-fpga.o
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 251465a656d0..cff56deba24f 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1445,7 +1445,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
}
}
- ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_AUTO, axp20x->cells,
+ ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_NONE, axp20x->cells,
axp20x->nr_cells, NULL, 0, NULL);
if (ret) {
@@ -1455,10 +1455,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
}
if (axp20x->variant != AXP288_ID)
- devm_register_sys_off_handler(axp20x->dev,
- SYS_OFF_MODE_POWER_OFF,
- SYS_OFF_PRIO_DEFAULT,
- axp20x_power_off, axp20x);
+ devm_register_power_off_handler(axp20x->dev, axp20x_power_off, axp20x);
dev_info(axp20x->dev, "AXP20X driver loaded\n");
diff --git a/drivers/mfd/cs42l43-i2c.c b/drivers/mfd/cs42l43-i2c.c
index f0ad4002652d..a2ab001a600a 100644
--- a/drivers/mfd/cs42l43-i2c.c
+++ b/drivers/mfd/cs42l43-i2c.c
@@ -56,13 +56,6 @@ static int cs42l43_i2c_probe(struct i2c_client *i2c)
return cs42l43_dev_probe(cs42l43);
}
-static void cs42l43_i2c_remove(struct i2c_client *i2c)
-{
- struct cs42l43 *cs42l43 = dev_get_drvdata(&i2c->dev);
-
- cs42l43_dev_remove(cs42l43);
-}
-
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id cs42l43_of_match[] = {
{ .compatible = "cirrus,cs42l43", },
@@ -88,7 +81,6 @@ static struct i2c_driver cs42l43_i2c_driver = {
},
.probe = cs42l43_i2c_probe,
- .remove = cs42l43_i2c_remove,
};
module_i2c_driver(cs42l43_i2c_driver);
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 3938d48039c4..023f7e1a30f8 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -187,15 +187,6 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
return cs42l43_dev_probe(cs42l43);
}
-static int cs42l43_sdw_remove(struct sdw_slave *sdw)
-{
- struct cs42l43 *cs42l43 = dev_get_drvdata(&sdw->dev);
-
- cs42l43_dev_remove(cs42l43);
-
- return 0;
-}
-
static const struct sdw_device_id cs42l43_sdw_id[] = {
SDW_SLAVE_ENTRY(0x01FA, 0x4243, 0),
{}
@@ -209,7 +200,6 @@ static struct sdw_driver cs42l43_sdw_driver = {
},
.probe = cs42l43_sdw_probe,
- .remove = cs42l43_sdw_remove,
.id_table = cs42l43_sdw_id,
.ops = &cs42l43_sdw_ops,
};
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index b5ab5e613db7..103787f37443 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -29,7 +29,7 @@
#define CS42L43_RESET_DELAY_MS 20
-#define CS42L43_SDW_ATTACH_TIMEOUT_MS 500
+#define CS42L43_SDW_ATTACH_TIMEOUT_MS 5000
#define CS42L43_SDW_DETACH_TIMEOUT_MS 100
#define CS42L43_MCU_BOOT_STAGE1 1
@@ -48,6 +48,7 @@
#define CS42L43_MCU_SUPPORTED_REV 0x2105
#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
+#define CS42L43_BIOS_SHADOW_REGS_REQUIRED_REV 0x1002
#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
#define CS42L43_VDDP_DELAY_US 50
@@ -773,7 +774,8 @@ static int cs42l43_mcu_update_step(struct cs42l43 *cs42l43)
* Later versions of the firmware require the driver to access some
* features through a set of shadow registers.
*/
- shadow = mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV;
+ shadow = (mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV) ||
+ (bios_rev >= CS42L43_BIOS_SHADOW_REGS_REQUIRED_REV);
ret = regmap_read(cs42l43->regmap, CS42L43_BOOT_CONTROL, &secure_cfg);
if (ret) {
@@ -982,7 +984,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
/* vdd-p must be on for 50uS before any other supply */
usleep_range(CS42L43_VDDP_DELAY_US, 2 * CS42L43_VDDP_DELAY_US);
- gpiod_set_value_cansleep(cs42l43->reset, 1);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 1);
ret = regulator_bulk_enable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
if (ret) {
@@ -1003,7 +1005,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
err_core_supplies:
regulator_bulk_disable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
err_reset:
- gpiod_set_value_cansleep(cs42l43->reset, 0);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
regulator_disable(cs42l43->vdd_p);
return ret;
@@ -1025,7 +1027,7 @@ static int cs42l43_power_down(struct cs42l43 *cs42l43)
return ret;
}
- gpiod_set_value_cansleep(cs42l43->reset, 0);
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
ret = regulator_disable(cs42l43->vdd_p);
if (ret) {
@@ -1036,6 +1038,15 @@ static int cs42l43_power_down(struct cs42l43 *cs42l43)
return 0;
}
+static void cs42l43_dev_remove(void *data)
+{
+ struct cs42l43 *cs42l43 = data;
+
+ cancel_work_sync(&cs42l43->boot_work);
+
+ cs42l43_power_down(cs42l43);
+}
+
int cs42l43_dev_probe(struct cs42l43 *cs42l43)
{
int i, ret;
@@ -1050,11 +1061,13 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
regcache_cache_only(cs42l43->regmap, true);
- cs42l43->reset = devm_gpiod_get_optional(cs42l43->dev, "reset", GPIOD_OUT_LOW);
+ cs42l43->reset = devm_gpiod_get_optional(cs42l43->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(cs42l43->reset))
return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->reset),
"Failed to get reset\n");
+ gpiod_set_raw_value_cansleep(cs42l43->reset, 0);
+
cs42l43->vdd_p = devm_regulator_get(cs42l43->dev, "vdd-p");
if (IS_ERR(cs42l43->vdd_p))
return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->vdd_p),
@@ -1080,6 +1093,10 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(cs42l43->dev, cs42l43_dev_remove, cs42l43);
+ if (ret)
+ return ret;
+
pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME_MS);
pm_runtime_use_autosuspend(cs42l43->dev);
pm_runtime_set_active(cs42l43->dev);
@@ -1098,14 +1115,6 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
}
EXPORT_SYMBOL_NS_GPL(cs42l43_dev_probe, "MFD_CS42L43");
-void cs42l43_dev_remove(struct cs42l43 *cs42l43)
-{
- cancel_work_sync(&cs42l43->boot_work);
-
- cs42l43_power_down(cs42l43);
-}
-EXPORT_SYMBOL_NS_GPL(cs42l43_dev_remove, "MFD_CS42L43");
-
static int cs42l43_suspend(struct device *dev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
diff --git a/drivers/mfd/cs42l43.h b/drivers/mfd/cs42l43.h
index 8d1b1b0f5a47..f3da783930f5 100644
--- a/drivers/mfd/cs42l43.h
+++ b/drivers/mfd/cs42l43.h
@@ -25,6 +25,5 @@ bool cs42l43_precious_register(struct device *dev, unsigned int reg);
bool cs42l43_volatile_register(struct device *dev, unsigned int reg);
int cs42l43_dev_probe(struct cs42l43 *cs42l43);
-void cs42l43_dev_remove(struct cs42l43 *cs42l43);
#endif /* CS42L43_CORE_INT_H */
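The removal of cs42l43_dev_remove() from the public header works because cleanup is now a devm action registered in probe. The underlying pattern, in outline (a generic sketch, not cs42l43-specific code):

static void example_teardown(void *data)
{
	struct example_chip *chip = data;

	example_power_down(chip);
}

static int example_probe(struct device *dev, struct example_chip *chip)
{
	int ret = example_power_up(chip);

	if (ret)
		return ret;

	/* Runs automatically on probe failure and on unbind. */
	return devm_add_action_or_reset(dev, example_teardown, chip);
}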
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index dc85801b9fa0..b06cd518413b 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -585,6 +585,7 @@ static int da9052_clear_fault_log(struct da9052 *da9052)
"Cannot reset FAULT_LOG values %d\n", ret);
}
+ da9052->fault_log = fault_log;
return ret;
}
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 992855bfda3e..8582ae65a802 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -81,7 +81,7 @@ static struct mfd_cell chtdc_ti_dev[] = {
static const struct regmap_config chtdc_ti_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 128,
+ .max_register = 0xff,
.cache_type = REGCACHE_NONE,
};
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index f14901660147..4b7d0cb9340f 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -834,8 +834,9 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
- { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
{ PCI_VDEVICE(INTEL, 0x2b9c), LPC_COUGARMOUNTAIN},
+ { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
+ { PCI_VDEVICE(INTEL, 0x31e8), LPC_GLK},
{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
diff --git a/drivers/mfd/qnap-mcu.c b/drivers/mfd/qnap-mcu.c
new file mode 100644
index 000000000000..4be39d8b2905
--- /dev/null
+++ b/drivers/mfd/qnap-mcu.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Core driver for the microcontroller unit in QNAP NAS devices that is
+ * connected via a dedicated UART port.
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/export.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+/* The longest command found so far is 5 bytes long */
+#define QNAP_MCU_MAX_CMD_SIZE 5
+#define QNAP_MCU_MAX_DATA_SIZE 36
+#define QNAP_MCU_CHECKSUM_SIZE 1
+
+#define QNAP_MCU_RX_BUFFER_SIZE \
+ (QNAP_MCU_MAX_DATA_SIZE + QNAP_MCU_CHECKSUM_SIZE)
+
+#define QNAP_MCU_TX_BUFFER_SIZE \
+ (QNAP_MCU_MAX_CMD_SIZE + QNAP_MCU_CHECKSUM_SIZE)
+
+#define QNAP_MCU_ACK_LEN 2
+#define QNAP_MCU_VERSION_LEN 4
+
+#define QNAP_MCU_TIMEOUT_MS 500
+
+/**
+ * struct qnap_mcu_reply - Reply to a command
+ *
+ * @data: Buffer to store reply payload in
+ * @length: Expected reply length, including the checksum
+ * @received: Received number of bytes, so far
+ * @done: Triggered when the entire reply has been received
+ */
+struct qnap_mcu_reply {
+ u8 *data;
+ size_t length;
+ size_t received;
+ struct completion done;
+};
+
+/**
+ * struct qnap_mcu - QNAP NAS embedded controller
+ *
+ * @serdev: Pointer to underlying serdev
+ * @bus_lock: Lock to serialize access to the device
+ * @reply: Reply data structure
+ * @variant: Device variant specific information
+ * @version: MCU firmware version
+ */
+struct qnap_mcu {
+ struct serdev_device *serdev;
+ struct mutex bus_lock;
+ struct qnap_mcu_reply reply;
+ const struct qnap_mcu_variant *variant;
+ u8 version[QNAP_MCU_VERSION_LEN];
+};
+
+/*
+ * The QNAP-MCU uses a basic XOR checksum.
+ * It is always the last byte and XORs the whole previous message.
+ */
+static u8 qnap_mcu_csum(const u8 *buf, size_t size)
+{
+ u8 csum = 0;
+
+ while (size--)
+ csum ^= *buf++;
+
+ return csum;
+}
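As a worked example of the scheme described above: the version command used later in this file is the two bytes { '%', 'V' }, so the transmitted frame is { 0x25, 0x56, 0x25 ^ 0x56 } = { 0x25, 0x56, 0x73 }.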
+
+static int qnap_mcu_write(struct qnap_mcu *mcu, const u8 *data, u8 data_size)
+{
+ unsigned char tx[QNAP_MCU_TX_BUFFER_SIZE];
+ size_t length = data_size + QNAP_MCU_CHECKSUM_SIZE;
+
+ if (length > sizeof(tx)) {
+ dev_err(&mcu->serdev->dev, "data too big for transmit buffer");
+ return -EINVAL;
+ }
+
+ memcpy(tx, data, data_size);
+ tx[data_size] = qnap_mcu_csum(data, data_size);
+
+ serdev_device_write_flush(mcu->serdev);
+
+ return serdev_device_write(mcu->serdev, tx, length, HZ);
+}
+
+static size_t qnap_mcu_receive_buf(struct serdev_device *serdev, const u8 *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct qnap_mcu *mcu = dev_get_drvdata(dev);
+ struct qnap_mcu_reply *reply = &mcu->reply;
+ const u8 *src = buf;
+ const u8 *end = buf + size;
+
+ if (!reply->length) {
+ dev_warn(dev, "Received %zu bytes, we were not waiting for\n", size);
+ return size;
+ }
+
+ while (src < end) {
+ reply->data[reply->received] = *src++;
+ reply->received++;
+
+ if (reply->received == reply->length) {
+ /* We don't expect any characters from the device now */
+ reply->length = 0;
+
+ complete(&reply->done);
+
+ /*
+ * We report the consumed number of bytes. If there
+ * are still bytes remaining (though there shouldn't be),
+ * the serdev layer will re-execute this handler with
+ * the remainder of the Rx bytes.
+ */
+ return src - buf;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+}
+
+static const struct serdev_device_ops qnap_mcu_serdev_device_ops = {
+ .receive_buf = qnap_mcu_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+int qnap_mcu_exec(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size,
+ u8 *reply_data, size_t reply_data_size)
+{
+ unsigned char rx[QNAP_MCU_RX_BUFFER_SIZE];
+ size_t length = reply_data_size + QNAP_MCU_CHECKSUM_SIZE;
+ struct qnap_mcu_reply *reply = &mcu->reply;
+ int ret = 0;
+
+ if (length > sizeof(rx)) {
+ dev_err(&mcu->serdev->dev, "expected data too big for receive buffer");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mcu->bus_lock);
+
+ reply->data = rx;
+ reply->length = length;
+ reply->received = 0;
+ reinit_completion(&reply->done);
+
+ qnap_mcu_write(mcu, cmd_data, cmd_data_size);
+
+ serdev_device_wait_until_sent(mcu->serdev, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS));
+
+ if (!wait_for_completion_timeout(&reply->done, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS))) {
+ dev_err(&mcu->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+ } else {
+ u8 crc = qnap_mcu_csum(rx, reply_data_size);
+
+ if (crc != rx[reply_data_size]) {
+ dev_err(&mcu->serdev->dev,
+ "Invalid Checksum received\n");
+ ret = -EIO;
+ } else {
+ memcpy(reply_data, rx, reply_data_size);
+ }
+ }
+
+ mutex_unlock(&mcu->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qnap_mcu_exec);
+
+int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size)
+{
+ u8 ack[QNAP_MCU_ACK_LEN];
+ int ret;
+
+ ret = qnap_mcu_exec(mcu, cmd_data, cmd_data_size, ack, sizeof(ack));
+ if (ret)
+ return ret;
+
+ /* Should return @0 */
+ if (ack[0] != '@' || ack[1] != '0') {
+ dev_err(&mcu->serdev->dev, "Did not receive ack\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qnap_mcu_exec_with_ack);
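A cell driver sitting on top of this core would then issue commands roughly as follows; this is a hedged sketch and the command bytes are invented for illustration:

static int example_set_feature(struct qnap_mcu *mcu, u8 arg)
{
	const u8 cmd[] = { '@', 'X', arg };	/* hypothetical command */

	/* Plain commands are answered with the "@0" ack checked above. */
	return qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
}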
+
+static int qnap_mcu_get_version(struct qnap_mcu *mcu)
+{
+ const u8 cmd[] = { '%', 'V' };
+ u8 rx[14];
+ int ret;
+
+ /* Reply is the 2 command-bytes + 4 bytes describing the version */
+ ret = qnap_mcu_exec(mcu, cmd, sizeof(cmd), rx, QNAP_MCU_VERSION_LEN + 2);
+ if (ret)
+ return ret;
+
+ memcpy(mcu->version, &rx[2], QNAP_MCU_VERSION_LEN);
+
+ return 0;
+}
+
+/*
+ * The MCU controls power to the peripherals but not the CPU.
+ *
+ * So using the PMIC to power off the system keeps the MCU and hard-drives
+ * running. This also then prevents the system from turning back on until
+ * the MCU is turned off by unplugging the power cable.
+ * Turning off the MCU alone on the other hand turns off the hard drives,
+ * LEDs, etc., while the main SoC stays running - including its network ports.
+ */
+static int qnap_mcu_power_off(struct sys_off_data *data)
+{
+ const u8 cmd[] = { '@', 'C', '0' };
+ struct qnap_mcu *mcu = data->cb_data;
+ int ret;
+
+ ret = qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
+ if (ret) {
+ dev_err(&mcu->serdev->dev, "MCU poweroff failed %d\n", ret);
+ return NOTIFY_STOP;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const struct qnap_mcu_variant qnap_ts433_mcu = {
+ .baud_rate = 115200,
+ .num_drives = 4,
+ .fan_pwm_min = 51, /* Specified in original model.conf */
+ .fan_pwm_max = 255,
+ .usb_led = true,
+};
+
+static struct mfd_cell qnap_mcu_cells[] = {
+ { .name = "qnap-mcu-input", },
+ { .name = "qnap-mcu-leds", },
+ { .name = "qnap-mcu-hwmon", }
+};
+
+static int qnap_mcu_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct qnap_mcu *mcu;
+ int ret;
+
+ mcu = devm_kzalloc(dev, sizeof(*mcu), GFP_KERNEL);
+ if (!mcu)
+ return -ENOMEM;
+
+ mcu->serdev = serdev;
+ dev_set_drvdata(dev, mcu);
+
+ mcu->variant = of_device_get_match_data(dev);
+ if (!mcu->variant)
+ return -ENODEV;
+
+ mutex_init(&mcu->bus_lock);
+ init_completion(&mcu->reply.done);
+
+ serdev_device_set_client_ops(serdev, &qnap_mcu_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, mcu->variant->baud_rate);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set parity\n");
+
+ ret = qnap_mcu_get_version(mcu);
+ if (ret)
+ return ret;
+
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ &qnap_mcu_power_off, mcu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register poweroff handler\n");
+
+ for (int i = 0; i < ARRAY_SIZE(qnap_mcu_cells); i++) {
+ qnap_mcu_cells[i].platform_data = mcu->variant;
+ qnap_mcu_cells[i].pdata_size = sizeof(*mcu->variant);
+ }
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, qnap_mcu_cells,
+ ARRAY_SIZE(qnap_mcu_cells), NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add child devices\n");
+
+ return 0;
+}
+
+static const struct of_device_id qnap_mcu_dt_ids[] = {
+ { .compatible = "qnap,ts433-mcu", .data = &qnap_ts433_mcu },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qnap_mcu_dt_ids);
+
+static struct serdev_device_driver qnap_mcu_drv = {
+ .probe = qnap_mcu_probe,
+ .driver = {
+ .name = "qnap-mcu",
+ .of_match_table = qnap_mcu_dt_ids,
+ },
+};
+module_serdev_device_driver(qnap_mcu_drv);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index d8a603d95aa6..081827bc0596 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -170,11 +170,7 @@ static int stpmic1_probe(struct i2c_client *i2c)
return ret;
}
- ret = devm_register_sys_off_handler(ddata->dev,
- SYS_OFF_MODE_POWER_OFF,
- SYS_OFF_PRIO_DEFAULT,
- stpmic1_power_off,
- ddata);
+ ret = devm_register_power_off_handler(ddata->dev, stpmic1_power_off, ddata);
if (ret) {
dev_err(ddata->dev, "failed to register sys-off handler: %d\n", ret);
return ret;
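Both this and the axp20x change above use the same shorthand; devm_register_power_off_handler(dev, cb, cb_data) is, as far as this reading goes, equivalent to the open-coded call it replaces:

	devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
				      SYS_OFF_PRIO_DEFAULT, cb, cb_data);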
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 3e1d699ba934..aa4a9940b569 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -12,22 +12,16 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
-#include <linux/io.h>
-#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/syscon.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
-static struct platform_driver syscon_driver;
-
-static DEFINE_SPINLOCK(syscon_list_slock);
+static DEFINE_MUTEX(syscon_list_lock);
static LIST_HEAD(syscon_list);
struct syscon {
@@ -54,6 +48,8 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct resource res;
struct reset_control *reset;
+ WARN_ON(!mutex_is_locked(&syscon_list_lock));
+
struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
if (!syscon)
return ERR_PTR(-ENOMEM);
@@ -146,9 +142,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
syscon->regmap = regmap;
syscon->np = np;
- spin_lock(&syscon_list_slock);
list_add_tail(&syscon->list, &syscon_list);
- spin_unlock(&syscon_list_slock);
return_ptr(syscon);
@@ -165,11 +159,12 @@ err_regmap:
}
static struct regmap *device_node_get_regmap(struct device_node *np,
+ bool create_regmap,
bool check_res)
{
struct syscon *entry, *syscon = NULL;
- spin_lock(&syscon_list_slock);
+ mutex_lock(&syscon_list_lock);
list_for_each_entry(entry, &syscon_list, list)
if (entry->np == np) {
@@ -177,10 +172,13 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
break;
}
- spin_unlock(&syscon_list_slock);
-
- if (!syscon)
- syscon = of_syscon_register(np, check_res);
+ if (!syscon) {
+ if (create_regmap)
+ syscon = of_syscon_register(np, check_res);
+ else
+ syscon = ERR_PTR(-EINVAL);
+ }
+ mutex_unlock(&syscon_list_lock);
if (IS_ERR(syscon))
return ERR_CAST(syscon);
@@ -212,7 +210,7 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
return -ENOMEM;
/* check if syscon entry already exists */
- spin_lock(&syscon_list_slock);
+ mutex_lock(&syscon_list_lock);
list_for_each_entry(entry, &syscon_list, list)
if (entry->np == np) {
@@ -225,29 +223,48 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
/* register the regmap in syscon list */
list_add_tail(&syscon->list, &syscon_list);
- spin_unlock(&syscon_list_slock);
+ mutex_unlock(&syscon_list_lock);
return 0;
err_unlock:
- spin_unlock(&syscon_list_slock);
+ mutex_unlock(&syscon_list_lock);
kfree(syscon);
return ret;
}
EXPORT_SYMBOL_GPL(of_syscon_register_regmap);
+/**
+ * device_node_to_regmap() - Get or create a regmap for specified device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated. This function should not be used if the
+ * device node has a custom regmap driver or has resources (clocks, resets) to
+ * be managed. Use syscon_node_to_regmap() instead for those cases.
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *device_node_to_regmap(struct device_node *np)
{
- return device_node_get_regmap(np, false);
+ return device_node_get_regmap(np, true, false);
}
EXPORT_SYMBOL_GPL(device_node_to_regmap);
+/**
+ * syscon_node_to_regmap() - Get or create a regmap for specified syscon device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated if the node is a generic "syscon". This
+ * function is safe to use for a syscon registered with
+ * of_syscon_register_regmap().
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *syscon_node_to_regmap(struct device_node *np)
{
- if (!of_device_is_compatible(np, "syscon"))
- return ERR_PTR(-EINVAL);
-
- return device_node_get_regmap(np, true);
+ return device_node_get_regmap(np, of_device_is_compatible(np, "syscon"), true);
}
EXPORT_SYMBOL_GPL(syscon_node_to_regmap);
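A typical consumer of the API documented above looks roughly like this (hedged sketch; the register offset is illustrative):

static int example_use_syscon(struct device_node *np)
{
	struct regmap *map = syscon_node_to_regmap(np);	/* get-or-create */
	u32 val;

	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_read(map, 0x0, &val);
}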
@@ -336,62 +353,3 @@ struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
return regmap;
}
EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_optional);
-
-static int syscon_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct syscon_platform_data *pdata = dev_get_platdata(dev);
- struct syscon *syscon;
- struct regmap_config syscon_config = syscon_regmap_config;
- struct resource *res;
- void __iomem *base;
-
- syscon = devm_kzalloc(dev, sizeof(*syscon), GFP_KERNEL);
- if (!syscon)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- base = devm_ioremap(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
-
- syscon_config.max_register = resource_size(res) - 4;
- if (!syscon_config.max_register)
- syscon_config.max_register_is_0 = true;
-
- if (pdata)
- syscon_config.name = pdata->label;
- syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
- if (IS_ERR(syscon->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(syscon->regmap);
- }
-
- platform_set_drvdata(pdev, syscon);
-
- dev_dbg(dev, "regmap %pR registered\n", res);
-
- return 0;
-}
-
-static const struct platform_device_id syscon_ids[] = {
- { "syscon", },
- { }
-};
-
-static struct platform_driver syscon_driver = {
- .driver = {
- .name = "syscon",
- },
- .probe = syscon_probe,
- .id_table = syscon_ids,
-};
-
-static int __init syscon_init(void)
-{
- return platform_driver_register(&syscon_driver);
-}
-postcore_initcall(syscon_init);
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 57ff5cb294a6..081c5a30b04a 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -110,19 +110,12 @@ static const struct resource tps65219_regulator_resources[] = {
};
static const struct mfd_cell tps65219_cells[] = {
- {
- .name = "tps65219-regulator",
- .resources = tps65219_regulator_resources,
- .num_resources = ARRAY_SIZE(tps65219_regulator_resources),
- },
- { .name = "tps65219-gpio", },
+ MFD_CELL_RES("tps65219-regulator", tps65219_regulator_resources),
+ MFD_CELL_NAME("tps65219-gpio"),
};
-static const struct mfd_cell tps65219_pwrbutton_cell = {
- .name = "tps65219-pwrbutton",
- .resources = tps65219_pwrbutton_resources,
- .num_resources = ARRAY_SIZE(tps65219_pwrbutton_resources),
-};
+static const struct mfd_cell tps65219_pwrbutton_cell =
+ MFD_CELL_RES("tps65219-pwrbutton", tps65219_pwrbutton_resources);
static const struct regmap_config tps65219_regmap_config = {
.reg_bits = 8,
diff --git a/drivers/mfd/upboard-fpga.c b/drivers/mfd/upboard-fpga.c
new file mode 100644
index 000000000000..5a330e2f2229
--- /dev/null
+++ b/drivers/mfd/upboard-fpga.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UP Board FPGA driver.
+ *
+ * The FPGA provides extra GPIO drive strength, LEDs and a pin mux function.
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+
+#define UPBOARD_AAEON_MANUFACTURER_ID 0x01
+#define UPBOARD_MANUFACTURER_ID_MASK GENMASK(7, 0)
+
+#define UPBOARD_ADDRESS_SIZE 7
+#define UPBOARD_REGISTER_SIZE 16
+
+#define UPBOARD_READ_FLAG BIT(UPBOARD_ADDRESS_SIZE)
+
+#define UPBOARD_FW_ID_MAJOR_SUPPORTED 0x0
+
+#define UPBOARD_FW_ID_BUILD_MASK GENMASK(15, 12)
+#define UPBOARD_FW_ID_MAJOR_MASK GENMASK(11, 8)
+#define UPBOARD_FW_ID_MINOR_MASK GENMASK(7, 4)
+#define UPBOARD_FW_ID_PATCH_MASK GENMASK(3, 0)
+
+static int upboard_fpga_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct upboard_fpga *fpga = context;
+ int i;
+
+ /* Clear to start new transaction */
+ gpiod_set_value(fpga->clear_gpio, 0);
+ gpiod_set_value(fpga->clear_gpio, 1);
+
+ reg |= UPBOARD_READ_FLAG;
+
+ /* Send clock and addr from strobe & datain pins */
+ for (i = UPBOARD_ADDRESS_SIZE; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ gpiod_set_value(fpga->datain_gpio, !!(reg & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ *val = 0;
+
+ /* Read data from dataout pin */
+ for (i = UPBOARD_REGISTER_SIZE - 1; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ *val |= gpiod_get_value(fpga->dataout_gpio) << i;
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 1);
+
+ return 0;
+}
+
+static int upboard_fpga_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct upboard_fpga *fpga = context;
+ int i;
+
+ /* Clear to start new transaction */
+ gpiod_set_value(fpga->clear_gpio, 0);
+ gpiod_set_value(fpga->clear_gpio, 1);
+
+ /* Send clock and addr from strobe & datain pins */
+ for (i = UPBOARD_ADDRESS_SIZE; i >= 0; i--) {
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ gpiod_set_value(fpga->datain_gpio, !!(reg & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 0);
+
+ /* Write data to datain pin */
+ for (i = UPBOARD_REGISTER_SIZE - 1; i >= 0; i--) {
+ gpiod_set_value(fpga->datain_gpio, !!(val & BIT(i)));
+ gpiod_set_value(fpga->strobe_gpio, 1);
+ gpiod_set_value(fpga->strobe_gpio, 0);
+ }
+
+ gpiod_set_value(fpga->strobe_gpio, 1);
+
+ return 0;
+}
+
+static const struct regmap_range upboard_up_readable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_PLATFORM_ID, UPBOARD_REG_FIRMWARE_ID),
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN0),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR1),
+};
+
+static const struct regmap_range upboard_up_writable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN0),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR1),
+};
+
+static const struct regmap_access_table upboard_up_readable_table = {
+ .yes_ranges = upboard_up_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up_readable_ranges),
+};
+
+static const struct regmap_access_table upboard_up_writable_table = {
+ .yes_ranges = upboard_up_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up_writable_ranges),
+};
+
+static const struct regmap_config upboard_up_regmap_config = {
+ .reg_bits = UPBOARD_ADDRESS_SIZE,
+ .val_bits = UPBOARD_REGISTER_SIZE,
+ .max_register = UPBOARD_REG_MAX,
+ .reg_read = upboard_fpga_read,
+ .reg_write = upboard_fpga_write,
+ .fast_io = false,
+ .cache_type = REGCACHE_NONE,
+ .rd_table = &upboard_up_readable_table,
+ .wr_table = &upboard_up_writable_table,
+};
+
+static const struct regmap_range upboard_up2_readable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_PLATFORM_ID, UPBOARD_REG_FIRMWARE_ID),
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN2),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR2),
+};
+
+static const struct regmap_range upboard_up2_writable_ranges[] = {
+ regmap_reg_range(UPBOARD_REG_FUNC_EN0, UPBOARD_REG_FUNC_EN1),
+ regmap_reg_range(UPBOARD_REG_GPIO_EN0, UPBOARD_REG_GPIO_EN2),
+ regmap_reg_range(UPBOARD_REG_GPIO_DIR0, UPBOARD_REG_GPIO_DIR2),
+};
+
+static const struct regmap_access_table upboard_up2_readable_table = {
+ .yes_ranges = upboard_up2_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up2_readable_ranges),
+};
+
+static const struct regmap_access_table upboard_up2_writable_table = {
+ .yes_ranges = upboard_up2_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(upboard_up2_writable_ranges),
+};
+
+static const struct regmap_config upboard_up2_regmap_config = {
+ .reg_bits = UPBOARD_ADDRESS_SIZE,
+ .val_bits = UPBOARD_REGISTER_SIZE,
+ .max_register = UPBOARD_REG_MAX,
+ .reg_read = upboard_fpga_read,
+ .reg_write = upboard_fpga_write,
+ .fast_io = false,
+ .cache_type = REGCACHE_NONE,
+ .rd_table = &upboard_up2_readable_table,
+ .wr_table = &upboard_up2_writable_table,
+};
+
+static const struct mfd_cell upboard_up_mfd_cells[] = {
+ { .name = "upboard-pinctrl" },
+ { .name = "upboard-leds" },
+};
+
+static const struct upboard_fpga_data upboard_up_fpga_data = {
+ .type = UPBOARD_UP_FPGA,
+ .regmap_config = &upboard_up_regmap_config,
+};
+
+static const struct upboard_fpga_data upboard_up2_fpga_data = {
+ .type = UPBOARD_UP2_FPGA,
+ .regmap_config = &upboard_up2_regmap_config,
+};
+
+static int upboard_fpga_gpio_init(struct upboard_fpga *fpga)
+{
+ fpga->enable_gpio = devm_gpiod_get(fpga->dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(fpga->enable_gpio))
+ return PTR_ERR(fpga->enable_gpio);
+
+ fpga->clear_gpio = devm_gpiod_get(fpga->dev, "clear", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->clear_gpio))
+ return PTR_ERR(fpga->clear_gpio);
+
+ fpga->strobe_gpio = devm_gpiod_get(fpga->dev, "strobe", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->strobe_gpio))
+ return PTR_ERR(fpga->strobe_gpio);
+
+ fpga->datain_gpio = devm_gpiod_get(fpga->dev, "datain", GPIOD_OUT_LOW);
+ if (IS_ERR(fpga->datain_gpio))
+ return PTR_ERR(fpga->datain_gpio);
+
+ fpga->dataout_gpio = devm_gpiod_get(fpga->dev, "dataout", GPIOD_IN);
+ if (IS_ERR(fpga->dataout_gpio))
+ return PTR_ERR(fpga->dataout_gpio);
+
+ gpiod_set_value(fpga->enable_gpio, 1);
+
+ return 0;
+}
+
+static int upboard_fpga_get_firmware_version(struct upboard_fpga *fpga)
+{
+ unsigned int platform_id, manufacturer_id;
+ int ret;
+
+ if (!fpga)
+ return -ENOMEM;
+
+ ret = regmap_read(fpga->regmap, UPBOARD_REG_PLATFORM_ID, &platform_id);
+ if (ret)
+ return ret;
+
+ manufacturer_id = platform_id & UPBOARD_MANUFACTURER_ID_MASK;
+ if (manufacturer_id != UPBOARD_AAEON_MANUFACTURER_ID)
+ return dev_err_probe(fpga->dev, -ENODEV,
+ "driver not compatible with custom FPGA FW from manufacturer id %#02x.",
+ manufacturer_id);
+
+ ret = regmap_read(fpga->regmap, UPBOARD_REG_FIRMWARE_ID, &fpga->firmware_version);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version) !=
+ UPBOARD_FW_ID_MAJOR_SUPPORTED)
+ return dev_err_probe(fpga->dev, -ENODEV,
+ "unsupported FPGA FW v%lu.%lu.%lu build %#02lx",
+ FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_MINOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_PATCH_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_BUILD_MASK, fpga->firmware_version));
+ return 0;
+}
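As a concrete decode of the masks above, take an illustrative firmware_version value of 0x1021:

/* build = FIELD_GET(GENMASK(15, 12), 0x1021) = 0x1
 * major = FIELD_GET(GENMASK(11, 8),  0x1021) = 0x0  (supported)
 * minor = FIELD_GET(GENMASK(7, 4),   0x1021) = 0x2
 * patch = FIELD_GET(GENMASK(3, 0),   0x1021) = 0x1
 * i.e. "FPGA FW v0.2.1 build 0x1", which passes the major-version check. */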
+
+static ssize_t upboard_fpga_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct upboard_fpga *fpga = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "FPGA FW v%lu.%lu.%lu build %#02lx\n",
+ FIELD_GET(UPBOARD_FW_ID_MAJOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_MINOR_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_PATCH_MASK, fpga->firmware_version),
+ FIELD_GET(UPBOARD_FW_ID_BUILD_MASK, fpga->firmware_version));
+}
+
+static DEVICE_ATTR_RO(upboard_fpga_version);
+
+static struct attribute *upboard_fpga_attrs[] = {
+ &dev_attr_upboard_fpga_version.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(upboard_fpga);
+
+static int upboard_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga;
+ int ret;
+
+ fpga = devm_kzalloc(dev, sizeof(*fpga), GFP_KERNEL);
+ if (!fpga)
+ return -ENOMEM;
+
+ fpga->fpga_data = device_get_match_data(dev);
+
+ fpga->dev = dev;
+
+ platform_set_drvdata(pdev, fpga);
+
+ fpga->regmap = devm_regmap_init(dev, NULL, fpga, fpga->fpga_data->regmap_config);
+ if (IS_ERR(fpga->regmap))
+ return PTR_ERR(fpga->regmap);
+
+ ret = upboard_fpga_gpio_init(fpga);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize FPGA common GPIOs");
+
+ ret = upboard_fpga_get_firmware_version(fpga);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, upboard_up_mfd_cells,
+ ARRAY_SIZE(upboard_up_mfd_cells), NULL, 0, NULL);
+}
+
+static const struct acpi_device_id upboard_fpga_acpi_match[] = {
+ { "AANT0F01", (kernel_ulong_t)&upboard_up2_fpga_data },
+ { "AANT0F04", (kernel_ulong_t)&upboard_up_fpga_data },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, upboard_fpga_acpi_match);
+
+static struct platform_driver upboard_fpga_driver = {
+ .driver = {
+ .name = "upboard-fpga",
+ .acpi_match_table = ACPI_PTR(upboard_fpga_acpi_match),
+ .dev_groups = upboard_fpga_groups,
+ },
+ .probe = upboard_fpga_probe,
+};
+
+module_platform_driver(upboard_fpga_driver);
+
+MODULE_AUTHOR("Gary Wang <garywang@aaeon.com.tw>");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("UP Board FPGA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index d34d58ce46db..ef03d6cec9ff 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -10,7 +10,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stat.h>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 09cbe3f0ab1e..56bc72c7ce4a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -517,7 +517,6 @@ config OPEN_DICE
config NTSYNC
tristate "NT synchronization primitive emulation"
- depends on BROKEN
help
This module provides kernel support for emulation of Windows NT
synchronization primitives. It is not a hardware driver.
@@ -613,8 +612,7 @@ config MARVELL_CN10K_DPI
config MCHP_LAN966X_PCI
tristate "Microchip LAN966x PCIe Support"
depends on PCI
- select OF
- select OF_OVERLAY
+ depends on OF_OVERLAY
select IRQ_DOMAIN
help
This enables the support for the LAN966x PCIe device.
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 40bf953185c7..545aad06d088 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 2bb1dd2511f9..fc64474b8241 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -714,7 +714,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -829,7 +829,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -849,8 +849,8 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
return ret;
}
/* size is computed at run-time */
-static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
- c2port_write_flash_data, 0);
+static const BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
+ c2port_write_flash_data, 0);
/*
* Class attributes
@@ -869,14 +869,27 @@ static struct attribute *c2port_attrs[] = {
NULL,
};
-static struct bin_attribute *c2port_bin_attrs[] = {
+static const struct bin_attribute *const c2port_bin_attrs[] = {
&bin_attr_flash_data,
NULL,
};
+static size_t c2port_bin_attr_size(struct kobject *kobj,
+ const struct bin_attribute *attr,
+ int i)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ if (attr == &bin_attr_flash_data)
+ return c2dev->ops->blocks_num * c2dev->ops->block_size;
+
+ return attr->size;
+}
+
static const struct attribute_group c2port_group = {
.attrs = c2port_attrs,
- .bin_attrs = c2port_bin_attrs,
+ .bin_attrs_new = c2port_bin_attrs,
+ .bin_size = c2port_bin_attr_size,
};
static const struct attribute_group *c2port_groups[] = {
@@ -912,8 +925,7 @@ struct c2port_device *c2port_device_register(char *name,
if (ret < 0)
goto error_idr_alloc;
c2dev->id = ret;
-
- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+ c2dev->ops = ops;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
@@ -924,7 +936,6 @@ struct c2port_device *c2port_device_register(char *name,
dev_set_drvdata(c2dev->dev, c2dev);
strscpy(c2dev->name, name, sizeof(c2dev->name));
- c2dev->ops = ops;
mutex_init(&c2dev->mutex);
/* By default C2 port access is off */
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 5efc4151bf58..15307f5e4307 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -9,11 +9,13 @@ config CXL_BASE
select PPC_64S_HASH_MMU
config CXL
- tristate "Support for IBM Coherent Accelerators (CXL)"
+ tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- default m
help
+ The cxl driver is deprecated and will be removed in a future
+ kernel release.
+
Select this option to enable driver support for IBM Coherent
Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index cf6bd8a43056..e26ee85279fa 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -295,6 +295,8 @@ int cxl_of_probe(struct platform_device *pdev)
int ret;
int slice = 0, slice_ok = 0;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
+
pr_devel("in %s\n", __func__);
np = pdev->dev.of_node;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3d52f9b92d0d..92bf7c5c7b35 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1726,6 +1726,8 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ dev_err_once(&dev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
+
if (cxl_pci_is_vphb_device(dev)) {
dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
return -ENODEV;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 409bd1c39663..b1fc6446bd4b 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -444,7 +444,7 @@ static ssize_t api_version_compatible_show(struct device *device,
}
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
@@ -538,7 +538,7 @@ static ssize_t class_show(struct kobject *kobj,
}
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
@@ -620,7 +620,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
cr->config_attr.attr.name = "config";
cr->config_attr.attr.mode = S_IRUSR;
cr->config_attr.size = afu->crs_len;
- cr->config_attr.read = afu_read_config;
+ cr->config_attr.read_new = afu_read_config;
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
@@ -693,7 +693,7 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
afu->attr_eb.attr.name = "afu_err_buff";
afu->attr_eb.attr.mode = S_IRUGO;
afu->attr_eb.size = afu->eb_len;
- afu->attr_eb.read = afu_eb_read;
+ afu->attr_eb.read_new = afu_eb_read;
rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
if (rc) {
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 4175df7ef011..5d5a70a62e98 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -154,7 +154,7 @@ static const struct attribute_group ds1682_group = {
* User data attribute
*/
static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -172,7 +172,7 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -194,8 +194,8 @@ static const struct bin_attribute ds1682_eeprom_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = DS1682_EEPROM_SIZE,
- .read = ds1682_eeprom_read,
- .write = ds1682_eeprom_write,
+ .read_new = ds1682_eeprom_read,
+ .write_new = ds1682_eeprom_write,
};
static int ds1682_nvmem_read(void *priv, unsigned int offset, void *val,
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index 88888485e6f8..ee58f7ce5bfa 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = {
};
static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
- .dev_id = "spi_gpio",
+ .dev_id = "spi_gpio.1",
.table = {
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
"sck", GPIO_ACTIVE_HIGH),
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 43421fe37d33..1fc632ebf22f 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -847,7 +847,7 @@ err_mutex_unlock:
* @count: Number of bytes to write
*/
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -871,7 +871,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
* @count: Number of bytes to write
*/
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -1017,7 +1017,7 @@ static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
* NOTE Size will be changed in compliance with OF node. EEPROM attribute will
* be read-only as well if the corresponding flag is specified in OF node.
*/
-static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
+static const BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
/*
* csr_dbgfs_ops - CSR debugfs-node read/write operations
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 6fab2ffa736b..1c36ad153e78 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -104,7 +104,7 @@ exit_up:
}
static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -127,7 +127,7 @@ static const struct bin_attribute user_eeprom_attr = {
.mode = S_IRUGO,
},
.size = USER_EEPROM_SIZE,
- .read = max6875_read,
+ .read_new = max6875_read,
};
static int max6875_probe(struct i2c_client *client)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 48d08eeb2d20..7b7a22c91fe4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -139,14 +139,14 @@ struct fastrpc_mmap_rsp_msg {
};
struct fastrpc_mmap_req_msg {
- s32 pgid;
+ s32 client_id;
u32 flags;
u64 vaddr;
s32 num;
};
struct fastrpc_mem_map_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
s32 offset;
u32 flags;
@@ -156,20 +156,20 @@ struct fastrpc_mem_map_req_msg {
};
struct fastrpc_munmap_req_msg {
- s32 pgid;
+ s32 client_id;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
u64 vaddrin;
u64 len;
};
struct fastrpc_msg {
- int pid; /* process group id */
+ int client_id; /* process client id */
int tid; /* thread id */
u64 ctx; /* invoke caller context */
u32 handle; /* handle to invoke */
@@ -234,7 +234,7 @@ struct fastrpc_invoke_ctx {
int nbufs;
int retval;
int pid;
- int tgid;
+ int client_id;
u32 sc;
u32 *crc;
u64 ctxid;
@@ -299,7 +299,7 @@ struct fastrpc_user {
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
- int tgid;
+ int client_id;
int pd;
bool is_secure_dev;
/* Lock for lists */
@@ -614,7 +614,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
- ctx->tgid = user->tgid;
+ ctx->client_id = user->client_id;
ctx->cctx = cctx;
init_completion(&ctx->work);
INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
@@ -992,7 +992,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma)
- pages[i].addr += ctx->args[i].ptr -
+ pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
vma->vm_start;
mmap_read_unlock(current->mm);
@@ -1019,8 +1019,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
(pkt_size - rlen);
pages[i].addr = pages[i].addr & PAGE_MASK;
- pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
- pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
+ pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
args = args + mlen;
rlen -= mlen;
@@ -1115,11 +1115,11 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
int ret;
cctx = fl->cctx;
- msg->pid = fl->tgid;
+ msg->client_id = fl->client_id;
msg->tid = current->pid;
if (kernel)
- msg->pid = 0;
+ msg->client_id = 0;
msg->ctx = ctx->ctxid | fl->pd;
msg->handle = handle;
@@ -1244,7 +1244,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
int err;
bool scm_done = false;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 pageslen;
} inbuf;
@@ -1293,7 +1293,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
}
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = init.namelen;
inbuf.pageslen = 0;
fl->pd = USER_PD;
@@ -1363,7 +1363,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
int memlen;
int err;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 filelen;
u32 pageslen;
@@ -1395,7 +1395,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
@@ -1469,8 +1469,9 @@ err:
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
- struct fastrpc_channel_ctx *cctx)
+ struct fastrpc_user *fl)
{
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_session_ctx *session = NULL;
unsigned long flags;
int i;
@@ -1480,6 +1481,8 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
session = &cctx->session[i];
+ /* any non-zero ID will work, session_idx + 1 is the simplest one */
+ fl->client_id = i + 1;
break;
}
}
@@ -1501,12 +1504,12 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
- int tgid = 0;
+ int client_id = 0;
u32 sc;
- tgid = fl->tgid;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ client_id = fl->client_id;
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
@@ -1579,11 +1582,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
- fl->tgid = current->tgid;
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
- fl->sctx = fastrpc_session_alloc(cctx);
+ fl->sctx = fastrpc_session_alloc(fl);
if (!fl->sctx) {
dev_err(&cctx->rpdev->dev, "No session available\n");
mutex_destroy(&fl->mutex);
@@ -1647,11 +1649,11 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
- int tgid = fl->tgid;
+ int client_id = fl->client_id;
u32 sc;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
fl->pd = pd;
@@ -1803,7 +1805,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
int err;
u32 sc;
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
@@ -1889,7 +1891,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.flags = req.flags;
req_msg.vaddr = req.vaddrin;
req_msg.num = sizeof(pages);
@@ -1978,7 +1980,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
return -EINVAL;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
@@ -2031,7 +2033,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
@@ -2344,7 +2346,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
if (err)
- goto fdev_error;
+ goto populate_error;
break;
default:
err = -EINVAL;
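
The net effect of the tgid -> client_id conversion above: the id embedded in every message to the DSP is now allocated per session in fastrpc_session_alloc() (session index + 1; any non-zero value works) rather than copied from the opener's tgid, so two fds opened by the same process no longer present the same id to the DSP.
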
diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c
index 255d3022dae8..d0c6113dcff3 100644
--- a/drivers/misc/keba/cp500.c
+++ b/drivers/misc/keba/cp500.c
@@ -126,8 +126,9 @@ static struct cp500_devs cp520_devices = {
};
struct cp500_nvmem {
- struct nvmem_device *nvmem;
+ struct nvmem_device *base_nvmem;
unsigned int offset;
+ struct nvmem_device *nvmem;
};
struct cp500 {
@@ -581,8 +582,8 @@ static int cp500_nvmem_read(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_read(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_read(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
@@ -595,15 +596,16 @@ static int cp500_nvmem_write(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_write(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_write(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
return 0;
}
-static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
+static int cp500_nvmem_register(struct cp500 *cp500,
+ struct nvmem_device *base_nvmem)
{
struct device *dev = &cp500->pci_dev->dev;
struct nvmem_config nvmem_config = {};
@@ -625,27 +627,52 @@ static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
nvmem_config.reg_read = cp500_nvmem_read;
nvmem_config.reg_write = cp500_nvmem_write;
- cp500->nvmem_cpu.nvmem = nvmem;
+ cp500->nvmem_cpu.base_nvmem = base_nvmem;
cp500->nvmem_cpu.offset = CP500_EEPROM_CPU_OFFSET;
nvmem_config.name = CP500_EEPROM_CPU_NAME;
nvmem_config.size = CP500_EEPROM_CPU_SIZE;
nvmem_config.priv = &cp500->nvmem_cpu;
- tmp = devm_nvmem_register(dev, &nvmem_config);
+ tmp = nvmem_register(&nvmem_config);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
+ cp500->nvmem_cpu.nvmem = tmp;
- cp500->nvmem_user.nvmem = nvmem;
+ cp500->nvmem_user.base_nvmem = base_nvmem;
cp500->nvmem_user.offset = CP500_EEPROM_USER_OFFSET;
nvmem_config.name = CP500_EEPROM_USER_NAME;
nvmem_config.size = CP500_EEPROM_USER_SIZE;
nvmem_config.priv = &cp500->nvmem_user;
- tmp = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(tmp))
+ tmp = nvmem_register(&nvmem_config);
+ if (IS_ERR(tmp)) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+
return PTR_ERR(tmp);
+ }
+ cp500->nvmem_user.nvmem = tmp;
return 0;
}
+static void cp500_nvmem_unregister(struct cp500 *cp500)
+{
+ int notified;
+
+ if (cp500->nvmem_user.nvmem) {
+ nvmem_unregister(cp500->nvmem_user.nvmem);
+ cp500->nvmem_user.nvmem = NULL;
+ }
+ if (cp500->nvmem_cpu.nvmem) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+ }
+
+ /* CPU and user nvmem use the same base_nvmem, put only once */
+ notified = atomic_read(&cp500->nvmem_notified);
+ if (notified)
+ nvmem_device_put(cp500->nvmem_cpu.base_nvmem);
+}
+
static int cp500_nvmem_match(struct device *dev, const void *data)
{
const struct cp500 *cp500 = data;
@@ -663,13 +690,6 @@ static int cp500_nvmem_match(struct device *dev, const void *data)
return 0;
}
-static void cp500_devm_nvmem_put(void *data)
-{
- struct nvmem_device *nvmem = data;
-
- nvmem_device_put(nvmem);
-}
-
static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
void *data)
{
@@ -698,10 +718,6 @@ static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
- ret = devm_add_action_or_reset(dev, cp500_devm_nvmem_put, nvmem);
- if (ret)
- return ret;
-
ret = cp500_nvmem_register(cp500, nvmem);
if (ret)
return ret;
@@ -932,12 +948,17 @@ static void cp500_remove(struct pci_dev *pci_dev)
{
struct cp500 *cp500 = pci_get_drvdata(pci_dev);
+ /*
+ * unregister CPU and user nvmem and put base_nvmem before parent
+ * auxiliary device of base_nvmem is unregistered
+ */
+ nvmem_unregister_notifier(&cp500->nvmem_notifier);
+ cp500_nvmem_unregister(cp500);
+
cp500_unregister_auxiliary_devs(cp500);
cp500_disable(cp500);
- nvmem_unregister_notifier(&cp500->nvmem_notifier);
-
pci_set_drvdata(pci_dev, 0);
pci_free_irq_vectors(pci_dev);
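
The switch from devm_nvmem_register() to explicit nvmem_register()/nvmem_unregister() is what makes the teardown ordering in cp500_remove() possible: the derived CPU and user nvmem devices are unregistered, and base_nvmem is put, strictly before the auxiliary device backing base_nvmem disappears, whereas the old devm releases would only have run after remove() returned.
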
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index e616e3ec2b42..04756302b878 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -147,8 +147,11 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), false);
+ break;
default:
- ret = -EOPNOTSUPP;
+ ret = -ENOTSUPP;
break;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -277,7 +280,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
- generic_handle_irq(irq);
+ handle_nested_irq(irq);
}
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 67d9391f1855..7575fee96cc6 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,7 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
- default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
+ default X86_64 || MATOM
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c3a6657dcd4a..a5f88ec97df7 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,8 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 6589635f8ba3..d6ff9d82ae94 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 35d349fee769..7be1649b1972 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi)
if (ret)
return ret;
- tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
if (IS_ERR(tp->wakeuphost))
return PTR_ERR(tp->wakeuphost);
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/misc/misc_minor_kunit.c
new file mode 100644
index 000000000000..293e0fb7e43e
--- /dev/null
+++ b/drivers/misc/misc_minor_kunit.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+/* dynamic minor (2) */
+static struct miscdevice dev_dynamic_minor = {
+ .minor = 2,
+ .name = "dev_dynamic_minor",
+};
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
+ misc_deregister(&dev_dynamic_minor);
+}
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_dynamic_minor),
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "misc_minor_test",
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_DESCRIPTION("misc minor testing");
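
The new suite is selected by CONFIG_TEST_MISC_MINOR (see the Makefile hunk above) and runs under the standard KUnit tooling; as a sketch, assuming a .kunitconfig that sets CONFIG_KUNIT=y and CONFIG_TEST_MISC_MINOR=y:

	$ ./tools/testing/kunit/kunit.py run 'misc_minor_test'
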
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 4954553b7baa..999026a1ae04 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -6,11 +6,17 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <uapi/linux/ntsync.h>
@@ -19,6 +25,8 @@
enum ntsync_type {
NTSYNC_TYPE_SEM,
+ NTSYNC_TYPE_MUTEX,
+ NTSYNC_TYPE_EVENT,
};
/*
@@ -30,10 +38,13 @@ enum ntsync_type {
*
* Both rely on struct file for reference counting. Individual
* ntsync_obj objects take a reference to the device when created.
+ * Wait operations take a reference to each object being waited on for
+ * the duration of the wait.
*/
struct ntsync_obj {
spinlock_t lock;
+ int dev_locked;
enum ntsync_type type;
@@ -46,22 +57,344 @@ struct ntsync_obj {
__u32 count;
__u32 max;
} sem;
+ struct {
+ __u32 count;
+ pid_t owner;
+ bool ownerdead;
+ } mutex;
+ struct {
+ bool manual;
+ bool signaled;
+ } event;
} u;
+
+ /*
+ * any_waiters is protected by the object lock, but all_waiters is
+ * protected by the device wait_all_lock.
+ */
+ struct list_head any_waiters;
+ struct list_head all_waiters;
+
+ /*
+ * Hint describing how many tasks are queued on this object in a
+ * wait-all operation.
+ *
+ * Any time we do a wake, we may need to wake "all" waiters as well as
+ * "any" waiters. In order to atomically wake "all" waiters, we must
+ * lock all of the objects, and that means grabbing the wait_all_lock
+ * below (and, due to lock ordering rules, before locking this object).
+ * However, wait-all is a rare operation, and grabbing the wait-all
+ * lock for every wake would create unnecessary contention.
+ * Therefore we first check whether all_hint is zero, and, if it is,
+ * we skip trying to wake "all" waiters.
+ *
+ * Since wait requests must originate from user-space threads, we're
+ * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
+ */
+ atomic_t all_hint;
+};
+
+struct ntsync_q_entry {
+ struct list_head node;
+ struct ntsync_q *q;
+ struct ntsync_obj *obj;
+ __u32 index;
+};
+
+struct ntsync_q {
+ struct task_struct *task;
+ __u32 owner;
+
+ /*
+ * Protected via atomic_try_cmpxchg(). Only the thread that wins the
+ * compare-and-swap may actually change object states and wake this
+ * task.
+ */
+ atomic_t signaled;
+
+ bool all;
+ bool ownerdead;
+ __u32 count;
+ struct ntsync_q_entry entries[];
};
struct ntsync_device {
+ /*
+ * Wait-all operations must atomically grab all objects, and be totally
+ * ordered with respect to each other and wait-any operations.
+ * If one thread is trying to acquire several objects, another thread
+ * cannot touch the object at the same time.
+ *
+ * This device-wide lock is used to serialize wait-for-all
+ * operations, and operations on an object that is involved in a
+ * wait-for-all.
+ */
+ struct mutex wait_all_lock;
+
struct file *file;
};
/*
+ * Single objects are locked using obj->lock.
+ *
+ * Multiple objects are 'locked' while holding dev->wait_all_lock.
+ * In this case however, individual objects are not locked by holding
+ * obj->lock, but by setting obj->dev_locked.
+ *
+ * This means that in order to lock a single object, the sequence is slightly
+ * more complicated than usual. Specifically it needs to check obj->dev_locked
+ * after acquiring obj->lock, if set, it needs to drop the lock and acquire
+ * dev->wait_all_lock in order to serialize against the multi-object operation.
+ */
+
+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ /*
+ * By setting obj->dev_locked inside obj->lock, it is ensured that
+ * anyone holding obj->lock must see the value.
+ */
+ obj->dev_locked = 1;
+ spin_unlock(&obj->lock);
+}
+
+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ obj->dev_locked = 0;
+ spin_unlock(&obj->lock);
+}
+
+static void obj_lock(struct ntsync_obj *obj)
+{
+ struct ntsync_device *dev = obj->dev;
+
+ for (;;) {
+ spin_lock(&obj->lock);
+ if (likely(!obj->dev_locked))
+ break;
+
+ spin_unlock(&obj->lock);
+ mutex_lock(&dev->wait_all_lock);
+ spin_lock(&obj->lock);
+ /*
+ * obj->dev_locked should be set and released under the same
+ * wait_all_lock section; since we now own this lock, it should
+ * be clear.
+ */
+ lockdep_assert(!obj->dev_locked);
+ spin_unlock(&obj->lock);
+ mutex_unlock(&dev->wait_all_lock);
+ }
+}
+
+static void obj_unlock(struct ntsync_obj *obj)
+{
+ spin_unlock(&obj->lock);
+}
+
+static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ bool all;
+
+ obj_lock(obj);
+ all = atomic_read(&obj->all_hint);
+ if (unlikely(all)) {
+ obj_unlock(obj);
+ mutex_lock(&dev->wait_all_lock);
+ dev_lock_obj(dev, obj);
+ }
+
+ return all;
+}
+
+static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
+{
+ if (all) {
+ dev_unlock_obj(dev, obj);
+ mutex_unlock(&dev->wait_all_lock);
+ } else {
+ obj_unlock(obj);
+ }
+}
+
+#define ntsync_assert_held(obj) \
+ lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
+ ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
+ (obj)->dev_locked))
+
+static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
+{
+ ntsync_assert_held(obj);
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ return !!obj->u.sem.count;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
+ return false;
+ return obj->u.mutex.count < UINT_MAX;
+ case NTSYNC_TYPE_EVENT:
+ return obj->u.event.signaled;
+ }
+
+ WARN(1, "bad object type %#x\n", obj->type);
+ return false;
+}
+
+/*
+ * "locked_obj" is an optional pointer to an object which is already locked and
+ * should not be locked again. This is necessary so that changing an object's
+ * state and waking it can be a single atomic operation.
+ */
+static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
+ struct ntsync_obj *locked_obj)
+{
+ __u32 count = q->count;
+ bool can_wake = true;
+ int signaled = -1;
+ __u32 i;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ if (locked_obj)
+ lockdep_assert(locked_obj->dev_locked);
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_lock_obj(dev, q->entries[i].obj);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (!is_signaled(q->entries[i].obj, q->owner)) {
+ can_wake = false;
+ break;
+ }
+ }
+
+ if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
+ for (i = 0; i < count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ obj->u.sem.count--;
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.ownerdead)
+ q->ownerdead = true;
+ obj->u.mutex.ownerdead = false;
+ obj->u.mutex.count++;
+ obj->u.mutex.owner = q->owner;
+ break;
+ case NTSYNC_TYPE_EVENT:
+ if (!obj->u.event.manual)
+ obj->u.event.signaled = false;
+ break;
+ }
+ }
+ wake_up_process(q->task);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_unlock_obj(dev, q->entries[i].obj);
+ }
+}
+
+static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ struct ntsync_q_entry *entry;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev_locked);
+
+ list_for_each_entry(entry, &obj->all_waiters, node)
+ try_wake_all(dev, entry->q, obj);
+}
+
+static void try_wake_any_sem(struct ntsync_obj *sem)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(sem);
+ lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
+
+ list_for_each_entry(entry, &sem->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!sem->u.sem.count)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ sem->u.sem.count--;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_mutex(struct ntsync_obj *mutex)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(mutex);
+ lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);
+
+ list_for_each_entry(entry, &mutex->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (mutex->u.mutex.count == UINT_MAX)
+ break;
+ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
+ continue;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (mutex->u.mutex.ownerdead)
+ q->ownerdead = true;
+ mutex->u.mutex.ownerdead = false;
+ mutex->u.mutex.count++;
+ mutex->u.mutex.owner = q->owner;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_event(struct ntsync_obj *event)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(event);
+ lockdep_assert(event->type == NTSYNC_TYPE_EVENT);
+
+ list_for_each_entry(entry, &event->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!event->u.event.signaled)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (!event->u.event.manual)
+ event->u.event.signaled = false;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+/*
* Actually change the semaphore state, returning -EOVERFLOW if it is made
* invalid.
*/
-static int post_sem_state(struct ntsync_obj *sem, __u32 count)
+static int release_sem_state(struct ntsync_obj *sem, __u32 count)
{
__u32 sum;
- lockdep_assert_held(&sem->lock);
+ ntsync_assert_held(sem);
if (check_add_overflow(sem->u.sem.count, count, &sum) ||
sum > sem->u.sem.max)
@@ -71,11 +404,13 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count)
return 0;
}
-static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
+static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
{
+ struct ntsync_device *dev = sem->dev;
__u32 __user *user_args = argp;
__u32 prev_count;
__u32 args;
+ bool all;
int ret;
if (copy_from_user(&args, argp, sizeof(args)))
@@ -84,12 +419,17 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
if (sem->type != NTSYNC_TYPE_SEM)
return -EINVAL;
- spin_lock(&sem->lock);
+ all = ntsync_lock_obj(dev, sem);
prev_count = sem->u.sem.count;
- ret = post_sem_state(sem, args);
+ ret = release_sem_state(sem, args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, sem);
+ try_wake_any_sem(sem);
+ }
- spin_unlock(&sem->lock);
+ ntsync_unlock_obj(dev, sem, all);
if (!ret && put_user(prev_count, user_args))
ret = -EFAULT;
@@ -97,13 +437,229 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
return ret;
}
-static int ntsync_obj_release(struct inode *inode, struct file *file)
+/*
+ * Actually change the mutex state, returning -EPERM if not the owner.
+ */
+static int unlock_mutex_state(struct ntsync_obj *mutex,
+ const struct ntsync_mutex_args *args)
{
- struct ntsync_obj *obj = file->private_data;
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != args->owner)
+ return -EPERM;
+
+ if (!--mutex->u.mutex.count)
+ mutex->u.mutex.owner = 0;
+ return 0;
+}
+
+static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ __u32 prev_count;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+ if (!args.owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ prev_count = mutex->u.mutex.count;
+ ret = unlock_mutex_state(mutex, &args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (!ret && put_user(prev_count, &user_args->count))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+/*
+ * Actually change the mutex state to mark its owner as dead,
+ * returning -EPERM if not the owner.
+ */
+static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
+{
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != owner)
+ return -EPERM;
+
+ mutex->u.mutex.ownerdead = true;
+ mutex->u.mutex.owner = 0;
+ mutex->u.mutex.count = 0;
+ return 0;
+}
+
+static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_device *dev = mutex->dev;
+ __u32 owner;
+ bool all;
+ int ret;
+
+ if (get_user(owner, (__u32 __user *)argp))
+ return -EFAULT;
+ if (!owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ ret = kill_mutex_state(mutex, owner);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ return ret;
+}
+
+static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = true;
+ if (all)
+ try_wake_all_obj(dev, event);
+ try_wake_any_event(event);
+ if (pulse)
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp)
+{
+ struct ntsync_sem_args __user *user_args = argp;
+ struct ntsync_device *dev = sem->dev;
+ struct ntsync_sem_args args;
+ bool all;
+
+ if (sem->type != NTSYNC_TYPE_SEM)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, sem);
+
+ args.count = sem->u.sem.count;
+ args.max = sem->u.sem.max;
+
+ ntsync_unlock_obj(dev, sem, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ bool all;
+ int ret;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ args.count = mutex->u.mutex.count;
+ args.owner = mutex->u.mutex.owner;
+ ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0;
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return ret;
+}
+
+static int ntsync_event_read(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_event_args __user *user_args = argp;
+ struct ntsync_device *dev = event->dev;
+ struct ntsync_event_args args;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ args.manual = event->u.event.manual;
+ args.signaled = event->u.event.signaled;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+
+static void ntsync_free_obj(struct ntsync_obj *obj)
+{
fput(obj->dev->file);
kfree(obj);
+}
+static int ntsync_obj_release(struct inode *inode, struct file *file)
+{
+ ntsync_free_obj(file->private_data);
return 0;
}
@@ -114,8 +670,24 @@ static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
- case NTSYNC_IOC_SEM_POST:
- return ntsync_sem_post(obj, argp);
+ case NTSYNC_IOC_SEM_RELEASE:
+ return ntsync_sem_release(obj, argp);
+ case NTSYNC_IOC_SEM_READ:
+ return ntsync_sem_read(obj, argp);
+ case NTSYNC_IOC_MUTEX_UNLOCK:
+ return ntsync_mutex_unlock(obj, argp);
+ case NTSYNC_IOC_MUTEX_KILL:
+ return ntsync_mutex_kill(obj, argp);
+ case NTSYNC_IOC_MUTEX_READ:
+ return ntsync_mutex_read(obj, argp);
+ case NTSYNC_IOC_EVENT_SET:
+ return ntsync_event_set(obj, argp, false);
+ case NTSYNC_IOC_EVENT_RESET:
+ return ntsync_event_reset(obj, argp);
+ case NTSYNC_IOC_EVENT_PULSE:
+ return ntsync_event_set(obj, argp, true);
+ case NTSYNC_IOC_EVENT_READ:
+ return ntsync_event_read(obj, argp);
default:
return -ENOIOCTLCMD;
}
@@ -140,6 +712,9 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
obj->dev = dev;
get_file(dev->file);
spin_lock_init(&obj->lock);
+ INIT_LIST_HEAD(&obj->any_waiters);
+ INIT_LIST_HEAD(&obj->all_waiters);
+ atomic_set(&obj->all_hint, 0);
return obj;
}
@@ -165,7 +740,6 @@ static int ntsync_obj_get_fd(struct ntsync_obj *obj)
static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
{
- struct ntsync_sem_args __user *user_args = argp;
struct ntsync_sem_args args;
struct ntsync_obj *sem;
int fd;
@@ -182,12 +756,398 @@ static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
sem->u.sem.count = args.count;
sem->u.sem.max = args.max;
fd = ntsync_obj_get_fd(sem);
- if (fd < 0) {
- kfree(sem);
- return fd;
+ if (fd < 0)
+ ntsync_free_obj(sem);
+
+ return fd;
+}
+
+static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_mutex_args args;
+ struct ntsync_obj *mutex;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ if (!args.owner != !args.count)
+ return -EINVAL;
+
+ mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
+ if (!mutex)
+ return -ENOMEM;
+ mutex->u.mutex.count = args.count;
+ mutex->u.mutex.owner = args.owner;
+ fd = ntsync_obj_get_fd(mutex);
+ if (fd < 0)
+ ntsync_free_obj(mutex);
+
+ return fd;
+}
+
+static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_event_args args;
+ struct ntsync_obj *event;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
+ if (!event)
+ return -ENOMEM;
+ event->u.event.manual = args.manual;
+ event->u.event.signaled = args.signaled;
+ fd = ntsync_obj_get_fd(event);
+ if (fd < 0)
+ ntsync_free_obj(event);
+
+ return fd;
+}
+
+static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
+{
+ struct file *file = fget(fd);
+ struct ntsync_obj *obj;
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &ntsync_obj_fops) {
+ fput(file);
+ return NULL;
+ }
+
+ obj = file->private_data;
+ if (obj->dev != dev) {
+ fput(file);
+ return NULL;
}
- return put_user(fd, &user_args->sem);
+ return obj;
+}
+
+static void put_obj(struct ntsync_obj *obj)
+{
+ fput(obj->file);
+}
+
+static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
+{
+ ktime_t timeout = ns_to_ktime(args->timeout);
+ clockid_t clock = CLOCK_MONOTONIC;
+ ktime_t *timeout_ptr;
+ int ret = 0;
+
+ timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
+
+ if (args->flags & NTSYNC_WAIT_REALTIME)
+ clock = CLOCK_REALTIME;
+
+ do {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&q->signaled) != -1) {
+ ret = 0;
+ break;
+ }
+ ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
+ } while (ret < 0);
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
+/*
+ * Allocate and initialize the ntsync_q structure, but do not queue us yet.
+ */
+static int setup_wait(struct ntsync_device *dev,
+ const struct ntsync_wait_args *args, bool all,
+ struct ntsync_q **ret_q)
+{
+ int fds[NTSYNC_MAX_WAIT_COUNT + 1];
+ const __u32 count = args->count;
+ size_t size = array_size(count, sizeof(fds[0]));
+ struct ntsync_q *q;
+ __u32 total_count;
+ __u32 i, j;
+
+ if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
+ return -EINVAL;
+
+ if (size >= sizeof(fds))
+ return -EINVAL;
+
+ total_count = count;
+ if (args->alert)
+ total_count++;
+
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
+ return -EFAULT;
+ if (args->alert)
+ fds[count] = args->alert;
+
+ q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ q->task = current;
+ q->owner = args->owner;
+ atomic_set(&q->signaled, -1);
+ q->all = all;
+ q->ownerdead = false;
+ q->count = count;
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = get_obj(dev, fds[i]);
+
+ if (!obj)
+ goto err;
+
+ if (all) {
+ /* Check that the objects are all distinct. */
+ for (j = 0; j < i; j++) {
+ if (obj == q->entries[j].obj) {
+ put_obj(obj);
+ goto err;
+ }
+ }
+ }
+
+ entry->obj = obj;
+ entry->q = q;
+ entry->index = i;
+ }
+
+ *ret_q = q;
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ put_obj(q->entries[j].obj);
+ kfree(q);
+ return -EINVAL;
+}
+
+static void try_wake_any_obj(struct ntsync_obj *obj)
+{
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ try_wake_any_sem(obj);
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ try_wake_any_mutex(obj);
+ break;
+ case NTSYNC_TYPE_EVENT:
+ try_wake_any_event(obj);
+ break;
+ }
+}
+
+static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ __u32 i, total_count;
+ struct ntsync_q *q;
+ int signaled;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, false, &q);
+ if (ret < 0)
+ return ret;
+
+ total_count = args.count;
+ if (args.alert)
+ total_count++;
+
+ /* queue ourselves */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /*
+ * Check if we are already signaled.
+ *
+ * Note that the API requires that normal objects are checked before
+ * the alert event. Hence we queue the alert event last, and check
+ * objects in order.
+ */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ if (atomic_read(&q->signaled) != -1)
+ break;
+
+ all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
+}
+
+static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ struct ntsync_q *q;
+ int signaled;
+ __u32 i;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, true, &q);
+ if (ret < 0)
+ return ret;
+
+ /* queue ourselves */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ atomic_inc(&obj->all_hint);
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire obj->lock
+ * here.
+ */
+ list_add_tail(&entry->node, &obj->all_waiters);
+ }
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+
+ dev_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ dev_unlock_obj(dev, obj);
+ }
+
+ /* check if we are already signaled */
+
+ try_wake_all(dev, q, NULL);
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ /*
+ * Check if the alert event is signaled, making sure to do so only
+ * after checking if the other objects are signaled.
+ */
+
+ if (args.alert) {
+ struct ntsync_obj *obj = q->entries[args.count].obj;
+
+ if (atomic_read(&q->signaled) == -1) {
+ bool all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire it here.
+ */
+ list_del(&entry->node);
+
+ atomic_dec(&obj->all_hint);
+
+ put_obj(obj);
+ }
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+ bool all;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
}
static int ntsync_char_open(struct inode *inode, struct file *file)
@@ -198,6 +1158,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file)
if (!dev)
return -ENOMEM;
+ mutex_init(&dev->wait_all_lock);
+
file->private_data = dev;
dev->file = file;
return nonseekable_open(inode, file);
@@ -219,8 +1181,16 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
+ case NTSYNC_IOC_CREATE_EVENT:
+ return ntsync_create_event(dev, argp);
+ case NTSYNC_IOC_CREATE_MUTEX:
+ return ntsync_create_mutex(dev, argp);
case NTSYNC_IOC_CREATE_SEM:
return ntsync_create_sem(dev, argp);
+ case NTSYNC_IOC_WAIT_ALL:
+ return ntsync_wait_all(dev, argp);
+ case NTSYNC_IOC_WAIT_ANY:
+ return ntsync_wait_any(dev, argp);
default:
return -ENOIOCTLCMD;
}
@@ -238,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = NTSYNC_NAME,
.fops = &ntsync_fops,
+ .mode = 0666,
};
module_misc_device(ntsync_misc);
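
Every state-changing object ioctl above follows the same shape around the new two-level locking: take the object lock (ntsync_lock_obj() escalates to wait_all_lock when all_hint indicates a queued wait-all), mutate the state, attempt both wake paths, then unlock. Condensed, for a semaphore (the _mutex/_event variants are analogous):

	all = ntsync_lock_obj(dev, sem);
	/* ...change sem->u.sem state... */
	if (all)
		try_wake_all_obj(dev, sem);
	try_wake_any_sem(sem);
	ntsync_unlock_obj(dev, sem, all);

From user space, the character device creates objects and performs waits, while per-object fds carry the state operations. A minimal sketch (error handling omitted; ioctl names and struct fields as introduced in this diff, field layout assumed from the uapi header):

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/ntsync.h>

	int demo(void)
	{
		int dev = open("/dev/ntsync", O_RDWR);
		struct ntsync_sem_args sem_args = { .count = 0, .max = 2 };
		int sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);
		__u32 post = 1;
		int fds[1] = { sem };
		struct ntsync_wait_args wait_args = {
			.timeout = UINT64_MAX,	/* U64_MAX means wait forever, per ntsync_schedule() */
			.objs = (uintptr_t)fds,
			.count = 1,
			.owner = 1,		/* caller-chosen id; only meaningful for mutexes */
		};

		ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &post);	/* previous count written back to post */
		ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args);	/* consumes the release; wait_args.index == 0 */
		return 0;
	}
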
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index 07520d6e6dc5..e849641687a0 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -94,7 +94,7 @@ static struct device_attribute afu_attrs[] = {
};
static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
@@ -155,7 +155,7 @@ int ocxl_sysfs_register_afu(struct ocxl_file_info *info)
info->attr_global_mmio.attr.name = "global_mmio_area";
info->attr_global_mmio.attr.mode = 0600;
info->attr_global_mmio.size = info->afu->config.global_mmio_size;
- info->attr_global_mmio.read = global_mmio_read;
+ info->attr_global_mmio.read_new = global_mmio_read;
info->attr_global_mmio.mmap = global_mmio_mmap;
rc = device_create_bin_file(&info->dev, &info->attr_global_mmio);
if (rc) {
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 8d2b7135738e..6121c0940cd1 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -483,7 +483,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
}
static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int rom_signature;
@@ -553,7 +553,7 @@ return_err_nomutex:
}
static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int err;
@@ -655,8 +655,8 @@ static const struct bin_attribute pch_bin_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = PCH_PHUB_OROM_SIZE + 1,
- .read = pch_phub_bin_read,
- .write = pch_phub_bin_write,
+ .read_new = pch_phub_bin_read,
+ .write_new = pch_phub_bin_write,
};
static int pch_phub_probe(struct pci_dev *pdev,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 3aaaf47fa4ee..d5ac71a49386 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,9 @@
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
+#define PCI_ENDPOINT_TEST_CAPS 0x30
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
@@ -166,43 +169,47 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
test->irq_type = IRQ_TYPE_UNDEFINED;
}
-static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
+static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
int type)
{
- int irq = -1;
+ int irq;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- bool res = true;
switch (type) {
case IRQ_TYPE_INTX:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get Legacy interrupt\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI interrupts\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI-X interrupts\n");
+ return irq;
+ }
+
break;
default:
dev_err(dev, "Invalid IRQ type selected\n");
- }
-
- if (irq < 0) {
- irq = 0;
- res = false;
+ return -EINVAL;
}
test->irq_type = type;
test->num_irqs = irq;
- return res;
+ return 0;
}
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
@@ -217,22 +224,22 @@ static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
test->num_irqs = 0;
}
-static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
int i;
- int err;
+ int ret;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, test->name, test);
- if (err)
+ if (ret)
goto fail;
}
- return true;
+ return 0;
fail:
switch (irq_type) {
@@ -252,7 +259,7 @@ fail:
break;
}
- return false;
+ return ret;
}
static const u32 bar_test_pattern[] = {
@@ -277,16 +284,16 @@ static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
return memcmp(write_buf, read_buf, size);
}
-static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
- int j, bar_size, buf_size, iters, remain;
+ int j, bar_size, buf_size, iters;
void *write_buf __free(kfree) = NULL;
void *read_buf __free(kfree) = NULL;
struct pci_dev *pdev = test->pdev;
if (!test->bar[barno])
- return false;
+ return -ENOMEM;
bar_size = pci_resource_len(pdev, barno);
@@ -301,28 +308,105 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
write_buf = kmalloc(buf_size, GFP_KERNEL);
if (!write_buf)
- return false;
+ return -ENOMEM;
read_buf = kmalloc(buf_size, GFP_KERNEL);
if (!read_buf)
- return false;
+ return -ENOMEM;
iters = bar_size / buf_size;
for (j = 0; j < iters; j++)
if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
write_buf, read_buf, buf_size))
- return false;
+ return -EIO;
+
+ return 0;
+}
+
+static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
+{
+ u32 val;
+
+ /* Keep the BAR pattern in the top byte. */
+ val = bar_test_pattern[barno] & 0xff000000;
+ /* Store the (partial) offset in the remaining bytes. */
+ val |= offset & 0x00ffffff;
+
+ return val;
+}
+
+static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ int j, size;
+
+ size = pci_resource_len(pdev, barno);
+
+ if (barno == test->test_reg_bar)
+ size = 0x4;
+
+ for (j = 0; j < size; j += 4)
+ writel_relaxed(bar_test_pattern_with_offset(barno, j),
+ test->bar[barno] + j);
+}
+
+static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ int j, size;
+ u32 val;
+
+ size = pci_resource_len(pdev, barno);
+
+ if (barno == test->test_reg_bar)
+ size = 0x4;
+
+ for (j = 0; j < size; j += 4) {
+ u32 expected = bar_test_pattern_with_offset(barno, j);
+
+ val = readl_relaxed(test->bar[barno] + j);
+ if (val != expected) {
+ dev_err(dev,
+ "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
+ barno, j, val, expected);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
+{
+ enum pci_barno bar;
+ int ret;
+
+ /* Write all BARs in order (without reading). */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ if (test->bar[bar])
+ pci_endpoint_test_bars_write_bar(test, bar);
- remain = bar_size % buf_size;
- if (remain)
- if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
- write_buf, read_buf, remain))
- return false;
+ /*
+ * Read all BARs in order (without writing).
+ * If there is an address translation issue on the EP, writing one BAR
+ * might have overwritten another BAR. Ensure that this is not the case.
+ * (Reading back the BAR directly after writing cannot detect this.)
+ */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (test->bar[bar]) {
+ ret = pci_endpoint_test_bars_read_bar(test, bar);
+ if (ret)
+ return ret;
+ }
+ }
- return true;
+ return 0;
}
-static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
u32 val;
@@ -334,16 +418,17 @@ static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
- return true;
+ return 0;
}
-static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
u16 msi_num, bool msix)
{
- u32 val;
struct pci_dev *pdev = test->pdev;
+ u32 val;
+ int ret;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
@@ -354,9 +439,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
+
+ ret = pci_irq_vector(pdev, msi_num - 1);
+ if (ret < 0)
+ return ret;
+
+ if (ret != test->last_irq)
+ return -EIO;
- return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
+ return 0;
}
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
@@ -375,11 +467,10 @@ static int pci_endpoint_test_validate_xfer_params(struct device *dev,
return 0;
}
-static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
+static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
void *src_addr;
void *dst_addr;
u32 flags = 0;
@@ -398,17 +489,17 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -418,22 +509,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_src_addr, size + alignment);
orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
size + alignment, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_src_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_src_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_src_phys_addr;
}
@@ -457,15 +547,15 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
+ ret = -ENOMEM;
goto err_dst_addr;
}
orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
size + alignment, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_dst_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_dst_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map destination buffer address\n");
- ret = false;
goto err_dst_phys_addr;
}
@@ -498,8 +588,8 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
dst_crc32 = crc32_le(~0, dst_addr, size);
- if (dst_crc32 == src_crc32)
- ret = true;
+ if (dst_crc32 != src_crc32)
+ ret = -EIO;
err_dst_phys_addr:
kfree(orig_dst_addr);
@@ -510,16 +600,13 @@ err_dst_addr:
err_src_phys_addr:
kfree(orig_src_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
+static int pci_endpoint_test_write(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
u32 reg;
@@ -534,17 +621,17 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
size_t size;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err != 0) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -554,23 +641,22 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_addr, size + alignment);
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -603,24 +689,21 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
wait_for_completion(&test->irq_raised);
reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
- if (reg & STATUS_READ_SUCCESS)
- ret = true;
+ if (!(reg & STATUS_READ_SUCCESS))
+ ret = -EIO;
dma_unmap_single(dev, orig_phys_addr, size + alignment,
DMA_TO_DEVICE);
err_phys_addr:
kfree(orig_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
+static int pci_endpoint_test_read(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
size_t size;
@@ -634,17 +717,17 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -654,21 +737,20 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -700,50 +782,51 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
crc32 = crc32_le(~0, addr, size);
- if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
- ret = true;
+ if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+ ret = -EIO;
err_phys_addr:
kfree(orig_addr);
-err:
return ret;
}
-static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- return true;
+
+ return 0;
}
-static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
+ int ret;
if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- return false;
+ return -EINVAL;
}
if (test->irq_type == req_irq_type)
- return true;
+ return 0;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
- goto err;
-
- if (!pci_endpoint_test_request_irq(test))
- goto err;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
+ if (ret)
+ return ret;
- return true;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret) {
+ pci_endpoint_test_free_irq_vectors(test);
+ return ret;
+ }
-err:
- pci_endpoint_test_free_irq_vectors(test);
- return false;
+ return 0;
}
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
@@ -768,6 +851,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
+ case PCITEST_BARS:
+ ret = pci_endpoint_test_bars(test);
+ break;
case PCITEST_INTX_IRQ:
ret = pci_endpoint_test_intx_irq(test);
break;
@@ -805,10 +891,24 @@ static const struct file_operations pci_endpoint_test_fops = {
.unlocked_ioctl = pci_endpoint_test_ioctl,
};
+static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 caps;
+
+ caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
+ dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
+
+ /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
+ if (caps & CAP_UNALIGNED_ACCESS)
+ test->alignment = 0;
+}
+
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err;
+ int ret;
int id;
char name[24];
enum pci_barno bar;
@@ -847,24 +947,23 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- err = pci_enable_device(pdev);
- if (err) {
+ ret = pci_enable_device(pdev);
+ if (ret) {
dev_err(dev, "Cannot enable PCI device\n");
- return err;
+ return ret;
}
- err = pci_request_regions(pdev, DRV_MODULE_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (ret) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
pci_set_master(pdev);
- if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
+ if (ret)
goto err_disable_irq;
- }
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -879,7 +978,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->base = test->bar[test_reg_bar];
if (!test->base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
@@ -889,7 +988,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
if (id < 0) {
- err = id;
+ ret = id;
dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
@@ -897,27 +996,28 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
test->name = kstrdup(name, GFP_KERNEL);
if (!test->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_ida_remove;
}
- if (!pci_endpoint_test_request_irq(test)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret)
goto err_kfree_test_name;
- }
+
+ pci_endpoint_test_get_capabilities(test);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
misc_device->fops = &pci_endpoint_test_fops;
- err = misc_register(misc_device);
- if (err) {
+ ret = misc_register(misc_device);
+ if (ret) {
dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
@@ -949,7 +1049,7 @@ err_disable_irq:
err_disable_pdev:
pci_disable_device(pdev);
- return err;
+ return ret;
}
static void pci_endpoint_test_remove(struct pci_dev *pdev)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 61b66e318488..7a3c34306de9 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -93,7 +93,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit; /* = 0 */
static int xpc_disengage_max_timelimit = 120;
-static struct ctl_table xpc_sys_xpc_hb[] = {
+static const struct ctl_table xpc_sys_xpc_hb[] = {
{
.procname = "hb_interval",
.data = &xpc_hb_interval,
@@ -111,7 +111,7 @@ static struct ctl_table xpc_sys_xpc_hb[] = {
.extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval},
};
-static struct ctl_table xpc_sys_xpc[] = {
+static const struct ctl_table xpc_sys_xpc[] = {
{
.procname = "disengage_timelimit",
.data = &xpc_disengage_timelimit,
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index e40b027a88e2..e5069882457e 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -23,7 +23,7 @@
#define SRAM_GRANULARITY 32
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -38,7 +38,7 @@ static ssize_t sram_read(struct file *filp, struct kobject *kobj,
}
static ssize_t sram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -83,8 +83,8 @@ static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
return -ENOMEM;
part->battr.attr.mode = S_IRUSR | S_IWUSR;
- part->battr.read = sram_read;
- part->battr.write = sram_write;
+ part->battr.read_new = sram_read;
+ part->battr.write_new = sram_write;
part->battr.size = block->size;
return device_create_bin_file(sram->dev, &part->battr);
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index f0b1fc87490e..26166357b255 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -111,8 +111,7 @@ static int start_stall_detector_cpu(unsigned int cpu)
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
- hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
+ hrtimer_setup(vcpu_hrtimer, vcpu_stall_detect_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu_stall_detector->is_initialized = true;
hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
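Both this driver and dw_mmc further down migrate to hrtimer_setup(), which folds the old two-step hrtimer_init()-plus-callback-assignment into one call, so the timer's function pointer is in place from initialization onward. A sketch of the pattern (timer and callback names are illustrative, not from this patch):

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void timer_setup_example(struct hrtimer *t)
{
	/*
	 * Before: hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *         t->function = my_timer_fn;
	 * After: one call initializes the timer and sets the callback.
	 */
	hrtimer_setup(t, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}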
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d996d39c0d6f..5241528f8b90 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -19,7 +19,6 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
@@ -557,8 +556,7 @@ int mmc_cqe_recovery(struct mmc_host *host)
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_STOP_TRANSMISSION;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
@@ -567,8 +565,7 @@ int mmc_cqe_recovery(struct mmc_host *host)
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
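The two-step "set R1B, then clear the CRC bit" sequence collapses into the single MMC_RSP_R1B_NO_CRC define, which also lets host drivers (see mtk-sd below) recognize a busy-wait response with CRC checking suppressed as a distinct response type. A compilable sketch of the equivalence, with flag values assumed to mirror include/linux/mmc/core.h:

/* Flag values assumed to mirror include/linux/mmc/core.h. */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_BUSY	(1 << 3)
#define MMC_RSP_OPCODE	(1 << 4)

#define MMC_RSP_R1B	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)
#define MMC_RSP_R1B_NO_CRC	(MMC_RSP_PRESENT | MMC_RSP_OPCODE | MMC_RSP_BUSY)

/* The new define is exactly the old flag set with MMC_RSP_CRC cleared. */
_Static_assert((MMC_RSP_R1B & ~MMC_RSP_CRC) == MMC_RSP_R1B_NO_CRC,
	       "R1B_NO_CRC equals R1B with the CRC bit dropped");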
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 48bda70145ee..bdb22998357e 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -14,7 +14,6 @@
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/pagemap.h>
-#include <linux/pm_wakeup.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4d6844261912..ab662f502fe7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -441,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
else
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
- mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ mq->tag_set.flags = BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 9566837c9848..4b19b8a16b09 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -458,6 +458,8 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+ max_dtr = min_not_zero(max_dtr, card->quirk_max_rate);
+
return max_dtr;
}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc360902729d..24fffc702a94 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
+ }
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 7847f0c8b465..e5f151d092cd 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1343,6 +1343,25 @@ static int bcm2835_add_host(struct bcm2835_host *host)
return 0;
}
+static int bcm2835_suspend(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int bcm2835_resume(struct device *dev)
+{
+ struct bcm2835_host *host = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(host->clk);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835_pm_ops, bcm2835_suspend,
+ bcm2835_resume);
+
static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1471,6 +1490,7 @@ static struct platform_driver bcm2835_driver = {
.name = "sdhost-bcm2835",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = bcm2835_match,
+ .pm = pm_ptr(&bcm2835_pm_ops),
},
};
module_platform_driver(bcm2835_driver);
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index d5f4b6972f63..cb8044093402 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -25,22 +25,16 @@ static const struct cqhci_crypto_alg_entry {
static inline struct cqhci_host *
cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc =
- container_of(profile, struct mmc_host, crypto_profile);
-
- return mmc->cqe_private;
+ return mmc_from_crypto_profile(profile)->cqe_private;
}
-static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static void cqhci_crypto_program_key(struct cqhci_host *cq_host,
+ const union cqhci_crypto_cfg_entry *cfg,
+ int slot)
{
u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg);
int i;
- if (cq_host->ops->program_key)
- return cq_host->ops->program_key(cq_host, cfg, slot);
-
/* Clear CFGE */
cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
@@ -55,7 +49,6 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
/* Write dword 16, which includes the new value of CFGE */
cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
- return 0;
}
static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
@@ -72,7 +65,6 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
int i;
int cap_idx = -1;
union cqhci_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
@@ -99,10 +91,10 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
memcpy(cfg.crypto_key, key->raw, key->size);
}
- err = cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
@@ -113,7 +105,8 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
*/
union cqhci_crypto_cfg_entry cfg = {};
- return cqhci_crypto_program_key(cq_host, &cfg, slot);
+ cqhci_crypto_program_key(cq_host, &cfg, slot);
+ return 0;
}
static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
@@ -170,7 +163,6 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
struct blk_crypto_profile *profile = &mmc->crypto_profile;
- unsigned int num_keyslots;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
unsigned int slot;
@@ -180,6 +172,9 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
goto out;
+ if (cq_host->ops->uses_custom_crypto_profile)
+ goto profile_initialized;
+
cq_host->crypto_capabilities.reg_val =
cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
@@ -198,9 +193,8 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
* CCAP.CFGC is off by one, so the actual number of crypto
* configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
*/
- num_keyslots = cq_host->crypto_capabilities.config_count + 1;
-
- err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
+ err = devm_blk_crypto_profile_init(
+ dev, profile, cq_host->crypto_capabilities.config_count + 1);
if (err)
goto out;
@@ -228,9 +222,11 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
+profile_initialized:
+
/* Clear all the keyslots so that we start in a known state. */
- for (slot = 0; slot < num_keyslots; slot++)
- cqhci_crypto_clear_keyslot(cq_host, slot);
+ for (slot = 0; slot < profile->num_slots; slot++)
+ profile->ll_ops.keyslot_evict(profile, NULL, slot);
/* CQHCI crypto requires the use of 128-bit task descriptors. */
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
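On the cap.sdus_mask * 512 arithmetic retained above: SDUS bit i advertises support for a 512 << i byte data unit, while blk-crypto wants a mask whose bit k means 2^k bytes, so multiplying the mask by 512 shifts every bit up by 9 and performs exactly that conversion. A tiny runnable check (the sample mask is illustrative):

#include <assert.h>

int main(void)
{
	unsigned int sdus_mask = 0x3;	/* 512-byte and 1024-byte units */

	/* bit 0 -> bit 9 (512), bit 1 -> bit 10 (1024) */
	assert(sdus_mask * 512 == (512u | 1024u));
	return 0;
}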
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index fab9d74445ba..ce189a1866b9 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -289,13 +289,11 @@ struct cqhci_host_ops {
u64 *data);
void (*pre_enable)(struct mmc_host *mmc);
void (*post_disable)(struct mmc_host *mmc);
-#ifdef CONFIG_MMC_CRYPTO
- int (*program_key)(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg, int slot);
-#endif
void (*set_tran_desc)(struct cqhci_host *cq_host, u8 **desc,
dma_addr_t addr, int len, bool end, bool dma64);
-
+#ifdef CONFIG_MMC_CRYPTO
+ bool uses_custom_crypto_profile;
+#endif
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
index cce174b5249b..5791a975a944 100644
--- a/drivers/mmc/host/dw_mmc-hi3798mv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -181,7 +181,6 @@ static int dw_mci_hi3798mv200_init(struct dw_mci *host)
{
struct dw_mci_hi3798mv200_priv *priv;
struct device_node *np = host->dev->of_node;
- int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -199,15 +198,12 @@ static int dw_mci_hi3798mv200_init(struct dw_mci *host)
return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk),
"failed to get enabled ciu-drive clock\n");
- priv->crg_reg = syscon_regmap_lookup_by_phandle(np, "hisilicon,sap-dll-reg");
+ priv->crg_reg = syscon_regmap_lookup_by_phandle_args(np, "hisilicon,sap-dll-reg",
+ 1, &priv->sap_dll_offset);
if (IS_ERR(priv->crg_reg))
return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg),
"failed to get CRG reg\n");
- ret = of_property_read_u32_index(np, "hisilicon,sap-dll-reg", 1, &priv->sap_dll_offset);
- if (ret)
- return dev_err_probe(host->dev, ret, "failed to get sample DLL register offset\n");
-
host->priv = priv;
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3cbda98d08d2..31f40c04afda 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1875,8 +1875,7 @@ static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
- hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- host->fault_timer.function = dw_mci_fault_timer;
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index af445d3f8e2a..345ea91629e0 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -273,6 +273,7 @@
#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
+#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
@@ -318,6 +319,7 @@
/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL BIT(16) /* RW */
+#define PAD_DS_DLY2_SEL BIT(15) /* RW */
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
@@ -414,6 +416,7 @@ struct mtk_mmc_compatible {
u8 clk_div_bits;
bool recheck_sdio_irq;
bool hs400_tune; /* only used for MT8173 */
+ bool needs_top_base;
u32 pad_tune_reg;
bool async_fifo;
bool data_tune;
@@ -587,6 +590,7 @@ static const struct mtk_mmc_compatible mt7986_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -627,6 +631,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -653,6 +658,7 @@ static const struct mtk_mmc_compatible mt8196_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
+ .needs_top_base = true,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
@@ -1097,11 +1103,12 @@ static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
u32 resp;
switch (mmc_resp_type(cmd)) {
- /* Actually, R1, R5, R6, R7 are the same */
+ /* Actually, R1, R5, R6, R7 are the same */
case MMC_RSP_R1:
resp = 0x1;
break;
case MMC_RSP_R1B:
+ case MMC_RSP_R1B_NO_CRC:
resp = 0x7;
break;
case MMC_RSP_R2:
@@ -1351,7 +1358,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
* CRC error.
*/
msdc_reset_hw(host);
- if (events & MSDC_INT_RSPCRCERR) {
+ if (events & MSDC_INT_RSPCRCERR &&
+ mmc_resp_type(cmd) != MMC_RSP_R1B_NO_CRC) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
} else if (events & MSDC_INT_CMDTMO) {
@@ -2498,13 +2506,23 @@ tune_done:
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+
host->hs400_mode = true;
- if (host->top_base)
- writel(host->hs400_ds_delay,
- host->top_base + EMMC50_PAD_DS_TUNE);
- else
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ if (host->top_base) {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ }
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@@ -2524,14 +2542,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
- PAD_DS_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY2_SEL);
} else {
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->base + PAD_DS_TUNE,
- PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
}
host->hs400_tuning = true;
@@ -2885,9 +2900,13 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (IS_ERR(host->base))
return PTR_ERR(host->base);
- host->top_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->top_base))
- host->top_base = NULL;
+ host->dev_comp = of_device_get_match_data(&pdev->dev);
+
+ if (host->dev_comp->needs_top_base) {
+ host->top_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(host->top_base))
+ return PTR_ERR(host->top_base);
+ }
ret = mmc_regulator_get_supply(mmc);
if (ret)
@@ -2949,7 +2968,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
- host->dev_comp = of_device_get_match_data(&pdev->dev);
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index e7a286c3216f..0a9affd12532 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -995,7 +995,7 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host;
struct resource *res;
int ret = 0, irq;
- bool dat3_card_detect = false;
+ bool dat3_card_detect;
dma_cap_mask_t mask;
struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
@@ -1048,9 +1048,9 @@ static int mxcmci_probe(struct platform_device *pdev)
if (pdata)
dat3_card_detect = pdata->dat3_card_detect;
- else if (mmc_card_is_removable(mmc)
- && !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
- dat3_card_detect = true;
+ else
+ dat3_card_detect = mmc_card_is_removable(mmc) &&
+ !of_property_present(pdev->dev.of_node, "cd-gpios");
ret = mmc_regulator_get_supply(mmc);
if (ret)
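The of_property_read_bool() to of_property_present() conversions here and below (sdhci-esdhc-imx, hbmc-am654) are semantic rather than functional: both currently test for existence, but of_property_read_bool() is meant for genuinely boolean properties, while presence checks on value-carrying properties such as "cd-gpios" or "mux-controls" belong to of_property_present(). A sketch of the distinction, with an illustrative helper name:

#include <linux/of.h>

/* Sketch only; want_dat3_detect is illustrative, not from this patch. */
static bool want_dat3_detect(struct device_node *np, bool removable)
{
	/*
	 * "cd-gpios" carries a value (a GPIO specifier), so we only ask
	 * whether it is described at all; mmc_of_parse() claims it later.
	 */
	return removable && !of_property_present(np, "cd-gpios");
}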
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 48d3b0aae5a0..0c6eb60a95fd 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -115,8 +115,6 @@ static int sd_response_type(struct mmc_command *cmd)
return SD_RSP_TYPE_R0;
case MMC_RSP_R1:
return SD_RSP_TYPE_R1;
- case MMC_RSP_R1_NO_CRC:
- return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
case MMC_RSP_R1B:
return SD_RSP_TYPE_R1b;
case MMC_RSP_R2:
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 107c78df53cf..d229c2b83ea9 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -313,9 +313,6 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
case MMC_RSP_R1:
rsp_type = SD_RSP_TYPE_R1;
break;
- case MMC_RSP_R1_NO_CRC:
- rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
- break;
case MMC_RSP_R1B:
rsp_type = SD_RSP_TYPE_R1b;
break;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index d1ce9193ece9..e6c5c82f64fa 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -822,8 +822,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct acpi_device *device;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
- struct resource *iomem;
- resource_size_t len;
size_t priv_size;
int quirks = 0;
int err;
@@ -844,17 +842,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -ENOMEM;
-
- len = resource_size(iomem);
- if (len < 0x100)
- dev_err(dev, "Invalid iomem size!\n");
-
- if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
- return -ENOMEM;
-
priv_size = slot ? slot->priv_size : 0;
host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
@@ -876,10 +863,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
}
- host->ioaddr = devm_ioremap(dev, iomem->start,
- resource_size(iomem));
- if (host->ioaddr == NULL) {
- err = -ENOMEM;
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->ioaddr)) {
+ err = PTR_ERR(host->ioaddr);
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 0ef4d578ade8..48cdcba0f39c 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
clk_disable_unprepare(priv->base_clk);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
return sdhci_pltfm_suspend(dev);
}
@@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev)
ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
}
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
return ret;
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d55d045ef236..ff78a7c6a04c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -304,6 +304,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx7ulp_data = {
@@ -1647,7 +1648,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
* Retrieving and requesting the actual WP GPIO will happen
* in the call to mmc_of_parse().
*/
- if (of_property_read_bool(np, "wp-gpios"))
+ if (of_property_present(np, "wp-gpios"))
boarddata->wp_type = ESDHC_WP_GPIO;
of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 319f0ebbe652..e3d39311fdc7 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -134,9 +134,18 @@
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+/* Max load for eMMC Vdd supply */
+#define MMC_VMMC_MAX_LOAD_UA 570000
+
/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA 325000
+/* Max load for SD Vdd supply */
+#define SD_VMMC_MAX_LOAD_UA 800000
+
+/* Max load for SD Vdd-io supply */
+#define SD_VQMMC_MAX_LOAD_UA 22000
+
#define msm_host_readl(msm_host, host, offset) \
msm_host->var_ops->msm_readl_relaxed(host, offset)
@@ -1403,11 +1412,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
return ret;
}
-static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
+static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
+ else if (mmc_card_mmc(mmc->card))
+ load = MMC_VMMC_MAX_LOAD_UA;
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vmmc, load);
+}
+
+static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
+{
+ int load;
+
+ if (!hpm)
+ load = 0;
+ else if (!mmc->card)
+ load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
+ else if (mmc_card_sd(mmc->card))
+ load = SD_VQMMC_MAX_LOAD_UA;
+ else
+ return;
+
+ regulator_set_load(mmc->supply.vqmmc, load);
+}
+
+static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
+ struct mmc_host *mmc, bool hpm)
{
if (IS_ERR(mmc->supply.vmmc))
return 0;
+ msm_config_vmmc_regulator(mmc, hpm);
+
return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}
@@ -1420,6 +1466,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
if (msm_host->vqmmc_enabled == level)
return 0;
+ msm_config_vqmmc_regulator(mmc, level);
+
if (level) {
/* Set the IO voltage regulator to default voltage level */
if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
@@ -1642,7 +1690,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
}
if (pwr_state) {
- ret = sdhci_msm_set_vmmc(mmc);
+ ret = sdhci_msm_set_vmmc(msm_host, mmc,
+ pwr_state & REQ_BUS_ON);
if (!ret)
ret = sdhci_msm_set_vqmmc(msm_host, mmc,
pwr_state & REQ_BUS_ON);
@@ -1807,12 +1856,19 @@ out:
#ifdef CONFIG_MMC_CRYPTO
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
+
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
struct cqhci_host *cq_host)
{
struct mmc_host *mmc = msm_host->mmc;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
struct device *dev = mmc_dev(mmc);
struct qcom_ice *ice;
+ union cqhci_crypto_capabilities caps;
+ union cqhci_crypto_cap_entry cap;
+ int err;
+ int i;
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
@@ -1827,8 +1883,37 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
return PTR_ERR_OR_ZERO(ice);
msm_host->ice = ice;
- mmc->caps2 |= MMC_CAP2_CRYPTO;
+ /* Initialize the blk_crypto_profile */
+
+ caps.reg_val = cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
+
+ /* The number of keyslots supported is (CFGC+1) */
+ err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
+ if (err)
+ return err;
+
+ profile->ll_ops = sdhci_msm_crypto_ops;
+ profile->max_dun_bytes_supported = 4;
+ profile->dev = dev;
+
+ /*
+ * Currently this driver only supports AES-256-XTS. All known versions
+ * of ICE support it, but to be safe make sure it is really declared in
+ * the crypto capability registers. The crypto capability registers
+ * also give the supported data unit size(s).
+ */
+ for (i = 0; i < caps.num_crypto_cap; i++) {
+ cap.reg_val = cpu_to_le32(cqhci_readl(cq_host,
+ CQHCI_CRYPTOCAP +
+ i * sizeof(__le32)));
+ if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS &&
+ cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256)
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
+ cap.sdus_mask * 512;
+ }
+
+ mmc->caps2 |= MMC_CAP2_CRYPTO;
return 0;
}
@@ -1854,35 +1939,55 @@ static __maybe_unused int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
return 0;
}
-/*
- * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
- * vendor-specific SCM calls for this; it doesn't support the standard way.
- */
-static int sdhci_msm_program_key(struct cqhci_host *cq_host,
- const union cqhci_crypto_cfg_entry *cfg,
- int slot)
+static inline struct sdhci_msm_host *
+sdhci_msm_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct sdhci_host *host = mmc_priv(cq_host->mmc);
+ struct mmc_host *mmc = mmc_from_crypto_profile(profile);
+ struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- union cqhci_crypto_cap_entry cap;
- if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
- return qcom_ice_evict_key(msm_host->ice, slot);
+ return msm_host;
+}
+
+/*
+ * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call
+ * for this; it doesn't support the standard way.
+ */
+static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
/* Only AES-256-XTS has been tested so far. */
- cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
- cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
- return -EINVAL;
+ if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
+ return -EOPNOTSUPP;
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
+ key->raw,
+ key->crypto_cfg.data_unit_size / 512,
+ slot);
}
+static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct sdhci_msm_host *msm_host =
+ sdhci_msm_host_from_crypto_profile(profile);
+
+ return qcom_ice_evict_key(msm_host->ice, slot);
+}
+
+static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
+ .keyslot_program = sdhci_msm_ice_keyslot_program,
+ .keyslot_evict = sdhci_msm_ice_keyslot_evict,
+};
+
#else /* CONFIG_MMC_CRYPTO */
static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
@@ -1988,7 +2093,7 @@ static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
- .program_key = sdhci_msm_program_key,
+ .uses_custom_crypto_profile = true,
#endif
};
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b73f673db92b..f75c31815ab0 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -155,7 +155,6 @@ struct sdhci_am654_data {
u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
-#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
};
struct window {
@@ -357,29 +356,6 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
-static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int ret;
-
- if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
- ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret < 0) {
- pr_err("%s: Switching to 1.8V signalling voltage failed,\n",
- mmc_hostname(mmc));
- return -EIO;
- }
- }
- return 0;
- }
-
- return sdhci_start_signal_voltage_switch(mmc, ios);
-}
-
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -868,11 +844,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
- /* Suppress v1p8 ena for eMMC and SD with vqmmc supply */
- if (!!of_parse_phandle(dev->of_node, "vmmc-supply", 0) ==
- !!of_parse_phandle(dev->of_node, "vqmmc-supply", 0))
- sdhci_am654->quirks |= SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA;
-
sdhci_get_of_property(pdev);
return 0;
@@ -969,7 +940,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
- host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
pm_runtime_get_noresume(dev);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 45a474ccab1c..04c1c54df791 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -297,7 +297,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host,
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
case MMC_RSP_R1:
- case MMC_RSP_R1_NO_CRC:
c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index f576e6a890e8..7584d0ba9396 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -27,6 +27,7 @@
struct mchp48_caps {
unsigned int size;
unsigned int page_size;
+ bool auto_disable_wel;
};
struct mchp48l640_flash {
@@ -194,9 +195,15 @@ static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
else
goto fail;
- ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
- if (ret)
- goto fail;
+ if (flash->caps->auto_disable_wel) {
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+ } else {
+ ret = mchp48l640_write_prepare(flash, false);
+ if (ret)
+ goto fail;
+ }
kfree(cmd);
return 0;
@@ -293,6 +300,13 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
static const struct mchp48_caps mchp48l640_caps = {
.size = SZ_8K,
.page_size = 32,
+ .auto_disable_wel = true,
+};
+
+static const struct mchp48_caps mb85rs128ty_caps = {
+ .size = SZ_16K,
+ .page_size = 256,
+ .auto_disable_wel = false,
};
static int mchp48l640_probe(struct spi_device *spi)
@@ -353,6 +367,10 @@ static const struct of_device_id mchp48l640_of_table[] = {
.compatible = "microchip,48l640",
.data = &mchp48l640_caps,
},
+ {
+ .compatible = "fujitsu,mb85rs128ty",
+ .data = &mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
@@ -362,6 +380,10 @@ static const struct spi_device_id mchp48l640_spi_ids[] = {
.name = "48l640",
.driver_data = (kernel_ulong_t)&mchp48l640_caps,
},
+ {
+ .name = "mb85rs128ty",
+ .driver_data = (kernel_ulong_t)&mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index f756c60a4931..fd9ec165e61a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/security.h>
struct phram_mtd_list {
struct mtd_info mtd;
@@ -410,19 +411,23 @@ static int __init init_phram(void)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&phram_driver);
if (ret)
return ret;
#ifndef MODULE
- if (phram_paramline[0])
+ if (phram_paramline[0]) {
ret = phram_setup(phram_paramline);
+ if (ret)
+ platform_driver_unregister(&phram_driver);
+ }
phram_init_called = 1;
#endif
- if (ret)
- platform_driver_unregister(&phram_driver);
-
return ret;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index dba584fa2a53..f2266145b821 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2104,7 +2104,6 @@ static void stfsm_remove(struct platform_device *pdev)
WARN_ON(mtd_device_unregister(&fsm->mtd));
}
-#ifdef CONFIG_PM_SLEEP
static int stfsmfsm_suspend(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
@@ -2120,9 +2119,8 @@ static int stfsmfsm_resume(struct device *dev)
return clk_prepare_enable(fsm->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
@@ -2136,7 +2134,7 @@ static struct platform_driver stfsm_driver = {
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
- .pm = &stfsm_pm_ops,
+ .pm = pm_sleep_ptr(&stfsm_pm_ops),
},
};
module_platform_driver(stfsm_driver);
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 217f4e69233f..82a1e7b7e4d8 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev)
priv->hbdev.np = of_get_next_child(np, NULL);
ret = of_address_to_resource(priv->hbdev.np, 0, &res);
if (ret)
- return ret;
+ goto put_node;
- if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ if (of_property_present(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
- if (IS_ERR(control))
- return PTR_ERR(control);
+ if (IS_ERR(control)) {
+ ret = PTR_ERR(control);
+ goto put_node;
+ }
ret = mux_control_select(control, 1);
if (ret) {
dev_err(dev, "Failed to select HBMC mux\n");
- return ret;
+ goto put_node;
}
priv->mux_ctrl = control;
}
priv->hbdev.map.size = resource_size(&res);
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(priv->hbdev.map.virt))
- return PTR_ERR(priv->hbdev.map.virt);
+ if (IS_ERR(priv->hbdev.map.virt)) {
+ ret = PTR_ERR(priv->hbdev.map.virt);
+ goto disable_mux;
+ }
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
@@ -226,6 +230,8 @@ release_dma:
disable_mux:
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
+put_node:
+ of_node_put(priv->hbdev.np);
return ret;
}
@@ -241,6 +247,7 @@ static void am654_hbmc_remove(struct platform_device *pdev)
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
+ of_node_put(priv->hbdev.np);
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 47ead84407cd..847c11542f02 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
@@ -404,6 +404,7 @@ out_list_del:
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
+ unsigned int memflags;
lockdep_assert_held(&mtd_table_mutex);
@@ -420,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
spin_unlock_irqrestore(&old->queue_lock, flags);
/* freeze+quiesce queue to ensure all requests are flushed */
- blk_mq_freeze_queue(old->rq);
+ memflags = blk_mq_freeze_queue(old->rq);
blk_mq_quiesce_queue(old->rq);
blk_mq_unquiesce_queue(old->rq);
- blk_mq_unfreeze_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq, memflags);
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
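blk_mq_freeze_queue() now returns the memalloc scope flags it entered (freezing also enters a noio scope, so allocations made while the queue is frozen cannot recurse into it via reclaim), and the caller must hand that token back to blk_mq_unfreeze_queue(). The calling pattern, sketched with an illustrative queue:

#include <linux/blk-mq.h>

static void freeze_example(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);	/* freeze + enter noio scope */
	blk_mq_quiesce_queue(q);
	/* ... queue is frozen and quiesced; safe to flush/tear down ... */
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q, memflags);	/* unfreeze + restore scope */
}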
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19e1291ac4d5..da1586a36574 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,7 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-
+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index f66385faf631..0dc2ea4fc857 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
+ *retlen = ops.retlen;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
new file mode 100644
index 000000000000..e0ed25b5afea
--- /dev/null
+++ b/drivers/mtd/nand/qpic_common.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+
+/**
+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
+ * @nandc: qpic nand controller
+ *
+ * This function frees the BAM transaction memory.
+ */
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ kfree(bam_txn);
+}
+EXPORT_SYMBOL(qcom_free_bam_transaction);
+
+/**
+ * qcom_alloc_bam_transaction() - allocate BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate and initialize the BAM transaction structure
+ */
+struct bam_transaction *
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+
+ bam_txn->data_sgl = bam_txn_buf;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
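The allocator above uses the classic single-allocation carving idiom: one kzalloc() sized for the header plus all three trailing arrays, with the member pointers fixed up afterwards, so qcom_free_bam_transaction() releases everything with a single kfree(). A standalone sketch of the same idiom; element types and per-codeword counts are illustrative, and the regions are ordered by alignment so each carved pointer stays naturally aligned:

#include <stdint.h>
#include <stdlib.h>

struct txn {
	uint64_t *ce;	/* stand-in for bam_ce */
	uint32_t *cmd;	/* stand-in for cmd_sgl */
	uint16_t *data;	/* stand-in for data_sgl */
};

static struct txn *txn_alloc(unsigned int num_cw)
{
	size_t sz = sizeof(struct txn) +
		    num_cw * (8 * sizeof(uint64_t) +
			      2 * sizeof(uint32_t) +
			      4 * sizeof(uint16_t));
	char *buf = calloc(1, sz);
	struct txn *t = (struct txn *)buf;

	if (!t)
		return NULL;

	buf += sizeof(*t);
	t->ce = (uint64_t *)buf;
	buf += 8 * sizeof(uint64_t) * num_cw;
	t->cmd = (uint32_t *)buf;
	buf += 2 * sizeof(uint32_t) * num_cw;
	t->data = (uint16_t *)buf;

	return t;	/* free(t) releases the header and all arrays */
}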
+
+/**
+ * qcom_clear_bam_transaction() - Clears the BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will clear the BAM transaction indexes.
+ */
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->supports_bam)
+ return;
+
+ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ bam_txn->last_data_desc = NULL;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_clear_bam_transaction);
+
+/**
+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
+ * @data: data pointer
+ *
+ * This function is a callback for DMA descriptor completion
+ */
+void qcom_qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ complete(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
+
+/**
+ * qcom_nandc_dev_to_mem() - Sync the register read buffer for CPU or device
+ * @nandc: qpic nand controller
+ * @is_cpu: true to sync the buffer for the CPU, false for the device
+ *
+ * This function syncs the register read buffer for DMA, either for CPU
+ * access or for device access
+ */
+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+{
+ if (!nandc->props->supports_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
+
+/**
+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
+ * @nandc: qpic nand controller
+ * @chan: dma channel
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function maps the scatter gather list for DMA transfer and forms the
+ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
+ * descriptor queue which will be submitted to DMA engine.
+ */
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
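+
+/*
+ * Note (illustrative): the cmd/tx/rx scatter-gather lists are consumed
+ * as sliding windows. Each call to qcom_prepare_bam_async_desc()
+ * batches only the entries queued since the previous call, i.e. the
+ * range [sgl_start, sgl_pos), then advances sgl_start to sgl_pos so
+ * the next descriptor starts from a fresh window.
+ */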
+
+/**
+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: true for a register read, false for a register write
+ * @reg_off: offset of the first register in the contiguous block
+ * @vaddr: virtual address of the buffer holding the register values
+ * @size: number of registers to read or write
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares the command descriptor for BAM DMA
+ * which will be used for NAND register reads and writes.
+ */
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE | DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
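+
+/*
+ * Usage sketch (illustrative only; 'vaddr' is a placeholder buffer):
+ * queueing a register write and closing the command SGL in one call
+ * looks like
+ *
+ *	qcom_write_reg_dma(nandc, vaddr, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ *
+ * NAND_BAM_NEXT_SGL ends the current command SGL so the next batch
+ * starts a new one; for NAND_EXEC_CMD, qcom_write_reg_dma() also sets
+ * NAND_BAM_NWD, which fences the descriptor with
+ * DMA_PREP_FENCE | DMA_PREP_CMD.
+ */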
+
+/**
+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: true for a data read, false for a data write
+ * @vaddr: virtual address of the data buffer
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares the data descriptor for BAM DMA which
+ * will be used for NAND data reads and writes.
+ */
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM only sets EOT for DMA_PREP_INTERRUPT, so unless the caller
+ * suppressed it with NAND_BAM_NO_EOT, form the DMA descriptor here
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
+
+/**
+ * qcom_prep_adm_dma_desc() - Prepare a descriptor for the ADM DMA engine
+ * @nandc: qpic nand controller
+ * @read: true for a read transfer, false for a write transfer
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer to transfer to or from
+ * @size: ADM DMA transaction size in bytes
+ * @flow_control: use peripheral flow control (CRCI) for this transfer
+ *
+ * This function prepares a descriptor for the ADM DMA engine.
+ */
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct qcom_adm_peripheral_config periph_conf = {};
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_slave_config slave_conf = {0};
+ enum dma_transfer_direction dir_eng;
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
+
+/**
+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
+ * @nandc: qpic nand controller
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares a descriptor to read a given number of
+ * contiguous registers into the reg_read_buf buffer.
+ */
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_read_reg_dma);
+
+/**
+ * qcom_write_reg_dma() - write a given number of registers
+ * @nandc: qpic nand controller
+ * @vaddr: contiguous memory from which the register values will
+ * be written
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares a descriptor to write a given number of
+ * contiguous registers.
+ */
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_write_reg_dma);
+
+/**
+ * qcom_read_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'.
+ */
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_read_data_dma);
+
+/**
+ * qcom_write_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer.
+ */
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_write_data_dma);
+
+/**
+ * qcom_submit_descs() - submit the DMA descriptors
+ * @nandc: qpic nand controller
+ *
+ * This function submits all prepared DMA descriptors, both command
+ * and data, and waits for their completion.
+ */
+int qcom_submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->supports_bam) {
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ ret = -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->supports_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_submit_descs);
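+
+/*
+ * Putting it together (illustrative sketch; 'vaddr', 'first',
+ * 'num_regs', 'buf' and 'size' are placeholders): a typical operation
+ * clears per-transaction state, queues register and data transfers,
+ * then submits everything at once:
+ *
+ *	qcom_clear_bam_transaction(nandc);
+ *	qcom_clear_read_regs(nandc);
+ *	qcom_write_reg_dma(nandc, vaddr, first, num_regs, NAND_BAM_NEXT_SGL);
+ *	qcom_read_data_dma(nandc, FLASH_BUF_ACC, buf, size, 0);
+ *	ret = qcom_submit_descs(nandc);
+ *
+ * qcom_submit_descs() flushes any open SGL windows, submits every
+ * queued descriptor, waits for completion, then unmaps and frees the
+ * descriptors.
+ */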
+
+/**
+ * qcom_clear_read_regs() - reset the read register buffer
+ * @nandc: qpic nand controller
+ *
+ * This function resets the register read buffer for the next NAND operation.
+ */
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ qcom_nandc_dev_to_mem(nandc, false);
+}
+EXPORT_SYMBOL(qcom_clear_read_regs);
+
+/**
+ * qcom_nandc_unalloc() - release qpic nand controller resources
+ * @nandc: qpic nand controller
+ *
+ * This function releases the DMA mappings and channels allocated for
+ * the qpic nand controller.
+ */
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->supports_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+EXPORT_SYMBOL(qcom_nandc_unalloc);
+
+/**
+ * qcom_nandc_alloc() - Allocate qpic nand controller resources
+ * @nandc: qpic nand controller
+ *
+ * This function allocates the buffers, DMA mappings and channels
+ * required by the qpic nand controller.
+ */
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * We use the internal buffer for reading ONFI params, reading small
+ * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller.
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->supports_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+ * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_nandc_alloc);
+
+MODULE_DESCRIPTION("QPIC controller common api");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index d0aaccf72d78..b8035df8f732 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -279,8 +279,8 @@ config MTD_NAND_SH_FLCTL
config MTD_NAND_DAVINCI
tristate "DaVinci/Keystone NAND controller"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on COMPILE_TEST || ARCH_DAVINCI || ARCH_KEYSTONE
+ depends on HAS_IOMEM && TI_AEMIF
help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
@@ -454,6 +454,14 @@ config MTD_NAND_TS72XX
help
Enables support for NAND controller on ts72xx SBCs.
+config MTD_NAND_NUVOTON_MA35
+ tristate "Nuvoton MA35 SoC NAND controller"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the NAND controller found on
+ the Nuvoton MA35 series SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d0b0e6b83568..99e79c448847 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
+obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 9c253a511e45..fea5b6119956 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2342,6 +2342,11 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
status = brcmnand_waitfunc(chip);
+ if (status < 0) {
+ ret = status;
+ goto out;
+ }
+
if (status & NAND_STATUS_FAIL) {
dev_info(ctrl->dev, "program failed at %llx\n",
(unsigned long long)addr);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 8d1d710e439d..6667eea95597 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -471,6 +471,8 @@ struct cdns_nand_ctrl {
struct {
void __iomem *virt;
dma_addr_t dma;
+ dma_addr_t iova_dma;
+ u32 size;
} io;
int irq;
@@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
}
if (dir == DMA_FROM_DEVICE) {
- src_dma = cdns_ctrl->io.dma;
+ src_dma = cdns_ctrl->io.iova_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
- dst_dma = cdns_ctrl->io.dma;
+ dst_dma = cdns_ctrl->io.iova_dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
@@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
@@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
+ struct dma_device *dma_dev = cdns_ctrl->dmac->device;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
- if (!cdns_ctrl->dmac) {
- dev_err(cdns_ctrl->dev,
- "Unable to get a DMA channel\n");
- ret = -EBUSY;
+ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cdns_ctrl->dmac)) {
+ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
+ "Failed to get a DMA channel\n");
goto disable_irq;
}
}
+ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
+ cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
+
+ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
+ goto dma_release_chnl;
+ }
+
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
@@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
return 0;
+unmap_dma_resource:
+ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
@@ -2956,6 +2972,10 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
+ if (cdns_ctrl->dmac)
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev,
+ cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
@@ -3020,7 +3040,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+
cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.size = resource_size(res);
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 1f8354acfb50..3986553881d0 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -10,9 +10,11 @@
* Dirk Behme <Dirk.Behme@gmail.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
@@ -43,6 +45,9 @@
#define MASK_ALE 0x08
#define MASK_CLE 0x10
+#define MAX_TSU_PS 3000 /* Input setup time in ps */
+#define MAX_TH_PS 1600 /* Input hold time in ps */
+
struct davinci_nand_pdata {
uint32_t mask_ale;
uint32_t mask_cle;
@@ -66,6 +71,7 @@ struct davinci_nand_pdata {
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * on-die == NAND_ECC_ENGINE_TYPE_ON_DIE
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
@@ -117,6 +123,9 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
+
+ struct clk *clk;
+ struct aemif_device *aemif;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -479,6 +488,44 @@ static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
.free = hwecc4_ooblayout_small_free,
};
+static int hwecc4_ooblayout_large_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ if (section >= nregions)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_large_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ /* First region is used for BBT */
+ if (section >= (nregions - 1))
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16);
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_large_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_large_ecc,
+ .free = hwecc4_ooblayout_large_free,
+};
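+
+/*
+ * Layout example (illustrative): on a 2K page with four 512-byte
+ * chunks (total_ecc_bytes = 40), the ECC regions sit at OOB offsets
+ * 6, 22, 38 and 54 (10 bytes each), while the free regions sit at
+ * offsets 16, 32 and 48 (6 bytes each); bytes 0-5 of the first
+ * 16-byte slice are left for the BBT marker.
+ */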
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -525,6 +572,8 @@ nand_davinci_get_pdata(struct platform_device *pdev)
pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ if (!strncmp("on-die", mode, 6))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
}
if (!device_property_read_u32(&pdev->dev,
"ti,davinci-ecc-bits", &prop))
@@ -580,6 +629,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
pdata->ecc_bits = 0;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
@@ -638,9 +688,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd,
- nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM)
+ mtd_set_ooblayout(mtd, &hwecc4_large_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
} else {
return -EIO;
}
@@ -724,7 +777,7 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
case NAND_OP_WAITRDY_INSTR:
timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
- status, status & BIT(0), 100,
+ status, status & BIT(0), 5,
timeout_us);
if (ret)
return ret;
@@ -764,9 +817,82 @@ static int davinci_nand_exec_op(struct nand_chip *chip,
return 0;
}
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
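+
+/*
+ * Example (illustrative): with a 100 MHz AEMIF clock, period_ns = 10.
+ * For tWP_min = 25000 ps, TO_CYCLES(25000, 10) = DIV_ROUND_UP(25, 10)
+ * = 3 cycles, so the WSTROBE field below is programmed as 3 - 1 = 2.
+ */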
+
+static int davinci_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ const struct nand_sdr_timings *sdr;
+ struct aemif_cs_timings timings;
+ s32 cfg, min, cyc_ns;
+ int ret;
+
+ cyc_ns = 1000000000 / clk_get_rate(info->clk);
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ cfg = TO_CYCLES(sdr->tCLR_min, cyc_ns) - 1;
+ timings.rsetup = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tREA_max + MAX_TSU_PS, cyc_ns),
+ TO_CYCLES(sdr->tRP_min, cyc_ns)) - 1;
+ timings.rstrobe = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tCEA_max + MAX_TSU_PS, cyc_ns) - 2;
+ while ((s32)(timings.rsetup + timings.rstrobe) < min)
+ timings.rstrobe++;
+
+ cfg = TO_CYCLES((s32)(MAX_TH_PS - sdr->tCHZ_max), cyc_ns) - 1;
+ timings.rhold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tRC_min, cyc_ns) - 3;
+ while ((s32)(timings.rsetup + timings.rstrobe + timings.rhold) < min)
+ timings.rhold++;
+
+ cfg = TO_CYCLES((s32)(sdr->tRHZ_max - (timings.rhold + 1) * cyc_ns * 1000), cyc_ns);
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCHZ_max, cyc_ns)) - 1;
+ timings.ta = cfg > 0 ? cfg : 0;
+
+ cfg = TO_CYCLES(sdr->tWP_min, cyc_ns) - 1;
+ timings.wstrobe = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLS_min, cyc_ns), TO_CYCLES(sdr->tALS_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCS_min, cyc_ns)) - 1;
+ timings.wsetup = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tDS_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe) < min)
+ timings.wstrobe++;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLH_min, cyc_ns), TO_CYCLES(sdr->tALH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tDH_min, cyc_ns)) - 1;
+ timings.whold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tWC_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe + timings.whold) < min)
+ timings.whold++;
+
+ dev_dbg(&info->pdev->dev, "RSETUP %x RSTROBE %x RHOLD %x\n",
+ timings.rsetup, timings.rstrobe, timings.rhold);
+ dev_dbg(&info->pdev->dev, "TA %x\n", timings.ta);
+ dev_dbg(&info->pdev->dev, "WSETUP %x WSTROBE %x WHOLD %x\n",
+ timings.wsetup, timings.wstrobe, timings.whold);
+
+ ret = aemif_check_cs_timings(&timings);
+ if (ret || chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return ret;
+
+ return aemif_set_cs_timings(info->aemif, info->core_chipsel, &timings);
+}
+
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
.exec_op = davinci_nand_exec_op,
+ .setup_interface = davinci_nand_setup_interface,
};
static int nand_davinci_probe(struct platform_device *pdev)
@@ -822,9 +948,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
+ info->clk = devm_clk_get_enabled(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get clock");
+
info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
+ info->aemif = dev_get_drvdata(pdev->dev.parent);
mtd = nand_to_mtd(&info->chip);
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
new file mode 100644
index 000000000000..c23b537948d5
--- /dev/null
+++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* NFI Registers */
+#define MA35_NFI_REG_DMACTL 0x400
+#define DMA_EN BIT(0)
+#define DMA_RST BIT(1)
+#define DMA_BUSY BIT(9)
+
+#define MA35_NFI_REG_DMASA 0x408
+#define MA35_NFI_REG_GCTL 0x800
+#define GRST BIT(0)
+#define NAND_EN BIT(3)
+
+#define MA35_NFI_REG_NANDCTL 0x8A0
+#define SWRST BIT(0)
+#define DMA_R_EN BIT(1)
+#define DMA_W_EN BIT(2)
+#define ECC_CHK BIT(7)
+#define PROT3BEN BIT(8)
+#define PSIZE_2K BIT(16)
+#define PSIZE_4K BIT(17)
+#define PSIZE_8K GENMASK(17, 16)
+#define PSIZE_MASK GENMASK(17, 16)
+#define BCH_T24 BIT(18)
+#define BCH_T8 BIT(20)
+#define BCH_T12 BIT(21)
+#define BCH_NONE (0x0)
+#define BCH_MASK GENMASK(22, 18)
+#define ECC_EN BIT(23)
+#define DISABLE_CS0 BIT(25)
+
+#define MA35_NFI_REG_NANDINTEN 0x8A8
+#define MA35_NFI_REG_NANDINTSTS 0x8AC
+#define INT_DMA BIT(0)
+#define INT_ECC BIT(2)
+#define INT_RB0 BIT(10)
+
+#define MA35_NFI_REG_NANDCMD 0x8B0
+#define MA35_NFI_REG_NANDADDR 0x8B4
+#define ENDADDR BIT(31)
+
+#define MA35_NFI_REG_NANDDATA 0x8B8
+#define MA35_NFI_REG_NANDRACTL 0x8BC
+#define MA35_NFI_REG_NANDECTL 0x8C0
+#define ENABLE_WP 0x0
+#define DISABLE_WP BIT(0)
+
+#define MA35_NFI_REG_NANDECCES0 0x8D0
+#define ECC_STATUS_MASK GENMASK(1, 0)
+#define ECC_ERR_CNT_MASK GENMASK(4, 0)
+
+#define MA35_NFI_REG_NANDECCEA0 0x900
+#define MA35_NFI_REG_NANDECCED0 0x960
+#define MA35_NFI_REG_NANDRA0 0xA00
+
+/* Define for the BCH hardware ECC engine */
+/* define the total padding bytes for 512/1024 data segment */
+#define MA35_BCH_PADDING_512 32
+#define MA35_BCH_PADDING_1024 64
+/* define the BCH parity code length for 512 bytes data pattern */
+#define MA35_PARITY_BCH8 15
+#define MA35_PARITY_BCH12 23
+/* define the BCH parity code length for 1024 bytes data pattern */
+#define MA35_PARITY_BCH24 45
+
+#define MA35_MAX_NSELS (2)
+#define PREFIX_RA_IS_EMPTY(reg) FIELD_GET(GENMASK(31, 16), (reg))
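+
+/*
+ * Note (illustrative): the upper halfword of the first redundant-area
+ * word doubles as an "erased page" marker. It reads back non-zero
+ * (0xffff) on a page that was never programmed, in which case the
+ * read paths return 0xff data without decoding ECC;
+ * ma35_nand_do_write() clears it before programming a page.
+ */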
+
+struct ma35_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u32 eccstatus;
+ u8 nsels;
+ u8 sels[] __counted_by(nsels);
+};
+
+struct ma35_nand_info {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion complete;
+ struct list_head chips;
+
+ u8 *buffer;
+ unsigned long assigned_cs;
+};
+
+static inline struct ma35_nand_chip *to_ma35_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct ma35_nand_chip, chip);
+}
+
+static int ma35_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = chip->ecc.total;
+ oob_region->offset = mtd->oobsize - oob_region->length;
+
+ return 0;
+}
+
+static int ma35_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - chip->ecc.total - 2;
+ oob_region->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ma35_ooblayout_ops = {
+ .free = ma35_ooblayout_free,
+ .ecc = ma35_ooblayout_ecc,
+};
+
+static inline void ma35_clear_spare(struct nand_chip *chip, int size)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ int i;
+
+ for (i = 0; i < size / 4; i++)
+ writel(0xff, nand->regs + MA35_NFI_REG_NANDRA0);
+}
+
+static inline void read_remaining_bytes(struct ma35_nand_info *nand, u32 *buf,
+ u32 offset, int size, int swap)
+{
+ u32 value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset);
+ u8 *ptr = (u8 *)buf;
+ int i, shift;
+
+ for (i = 0; i < size; i++) {
+ shift = (swap ? 3 - i : i) * 8;
+ ptr[i] = (value >> shift) & 0xff;
+ }
+}
+
+static inline void ma35_read_spare(struct nand_chip *chip, int size, u32 *buf, u32 offset)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 off = round_down(offset, 4);
+ int len = offset % 4;
+ int i;
+
+ if (len) {
+ read_remaining_bytes(nand, buf, off, 4 - len, 1);
+ off += 4;
+ size -= (4 - len);
+ }
+
+ for (i = 0; i < size / 4; i++)
+ *buf++ = readl(nand->regs + MA35_NFI_REG_NANDRA0 + off + (i * 4));
+
+ read_remaining_bytes(nand, buf, off + (size & ~3), size % 4, 0);
+}
+
+static inline void ma35_write_spare(struct nand_chip *chip, int size, u32 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 value;
+ int i, j;
+ u8 *ptr;
+
+ for (i = 0, j = 0; i < size / 4; i++, j += 4)
+ writel(*buf++, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+
+ ptr = (u8 *)buf;
+ switch (size % 4) {
+ case 1:
+ writel(*ptr, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 2:
+ value = *ptr | (*(ptr + 1) << 8);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 3:
+ value = *ptr | (*(ptr + 1) << 8) | (*(ptr + 2) << 16);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ma35_nand_target_enable(struct nand_chip *chip, unsigned int cs)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 reg;
+
+ switch (cs) {
+ case 0:
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~DISABLE_CS0, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ reg |= INT_RB0;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ma35_nand_hwecc_init(struct nand_chip *chip, struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ nand->buffer = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!nand->buffer)
+ return -ENOMEM;
+
+ /* Redundant area size */
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ /* Protect redundant 3 bytes and disable ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= (PROT3BEN | ECC_CHK);
+ reg &= ~ECC_EN;
+
+ if (chip->ecc.strength != 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ nvtnand->eccstatus = (chip->ecc.steps < 4) ? 1 : chip->ecc.steps / 4;
+ /* Set BCH algorithm */
+ reg &= ~BCH_MASK;
+ switch (chip->ecc.strength) {
+ case 8:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH8;
+ reg |= BCH_T8;
+ break;
+ case 12:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH12;
+ reg |= BCH_T12;
+ break;
+ case 24:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH24;
+ reg |= BCH_T24;
+ break;
+ default:
+ dev_err(nand->dev, "ECC strength unsupported\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.bytes = chip->ecc.total / chip->ecc.steps;
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+ return 0;
+}
+
+/* Correct data by BCH algorithm */
+static void ma35_nfi_correct(struct nand_chip *chip, u8 index,
+ u8 err_cnt, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 temp_data[24], temp_addr[24];
+ u32 padding_len, parity_len;
+ u32 value, offset, remain;
+ u32 err_data[6];
+ u8 i, j;
+
+ /* Configurations */
+ if (chip->ecc.strength <= 8) {
+ parity_len = MA35_PARITY_BCH8;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 12) {
+ parity_len = MA35_PARITY_BCH12;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 24) {
+ parity_len = MA35_PARITY_BCH24;
+ padding_len = MA35_BCH_PADDING_1024;
+ } else {
+ dev_err(nand->dev, "Invalid BCH_TSEL = 0x%lx\n",
+ readl(nand->regs + MA35_NFI_REG_NANDCTL) & BCH_MASK);
+ return;
+ }
+
+ /*
+ * Read the valid BCH_ECC_DATAx registers and parse them into
+ * temp_data[]. Compute the number of valid registers first,
+ * since each register holds 4 error bytes.
+ */
+ j = (err_cnt + 3) / 4;
+ j = (j > 6) ? 6 : j;
+ for (i = 0; i < j; i++)
+ err_data[i] = readl(nand->regs + MA35_NFI_REG_NANDECCED0 + i * 4);
+
+ for (i = 0; i < j; i++) {
+ temp_data[i * 4 + 0] = err_data[i] & 0xff;
+ temp_data[i * 4 + 1] = (err_data[i] >> 8) & 0xff;
+ temp_data[i * 4 + 2] = (err_data[i] >> 16) & 0xff;
+ temp_data[i * 4 + 3] = (err_data[i] >> 24) & 0xff;
+ }
+
+ /*
+ * Read the valid REG_BCH_ECC_ADDRx registers and parse them into
+ * temp_addr[]. Compute the number of valid registers first,
+ * since each register holds 2 error addresses.
+ */
+ j = (err_cnt + 1) / 2;
+ j = (j > 12) ? 12 : j;
+ for (i = 0; i < j; i++) {
+ temp_addr[i * 2 + 0] = readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ & 0x07ff;
+ temp_addr[i * 2 + 1] = (readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ >> 16) & 0x07ff;
+ }
+
+ /* point to the beginning of the field that contains the error */
+ addr += index * chip->ecc.size;
+
+ /* correct each error bytes */
+ for (i = 0; i < err_cnt; i++) {
+ u32 corrected_index = temp_addr[i];
+
+ if (corrected_index < chip->ecc.size) {
+ /* for wrong data in field */
+ *(addr + corrected_index) ^= temp_data[i];
+ } else if (corrected_index < (chip->ecc.size + 3)) {
+ /* for wrong first-3-bytes in redundancy area */
+ corrected_index -= chip->ecc.size;
+ temp_addr[i] += (parity_len * index); /* field offset */
+
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ value ^= temp_data[i] << (8 * corrected_index);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0);
+ } else {
+ /*
+ * for wrong parity code in redundancy area
+ * ERR_ADDRx = [data in field] + [3 bytes] + [xx] + [parity code]
+ * |<-- padding bytes -->|
+ * The ERR_ADDRx for the last parity code always equals field size +
+ * padding size. The first parity code equals field size + padding
+ * size - parity code length. For example, for BCH T12, the first
+ * parity code = 512 + 32 - 23 = 521. That is, the error byte address
+ * offset within the field is ERR_ADDRx - (field size + padding size -
+ * parity code length), as computed below.
+ */
+ corrected_index -= (chip->ecc.size + padding_len - parity_len);
+
+ /*
+ * final address = first parity code of first field +
+ * offset of fields +
+ * offset within field
+ */
+ offset = (readl(nand->regs + MA35_NFI_REG_NANDRACTL) & 0x1ff) -
+ (parity_len * chip->ecc.steps) +
+ (parity_len * index) + corrected_index;
+
+ remain = offset % 4;
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ value ^= temp_data[i] << (8 * remain);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ }
+ }
+}
+
+static int ma35_nfi_ecc_check(struct nand_chip *chip, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int maxbitflips = 0;
+ int cnt = 0;
+ u32 status;
+ int i, j;
+
+ for (j = 0; j < nvtnand->eccstatus; j++) {
+ status = readl(nand->regs + MA35_NFI_REG_NANDECCES0 + j * 4);
+ if (!status)
+ continue;
+
+ for (i = 0; i < 4; i++) {
+ if ((status & ECC_STATUS_MASK) == 0x01) {
+ /* Correctable error */
+ cnt = (status >> 2) & ECC_ERR_CNT_MASK;
+ ma35_nfi_correct(chip, j * 4 + i, cnt, addr);
+ maxbitflips = max_t(u32, maxbitflips, cnt);
+ mtd->ecc_stats.corrected += cnt;
+ } else {
+ /* Uncorrectable error */
+ mtd->ecc_stats.failed++;
+ dev_err(nand->dev, "uncorrectable error! 0x%4x\n", status);
+ return -EBADMSG;
+ }
+ status >>= 8;
+ }
+ }
+ return maxbitflips;
+}
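+
+/*
+ * Note (illustrative): each NANDECCESx word packs the status of four
+ * BCH fields, one byte per field. Bits [1:0] of each byte hold the
+ * status (0x01 = correctable error) and bits [6:2] the error count,
+ * which is why ma35_nfi_ecc_check() shifts the word right by 8 per
+ * field.
+ */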
+
+static void ma35_nand_dmac_init(struct ma35_nand_info *nand)
+{
+ /* DMAC reset and enable */
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+
+ /* Clear DMA finished flag and enable */
+ writel(INT_DMA | INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTEN);
+}
+
+static int ma35_nand_do_write(struct nand_chip *chip, const u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ dma_addr_t dma_addr;
+ int ret = 0, i;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ writel(addr[i], nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* To mark this page as dirty. */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (reg & 0xffff0000)
+ writel(reg & 0xffff, nand->regs + MA35_NFI_REG_NANDRA0);
+
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+ dma_sync_single_for_device(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_W_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "write timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int ma35_nand_do_read(struct nand_chip *chip, u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0, cnt = 0, i;
+ dma_addr_t dma_addr;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ addr[i] = readb(nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* Setup and start DMA using dma_addr */
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_R_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "read timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_FROM_DEVICE);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (reg & INT_ECC) {
+ cnt = ma35_nfi_ecc_check(chip, addr);
+ if (cnt < 0) {
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | SWRST,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ }
+ writel(INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ }
+
+ ret = ret < 0 ? ret : cnt;
+ return ret;
+}
+
+static int ma35_nand_format_subpage(struct nand_chip *chip, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 page_off = round_down(offset, chip->ecc.size);
+ u32 end = DIV_ROUND_UP(page_off + len, chip->ecc.size);
+ u32 start = page_off / chip->ecc.size;
+ u32 reg;
+ int i;
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL) | 0xffff0000;
+ memset(nand->buffer, 0xff, mtd->writesize);
+ for (i = start; i < end; i++) {
+ memcpy(nand->buffer + i * chip->ecc.size,
+ buf + i * chip->ecc.size, chip->ecc.size);
+ reg &= ~(1 << (i + 16));
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg, oobpoi, index;
+ int i;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ ma35_nand_format_subpage(chip, offset, data_len, buf);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, nand->buffer, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ oobpoi = mtd->oobsize - chip->ecc.total;
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ index = i * chip->ecc.bytes;
+ if (!(reg & (1 << (i + 16)))) {
+ ma35_read_spare(chip, chip->ecc.bytes,
+ (u32 *)(chip->oob_poi + oobpoi + index),
+ oobpoi + index);
+ }
+ }
+
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, buf, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ ma35_read_spare(chip, chip->ecc.total,
+ (u32 *)(chip->oob_poi + (mtd->oobsize - chip->ecc.total)),
+ mtd->oobsize - chip->ecc.total);
+
+ /* Disable HW ECC engine */
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_read_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, u8 *buf, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, offset, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf + offset, data_len);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf, mtd->writesize);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_oob_hwecc(struct nand_chip *chip, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ /* copy OOB data to controller redundant area for page read */
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg))
+ memset((void *)chip->oob_poi, 0xff, mtd->oobsize);
+
+ return 0;
+}
+
+static inline void ma35_hw_init(struct ma35_nand_info *nand)
+{
+ u32 reg;
+
+ /* Disable flash write protection. */
+ writel(DISABLE_WP, nand->regs + MA35_NFI_REG_NANDECTL);
+
+ /* Reset the internal state machine and counters */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= SWRST;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+}
+
+static irqreturn_t ma35_nand_irq(int irq, void *id)
+{
+ struct ma35_nand_info *nand = (struct ma35_nand_info *)id;
+ u32 isr;
+
+ isr = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (isr & INT_DMA) {
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ complete(&nand->complete);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int ma35_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16 bits bus width not supported");
+ return -EINVAL;
+ }
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL) & (~PSIZE_MASK);
+ switch (mtd->writesize) {
+ case SZ_2K:
+ writel(reg | PSIZE_2K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_4K:
+ writel(reg | PSIZE_4K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_8K:
+ writel(reg | PSIZE_8K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ default:
+ dev_err(dev, "Unsupported page size");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+ chip->ecc.write_subpage = ma35_nand_write_subpage_hwecc;
+ chip->ecc.write_page = ma35_nand_write_page_hwecc;
+ chip->ecc.read_subpage = ma35_nand_read_subpage_hwecc;
+ chip->ecc.read_page = ma35_nand_read_page_hwecc;
+ chip->ecc.read_oob = ma35_nand_read_oob_hwecc;
+ return ma35_nand_hwecc_init(chip, nand);
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ma35_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(instr->ctx.cmd.opcode, nand->regs + MA35_NFI_REG_NANDCMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ if (i == (instr->ctx.addr.naddrs - 1))
+ writel(instr->ctx.addr.addrs[i] | ENDADDR,
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ else
+ writel(instr->ctx.addr.addrs[i],
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ret = ma35_nand_do_read(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ret = ma35_nand_do_write(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ return readl_poll_timeout(nand->regs + MA35_NFI_REG_NANDINTSTS, status,
+ status & INT_RB0, 20,
+ instr->ctx.waitrdy.timeout_ms * USEC_PER_MSEC);
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ma35_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret = 0;
+ u32 i;
+
+ if (check_only)
+ return 0;
+
+ ma35_nand_target_enable(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ma35_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops ma35_nfc_ops = {
+ .attach_chip = ma35_nand_attach_chip,
+ .exec_op = ma35_nfc_exec_op,
+};
+
+static int ma35_nand_chip_init(struct device *dev, struct ma35_nand_info *nand,
+ struct device_node *np)
+{
+ struct ma35_nand_chip *nvtnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ int ret;
+ u32 cs;
+ int i;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (!nsels || nsels > MA35_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ nvtnand = devm_kzalloc(dev, struct_size(nvtnand, sels, nsels),
+ GFP_KERNEL);
+ if (!nvtnand)
+ return -ENOMEM;
+
+ nvtnand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (cs >= MA35_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nand->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ nvtnand->sels[i] = cs;
+ }
+
+ chip = &nvtnand->chip;
+ chip->controller = &nand->controller;
+
+ nand_set_flash_node(chip, np);
+ nand_set_controller_data(chip, nand);
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ mtd_set_ooblayout(mtd, &ma35_ooblayout_ops);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nvtnand->node, &nand->chips);
+
+ return 0;
+}
+
+static void ma35_chips_cleanup(struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
+ chip = &nvtnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&nvtnand->node);
+ }
+}
+
+static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
+{
+ struct device_node *np = dev->of_node, *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = ma35_nand_chip_init(dev, nand, nand_np);
+ if (ret) {
+ ma35_chips_cleanup(nand);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ma35_nand_probe(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand;
+ int ret = 0;
+
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand_controller_init(&nand->controller);
+ INIT_LIST_HEAD(&nand->chips);
+ nand->controller.ops = &ma35_nfc_ops;
+
+ init_completion(&nand->complete);
+
+ nand->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nand->regs))
+ return PTR_ERR(nand->regs);
+
+ nand->dev = &pdev->dev;
+
+ nand->clk = devm_clk_get_enabled(&pdev->dev, "nand_gate");
+ if (IS_ERR(nand->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(nand->clk),
+ "failed to find NAND clock\n");
+
+ nand->irq = platform_get_irq(pdev, 0);
+ if (nand->irq < 0)
+ return dev_err_probe(&pdev->dev, nand->irq,
+ "failed to get platform irq\n");
+
+ ret = devm_request_irq(&pdev->dev, nand->irq, ma35_nand_irq,
+ IRQF_TRIGGER_HIGH, "ma35d1-nand-controller", nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request NAND irq\n");
+ return -ENXIO;
+ }
+
+ platform_set_drvdata(pdev, nand);
+
+ writel(GRST | NAND_EN, nand->regs + MA35_NFI_REG_GCTL);
+ ma35_hw_init(nand);
+ ret = ma35_nand_chips_init(&pdev->dev, nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init NAND chips\n");
+ return ret;
+ }
+
+ return 0;
+}
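init_completion()/devm_request_irq() above set up the usual
interrupt-driven wait: the data path queues a transfer, sleeps on
nand->complete, and the ISR wakes it. A sketch of that handshake, with
placeholder register and bit names since the real ISR is not shown in
this hunk:

    static irqreturn_t ma35_nand_irq_sketch(int irq, void *id)
    {
            struct ma35_nand_info *nand = id;
            u32 isr = readl(nand->regs + EXAMPLE_INT_STS);  /* assumed reg */

            if (!(isr & EXAMPLE_DMA_DONE))                  /* assumed bit */
                    return IRQ_NONE;

            /* ack the interrupt, then wake the waiter in the data path */
            writel(EXAMPLE_DMA_DONE, nand->regs + EXAMPLE_INT_STS);
            complete(&nand->complete);
            return IRQ_HANDLED;
    }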
+
+static void ma35_nand_remove(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand = platform_get_drvdata(pdev);
+
+ ma35_chips_cleanup(nand);
+}
+
+static const struct of_device_id ma35_nand_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-nand-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35_nand_of_match);
+
+static struct platform_driver ma35_nand_driver = {
+ .driver = {
+ .name = "ma35d1-nand-controller",
+ .of_match_table = ma35_nand_of_match,
+ },
+ .probe = ma35_nand_probe,
+ .remove = ma35_nand_remove,
+};
+
+module_platform_driver(ma35_nand_driver);
+
+MODULE_DESCRIPTION("Nuvoton MA35 NAND controller driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 636bba2528bf..6720b547892b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -15,431 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-/* NANDc reg offsets */
-#define NAND_FLASH_CMD 0x00
-#define NAND_ADDR0 0x04
-#define NAND_ADDR1 0x08
-#define NAND_FLASH_CHIP_SELECT 0x0c
-#define NAND_EXEC_CMD 0x10
-#define NAND_FLASH_STATUS 0x14
-#define NAND_BUFFER_STATUS 0x18
-#define NAND_DEV0_CFG0 0x20
-#define NAND_DEV0_CFG1 0x24
-#define NAND_DEV0_ECC_CFG 0x28
-#define NAND_AUTO_STATUS_EN 0x2c
-#define NAND_DEV1_CFG0 0x30
-#define NAND_DEV1_CFG1 0x34
-#define NAND_READ_ID 0x40
-#define NAND_READ_STATUS 0x44
-#define NAND_DEV_CMD0 0xa0
-#define NAND_DEV_CMD1 0xa4
-#define NAND_DEV_CMD2 0xa8
-#define NAND_DEV_CMD_VLD 0xac
-#define SFLASHC_BURST_CFG 0xe0
-#define NAND_ERASED_CW_DETECT_CFG 0xe8
-#define NAND_ERASED_CW_DETECT_STATUS 0xec
-#define NAND_EBI2_ECC_BUF_CFG 0xf0
-#define FLASH_BUF_ACC 0x100
-
-#define NAND_CTRL 0xf00
-#define NAND_VERSION 0xf08
-#define NAND_READ_LOCATION_0 0xf20
-#define NAND_READ_LOCATION_1 0xf24
-#define NAND_READ_LOCATION_2 0xf28
-#define NAND_READ_LOCATION_3 0xf2c
-#define NAND_READ_LOCATION_LAST_CW_0 0xf40
-#define NAND_READ_LOCATION_LAST_CW_1 0xf44
-#define NAND_READ_LOCATION_LAST_CW_2 0xf48
-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-
-/* dummy register offsets, used by write_reg_dma */
-#define NAND_DEV_CMD1_RESTORE 0xdead
-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-
-/* NAND_FLASH_CMD bits */
-#define PAGE_ACC BIT(4)
-#define LAST_PAGE BIT(5)
-
-/* NAND_FLASH_CHIP_SELECT bits */
-#define NAND_DEV_SEL 0
-#define DM_EN BIT(2)
-
-/* NAND_FLASH_STATUS bits */
-#define FS_OP_ERR BIT(4)
-#define FS_READY_BSY_N BIT(5)
-#define FS_MPU_ERR BIT(8)
-#define FS_DEVICE_STS_ERR BIT(16)
-#define FS_DEVICE_WP BIT(23)
-
-/* NAND_BUFFER_STATUS bits */
-#define BS_UNCORRECTABLE_BIT BIT(8)
-#define BS_CORRECTABLE_ERR_MSK 0x1f
-
-/* NAND_DEVn_CFG0 bits */
-#define DISABLE_STATUS_AFTER_WRITE 4
-#define CW_PER_PAGE 6
-#define UD_SIZE_BYTES 9
-#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-#define ECC_PARITY_SIZE_BYTES_RS 19
-#define SPARE_SIZE_BYTES 23
-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
-#define NUM_ADDR_CYCLES 27
-#define STATUS_BFR_READ 30
-#define SET_RD_MODE_AFTER_STATUS 31
-
-/* NAND_DEVn_CFG1 bits */
-#define DEV0_CFG1_ECC_DISABLE 0
-#define WIDE_FLASH 1
-#define NAND_RECOVERY_CYCLES 2
-#define CS_ACTIVE_BSY 5
-#define BAD_BLOCK_BYTE_NUM 6
-#define BAD_BLOCK_IN_SPARE_AREA 16
-#define WR_RD_BSY_GAP 17
-#define ENABLE_BCH_ECC 27
-
-/* NAND_DEV0_ECC_CFG bits */
-#define ECC_CFG_ECC_DISABLE 0
-#define ECC_SW_RESET 1
-#define ECC_MODE 4
-#define ECC_PARITY_SIZE_BYTES_BCH 8
-#define ECC_NUM_DATA_BYTES 16
-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-#define ECC_FORCE_CLK_OPEN 30
-
-/* NAND_DEV_CMD1 bits */
-#define READ_ADDR 0
-
-/* NAND_DEV_CMD_VLD bits */
-#define READ_START_VLD BIT(0)
-#define READ_STOP_VLD BIT(1)
-#define WRITE_START_VLD BIT(2)
-#define ERASE_START_VLD BIT(3)
-#define SEQ_READ_START_VLD BIT(4)
-
-/* NAND_EBI2_ECC_BUF_CFG bits */
-#define NUM_STEPS 0
-
-/* NAND_ERASED_CW_DETECT_CFG bits */
-#define ERASED_CW_ECC_MASK 1
-#define AUTO_DETECT_RES 0
-#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
-
-/* NAND_ERASED_CW_DETECT_STATUS bits */
-#define PAGE_ALL_ERASED BIT(7)
-#define CODEWORD_ALL_ERASED BIT(6)
-#define PAGE_ERASED BIT(5)
-#define CODEWORD_ERASED BIT(4)
-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-
-/* NAND_READ_LOCATION_n bits */
-#define READ_LOCATION_OFFSET 0
-#define READ_LOCATION_SIZE 16
-#define READ_LOCATION_LAST 31
-
-/* Version Mask */
-#define NAND_VERSION_MAJOR_MASK 0xf0000000
-#define NAND_VERSION_MAJOR_SHIFT 28
-#define NAND_VERSION_MINOR_MASK 0x0fff0000
-#define NAND_VERSION_MINOR_SHIFT 16
-
-/* NAND OP_CMDs */
-#define OP_PAGE_READ 0x2
-#define OP_PAGE_READ_WITH_ECC 0x3
-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
-#define OP_PAGE_READ_ONFI_READ 0x5
-#define OP_PROGRAM_PAGE 0x6
-#define OP_PAGE_PROGRAM_WITH_ECC 0x7
-#define OP_PROGRAM_PAGE_SPARE 0x9
-#define OP_BLOCK_ERASE 0xa
-#define OP_CHECK_STATUS 0xc
-#define OP_FETCH_ID 0xb
-#define OP_RESET_DEVICE 0xd
-
-/* Default Value for NAND_DEV_CMD_VLD */
-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
- ERASE_START_VLD | SEQ_READ_START_VLD)
-
-/* NAND_CTRL bits */
-#define BAM_MODE_EN BIT(0)
-
-/*
- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
- * the driver calls the chunks 'step' or 'codeword' interchangeably
- */
-#define NANDC_STEP_SIZE 512
-
-/*
- * the largest page size we support is 8K, this will have 16 steps/codewords
- * of 512 bytes each
- */
-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
-
-/* we read at most 3 registers per codeword scan */
-#define MAX_REG_RD (3 * MAX_NUM_STEPS)
-
-/* ECC modes supported by the controller */
-#define ECC_NONE BIT(0)
-#define ECC_RS_4BIT BIT(1)
-#define ECC_BCH_4BIT BIT(2)
-#define ECC_BCH_8BIT BIT(3)
-
-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-
-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-/*
- * Returns the actual register address for all NAND_DEV_ registers
- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
- */
-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-
-/* Returns the NAND register physical address */
-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-
-/* Returns the dma address for reg read buffer */
-#define reg_buf_dma_addr(chip, vaddr) \
- ((chip)->reg_read_dma + \
- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
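Both macros above are plain pointer arithmetic: dev_cmd_reg_addr()
rebases the NAND_DEV_CMD_* offsets onto a per-SoC start offset, and
reg_buf_dma_addr() translates a CPU pointer into the matching slot of the
coherent read-back buffer. As a worked example, if reg_read_buf has DMA
handle D, then for p = reg_read_buf + 3 the macro yields
D + 3 * sizeof(__le32), the device-visible address of the same element.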
-
-#define QPIC_PER_CW_CMD_ELEMENTS 32
-#define QPIC_PER_CW_CMD_SGL 32
-#define QPIC_PER_CW_DATA_SGL 8
-
-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-
-/*
- * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
- */
-/* Don't set the EOT in current tx BAM sgl */
-#define NAND_BAM_NO_EOT BIT(0)
-/* Set the NWD flag in current BAM sgl */
-#define NAND_BAM_NWD BIT(1)
-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-#define NAND_BAM_NEXT_SGL BIT(2)
-/*
- * Erased codeword status is being used two times in single transfer so this
- * flag will determine the current value of erased codeword status register
- */
-#define NAND_ERASED_CW_SET BIT(4)
-
-#define MAX_ADDRESS_CYCLE 5
-
-/*
- * This data type corresponds to the BAM transaction which will be used for all
- * NAND transfers.
- * @bam_ce - the array of BAM command elements
- * @cmd_sgl - sgl for NAND BAM command pipe
- * @data_sgl - sgl for NAND BAM consumer/producer pipe
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
- * @txn_done - completion for NAND transfer.
- * @bam_ce_pos - the index in bam_ce which is available for next sgl
- * @bam_ce_start - the index in bam_ce which marks the start position ce
- * for current sgl. It will be used for size calculation
- * for current sgl
- * @cmd_sgl_pos - current index in command sgl.
- * @cmd_sgl_start - start index in command sgl.
- * @tx_sgl_pos - current index in data sgl for tx.
- * @tx_sgl_start - start index in data sgl for tx.
- * @rx_sgl_pos - current index in data sgl for rx.
- * @rx_sgl_start - start index in data sgl for rx.
- * @wait_second_completion - wait for second DMA desc completion before making
- * the NAND transfer completion.
- */
-struct bam_transaction {
- struct bam_cmd_element *bam_ce;
- struct scatterlist *cmd_sgl;
- struct scatterlist *data_sgl;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
- struct completion txn_done;
- u32 bam_ce_pos;
- u32 bam_ce_start;
- u32 cmd_sgl_pos;
- u32 cmd_sgl_start;
- u32 tx_sgl_pos;
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
- bool wait_second_completion;
-};
-
-/*
- * This data type corresponds to the nand dma descriptor
- * @dma_desc - low level DMA engine descriptor
- * @list - list for desc_info
- *
- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
- * ADM
- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dir - DMA transfer direction
- */
-struct desc_info {
- struct dma_async_tx_descriptor *dma_desc;
- struct list_head node;
-
- union {
- struct scatterlist adm_sgl;
- struct {
- struct scatterlist *bam_sgl;
- int sgl_cnt;
- };
- };
- enum dma_data_direction dir;
-};
-
-/*
- * holds the current register values that we want to write. acts as a contiguous
- * chunk of memory which we use to write the controller registers through DMA.
- */
-struct nandc_regs {
- __le32 cmd;
- __le32 addr0;
- __le32 addr1;
- __le32 chip_sel;
- __le32 exec;
-
- __le32 cfg0;
- __le32 cfg1;
- __le32 ecc_bch_cfg;
-
- __le32 clrflashstatus;
- __le32 clrreadstatus;
-
- __le32 cmd1;
- __le32 vld;
-
- __le32 orig_cmd1;
- __le32 orig_vld;
-
- __le32 ecc_buf_cfg;
- __le32 read_location0;
- __le32 read_location1;
- __le32 read_location2;
- __le32 read_location3;
- __le32 read_location_last0;
- __le32 read_location_last1;
- __le32 read_location_last2;
- __le32 read_location_last3;
-
- __le32 erased_cw_detect_cfg_clr;
- __le32 erased_cw_detect_cfg_set;
-};
-
-/*
- * NAND controller data struct
- *
- * @dev: parent device
- *
- * @base: MMIO base
- *
- * @core_clk: controller clock
- * @aon_clk: another controller clock
- *
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- *
- * @props: properties of current NAND controller,
- * initialized via DT match data
- *
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
- *
- * @chan: dma channel
- * @cmd_crci: ADM DMA CRCI for command flow control
- * @data_crci: ADM DMA CRCI for data flow control
- *
- * @desc_list: DMA descriptor list (list of desc_infos)
- *
- * @data_buffer: our local DMA buffer for page read/writes,
- * used when we can't use the buffer provided
- * by upper layers directly
- * @reg_read_buf: local buffer for reading back registers via DMA
- *
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
- * @reg_read_dma: contains dma address for register read buffer
- *
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
- * @max_cwperpage: maximum QPIC codewords required. calculated
- * from all connected NAND devices pagesize
- *
- * @reg_read_pos: marker for data read in reg_read_buf
- *
- * @cmd1/vld: some fixed controller register values
- *
- * @exec_opwrite: flag to select correct number of code word
- * while reading status
- */
-struct qcom_nand_controller {
- struct device *dev;
-
- void __iomem *base;
-
- struct clk *core_clk;
- struct clk *aon_clk;
-
- struct nandc_regs *regs;
- struct bam_transaction *bam_txn;
-
- const struct qcom_nandc_props *props;
-
- struct nand_controller controller;
- struct list_head host_list;
-
- union {
- /* will be used only by QPIC for BAM DMA */
- struct {
- struct dma_chan *tx_chan;
- struct dma_chan *rx_chan;
- struct dma_chan *cmd_chan;
- };
-
- /* will be used only by EBI2 for ADM DMA */
- struct {
- struct dma_chan *chan;
- unsigned int cmd_crci;
- unsigned int data_crci;
- };
- };
-
- struct list_head desc_list;
-
- u8 *data_buffer;
- __le32 *reg_read_buf;
-
- phys_addr_t base_phys;
- dma_addr_t base_dma;
- dma_addr_t reg_read_dma;
-
- int buf_size;
- int buf_count;
- int buf_start;
- unsigned int max_cwperpage;
-
- int reg_read_pos;
-
- u32 cmd1, vld;
- bool exec_opwrite;
-};
+#include <linux/mtd/nand-qpic-common.h>
/*
* NAND special boot partitions
@@ -471,9 +47,9 @@ struct qcom_op {
unsigned int data_instr_idx;
unsigned int rdy_timeout_ms;
unsigned int rdy_delay_ns;
- u32 addr1_reg;
- u32 addr2_reg;
- u32 cmd_reg;
+ __le32 addr1_reg;
+ __le32 addr2_reg;
+ __le32 cmd_reg;
u8 flag;
};
@@ -544,243 +120,113 @@ struct qcom_nand_host {
bool bch_enabled;
};
-/*
- * This data type corresponds to the NAND controller properties which varies
- * among different NAND controllers.
- * @ecc_modes - ecc mode for NAND
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
- * @is_bam - whether NAND controller is using BAM
- * @is_qpic - whether NAND CTRL is part of qpic IP
- * @qpic_v2 - flag to indicate QPIC IP version 2
- * @use_codeword_fixup - whether NAND has different layout for boot partitions
- */
-struct qcom_nandc_props {
- u32 ecc_modes;
- u32 dev_cmd_reg_start;
- bool is_bam;
- bool is_qpic;
- bool qpic_v2;
- bool use_codeword_fixup;
-};
-
-/* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- devm_kfree(nandc->dev, bam_txn);
-}
-
-/* Allocates and Initializes the BAM transaction */
-static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn;
- size_t bam_txn_size;
- unsigned int num_cw = nandc->max_cwperpage;
- void *bam_txn_buf;
-
- bam_txn_size =
- sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-
- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
- if (!bam_txn_buf)
- return NULL;
-
- bam_txn = bam_txn_buf;
- bam_txn_buf += sizeof(*bam_txn);
-
- bam_txn->bam_ce = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-
- bam_txn->cmd_sgl = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-
- bam_txn->data_sgl = bam_txn_buf;
-
- init_completion(&bam_txn->txn_done);
-
- return bam_txn;
-}
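The allocation above is the classic single-buffer carve-up: one
devm_kzalloc() sized for the header plus all three per-codeword arrays,
with a cursor advanced past each sub-array. A stripped-down sketch of the
same idiom, with generic names rather than the driver's:

    struct txn {
            int *a;                 /* stands in for bam_ce */
            int *b;                 /* stands in for cmd_sgl */
    };

    static struct txn *txn_alloc(struct device *dev, size_t n)
    {
            void *buf = devm_kzalloc(dev, sizeof(struct txn) +
                                     2 * n * sizeof(int), GFP_KERNEL);
            struct txn *t = buf;

            if (!t)
                    return NULL;
            buf += sizeof(*t);      /* void * arithmetic, as in the driver */
            t->a = buf;
            buf += n * sizeof(int);
            t->b = buf;
            return t;
    }

One allocation means one failure point and cache-friendly adjacency, at
the cost of keeping the size expression and the cursor walk in sync.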
-
-/* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (!nandc->props->is_bam)
- return;
-
- bam_txn->bam_ce_pos = 0;
- bam_txn->bam_ce_start = 0;
- bam_txn->cmd_sgl_pos = 0;
- bam_txn->cmd_sgl_start = 0;
- bam_txn->tx_sgl_pos = 0;
- bam_txn->tx_sgl_start = 0;
- bam_txn->rx_sgl_pos = 0;
- bam_txn->rx_sgl_start = 0;
- bam_txn->last_data_desc = NULL;
- bam_txn->wait_second_completion = false;
-
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_DATA_SGL);
-
- reinit_completion(&bam_txn->txn_done);
-}
-
-/* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
-{
- struct bam_transaction *bam_txn = data;
-
- /*
- * In case of data transfer with NAND, 2 callbacks will be generated.
- * One for command channel and another one for data channel.
- * If current transaction has data descriptors
- * (i.e. wait_second_completion is true), then set this to false
- * and wait for second DMA descriptor completion.
- */
- if (bam_txn->wait_second_completion)
- bam_txn->wait_second_completion = false;
- else
- complete(&bam_txn->txn_done);
-}
-
-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
return container_of(chip, struct qcom_nand_host, chip);
}
-static inline struct qcom_nand_controller *
+static struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
- return container_of(chip->controller, struct qcom_nand_controller,
- controller);
+ return (struct qcom_nand_controller *)
+ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
}
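The open-coded subtraction that replaces container_of() only works under
one layout assumption: the shared struct nand_controller instance sits in
memory immediately after the driver's qcom_nand_controller, within the
same allocation. Pictorially:

    /*
     *  +------------------------------+  <- value returned here
     *  | struct qcom_nand_controller  |
     *  +------------------------------+  <- chip->controller points here
     *  | struct nand_controller       |
     *  +------------------------------+
     */

If the allocation scheme ever changes, this helper silently breaks, which
is why the pairing between it and the probe-time allocation matters.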
-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
return ioread32(nandc->base + offset);
}
-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
- u32 val)
+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
{
iowrite32(val, nandc->base + offset);
}
-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
- bool is_cpu)
+/* Helper to check whether this is the last CW or not */
+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
- if (!nandc->props->is_bam)
- return;
-
- if (is_cpu)
- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- else
- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
+ return cw == (ecc->steps - 1);
}
-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+/**
+ * nandc_set_read_loc_first() - set a first-codeword read location register
+ * @chip: NAND private flash chip data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: whether this is the last read location
+ *
+ * Program one of the NAND_READ_LOCATION_n registers.
+ */
+static void nandc_set_read_loc_first(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
- switch (offset) {
- case NAND_FLASH_CMD:
- return &regs->cmd;
- case NAND_ADDR0:
- return &regs->addr0;
- case NAND_ADDR1:
- return &regs->addr1;
- case NAND_FLASH_CHIP_SELECT:
- return &regs->chip_sel;
- case NAND_EXEC_CMD:
- return &regs->exec;
- case NAND_FLASH_STATUS:
- return &regs->clrflashstatus;
- case NAND_DEV0_CFG0:
- return &regs->cfg0;
- case NAND_DEV0_CFG1:
- return &regs->cfg1;
- case NAND_DEV0_ECC_CFG:
- return &regs->ecc_bch_cfg;
- case NAND_READ_STATUS:
- return &regs->clrreadstatus;
- case NAND_DEV_CMD1:
- return &regs->cmd1;
- case NAND_DEV_CMD1_RESTORE:
- return &regs->orig_cmd1;
- case NAND_DEV_CMD_VLD:
- return &regs->vld;
- case NAND_DEV_CMD_VLD_RESTORE:
- return &regs->orig_vld;
- case NAND_EBI2_ECC_BUF_CFG:
- return &regs->ecc_buf_cfg;
- case NAND_READ_LOCATION_0:
- return &regs->read_location0;
- case NAND_READ_LOCATION_1:
- return &regs->read_location1;
- case NAND_READ_LOCATION_2:
- return &regs->read_location2;
- case NAND_READ_LOCATION_3:
- return &regs->read_location3;
- case NAND_READ_LOCATION_LAST_CW_0:
- return &regs->read_location_last0;
- case NAND_READ_LOCATION_LAST_CW_1:
- return &regs->read_location_last1;
- case NAND_READ_LOCATION_LAST_CW_2:
- return &regs->read_location_last2;
- case NAND_READ_LOCATION_LAST_CW_3:
- return &regs->read_location_last3;
- default:
- return NULL;
- }
-}
-
-static void nandc_set_reg(struct nand_chip *chip, int offset,
- u32 val)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) |
+ ((is_last_read_loc) << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg_base == NAND_READ_LOCATION_0)
+ nandc->regs->read_location0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_1)
+ nandc->regs->read_location1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_2)
+ nandc->regs->read_location2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_3)
+ nandc->regs->read_location3 = locreg_val;
+}
+
+/**
+ * nandc_set_read_loc_last() - set a last-codeword read location register
+ * @chip: NAND private flash chip data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: whether this is the last read location
+ *
+ * Program one of the NAND_READ_LOCATION_LAST_CW_n registers.
+ */
+static void nandc_set_read_loc_last(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nandc_regs *regs = nandc->regs;
- __le32 *reg;
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) |
+ ((is_last_read_loc) << READ_LOCATION_LAST));
- reg = offset_to_nandc_reg(regs, offset);
+ locreg_val = cpu_to_le32(val);
- if (reg)
- *reg = cpu_to_le32(val);
-}
-
-/* Helper to check the code word, whether it is last cw or not */
-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
-{
- return cw == (ecc->steps - 1);
+ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
+ nandc->regs->read_location_last0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
+ nandc->regs->read_location_last1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
+ nandc->regs->read_location_last2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
+ nandc->regs->read_location_last3 = locreg_val;
}
/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
- int cw_offset, int read_size, int is_last_read_loc)
+ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg_base = NAND_READ_LOCATION_0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
reg_base = NAND_READ_LOCATION_LAST_CW_0;
reg_base += reg * 4;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
return nandc_set_read_loc_last(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
else
@@ -792,12 +238,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
+ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
+ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
}
/*
@@ -811,41 +258,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
struct nand_chip *chip = &host->chip;
- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
else
- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
} else {
- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
}
if (host->use_ecc) {
- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE);
- cfg1 = host->cfg1;
- ecc_bch_cfg = host->ecc_bch_cfg;
+ cfg1 = cpu_to_le32(host->cfg1);
+ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE);
- cfg1 = host->cfg1_raw;
- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ cfg1 = cpu_to_le32(host->cfg1_raw);
+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
}
- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = cmd;
+ nandc->regs->cfg0 = cfg0;
+ nandc->regs->cfg1 = cfg1;
+ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
+
+ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
+ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
+ nandc->regs->exec = cpu_to_le32(1);
if (read)
nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
@@ -853,366 +302,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
/*
- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
- * which will be submitted to DMA engine.
- */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
-{
- struct desc_info *desc;
- struct scatterlist *sgl;
- unsigned int sgl_cnt;
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- enum dma_transfer_direction dir_eng;
- struct dma_async_tx_descriptor *dma_desc;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- if (chan == nandc->cmd_chan) {
- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else if (chan == nandc->tx_chan) {
- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else {
- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- }
-
- sg_mark_end(sgl + sgl_cnt - 1);
- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- if (ret == 0) {
- dev_err(nandc->dev, "failure in mapping desc\n");
- kfree(desc);
- return -ENOMEM;
- }
-
- desc->sgl_cnt = sgl_cnt;
- desc->bam_sgl = sgl;
-
- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
- flags);
-
- if (!dma_desc) {
- dev_err(nandc->dev, "failure in prep desc\n");
- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- kfree(desc);
- return -EINVAL;
- }
-
- desc->dma_desc = dma_desc;
-
- /* update last data/command descriptor */
- if (chan == nandc->cmd_chan)
- bam_txn->last_cmd_desc = dma_desc;
- else
- bam_txn->last_data_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-}
-
-/*
- * Prepares the command descriptor for BAM DMA which will be used for NAND
- * register reads and writes. The command descriptor requires the command
- * to be formed in command element type so this function uses the command
- * element from bam transaction ce array and fills the same with required
- * data. A single SGL can contain multiple command elements so
- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
- * after the current command element.
- */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
-{
- int bam_ce_size;
- int i, ret;
- struct bam_cmd_element *bam_ce_buffer;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-
- /* fill the command desc */
- for (i = 0; i < size; i++) {
- if (read)
- bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
- reg_buf_dma_addr(nandc,
- (__le32 *)vaddr + i));
- else
- bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
- *((__le32 *)vaddr + i));
- }
-
- bam_txn->bam_ce_pos += size;
-
- /* use the separate sgl after this command */
- if (flags & NAND_BAM_NEXT_SGL) {
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
- bam_ce_size = (bam_txn->bam_ce_pos -
- bam_txn->bam_ce_start) *
- sizeof(struct bam_cmd_element);
- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
- bam_ce_buffer, bam_ce_size);
- bam_txn->cmd_sgl_pos++;
- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-
- if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
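The two helpers used in the loop above come from the BAM DMA engine API
(include/linux/dma/qcom_bam_dma.h): bam_prep_ce_le32() builds a command
element that writes an immediate little-endian value into a controller
register, while bam_prep_ce() builds one that reads a register back into
a DMA address. Schematically, matching the calls above:

    /* write 1 to a register via the command pipe */
    bam_prep_ce_le32(ce, reg_phys, BAM_WRITE_COMMAND, cpu_to_le32(1));

    /* capture a register into the read-back buffer */
    bam_prep_ce(ce, reg_phys, BAM_READ_COMMAND, buf_dma);

so a register "write" on BAM hardware is really a small command program
executed by the DMA engine, not a CPU MMIO access.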
-
-/*
- * Prepares the data descriptor for BAM DMA which will be used for NAND
- * data reads and writes.
- */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
-{
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (read) {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
- vaddr, size);
- bam_txn->rx_sgl_pos++;
- } else {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
- vaddr, size);
- bam_txn->tx_sgl_pos++;
-
- /*
- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
- * is not set, form the DMA descriptor
- */
- if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
-{
- struct desc_info *desc;
- struct dma_async_tx_descriptor *dma_desc;
- struct scatterlist *sgl;
- struct dma_slave_config slave_conf;
- struct qcom_adm_peripheral_config periph_conf = {};
- enum dma_transfer_direction dir_eng;
- int ret;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- sgl = &desc->adm_sgl;
-
- sg_init_one(sgl, vaddr, size);
-
- if (read) {
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- } else {
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- }
-
- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
- if (ret == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- memset(&slave_conf, 0x00, sizeof(slave_conf));
-
- slave_conf.device_fc = flow_control;
- if (read) {
- slave_conf.src_maxburst = 16;
- slave_conf.src_addr = nandc->base_dma + reg_off;
- if (nandc->data_crci) {
- periph_conf.crci = nandc->data_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- } else {
- slave_conf.dst_maxburst = 16;
- slave_conf.dst_addr = nandc->base_dma + reg_off;
- if (nandc->cmd_crci) {
- periph_conf.crci = nandc->cmd_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- }
-
- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
- if (ret) {
- dev_err(nandc->dev, "failed to configure dma channel\n");
- goto err;
- }
-
- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
- if (!dma_desc) {
- dev_err(nandc->dev, "failed to prepare desc\n");
- ret = -EINVAL;
- goto err;
- }
-
- desc->dma_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-err:
- kfree(desc);
-
- return ret;
-}
-
-/*
- * read_reg_dma: prepares a descriptor to read a given number of
- * contiguous registers to the reg_read_buf pointer
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to read
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- void *vaddr;
-
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
-
- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, first);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, true, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- struct nandc_regs *regs = nandc->regs;
- void *vaddr;
-
- vaddr = offset_to_nandc_reg(regs, first);
-
- if (first == NAND_ERASED_CW_DETECT_CFG) {
- if (flags & NAND_ERASED_CW_SET)
- vaddr = &regs->erased_cw_detect_cfg_set;
- else
- vaddr = &regs->erased_cw_detect_cfg_clr;
- }
-
- if (first == NAND_EXEC_CMD)
- flags |= NAND_BAM_NWD;
-
- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-
- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, false, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
- * controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to write to
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-}
-
-/*
- * write_data_dma: prepares a DMA descriptor to transfer data from
- * 'vaddr' to the controller's internal buffer
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to read from
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-}
-
-/*
* Helper to prepare DMA descriptors for configuring registers
* before reading a NAND page.
*/
@@ -1220,13 +309,14 @@ static void config_nand_page_read(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
@@ -1239,23 +329,23 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int reg = NAND_READ_LOCATION_0;
+ __le32 *reg = &nandc->regs->read_location0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
- reg = NAND_READ_LOCATION_LAST_CW_0;
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg = &nandc->regs->read_location_last0;
- if (nandc->props->is_bam)
- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
+ if (nandc->props->supports_bam)
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
} else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
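Taken together, the two helpers above run once per page and once per
codeword respectively; the overall shape of a page read (as the later
read_page_ecc() hunks show) is:

    config_nand_page_read(chip);                 /* addr, cfg, erased-CW detect */
    for (cw = 0; cw < ecc->steps; cw++) {
            config_nand_cw_read(chip, true, cw); /* cmd, exec, status read-back */
            /* ...queue data/oob DMA for this codeword... */
    }
    ret = qcom_submit_descs(nandc);              /* nothing runs until here */

Every call before the final submit only queues DMA descriptors, which is
what lets the whole multi-codeword read go out as one BAM transaction.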
@@ -1279,11 +369,11 @@ static void config_nand_page_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1294,95 +384,14 @@ static void config_nand_cw_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-}
-
-/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
- dma_cookie_t cookie = 0;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- int ret = 0;
-
- if (nandc->props->is_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
- if (ret)
- goto err_unmap_free_desc;
- }
- }
-
- list_for_each_entry(desc, &nandc->desc_list, node)
- cookie = dmaengine_submit(desc->dma_desc);
-
- if (nandc->props->is_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
- if (bam_txn->last_data_desc) {
- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
- bam_txn->last_data_desc->callback_param = bam_txn;
- bam_txn->wait_second_completion = true;
- }
-
- dma_async_issue_pending(nandc->tx_chan);
- dma_async_issue_pending(nandc->rx_chan);
- dma_async_issue_pending(nandc->cmd_chan);
-
- if (!wait_for_completion_timeout(&bam_txn->txn_done,
- QPIC_NAND_COMPLETION_TIMEOUT))
- ret = -ETIMEDOUT;
- } else {
- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- ret = -ETIMEDOUT;
- }
-
-err_unmap_free_desc:
- /*
- * Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
- */
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
-
- if (nandc->props->is_bam)
- dma_unmap_sg(nandc->dev, desc->bam_sgl,
- desc->sgl_cnt, desc->dir);
- else
- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
- desc->dir);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- kfree(desc);
- }
-
- return ret;
-}
-
-/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
-{
- nandc->reg_read_pos = 0;
- nandc_read_buffer_sync(nandc, false);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1446,7 +455,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1473,13 +482,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
host->use_ecc = false;
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
raw_cw = ecc->steps - 1;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
@@ -1497,7 +506,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
read_loc += data_size1;
@@ -1512,18 +521,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
config_nand_cw_read(chip, false, raw_cw);
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1621,7 +630,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
@@ -1734,7 +743,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_size = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
if (data_buf && oob_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
nandc_set_read_loc(chip, i, 1, data_size,
@@ -1750,8 +759,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
config_nand_cw_read(chip, true, i);
if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
@@ -1766,8 +775,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
if (data_buf)
@@ -1776,7 +785,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -1797,7 +806,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
int size;
int ret;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
@@ -1809,9 +818,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
@@ -1897,14 +906,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
@@ -1945,8 +954,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
@@ -1973,8 +982,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
set_address(host, 0, page);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -1995,8 +1004,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_size = ecc->bytes;
}
- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
@@ -2008,8 +1017,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
@@ -2018,7 +1027,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write page\n");
return ret;
@@ -2043,8 +1052,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2070,28 +1079,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
return ret;
@@ -2121,7 +1130,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2136,11 +1145,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
return ret;
@@ -2167,7 +1176,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
*/
host->use_ecc = false;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
@@ -2194,8 +1203,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
/*
* to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2212,11 +1221,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
return ret;
@@ -2455,15 +1464,15 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
- if (nandc->props->is_bam)
- free_bam_transaction(nandc);
+ if (nandc->props->supports_bam)
+ qcom_free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
- if (nandc->props->is_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (nandc->props->supports_bam) {
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
@@ -2485,44 +1494,43 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_data << UD_SIZE_BYTES
- | 0 << DISABLE_STATUS_AFTER_WRITE
- | 5 << NUM_ADDR_CYCLES
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
- | 0 << STATUS_BFR_READ
- | 1 << SET_RD_MODE_AFTER_STATUS
- | host->spare_bytes << SPARE_SIZE_BYTES;
-
- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | bad_block_byte << BAD_BLOCK_BYTE_NUM
- | 0 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | host->bch_enabled << ENABLE_BCH_ECC;
-
- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_size << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES;
-
- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE;
-
- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
- | 0 << ECC_SW_RESET
- | host->cw_data << ECC_NUM_DATA_BYTES
- | 1 << ECC_FORCE_CLK_OPEN
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
- if (!nandc->props->qpic_v2)
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
+
+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+
+ if (!nandc->props->qpic_version2)
host->ecc_buf_cfg = 0x203 << NUM_STEPS;
host->clrflashstatus = FS_READY_BSY_N;
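The conversion above swaps open-coded shifts for FIELD_PREP() against
mask macros. Taking UD_SIZE_BYTES as the example (mask GENMASK(18, 9),
old shift 9, per the removed defines earlier in this patch):

    /* old */ host->cw_data << UD_SIZE_BYTES;              /* cw_data << 9 */
    /* new */ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);

FIELD_PREP() shifts the value to the mask's least significant set bit, so
the register image is unchanged, while the macro additionally checks at
compile time that the value fits in the field.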
@@ -2556,7 +1564,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
cmd = OP_FETCH_ID;
break;
case NAND_CMD_PARAM:
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
cmd = OP_PAGE_READ_ONFI_READ;
else
cmd = OP_PAGE_READ;
@@ -2609,7 +1617,7 @@ static int qcom_parse_instructions(struct nand_chip *chip,
if (ret < 0)
return ret;
- q_op->cmd_reg = ret;
+ q_op->cmd_reg = cpu_to_le32(ret);
q_op->rdy_delay_ns = instr->delay_ns;
break;
@@ -2619,10 +1627,10 @@ static int qcom_parse_instructions(struct nand_chip *chip,
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
- q_op->addr1_reg |= addrs[i] << (i * 8);
+ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
if (naddrs > 4)
- q_op->addr2_reg |= addrs[4];
+ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
q_op->rdy_delay_ns = instr->delay_ns;
break;
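A worked example of the packing above: for address cycles
{0x04, 0x25, 0x10, 0x00, 0x01} (column low/high, then three row bytes)
the loop produces

    addr1_reg = cpu_to_le32(0x00102504);   /* cycles 0-3, one byte per lane */
    addr2_reg = cpu_to_le32(0x00000001);   /* cycle 4 */

which is exactly the layout NAND_ADDR0/NAND_ADDR1 expect. OR-ing the
per-cycle cpu_to_le32() values is safe because byte swapping distributes
over bitwise OR.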
@@ -2663,7 +1671,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
unsigned long start = jiffies + msecs_to_jiffies(time_ms);
u32 flash;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
do {
flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2703,23 +1711,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting status descriptor\n");
goto err_out;
}
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < num_cw; i++) {
flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2760,23 +1768,21 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ nandc->regs->exec = cpu_to_le32(1);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting read id descriptor\n");
goto err_out;
@@ -2786,7 +1792,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
err_out:
@@ -2807,15 +1813,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
if (q_op.flag == OP_PROGRAM_PAGE) {
goto wait_rdy;
- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
- } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
+ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
return 0;
}
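Because cmd_reg is now kept little-endian, the opcode tests wrap the constant in cpu_to_le32() instead of converting the register; on a constant the conversion folds at compile time, so the test stays a single word compare on any endianness. A minimal sketch:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static bool cmd_is(__le32 cmd_reg, u32 opcode)
	{
		/* cpu_to_le32() on a constant folds at compile time. */
		return cmd_reg == cpu_to_le32(opcode);
	}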
@@ -2823,20 +1828,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- if (q_op.cmd_reg == OP_BLOCK_ERASE)
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting misc descriptor\n");
goto err_out;
@@ -2864,46 +1869,46 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
if (ret)
return ret;
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = 0;
+ nandc->regs->addr1 = 0;
+
+ nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+ nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
}
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->exec = cpu_to_le32(1);
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
+ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
}
instr = q_op.data_instr;
@@ -2912,9 +1917,9 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
nandc_set_read_loc(chip, 0, 0, 0, len, 1);
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
nandc->buf_count = len;
@@ -2922,16 +1927,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
config_nand_single_cw_page_read(chip, false, 0);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting param page descriptor\n");
goto err_out;
@@ -3015,151 +2021,24 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.exec_op = qcom_nand_exec_op,
};
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
-}
-
-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-{
- int ret;
-
- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(nandc->dev, "failed to set DMA mask\n");
- return ret;
- }
-
- /*
- * we use the internal buffer for reading ONFI params, reading small
- * data like ID and status, and performing read-copy-write operations
- * when writing to a codeword partially. 532 is the maximum possible
- * size of a codeword for our nand controller
- */
- nandc->buf_size = 532;
-
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
- if (!nandc->data_buffer)
- return -ENOMEM;
-
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
- if (!nandc->regs)
- return -ENOMEM;
-
- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
- sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
- if (!nandc->reg_read_buf)
- return -ENOMEM;
-
- if (nandc->props->is_bam) {
- nandc->reg_read_dma =
- dma_map_single(nandc->dev, nandc->reg_read_buf,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
- return -EIO;
- }
-
- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
- if (IS_ERR(nandc->tx_chan)) {
- ret = PTR_ERR(nandc->tx_chan);
- nandc->tx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "tx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
- if (IS_ERR(nandc->rx_chan)) {
- ret = PTR_ERR(nandc->rx_chan);
- nandc->rx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
- if (IS_ERR(nandc->cmd_chan)) {
- ret = PTR_ERR(nandc->cmd_chan);
- nandc->cmd_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "cmd DMA channel request failed\n");
- goto unalloc;
- }
-
- /*
- * Initially allocate BAM transaction to read ONFI param page.
- * After detecting all the devices, this BAM transaction will
- * be freed and the next BAM transaction will be allocated with
- * maximum codeword size
- */
- nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- ret = -ENOMEM;
- goto unalloc;
- }
- } else {
- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
- if (IS_ERR(nandc->chan)) {
- ret = PTR_ERR(nandc->chan);
- nandc->chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rxtx DMA channel request failed\n");
- return ret;
- }
- }
-
- INIT_LIST_HEAD(&nandc->desc_list);
- INIT_LIST_HEAD(&nandc->host_list);
-
- nand_controller_init(&nandc->controller);
- nandc->controller.ops = &qcom_nandc_ops;
-
- return 0;
-unalloc:
- qcom_nandc_unalloc(nandc);
- return ret;
-}
-
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
u32 nand_ctrl;
+ nand_controller_init(nandc->controller);
+ nandc->controller->ops = &qcom_nandc_ops;
+
/* kill onenand */
- if (!nandc->props->is_qpic)
+ if (!nandc->props->nandc_part_of_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- if (!nandc->props->qpic_v2)
+ if (!nandc->props->qpic_version2)
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nand_ctrl = nandc_read(nandc, NAND_CTRL);
/*
@@ -3176,7 +2055,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
}
/* save the original values of these registers */
- if (!nandc->props->qpic_v2) {
+ if (!nandc->props->qpic_version2) {
nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
nandc->vld = NAND_DEV_CMD_VLD_VAL;
}
@@ -3288,7 +2167,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
chip->legacy.block_bad = qcom_nandc_block_bad;
chip->legacy.block_markbad = qcom_nandc_block_markbad;
- chip->controller = &nandc->controller;
+ chip->controller = nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
@@ -3349,7 +2228,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
struct device_node *np = nandc->dev->of_node;
int ret;
- if (!nandc->props->is_bam) {
+ if (!nandc->props->supports_bam) {
ret = of_property_read_u32(np, "qcom,cmd-crci",
&nandc->cmd_crci);
if (ret) {
@@ -3371,17 +2250,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
static int qcom_nandc_probe(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc;
+ struct nand_controller *controller;
const void *dev_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
+ GFP_KERNEL);
if (!nandc)
return -ENOMEM;
+ controller = (struct nand_controller *)&nandc[1];
platform_set_drvdata(pdev, nandc);
nandc->dev = dev;
+ nandc->controller = controller;
dev_data = of_device_get_match_data(dev);
if (!dev_data) {
@@ -3474,30 +2357,30 @@ static void qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
- .is_bam = false,
+ .supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
- .qpic_v2 = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
+ .qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
};
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 19cc77288ebb..1e61ab21893a 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
-spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 7936ea546b03..6046c73f8424 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -21,8 +21,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index 82b377c06812..bb5298911137 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -15,8 +15,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index b1df7f627161..da4713692674 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -294,6 +294,9 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
/* Only enable or disable the engine */
@@ -901,9 +904,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
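This retry pattern pairs with the SPINAND_NO_RAW_ACCESS flag introduced for the SkyHigh parts below: the ECC prepare hook fails raw I/O with -EOPNOTSUPP, and the bad-block helpers fall back to an ECC-enabled access. A sketch of the fallback as a helper, assuming the core.c-internal spinand_read_page():

	static int read_page_maybe_raw(struct spinand_device *spinand,
				       struct nand_page_io_req *req)
	{
		int ret = spinand_read_page(spinand, req);

		if (ret == -EOPNOTSUPP) {		/* no raw access */
			req->mode = MTD_OPS_PLACE_OOB;	/* retry with ECC */
			ret = spinand_read_page(spinand, req);
		}
		return ret;
	}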
@@ -942,11 +953,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -1117,6 +1131,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
&xtx_spinand_manufacturer,
@@ -1198,10 +1213,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -1214,17 +1232,23 @@ spinand_select_op_variant(struct spinand_device *spinand,
if (ret)
break;
+ spi_mem_adjust_op_freq(spinand->spimem, &op);
+
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(&op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
/**
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 4597a82de23a..323a20901fc9 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -15,8 +15,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index e0d2d9257045..ecd5f6bffa33 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -14,8 +14,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -81,6 +81,16 @@ static const struct spinand_info foresee_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&f35sqa002g_ooblayout,
f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
};
static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 6023cba748bb..d620bb02a20a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -28,32 +28,32 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index d277c3220fdc..3dc4d63d6832 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,8 +28,8 @@ struct macronix_priv {
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 12601bc4227a..ad0bb9755a09 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -33,8 +33,8 @@ static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -48,8 +48,8 @@ static SPINAND_OP_VARIANTS(x4_update_cache_variants,
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 519ade513c1f..6e7cc6995380 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -26,8 +26,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
new file mode 100644
index 000000000000..961df0d74984
--- /dev/null
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 SkyHigh Memory Limited
+ *
+ * Author: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+ * Co-Author: KR Kim <kr.kim@skyhighmemory.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_SKYHIGH 0x01
+#define SKYHIGH_STATUS_ECC_1TO2_BITFLIPS (1 << 4)
+#define SKYHIGH_STATUS_ECC_3TO6_BITFLIPS (2 << 4)
+#define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4)
+#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ /* ECC bytes are stored in hidden area. */
+ return -ERANGE;
+}
+
+static int skyhigh_spinand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* ECC bytes are stored in hidden area. Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops skyhigh_spinand_ooblayout = {
+ .ecc = skyhigh_spinand_ooblayout_ecc,
+ .free = skyhigh_spinand_ooblayout_free,
+};
+
+static int skyhigh_spinand_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case SKYHIGH_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case SKYHIGH_STATUS_ECC_1TO2_BITFLIPS:
+ return 2;
+
+ case SKYHIGH_STATUS_ECC_3TO6_BITFLIPS:
+ return 6;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
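The status register packs a bitflip range into bits 5:4, so the helper reports the worst case of each range (2 for "1-2 flips", 6 for "3-6 flips"); the core then credits that number against the bitflip threshold. A sketch of how the return value is consumed, following the usual spinand core pattern:

	ret = skyhigh_spinand_ecc_get_status(spinand, status);
	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;		/* uncorrectable */
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;	/* worst-case flips */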
+
+static const struct spinand_info skyhigh_spinand_table[] = {
+ SPINAND_INFO("S35ML01G301",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML01G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML02G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML04G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+};
+
+static int skyhigh_spinand_init(struct spinand_device *spinand)
+{
+ /*
+	 * Config_Protect_En (bit 1 in the Block Lock register) must be set to 1
+	 * before writing other bits. Do it here before the core unlocks all
+	 * blocks by writing the block protection bits.
+ */
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK,
+ SKYHIGH_CONFIG_PROTECT_EN);
+}
+
+static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = {
+ .init = skyhigh_spinand_init,
+};
+
+const struct spinand_manufacturer skyhigh_spinand_manufacturer = {
+ .id = SPINAND_MFR_SKYHIGH,
+ .name = "SkyHigh",
+ .chips = skyhigh_spinand_table,
+ .nchips = ARRAY_SIZE(skyhigh_spinand_table),
+ .ops = &skyhigh_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index bbbcaa87c0bc..2e2106b2705f 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -17,8 +17,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 7180e615ac97..8394a1b1fb0c 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/units.h>
#define SPINAND_MFR_WINBOND 0xEF
@@ -17,13 +18,31 @@
#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+/*
+ * "X2" in the core is equivalent to "dual output" in the datasheets,
+ * "X4" in the core is equivalent to "quad output" in the datasheets.
+ */
+
+static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -194,7 +213,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -223,7 +242,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 66a4255bdf06..3f539ca0de86 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -27,8 +27,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 45d1153a04a0..82c592f0a1e1 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -238,6 +238,10 @@ static const struct flash_info atmel_nor_parts[] = {
.flags = SPI_NOR_HAS_LOCK,
.no_sfdp_flags = SECT_4K,
.fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x87, 0x01),
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
},
};
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 66949d9f0cc5..19eb98bd6821 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -17,6 +17,7 @@
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -89,7 +90,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
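Dummy cycles are clocked on the address lines, so their width must track the address phase, not the data phase: in a 1-1-4 protocol the address goes out on one line and the dummies must too, even though data uses four. A sketch of the corrected phase setup under that assumption:

	/* 1-1-4 read: cmd x1, addr x1, data x4 */
	op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);  /* 1 */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
	op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);  /* 4 */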
@@ -3576,7 +3577,8 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
static int spi_nor_probe(struct spi_mem *spimem)
{
struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct device *dev = &spi->dev;
+ struct flash_platform_data *data = dev_get_platdata(dev);
struct spi_nor *nor;
/*
* Enable all caps by default. The core will mask them after
@@ -3586,13 +3588,17 @@ static int spi_nor_probe(struct spi_mem *spimem)
char *flash_name;
int ret;
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, dev->of_node);
spi_mem_set_drvdata(spimem, nor);
@@ -3628,9 +3634,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
*/
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
+ devm_kfree(dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
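Probing now tethers the flash's supply to the device: devm_regulator_get_enable() looks up the "vcc" supply, enables it, and arranges for it to be disabled and released automatically on detach, so no matching cleanup call is needed. A minimal sketch of the pattern:

	static int example_probe(struct spi_mem *spimem)
	{
		struct device *dev = &spimem->spi->dev;
		int ret;

		ret = devm_regulator_get_enable(dev, "vcc");
		if (ret)
			return dev_err_probe(dev, ret,
					     "failed to enable vcc\n");

		/* ... rest of probe; no regulator teardown required ... */
		return 0;
	}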
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 5c33740ed7f5..ceff412f7d65 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -448,7 +448,11 @@ struct spi_nor_id {
* @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
* older chips).
* @name: (obsolete) the name of the flash. Do not set it for new additions.
- * @size: the size of the flash in bytes.
+ * @size: the size of the flash in bytes. The flash size is one
+ * property parsed by the SFDP. We use it as an indicator
+ * whether we need SFDP parsing for a particular flash.
+ * I.e. non-legacy flash entries in flash_info will have
+ * a size of zero iff SFDP should be used.
* @sector_size: (optional) the size listed here is what works with
* SPINOR_OP_SE, which isn't necessarily called a "sector" by
* the vendor. Defaults to 64k.
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 830da21eea08..99936fd25d43 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -143,12 +143,6 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
- .id = SNOR_ID(0xc2, 0x25, 0x39),
- .name = "mx25u25635f",
- .size = SZ_32M,
- .no_sfdp_flags = SECT_4K,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- }, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx25u51245g",
.size = SZ_64M,
@@ -230,7 +224,8 @@ static int macronix_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
/* Read flash ID to make sure the switch was successful. */
- ret = spi_nor_read_id(nor, 4, 4, buf, SNOR_PROTO_8_8_8_DTR);
+ ret = spi_nor_read_id(nor, nor->addr_nbytes, 4, buf,
+ SNOR_PROTO_8_8_8_DTR);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
return ret;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 5a88a6096ca8..bf08dbf5e742 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -958,6 +958,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s25hx_t_fixups
}, {
+ /* S28HL256T */
+ .id = SNOR_ID(0x34, 0x5a, 0x19),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5a, 0x1a),
.name = "s28hl512t",
.mfr_flags = USE_CLPEF,
@@ -968,6 +973,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
+ /* S28HL02GT */
+ .id = SNOR_ID(0x34, 0x5a, 0x1c),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5b, 0x19),
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index b5ad7118c49a..175211fe6a5e 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
int ret;
nor->program_opcode = op;
- ret = spi_nor_write_data(nor, to, 1, buf);
+ ret = spi_nor_write_data(nor, to, len, buf);
if (ret < 0)
return ret;
WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 5e9eb268073d..4f12ff755df0 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -50,7 +50,7 @@ static struct attribute *spi_nor_sysfs_entries[] = {
};
static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
@@ -62,9 +62,9 @@ static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
sfdp_size);
}
-static BIN_ATTR_RO(sfdp, 0);
+static const BIN_ATTR_RO(sfdp, 0);
-static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+static const struct bin_attribute *const spi_nor_sysfs_bin_entries[] = {
&bin_attr_sfdp,
NULL
};
@@ -104,7 +104,7 @@ static const struct attribute_group spi_nor_sysfs_group = {
.is_visible = spi_nor_sysfs_is_visible,
.is_bin_visible = spi_nor_sysfs_is_bin_visible,
.attrs = spi_nor_sysfs_entries,
- .bin_attrs = spi_nor_sysfs_bin_entries,
+ .bin_attrs_new = spi_nor_sysfs_bin_entries,
};
const struct attribute_group *spi_nor_sysfs_groups[] = {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 60d0155be869..2836905f0152 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 30be4ed68fad..ef6a22f372f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1537,7 +1537,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
if (token) {
int err = kstrtoint(token, 10, &p->ubi_num);
- if (err) {
+ if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6bb80d7714bc..b700a0efaa93 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -828,6 +828,70 @@ out_free:
return err;
}
+static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq)
+{
+ struct ubi_ecinfo_req req;
+ struct ubi_wl_entry *wl;
+ int read_cnt;
+ int peb;
+ int end_peb;
+
+ /* Copy the input arguments */
+ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ /* Check input arguments */
+ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count)
+ return -EINVAL;
+
+ if (check_add_overflow(req.start, req.length, &end_peb))
+ return -EINVAL;
+
+ if (end_peb > ubi->peb_count)
+ end_peb = ubi->peb_count;
+
+ /* Check access rights before filling erase_counters array */
+ if (!access_ok((void __user *)ureq->erase_counters,
+ (end_peb-req.start) * sizeof(int32_t)))
+ return -EFAULT;
+
+ /* Fill erase counter array */
+ read_cnt = 0;
+ for (peb = req.start; peb < end_peb; read_cnt++, peb++) {
+ int ec;
+
+ if (ubi_io_is_bad(ubi, peb)) {
+ if (__put_user(UBI_UNKNOWN, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ continue;
+ }
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[peb];
+ if (wl)
+ ec = wl->ec;
+ else
+ ec = UBI_UNKNOWN;
+
+ spin_unlock(&ubi->wl_lock);
+
+ if (__put_user(ec, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ }
+
+ /* Return actual read length */
+ req.read_length = read_cnt;
+
+ /* Copy everything except erase counter array */
+ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ return 0;
+}
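From userspace the new ioctl reads a window of per-PEB erase counters in one call. A hedged sketch of a caller; the request layout (start, length, read_length, trailing erase_counters[] array) is inferred from the handler above, with UBI_IOCECNFO and struct ubi_ecinfo_req taken from <mtd/ubi-user.h>:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <mtd/ubi-user.h>

	int main(void)
	{
		int n = 64;
		int fd = open("/dev/ubi0", O_RDONLY);
		struct ubi_ecinfo_req *req;

		req = calloc(1, sizeof(*req) +
				n * sizeof(req->erase_counters[0]));
		if (fd < 0 || !req)
			return 1;

		req->start = 0;		/* first PEB of the window */
		req->length = n;	/* how many counters to fetch */
		if (ioctl(fd, UBI_IOCECNFO, req))
			return 1;

		for (int i = 0; i < req->read_length; i++)
			printf("PEB %d: EC %d\n",
			       req->start + i, (int)req->erase_counters[i]);
		return 0;
	}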
+
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -991,6 +1055,12 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ case UBI_IOCECNFO:
+ {
+ err = ubi_get_ec_info(ubi, argp);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 26cc53ad34ec..c792b9bcab9b 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -549,7 +549,6 @@ struct ubi_debug_info {
* @peb_buf: a buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf
* @ckvol_mutex: serializes static volume checking when opening
- * @wl_reboot_notifier: close all wear-leveling work before reboot
*
* @dbg: debugging information for this UBI device
*/
@@ -652,7 +651,6 @@ struct ubi_device {
void *peb_buf;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct notifier_block wl_reboot_notifier;
struct ubi_debug_info dbg;
};
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 4f6f339d8fb8..fbd399cf6503 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -89,7 +89,6 @@
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include "ubi.h"
#include "wl.h"
@@ -128,8 +127,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
-static int ubi_wl_reboot_notifier(struct notifier_block *n,
- unsigned long state, void *cmd);
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -1953,13 +1950,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (!ubi->ro_mode && !ubi->fm_disabled)
ubi_ensure_anchor_pebs(ubi);
#endif
-
- if (!ubi->wl_reboot_notifier.notifier_call) {
- ubi->wl_reboot_notifier.notifier_call = ubi_wl_reboot_notifier;
- ubi->wl_reboot_notifier.priority = 1; /* Higher than MTD */
- register_reboot_notifier(&ubi->wl_reboot_notifier);
- }
-
return 0;
out_free:
@@ -2005,17 +1995,6 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-static int ubi_wl_reboot_notifier(struct notifier_block *n,
- unsigned long state, void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, wl_reboot_notifier);
- ubi_wl_close(ubi);
-
- return NOTIFY_DONE;
-}
-
/**
* self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 78c0022697ec..02be4ba37257 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -42,7 +42,7 @@ struct mux_state {
unsigned int state;
};
-static struct class mux_class = {
+static const struct class mux_class = {
.name = "mux",
};
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a2abfade82dd..70814303aab8 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -84,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -94,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -108,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -124,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -136,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
@@ -144,7 +144,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
@@ -194,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
@@ -589,7 +589,7 @@ static void bareudp_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
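Switching pcpu_stat_type to NETDEV_PCPU_STAT_DSTATS is what makes the substitutions above legal: DSTATS keeps rx/tx bytes, packets and drops all in per-cpu storage, so the old core-stats atomic drop counters become plain per-cpu increments. A sketch of the rx-path pattern, with the validation check standing in for bareudp's header parsing:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int example_rx(struct net_device *dev, struct sk_buff *skb)
	{
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			dev_dstats_rx_dropped(dev);	/* per-cpu, no atomic */
			kfree_skb(skb);
			return NET_RX_DROP;
		}

		dev_dstats_rx_add(dev, skb->len);
		return NET_RX_SUCCESS;
	}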
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index b19492a7f6ad..8adbec7c5084 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -63,13 +63,8 @@ void bond_debug_unregister(struct bonding *bond)
void bond_debug_reregister(struct bonding *bond)
{
- struct dentry *d;
-
- d = debugfs_rename(bonding_debug_root, bond->debug_dir,
- bonding_debug_root, bond->dev->name);
- if (!IS_ERR(d)) {
- bond->debug_dir = d;
- } else {
+ int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
+ if (err) {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
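debugfs_rename() handed back a new dentry (or an ERR_PTR) that callers had to store; debugfs_change_name() renames in place and returns an errno, so the dentry bookkeeping disappears. Usage reduces to:

	int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
	if (err)
		netdev_warn(bond->dev, "debugfs rename failed: %d\n", err);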
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7b78c2bada81..e45bba240cbc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1538,17 +1538,20 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_PARTIAL)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_GSO_SOFTWARE)
+#define BOND_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
+
static void bond_compute_features(struct bonding *bond)
{
+ netdev_features_t gso_partial_features = BOND_GSO_PARTIAL_FEATURES;
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t gso_partial_features = NETIF_F_GSO_ESP;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1582,8 +1585,9 @@ static void bond_compute_features(struct bonding *bond)
BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */
- if (slave->dev->hw_enc_features & NETIF_F_GSO_PARTIAL)
- gso_partial_features &= slave->dev->gso_partial_features;
+ gso_partial_features = netdev_increment_features(gso_partial_features,
+ slave->dev->gso_partial_features,
+ BOND_GSO_PARTIAL_FEATURES);
mpls_features = netdev_increment_features(mpls_features,
slave->dev->mpls_features,
@@ -1598,12 +1602,8 @@ static void bond_compute_features(struct bonding *bond)
}
bond_dev->hard_header_len = max_hard_header_len;
- if (gso_partial_features & NETIF_F_GSO_ESP)
- bond_dev->gso_partial_features |= NETIF_F_GSO_ESP;
- else
- bond_dev->gso_partial_features &= ~NETIF_F_GSO_ESP;
-
done:
+ bond_dev->gso_partial_features = gso_partial_features;
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
@@ -6046,6 +6046,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 327b6ecdc77e..d1b095af253b 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1242,10 +1242,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla
slave->dev->flags & IFF_MULTICAST;
}
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave
+ *
+ * Note: the IPv6 NS target address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message
+ * should be the solicited-node multicast address of the target, and the
+ * destination mac of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * a slave is enslaved or released
+ */
static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
{
struct in6_addr *targets = bond->params.ns_targets;
char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
int i;
if (!slave_can_set_ns_maddr(bond, slave))
@@ -1255,7 +1273,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool
if (ipv6_addr_any(&targets[i]))
break;
- if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
if (add)
dev_mc_add(slave->dev, slot_maddr);
else
@@ -1278,23 +1297,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
slave_set_ns_maddrs(bond, slave, false);
}
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old mac address with the new one on
+ * the slave.
+ *
+ * Note: the target/slot IPv6 address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message
+ * should be the solicited-node multicast address of the target, and the
+ * destination mac of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * An IPv6 NS target is added or removed.
+ */
static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
struct in6_addr *target, struct in6_addr *slot)
{
- char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
return;
- /* remove the previous maddr from slave */
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
if (!ipv6_addr_any(slot) &&
- !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
- dev_mc_del(slave->dev, slot_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
- /* add new maddr on slave if target is set */
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
if (!ipv6_addr_any(target) &&
- !ndisc_mc_map(target, target_maddr, slave->dev, 0))
- dev_mc_add(slave->dev, target_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
}
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
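Both bonding helpers above rely on the RFC 4291 solicited-node mapping. As a
reference, here is a minimal sketch (not part of this patch; the wrapper name
is hypothetical) of how an IPv6 NS target becomes the multicast MAC the slave
must subscribe to, using the same in-kernel helpers from <net/addrconf.h> and
<net/ndisc.h>:

	/* Map an IPv6 NS target to the multicast MAC its Neighbor
	 * Solicitations are addressed to. Returns 0 on success, like
	 * ndisc_mc_map(). maddr must hold MAX_ADDR_LEN bytes.
	 */
	static int ns_target_to_mc_mac(const struct in6_addr *target,
				       struct net_device *dev, char *maddr)
	{
		struct in6_addr mcaddr;

		/* target -> ff02::1:ffXX:XXXX (solicited-node multicast) */
		addrconf_addr_solict_mult(target, &mcaddr);
		/* ff02::1:ffXX:XXXX -> 33:33:ff:XX:XX:XX on Ethernet */
		return ndisc_mc_map(&mcaddr, maddr, dev, 0);
	}

With that mapping in hand, dev_mc_add()/dev_mc_del() on the result is all the
per-target bookkeeping the option handlers need.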
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 7fea00c7ca8a..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
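The caif one-liner fixes a common error-path bug class: the guard for a
cleanup step must be the pointer that is only set once that resource actually
exists, not one (like cfv->vdev here) that is assigned unconditionally early
on, otherwise the cleanup runs even when setup never happened. In sketch form:

	/* error path: undo only what was actually set up */
	if (cfv->vq_tx)			/* non-NULL only after the vqs exist */
		vdev->config->del_vqs(cfv->vdev);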
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6cba9717a6d8..399844809bbe 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -385,15 +385,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");
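The c_can relabeling restores the invariant of goto-based unwind ladders: each
label undoes exactly the steps completed before the jump, in reverse order.
Registration fails after pm_runtime_enable(), so the jump must land on the
label that disables PM runtime before the device is freed. A generic sketch of
the shape (all helper names hypothetical):

	ret = alloc_thing();		/* step A */
	if (ret)
		return ret;

	ret = enable_thing();		/* step B */
	if (ret)
		goto err_free;		/* undo A only */

	ret = register_thing();		/* step C */
	if (ret)
		goto err_disable;	/* undo B first, then A */

	return 0;

err_disable:
	disable_thing();
err_free:
	free_thing();
	return ret;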
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64c349fd4600..f65c1a1e05cc 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CNT;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 681643ab3780..5ec3170b896a 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -85,8 +85,6 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ac1a860986df..b080740bcb10 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -2260,14 +2260,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2278,7 +2283,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2292,12 +2296,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
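The flexcan change makes suspend and resume mirror images: suspend now
disables the transceiver before selecting the sleep pinctrl state, resume
re-enables it before chip start, and the SLEEPING/ERROR_ACTIVE state updates
move after the operations that can still fail. The resume-side rollback, shown
in isolation:

	err = flexcan_transceiver_enable(priv);
	if (err)
		return err;

	err = flexcan_chip_start(dev);
	if (err) {
		flexcan_transceiver_disable(priv);	/* undo in reverse */
		return err;
	}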
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index cdf0ec9fa7f3..21a61b86f67d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1073,9 +1073,10 @@ static int grcan_open(struct net_device *dev)
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index fee012b57f33..fa04a7ced02b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -999,7 +999,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -1234,11 +1235,15 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1254,6 +1259,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1288,7 +1305,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
old_state = can->can.state;
@@ -1297,16 +1314,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
- skb = alloc_can_err_skb(ndev, &cf);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
@@ -1319,18 +1330,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- ndev->stats.rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
-
return 0;
}
@@ -1359,6 +1371,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
@@ -1372,25 +1385,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
ndev->stats.rx_dropped++;
- return -ENOMEM;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1398,7 +1401,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
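The kvaser_pciefd rework demonstrates gating error-frame delivery on
CAN_CTRLMODE_BERR_REPORTING: state bookkeeping always runs, but an error skb
is only allocated (and only counted as a drop when allocation fails) if the
user requested bus-error reporting. The skeleton of the pattern, as a sketch
with generic names — note that can_change_state() tolerates a NULL frame, so
the bookkeeping is safe either way:

	struct sk_buff *skb = NULL;
	struct can_frame *cf = NULL;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		skb = alloc_can_err_skb(ndev, &cf);

	/* counters and state change happen regardless of the skb */
	can_change_state(ndev, cf, tx_state, rx_state);

	if (skb)
		netif_rx(skb);
	else if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		ndev->stats.rx_dropped++;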
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 97cd8bbf2e32..884a6352c42b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1785,6 +1785,13 @@ static void m_can_stop(struct net_device *dev)
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -2413,12 +2420,11 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (!cdev->net->irq) {
dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+ hrtimer_setup(&cdev->hrtimer, &hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
@@ -2466,6 +2472,7 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
@@ -2478,6 +2485,9 @@ int m_can_class_suspend(struct device *dev)
if (cdev->pm_wake_source) {
hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
} else {
m_can_stop(ndev);
}
@@ -2489,7 +2499,7 @@ int m_can_class_suspend(struct device *dev)
cdev->can.state = CAN_STATE_SLEEPING;
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2497,14 +2507,13 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
pinctrl_pm_select_default_state(dev);
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
@@ -2517,6 +2526,10 @@ int m_can_class_resume(struct device *dev)
* again.
*/
cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2530,7 +2543,7 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
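Both m_can hrtimer conversions above (and the mcp251xfd one further down)
follow the same mechanical translation to hrtimer_setup(), which takes the
callback at initialization time so the timer is never observable in a
half-initialized state. Side by side, with a hypothetical callback:

	/* before */
	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = my_callback;

	/* after */
	hrtimer_setup(&t, my_callback, CLOCK_MONOTONIC, HRTIMER_MODE_REL);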
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ef39e8e527ab..bd4746c63af3 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 2f73bf3abad8..e5c162f8c589 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -92,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -267,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+}
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -318,6 +335,14 @@ static const struct tcan4x5x_version_info
return &tcan4x5x_versions[TCAN4X5X];
}
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
const struct tcan4x5x_version_info *version_info)
{
@@ -359,6 +384,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -392,7 +418,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -453,6 +479,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ tcan4x5x_get_dt_data(mcan_class);
+
tcan4x5x_check_wake(priv);
ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index df1a5d0b37b2..aa3df0d05b85 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
@@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
+ u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
int i;
@@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_tx(gpriv, ch);
/* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
}
/* Configure common interrupts */
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index df18c85fc078..d9a937ba126c 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -622,7 +622,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
- if (skb)
+ if (!skb)
return 0;
rkcanfd_get_berr_counter_corrected(priv, &bec);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index c42ebe9da55a..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
-
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (of) {
irq = platform_get_irq(pdev, 0);
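devm_platform_get_and_ioremap_resource() folds the three manual steps —
platform_get_resource(), devm_request_mem_region(), devm_ioremap() — into one
call that returns the mapping or an ERR_PTR, so a single IS_ERR() check
replaces three separate error returns; note the error codes can differ
slightly from the open-coded version (e.g. -EINVAL rather than -ENODEV for a
missing resource):

	addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
	if (IS_ERR(addr))
		return PTR_ERR(addr);	/* missing, busy, or unmappable */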
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 7209a831f0f2..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -541,11 +541,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 4311c1f0eafd..6fcb301ef611 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -570,7 +570,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (skb && state != CAN_STATE_BUS_OFF) {
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index eee20839d96f..0d155eb1b9e9 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
}
const struct devlink_ops es58x_dl_ops = {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 7d12776ab63e..dcb0bcbe0565 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -818,7 +818,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 3764b263add3..8e88b5917796 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -926,6 +926,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -951,41 +987,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
- }
-
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
@@ -1078,9 +1084,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1096,52 +1101,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
- }
-
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 6b9122ab1464..6a45adcc45bd 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -1120,10 +1120,8 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
struct kvaser_usb_net_leaf_priv *leaf;
@@ -1143,18 +1141,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (!netif_running(priv->netdev))
return;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
/* If there are errors, request status updates periodically as we do
@@ -1168,13 +1158,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
schedule_delayed_work(&leaf->chip_state_req_work,
msecs_to_jiffies(500));
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
@@ -1187,11 +1170,20 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (priv->can.restart_ms &&
old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..07406daf7c88 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
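The reworked ucan firmware-string helper leans on two properties of
usb_control_msg(): on success it returns the number of bytes actually
transferred, and it gives no NUL-termination guarantee for IN data. Requesting
at most size - 1 bytes therefore always leaves room for the terminator written
at fw_str[ret]. In sketch form, with generic names:

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, buf, len - 1, timeout);	/* reserve 1 byte */
	if (ret > 0)
		buf[ret] = '\0';	/* ret <= len - 1, so in bounds */
	else
		strscpy(buf, "unknown", len);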
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 285785c942b0..79dc77835681 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2224,25 +2224,19 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- return 0;
+ return !is5325(dev) && !is5365(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_keee *p = &dev->ports[port].eee;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2298,7 +2292,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phylink_get_caps = b53_phylink_get_caps,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
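The b53, mt7530, and mv88e6xxx hunks in this series all migrate from the
int-returning .get_mac_eee (where -EOPNOTSUPP doubled as a capability probe)
to the bool .support_eee predicate; switches with no MAC-level EEE state to
report simply plug in the generic dsa_supports_eee helper, as mt7530 and
mv88e6xxx do below. The b53 version reduces to a pure capability check:

	static bool b53_support_eee(struct dsa_switch *ds, int port)
	{
		struct b53_device *dev = ds->priv;

		return !is5325(dev) && !is5365(dev);	/* no EEE on 5325/5365 */
	}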
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 05141176daf5..9e9b5bc0c5d6 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -384,7 +384,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 3f8a491ce885..4730982b6840 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -99,8 +99,8 @@ static void b53_serdes_an_restart(struct phylink_pcs *pcs)
SERDES_MII_BLK, reg);
}
-static void b53_serdes_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 43bde1f583ff..fa2bf3fa9019 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1232,7 +1232,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_wol = bcm_sf2_sw_set_wol,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 1c6d7fc16772..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -127,10 +127,14 @@ static const struct of_device_id ksz9477_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8a03baa6aecc..89f0796894af 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1339,6 +1339,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
+ .ptp_capable = true,
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
@@ -1550,6 +1551,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1677,6 +1679,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
+ .ptp_capable = true,
},
[KSZ8567] = {
@@ -1712,6 +1715,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {false, false, false, false, false,
true, true},
+ .ptp_capable = true,
},
[KSZ9567] = {
@@ -1744,6 +1748,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
},
[LAN9370] = {
@@ -1773,6 +1778,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
},
[LAN9371] = {
@@ -1802,6 +1808,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
},
[LAN9372] = {
@@ -1835,6 +1842,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9373] = {
@@ -1868,6 +1876,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9374] = {
@@ -1901,6 +1910,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9646] = {
@@ -2544,7 +2554,11 @@ static int ksz_mdio_register(struct ksz_device *dev)
bus->read = ksz_sw_mdio_read;
bus->write = ksz_sw_mdio_write;
bus->name = "ksz user smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ if (ds->dst->index != 0) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index);
+ } else {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
}
ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
@@ -2805,16 +2819,21 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
goto out_girq;
- ret = ksz_ptp_irq_setup(ds, dp->index);
- if (ret)
- goto out_pirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
+ }
}
}
- ret = ksz_ptp_clock_register(ds);
- if (ret) {
- dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
- goto out_ptpirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto out_ptpirq;
+ }
}
ret = ksz_mdio_register(dev);
@@ -2834,9 +2853,10 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
out_ptp_clock_unregister:
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
out_ptpirq:
- if (dev->irq > 0)
+ if (dev->irq > 0 && dev->info->ptp_capable)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
@@ -2855,11 +2875,13 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
- ksz_ptp_irq_free(ds, dp->index);
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
@@ -3444,12 +3466,12 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
-static int ksz_validate_eee(struct dsa_switch *ds, int port)
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
- return -EOPNOTSUPP;
+ return false;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
@@ -3461,41 +3483,16 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return 0;
+ return true;
}
- return -EOPNOTSUPP;
-}
-
-static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
-
- /* There is no documented control of Tx LPI configuration. */
- e->tx_lpi_enabled = true;
-
- /* There is no documented control of Tx LPI timer. According to tests
- * Tx LPI timer seems to be set by default to minimal value.
- */
- e->tx_lpi_timer = 0;
-
- return 0;
+ return false;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
@@ -4593,6 +4590,23 @@ static int ksz_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -4633,6 +4647,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_max_mtu = ksz_max_mtu,
.get_wol = ksz_get_wol,
.set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -4641,7 +4657,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.cls_flower_add = ksz_cls_flower_add,
.cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
- .get_mac_eee = ksz_get_mac_eee,
+ .support_eee = ksz_support_eee,
.set_mac_eee = ksz_set_mac_eee,
.port_get_default_prio = ksz_port_get_default_prio,
.port_set_default_prio = ksz_port_set_default_prio,
@@ -5132,6 +5148,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index b3bb75ca0796..af17a9c030d4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -92,6 +92,7 @@ struct ksz_chip_data {
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -444,6 +445,8 @@ struct ksz_dev_ops {
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 108a958dc356..b633d263098c 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -239,10 +239,14 @@ static const struct spi_device_id ksz_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
.of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
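Taken together, the microchip hunks wire system sleep end to end:
ksz_switch_suspend()/ksz_switch_resume() forward to the DSA core, the new
ksz_suspend()/ksz_resume() dsa_switch_ops stop and restart the MIB poller
around sleep, and both the I2C and SPI drivers expose the pair through
DEFINE_SIMPLE_DEV_PM_OPS, which builds a struct dev_pm_ops covering only the
system-sleep callbacks. Minimal usage shape (a sketch with hypothetical
driver names):

	static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct spi_driver my_driver = {
		.driver = {
			.name = "my-switch",
			.pm = &my_pm_ops,	/* or pm_sleep_ptr(&my_pm_ops) */
		},
	};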
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 086b8b3d5b40..5883eb93efb1 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2591,7 +2591,8 @@ mt7531_setup_common(struct dsa_switch *ds)
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2687,11 +2688,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
- if (ret)
- return ret;
-
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2994,7 +2990,7 @@ static int mt753x_pcs_validate(struct phylink_pcs *pcs,
return 0;
}
-static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
@@ -3085,18 +3081,6 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
-
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
-
- return 0;
-}
-
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -3238,7 +3222,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .get_mac_eee = mt753x_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
.port_setup_tc = mt753x_setup_tc,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3a792f79270d..5db96ca52505 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -1289,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1301,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & STATS_TYPE_BANK0))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1313,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX);
@@ -1326,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
@@ -1341,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
{
int ret = 0;
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
if (chip->info->ops->stats_get_stat) {
mv88e6xxx_reg_lock(chip);
ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
@@ -1522,13 +1513,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -2224,13 +2208,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2241,7 +2223,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2251,14 +2233,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2862,6 +2869,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -5645,6 +5659,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5665,6 +5680,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5687,6 +5703,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5708,6 +5725,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5730,6 +5748,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5754,6 +5773,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5776,6 +5796,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5800,6 +5821,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5823,6 +5845,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5848,6 +5871,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5872,6 +5896,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5897,6 +5922,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5921,6 +5947,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5946,6 +5973,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5968,6 +5996,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5992,6 +6021,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -6016,6 +6046,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6039,6 +6070,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6063,6 +6095,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6087,6 +6120,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6114,6 +6148,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6138,6 +6173,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6161,6 +6197,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6184,6 +6221,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6208,6 +6246,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6233,6 +6272,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6259,6 +6299,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6283,6 +6324,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6307,6 +6349,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6332,6 +6375,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6359,6 +6403,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6383,6 +6428,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6408,6 +6454,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6433,6 +6480,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6596,6 +6644,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
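The mv88e6xxx_port_mdb_add() hunk above works around a hardware quirk: the ATU load operation reports success even when the address table is full. A condensed sketch of the load-then-verify pattern (the wrapper name is made up; the helpers are the ones used in the patch):

    static int example_mdb_load_verified(struct mv88e6xxx_chip *chip, int port,
                                         const unsigned char *addr, u16 vid)
    {
            int err;

            err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
                                               MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
            if (err)
                    return err;

            /* The load "succeeded"; read the entry back to make sure the
             * ATU actually kept it. A miss means the table was full.
             */
            return mv88e6xxx_port_db_find(chip, addr, vid) ? 0 : -ENOSPC;
    }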
@@ -7074,7 +7129,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
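This hunk and the matching qca8k change below drop per-driver get_mac_eee stubs in favour of the DSA core's .support_eee hook; dsa_supports_eee is a core helper that simply reports EEE as supported, and ethtool's EEE state is then served by phylib. The resulting ops shape (a sketch; example_set_mac_eee stands in for the driver's real callback):

    static const struct dsa_switch_ops example_switch_ops = {
            /* advertise EEE support to the DSA core */
            .support_eee    = dsa_supports_eee,
            /* the MAC-side EEE knobs are still driver specific */
            .set_mac_eee    = example_set_mac_eee,
    };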
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 9fe8e8a7856b..86bf113c9bfa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -144,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
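The new stats_type member lets each entry in mv88e6xxx_table[] declare which counter units the chip implements (MIB bank 0, bank 1, per-port registers) rather than inferring it elsewhere. A sketch of how such a bitmask is typically consumed (the example_read_* helpers are placeholders, not driver functions):

    static void example_read_stats(struct mv88e6xxx_chip *chip, int port,
                                   u64 *data)
    {
            int types = chip->info->stats_type;

            if (types & STATS_TYPE_BANK0)
                    data = example_read_bank(chip, port, 0, data);
            if (types & STATS_TYPE_BANK1)
                    data = example_read_bank(chip, port, 1, data);
            if (types & STATS_TYPE_PORT)
                    data = example_read_port_counters(chip, port, data);
    }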
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 5a27d047a38e..75ed1fa500a5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -55,6 +55,7 @@ static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 88f624b65470..143fe21d1834 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -158,6 +158,7 @@ static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index d758a6c1b226..59f63d6beec8 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -257,6 +258,7 @@ static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
@@ -395,6 +397,7 @@ static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -748,7 +751,7 @@ static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to %s 2500basex fix: %pe\n",
- enable ? "enable" : "disable", ERR_PTR(err));
+ str_enable_disable(enable), ERR_PTR(err));
return err;
}
@@ -889,6 +892,7 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -896,7 +900,7 @@ static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
int err;
if (state->interface != PHY_INTERFACE_MODE_USXGMII)
- return mv88e639x_xg_pcs_get_state(pcs, state);
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
state->link = false;
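Every .pcs_get_state change in this series tracks the phylink update that passes the negotiation mode down to the PCS, so implementations can decode the in-band word only when in-band autoneg was actually used. The shape of the updated callback (a sketch; the example_* helpers are placeholders):

    static void example_pcs_get_state(struct phylink_pcs *pcs,
                                      unsigned int neg_mode,
                                      struct phylink_link_state *state)
    {
            u16 status = example_read_status(pcs);  /* PCS status register */

            state->link = !!(status & BMSR_LSTATUS);

            /* decode the link-partner word only for in-band autoneg */
            if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
                    example_decode_lpa(state, status);
    }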
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dc777ddce1f3..66b1b7277281 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -176,7 +177,7 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+ str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));
return 0;
}
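The str_up_down()/str_enable_disable() conversions here and in rtl8366rb below come from <linux/string_choices.h>, which centralizes the common two-way strings; each helper is a drop-in for the open-coded ternary:

    #include <linux/string_choices.h>

    /* str_up_down(v)        == v ? "up" : "down"
     * str_enable_disable(v) == v ? "enable" : "disable"
     */
    dev_dbg(chip->dev, "p%d: link %s\n", port,
            str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));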
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3aa9c997018a..0a4e682a55ef 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1316,6 +1316,14 @@ static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}
+static void felix_get_ts_stats(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -2237,6 +2245,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
+ .get_ts_stats = felix_get_ts_stats,
.get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
.get_eth_mac_stats = felix_get_eth_mac_stats,
.get_eth_phy_stats = felix_get_eth_phy_stats,
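felix_get_ts_stats() plumbs ethtool's hardware-timestamping statistics through to the ocelot core. Roughly what such a callback fills in (field names are from struct ethtool_ts_stats; the ocelot-side counter names are assumptions for illustration):

    static void example_get_ts_stats(struct ocelot *ocelot, int port,
                                     struct ethtool_ts_stats *ts_stats)
    {
            struct ocelot_port *ocelot_port = ocelot->ports[port];

            ts_stats->pkts = ocelot_port->ts_ok;    /* frames timestamped */
            ts_stats->lost = ocelot_port->ts_lost;  /* timestamps timed out */
            ts_stats->err  = ocelot_port->ts_err;   /* request failures */
    }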
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 59b4a7240b58..e8cb4da15dbe 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -342,7 +342,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -394,7 +394,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
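Folding msecs_to_jiffies() into QCA8K_ETHERNET_TIMEOUT (see the qca8k.h hunk below) keeps the unit conversion in one place; wait_for_completion_timeout() expects a jiffies count and returns 0 on timeout:

    #define QCA8K_ETHERNET_TIMEOUT  msecs_to_jiffies(5)

    ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
                                      QCA8K_ETHERNET_TIMEOUT);
    if (!ret)
            return -ETIMEDOUT;      /* sketch; the driver reports this differently */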
@@ -1019,7 +1019,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
of_get_phy_mode(port, &mode);
- if (of_property_read_bool(port, "phy-handle") &&
+ if (of_property_present(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
@@ -1491,7 +1491,7 @@ static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
@@ -2016,7 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 560c74c4ac3d..13005f10edb7 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -557,13 +557,6 @@ exit:
return ret;
}
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 3664a2e2f1f6..d046679265fa 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -16,7 +16,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 5
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -520,7 +520,6 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ /* Change the LED group to manually controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
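Per the rtl8366rb.h masks added below, each LED control register packs two groups: the even group in bits 5:0 and the odd group in bits 11:6. The per-group mask can therefore also be computed arithmetically; an equivalent formulation of rtl8366rb_led_group_port_mask() (a sketch, not the driver's code):

    static u32 example_led_group_port_mask(u8 led_group, u8 port)
    {
            if (led_group >= RTL8366RB_NUM_LEDGROUPS || port > 5)
                    return 0;

            /* odd groups occupy the high half (bits 11:6) of their register */
            return BIT(port) << ((led_group & 1) ? 6 : 0);
    }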
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 23374178a176..f54771cab56d 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,16 +21,13 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "realtek.h"
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -175,39 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-/* The LED blink rate is global; it is used by all triggers in all groups. */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-/* LED trigger event for each group */
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
- (4 * (led_group))
-#define RTL8366RB_LED_CTRL_MASK(led_group) \
- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
-
-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
- */
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
- ((led_group) <= 1 ? \
- RTL8366RB_LED_0_1_CTRL_REG : \
- RTL8366RB_LED_2_3_CTRL_REG)
-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -243,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -350,46 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-enum rtl8366_ledgroup_mode {
- RTL8366RB_LEDGROUP_OFF = 0x0,
- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
- RTL8366RB_LEDGROUP_SPD100 = 0x4,
- RTL8366RB_LEDGROUP_SPD10 = 0x5,
- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
- RTL8366RB_LEDGROUP_FIBER = 0xa,
- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
- RTL8366RB_LEDGROUP_MASTER = 0xe,
- RTL8366RB_LEDGROUP_FORCE = 0xf,
-
- __RTL8366RB_LEDGROUP_MODE_MAX
-};
-
-struct rtl8366rb_led {
- u8 port_num;
- u8 led_group;
- struct realtek_priv *priv;
- struct led_classdev cdev;
-};
-
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- * @leds: per-port and per-ledgroup led info
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -830,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
- u8 led_group,
- enum rtl8366_ledgroup_mode mode)
+/* This code is also used with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
{
int ret;
u32 val;
@@ -849,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
return 0;
}
-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
-{
- switch (led_group) {
- case 0:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- default:
- return 0;
- }
-}
-
-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
- u32 val;
-
- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
- &val);
- if (ret) {
- dev_err(priv->dev, "error reading LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
-}
-
-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
-
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_X_X_CTRL_REG(led_group),
- rtl8366rb_led_group_port_mask(led_group,
- port_num),
- enable ? 0xffff : 0);
- if (ret) {
- dev_err(priv->dev, "error updating LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- /* Change the LED group to manual controlled LEDs if required */
- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
- RTL8366RB_LEDGROUP_FORCE);
-
- if (ret) {
- dev_err(priv->dev, "error updating LED GROUP group %d\n",
- led_group);
- return ret;
- }
-
- return 0;
-}
-
-static int
-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
- enum led_brightness brightness)
-{
- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
- cdev);
-
- return rb8366rb_set_port_led(led, brightness == LED_ON);
-}
-
-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
- struct fwnode_handle *led_fwnode)
-{
- struct rtl8366rb *rb = priv->chip_data;
- struct led_init_data init_data = { };
- enum led_default_state state;
- struct rtl8366rb_led *led;
- u32 led_group;
- int ret;
-
- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
- if (ret)
- return ret;
-
- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
- led_group, dp->index);
- return -EINVAL;
- }
-
- led = &rb->leds[dp->index][led_group];
- led->port_num = dp->index;
- led->led_group = led_group;
- led->priv = priv;
-
- state = led_init_default_state_get(led_fwnode);
- switch (state) {
- case LEDS_DEFSTATE_ON:
- led->cdev.brightness = 1;
- rb8366rb_set_port_led(led, 1);
- break;
- case LEDS_DEFSTATE_KEEP:
- led->cdev.brightness =
- rb8366rb_get_port_led(led);
- break;
- case LEDS_DEFSTATE_OFF:
- default:
- led->cdev.brightness = 0;
- rb8366rb_set_port_led(led, 0);
- }
-
- led->cdev.max_brightness = 1;
- led->cdev.brightness_set_blocking =
- rtl8366rb_cled_brightness_set_blocking;
- init_data.fwnode = led_fwnode;
- init_data.devname_mandatory = true;
-
- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
- dp->ds->index, dp->index, led_group);
- if (!init_data.devicename)
- return -ENOMEM;
-
- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
- if (ret) {
- dev_warn(priv->dev, "Failed to init LED %d for port %d",
- led_group, dp->index);
- return ret;
- }
-
- return 0;
-}
-
+/* This code is also used with LEDs disabled */
static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
{
int ret = 0;
@@ -1007,38 +794,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
return ret;
}
-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
-{
- struct dsa_switch *ds = &priv->ds;
- struct device_node *leds_np;
- struct dsa_port *dp;
- int ret = 0;
-
- dsa_switch_for_each_port(dp, ds) {
- if (!dp->dn)
- continue;
-
- leds_np = of_get_child_by_name(dp->dn, "leds");
- if (!leds_np) {
- dev_dbg(priv->dev, "No leds defined for port %d",
- dp->index);
- continue;
- }
-
- for_each_child_of_node_scoped(leds_np, led_np) {
- ret = rtl8366rb_setup_led(priv, dp,
- of_fwnode_handle(led_np));
- if (ret)
- break;
- }
-
- of_node_put(leds_np);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
@@ -1522,7 +1277,7 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
rb = priv->chip_data;
dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
- vlan_filtering ? "enable" : "disable");
+ str_enable_disable(vlan_filtering));
/* If the port is not in the member set, the frame will be dropped */
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
@@ -1884,7 +1639,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan
static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN\n", str_enable_disable(enable));
return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
@@ -1892,7 +1647,7 @@ static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN 4k\n", str_enable_disable(enable));
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is also used with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..3d790f8c6f4d 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a542e3c9b05..977b42bc1e8c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,7 +159,7 @@ config ETHOC
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config OA_TC6
- tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
select PHYLIB
help
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 63c8a2328142..c1295dfad0d0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -74,7 +74,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..c6bd803f5b0c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
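The pcnet32 hunks follow the netdev instance-locking conversion: code paths that now run under netdev_lock() must use the _locked NAPI variant, because plain napi_enable() acquires the instance lock itself. The resulting shape:

    netdev_lock(dev);
    spin_lock_irqsave(&lp->lock, flags);

    /* ... stop the chip, resize rings, restart ... */
    napi_enable_locked(&lp->napi);  /* napi_enable() would self-deadlock */

    spin_unlock_irqrestore(&lp->lock, flags);
    netdev_unlock(dev);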
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..b35808d3d07f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -505,21 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
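debugfs_change_name() subsumes the old kasprintf()/strcmp()/debugfs_rename() dance: it takes a printf-style format and handles the name-unchanged and allocation-failure cases internally. Its return value is an errno, ignored above since a debugfs rename is best-effort:

    err = debugfs_change_name(pdata->xgbe_debugfs,
                              "amd-xgbe-%s", pdata->netdev->name);
    if (err)
            pr_debug("debugfs rename failed: %d\n", err);  /* best-effort */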
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..268399dfcf22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
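PHY_GBIT_FEATURES is phylib's ready-made linkmode bitmap covering the 10/100 and gigabit feature arrays, so both quirk paths collapse their two linkmode_set_bit_array() calls into a single copy:

    /* equivalent to OR-ing phy_10_100_features_array and
     * phy_gbit_features_array into a zeroed mask first
     */
    linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
    phy_support_asym_pause(phy_data->phydev);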
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
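rgmii_clock() (declared in <linux/phy.h>) maps a link speed to the standard RGMII reference clock and returns a negative errno for speeds it does not recognize, which the driver folds into its old 125 MHz default:

    long rate = rgmii_clock(pdata->phy_speed);  /* 10 -> 2500000, 100 -> 25000000,
                                                 * 1000 -> 125000000, else < 0 */

    if (rate < 0)
            rate = 125000000;   /* keep the historical default */
    clk_set_rate(parent, rate);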
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..71e50fc65c14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
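Checking pci_device_is_present() before the firmware deinit is the usual hot-unplug defence: once the device is physically gone, config reads return all-ones and MMIO mailbox calls can only time out, so teardown skips them. Generic form of the guard (sketch):

    if (!pci_device_is_present(pdev))
            return;     /* surprise-removed: skip all firmware handshakes */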
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..8fc75bcedb70 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -348,8 +348,6 @@ struct bcmasp_intf {
/* Used if per intf wol irq */
int wol_irq;
unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -601,5 +599,4 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 9da5ae29a105..a537c121d3e2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -348,58 +348,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index cfd50efbdbc0..45ec1a9214a2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -619,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -677,8 +676,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -1055,6 +1059,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1331,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1373,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1404,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
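The bcmasp conversion moves EEE bookkeeping from a private struct ethtool_keee copy to phylib: phy_ethtool_{get,set}_eee() own the policy, phylib resolves phydev->enable_tx_lpi after autoneg, and the MAC merely mirrors that bit from its adjust_link handler. The mirroring step, condensed from the hunk above:

    /* run from the adjust_link handler once the PHY state is resolved */
    umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);

    reg = umac_rl(intf, UMC_EEE_CTRL);
    if (phydev->enable_tx_lpi)      /* decided by phylib, not the MAC */
            reg |= EEE_EN;
    else
            reg &= ~EEE_EN;
    umac_wl(intf, reg, UMC_EEE_CTRL);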
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aeaa74f03046..55f553debd3b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -1343,13 +1342,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1842,7 +1841,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -2039,6 +2038,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2165,6 +2165,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2174,9 +2175,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -2211,7 +2218,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -2855,6 +2863,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -4608,6 +4617,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4622,12 +4642,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4667,7 +4686,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4708,7 +4730,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4729,15 +4751,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -6564,6 +6601,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6573,16 +6611,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -8307,16 +8343,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -9145,10 +9185,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -9184,14 +9232,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -9498,6 +9552,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
if (BNXT_PF(bp) &&
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
@@ -11556,6 +11613,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -15307,6 +15384,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15330,6 +15410,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15371,6 +15454,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15453,6 +15539,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt_ring_struct *ring;
bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
@@ -15564,7 +15651,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i <= bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
@@ -15592,7 +15679,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
struct bnxt_vnic_info *vnic;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i <= bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic,
@@ -15686,6 +15773,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -16214,8 +16305,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
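The bnxt rename of rx_copy_thresh to rx_copybreak arrives together with configurable header-data split: the linear buffer must now fit whichever of the three thresholds is largest, since HDS can place up to hds_thresh header bytes there. Condensed from bnxt_set_ring_params() above (non-XDP path):

    rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,          /* 256 byte floor */
                   bp->rx_copybreak,                   /* ethtool rx-copybreak */
                   bp->dev->cfg_pending->hds_thresh);  /* ethtool hds-thresh */
    rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);

Both knobs are exposed through ethtool, bounded by BNXT_MAX_RX_COPYBREAK and BNXT_HDS_THRESHOLD_MAX respectively (see the bnxt.h hunks below).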
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7df7a2233307..2373f423a523 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -2241,8 +2244,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2263,6 +2264,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2270,6 +2274,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2342,7 +2351,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2482,6 +2491,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
u32 fw_dbg_cap;
@@ -2501,6 +2511,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
#define BNXT_ROCE_VF_RESC_CAP(bp) \
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2660,6 +2672,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2762,6 +2779,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2846,7 +2865,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
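BNXT_VF_IS_TRUSTED() follows the usual kernel pattern for config-gated predicates: with CONFIG_BNXT_SRIOV disabled it expands to the constant 0, so call sites such as the ethtool module-EEPROM guards need no #ifdef and the compiler discards the dead branch. The same pattern in isolation (CONFIG_FOO, struct foo_priv and FOO_TRUSTED are hypothetical):

struct foo_priv { unsigned int flags; };
#define FOO_TRUSTED	0x1

#ifdef CONFIG_FOO
#define FOO_IS_TRUSTED(p)	((p)->flags & FOO_TRUSTED)
#else
#define FOO_IS_TRUSTED(p)	0
#endif

static int foo_privileged_op(struct foo_priv *p)
{
	/* With CONFIG_FOO=n this branch is eliminated at compile time;
	 * the call site itself stays free of preprocessor guards.
	 */
	if (!FOO_IS_TRUSTED(p))
		return -EPERM;
	return 0;
}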
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d87681d71106..9c5820839514 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,6 +24,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -833,6 +834,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -840,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -2050,7 +2072,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int rc;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -4161,7 +4184,7 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
@@ -4327,6 +4350,45 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
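These handlers surface as ethtool --get-tunable/--set-tunable <dev> rx-copybreak; writes are rejected with -ERANGE above 1024 (BNXT_MAX_RX_COPYBREAK) and with -EBUSY while the interface is running. A userspace sketch of the equivalent raw SIOCETHTOOL call, assuming only the standard uapi headers (set_rx_copybreak() is hypothetical; fd is any AF_INET SOCK_DGRAM socket):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_copybreak(int fd, const char *ifname, __u32 bytes)
{
	struct {
		struct ethtool_tunable hdr;
		__u32 value;			/* payload follows header */
	} req = {
		.hdr = {
			.cmd	 = ETHTOOL_STUNABLE,
			.id	 = ETHTOOL_RX_COPYBREAK,
			.type_id = ETHTOOL_TUNABLE_U32,
			.len	 = sizeof(__u32),
		},
		.value = bytes,
	};
	struct ifreq ifr = {};

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;
	return ioctl(fd, SIOCETHTOOL, &ifr);	/* 0 on success */
}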
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4375,6 +4437,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4426,6 +4491,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4480,6 +4548,12 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4777,7 +4851,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4887,35 +4962,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
@@ -5309,6 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5350,6 +5436,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 0ed26e3a28f4..e4a7f37036ed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -298,6 +298,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -311,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -346,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -360,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
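The smp_rmb() in bnxt_ulp_async_events() pairs with the smp_wmb() in bnxt_register_async_events(): the writer publishes the bitmap pointer before raising max_async_event_id, so any reader that observes the new maximum is guaranteed to see the bitmap it gates. The publish/consume pattern in isolation (struct evt_tbl and the helpers are hypothetical):

struct evt_tbl {
	unsigned long *bmap;
	u16 max_id;
};

/* Writer: store the payload, then the guard value. */
static void evt_publish(struct evt_tbl *t, unsigned long *bmap, u16 max_id)
{
	t->bmap = bmap;
	smp_wmb();		/* bmap must be visible before max_id */
	t->max_id = max_id;
}

/* Reader: check the guard, then consume the payload. */
static bool evt_wanted(struct evt_tbl *t, u16 id)
{
	if (!t->bmap || id > t->max_id)
		return false;
	smp_rmb();		/* pairs with smp_wmb() above */
	return test_bit(id, t->bmap);
}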
@@ -417,6 +446,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_VF;
if (BNXT_ROCE_VF_RESC_CAP(bp))
edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 5d6aac60f236..7fa3b8d1ebd2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -30,7 +30,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -65,6 +67,8 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
#define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -124,6 +128,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..299822cacca4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -395,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -422,15 +426,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
@@ -463,23 +460,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..3b082114f2e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
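Both genet hunks implement a PHY-first policy: get_wol now reports the PHY's settings and overlays the MAC's on top, exposing the MAC's SecureOn password only when the PHY has not claimed WAKE_MAGICSECURE itself, while set_wol lets the PHY absorb the request but falls through when wolopts is zero so that disabling WoL clears the MAC side too. The set-path shape, condensed (struct wol_dev and mac_set_wol() are hypothetical):

static int dev_set_wol(struct wol_dev *d, struct ethtool_wolinfo *wol)
{
	int ret;

	if (d->phydev) {
		ret = phy_ethtool_set_wol(d->phydev, wol);
		/* The PHY handled it and something is armed: done.
		 * On -EOPNOTSUPP, or when disabling (wolopts == 0),
		 * continue so the MAC is programmed/cleared as well.
		 */
		if (ret != -EOPNOTSUPP && wol->wolopts)
			return ret;
	}
	return mac_set_wol(d, wol);
}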
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9cc8db10a8d6..d9d675f1ebfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -55,6 +55,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -7424,7 +7425,7 @@ static void tg3_napi_enable(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++) {
tnapi = &tp->napi[i];
- napi_enable(&tnapi->napi);
+ napi_enable_locked(&tnapi->napi);
if (tnapi->tx_buffers) {
netif_queue_set_napi(tp->dev, txq_idx,
NETDEV_QUEUE_TYPE_TX,
@@ -7445,9 +7446,10 @@ static void tg3_napi_init(struct tg3 *tp)
int i;
for (i = 0; i < tp->irq_cnt; i++) {
- netif_napi_add(tp->dev, &tp->napi[i].napi,
- i ? tg3_poll_msix : tg3_poll);
- netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
}
}
@@ -11259,6 +11261,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11271,7 +11275,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11299,6 +11305,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11318,12 +11325,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11683,9 +11692,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12569,6 +12580,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12597,6 +12609,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12678,6 +12691,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12707,6 +12721,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -13911,6 +13926,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13922,6 +13938,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -14365,6 +14382,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14384,6 +14402,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18164,6 +18183,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18180,6 +18200,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18192,6 +18213,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
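dmi_first_match() scans a NULL-terminated quirk table and returns the first entry whose .matches entries all hit, or NULL, so the shutdown path below can use it as a plain boolean before masking the DEVCTL error-reporting bits. A minimal sketch of consulting the table declared above (the helper name is hypothetical):

static bool tg3_restart_aer_quirk(void)
{
	/* NULL unless the running platform matches one of the
	 * PowerEdge entries in tg3_restart_aer_quirk_table[].
	 */
	return dmi_first_match(tg3_restart_aer_quirk_table) != NULL;
}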
@@ -18208,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
@@ -18260,7 +18338,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18314,7 +18394,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18340,12 +18422,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18357,6 +18441,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
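The tg3 hunks all follow one rule from the netdev instance-lock conversion: paths that enable NAPI (napi_enable_locked(), netif_napi_add_locked()) must hold the instance lock, and the lock must be dropped before calling anything that acquires it itself, dev_close() in particular. The shape of the pattern as a sketch (tp_recover() is hypothetical; the lock and NAPI helpers are the in-tree ones used above):

static void tp_recover(struct net_device *dev, struct tg3 *tp)
{
	netdev_lock(dev);		/* required by *_locked() NAPI calls */
	tg3_napi_enable(tp);
	netdev_unlock(dev);

	/* dev_close() takes the instance lock internally, so it must
	 * run only after the unlock above to avoid self-deadlock.
	 */
	dev_close(dev);
}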
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..2847278d9cd4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1279,6 +1279,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index daa416fb1724..c1f57d96e63f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -530,19 +530,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +568,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +599,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
@@ -1987,10 +1978,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -3111,6 +3104,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
if (!netif_running(bp->dev))
return nstat;
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3140,6 +3134,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -3147,12 +3142,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3202,6 +3198,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return gem_get_stats(bp);
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3235,6 +3232,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -5106,6 +5104,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
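The new stats_lock is taken two ways on purpose: plain spin_lock() in macb_interrupt(), where hard IRQs are already disabled, and spin_lock_irq() in the process-context readers (gem_get_stats(), gem_get_ethtool_stats(), macb_get_stats()), which must keep the interrupt handler off the same CPU or risk deadlocking against it. The two sides in isolation (struct foo and the helpers are hypothetical):

struct foo {
	spinlock_t stats_lock;
	u64 rx_overruns;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo *f = dev_id;

	spin_lock(&f->stats_lock);	/* hardirq context: IRQs already off */
	f->rx_overruns++;
	spin_unlock(&f->stats_lock);
	return IRQ_HANDLED;
}

static u64 foo_read_overruns(struct foo *f)
{
	u64 val;

	spin_lock_irq(&f->stats_lock);	/* block foo_irq() on this CPU */
	val = f->rx_overruns;
	spin_unlock_irq(&f->stats_lock);
	return val;
}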
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604dcfd49aa4..2f0b3e389e62 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6562,6 +6562,9 @@ static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9913952ccb42..49f6cab01ed5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -109,7 +109,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -428,6 +428,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -436,6 +466,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -1901,36 +1932,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -3063,7 +3064,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..2b4bb74f21bf 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1833,9 +1833,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
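hrtimer_setup() folds the old two-step hrtimer_init() plus manual .function assignment into a single call that takes the callback up front, which is the whole conversion here and in the ec_bhf hunk below. A standalone sketch of the new-style setup (struct my_port and my_timer_fn are hypothetical):

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* e.g. schedule NAPI via container_of(t, ...) */
	return HRTIMER_NORESTART;
}

struct my_port {
	struct hrtimer rx_timer;
};

static void my_port_start(struct my_port *p)
{
	hrtimer_setup(&p->rx_timer, my_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&p->rx_timer, ms_to_ktime(1), HRTIMER_MODE_REL);
}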
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8735e333034c..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
}
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..51b8377edd1d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5038,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
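Editor's note: every converted call site in be_cmds.c follows the same shape. A minimal sketch of the resulting pattern (the function name is hypothetical; the helpers are the ones used above). spin_lock_bh() also disables bottom halves on the local CPU, which a sleeping mutex cannot offer:

	static int be_cmd_example(struct be_adapter *adapter)
	{
		struct be_mcc_wrb *wrb;
		int status;

		spin_lock_bh(&adapter->mcc_lock);	/* was mutex_lock() */

		wrb = wrb_from_mccq(adapter);		/* claim a work request */
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		/* ... embed and fill in the request ... */
		status = be_mcc_notify_wait(adapter);	/* post and wait */
	err:
		spin_unlock_bh(&adapter->mcc_lock);	/* was mutex_unlock() */
		return status;
	}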
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..3d2e21592119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 95a5295d0361..0d030cb0b21c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1966,23 +1966,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
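Editor's note: the tsnep change applies the generic netdev queue/NAPI mapping API. A hedged sketch of that pattern with placeholder names (dev, napi, idx and irq are not tsnep symbols):

	netif_napi_set_irq(napi, irq);	/* record the IRQ backing this NAPI */
	napi_enable(napi);
	/* publish the queue -> NAPI association (visible via netlink) */
	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, napi);

	/* teardown clears the association before NAPI is disabled */
	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);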
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..a2d7300925a8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -81,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index bf5baef5c3e0..4948b4906584 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a293b08f36d4..147a93bf9fa9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 535969fa0fdb..2106861463e4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -146,6 +146,45 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
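Editor's note: a standalone illustration (hypothetical values, not driver code) of how the unwind walks the ring backwards, wrapping from index 0 to bd_count - 1:

	int bd_count = 8, count = 3, i = 1;	/* assumed ring state */

	while (count--) {
		/* frees index 1, then 0, then wraps to free index 7 */
		if (i == 0)
			i = bd_count;
		i--;
	}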
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -163,6 +202,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_addr_t dma;
u8 flags = 0;
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Cannot support TSD and checksum offload at the same time */
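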
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -173,7 +235,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -236,9 +297,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -252,13 +315,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum must be updated by
+ * software after the originTimestamp field is rewritten;
+ * otherwise the hardware computes a wrong checksum when
+ * it updates the correction field and writes it into
+ * the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
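Editor's note: each inet_proto_csum_replace2/4() call above folds one old/new field pair into the UDP checksum incrementally, per RFC 1624 (eq. 3): HC' = ~(~HC + ~m + m'), where HC is the old checksum, m the old field value and m' the new one. The final false argument indicates the replaced field is not part of the pseudo-header. Updating the timestamp words this way keeps uh->check consistent before the hardware applies its own correction-field update.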
@@ -329,25 +417,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -390,7 +473,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -489,8 +575,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 BDs: 1 BD for the LSO header, 1 BD for the extension BD,
+ * 1 BD for the linear data excluding the LSO header, namely
+ * skb_headlen(skb) - lso_hdr_len (which may be 0, but the
+ * worst case is what matters here), and 1 BD for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
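Editor's note: worked worst case - an skb with nr_frags = 3 reserves 3 + 4 = 7 BDs: the LSO header BD, the extension BD, one BD for any linear remainder, one gap BD, and one BD per fragment.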
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of
+ * large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_size in 32-bits (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Get the next BD, and the next BD is extended BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD and an extend BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -522,9 +833,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -548,15 +859,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -578,13 +894,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -594,7 +904,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -608,16 +918,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -627,11 +949,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -640,7 +957,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -908,7 +1225,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1625,7 +1943,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -1759,6 +2077,9 @@ void enetc_get_si_caps(struct enetc_si *si)
rss = enetc_rd(hw, ENETC_SIRSSCAPR);
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2055,6 +2376,14 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2068,6 +2397,9 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
/* TODO: RSS support for i.MX95 will be supported later, and the
* is_enetc_rev1() condition will be removed
*/
@@ -2938,6 +3270,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
@@ -3269,17 +3604,21 @@ EXPORT_SYMBOL_GPL(enetc_pci_remove);
static const struct enetc_drvdata enetc_pf_data = {
.sysclk_freq = ENETC_CLK_400M,
.pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc4_pf_data = {
.sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
.pmac_offset = ENETC4_PMAC_OFFSET,
.eth_ops = &enetc4_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc_vf_data = {
.sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_vf_ethtool_ops,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 72fa03dbc2dd..4ad4eb5c5a74 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -41,6 +41,18 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, excluding the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
@@ -59,9 +71,16 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, the max # of chained Tx BDs is 63,
+ * including the head and extension BD, but MAX_SKB_FRAGS only ranges
+ * from 17 to 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
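Editor's note: worked arithmetic, since ENETC_TXBDS_NEEDED(val) is (val) + 2: on LS1028A, ENETC_TXBDS_MAX_NEEDED(13) = 13 + 1 + 2 = 16 BDs; on ENETC v4 with the common MAX_SKB_FRAGS value of 17 (a build-time constant, so the exact figure may differ), ENETC_TXBDS_MAX_NEEDED(17) = 17 + 1 + 2 = 20 BDs.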
struct enetc_ring_stats {
unsigned int packets;
@@ -231,9 +250,12 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
struct enetc_drvdata {
u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
u64 sysclk_freq;
const struct ethtool_ops *eth_ops;
};
@@ -341,6 +363,8 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
@@ -375,6 +399,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 26b220677448..695cb07c74bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -12,6 +12,29 @@
#define NXP_ENETC_VENDOR_ID 0x1131
#define NXP_ENETC_PF_DEV_ID 0xe101
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* Per tso_build_hdr(), clear all special flags on every segment but the last. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
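Editor's note: worked value - ENETC4_TCP_NL_SEG_FLAGS_DMASK is FIN | RST | PSH = BIT(0) | BIT(2) | BIT(3) = 0xd, so enetc_set_lso_flags_mask() programs SILSOSFMR0_VAL_SET(0xd, 0xd) = FIELD_PREP(GENMASK(27, 16), 0xd) | FIELD_PREP(GENMASK(11, 0), 0xd) = 0x000d000d, masking FIN/RST/PSH off every segment except the last, matching what tso_build_hdr() does in software.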
+
/***************************ENETC port registers**************************/
#define ENETC4_ECAPR0 0x0
#define ECAPR0_RFS BIT(2)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index fc41078c4f5d..73ac8c6afb3a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -672,7 +672,6 @@ err_link_init:
err_alloc_msix:
err_config_si:
err_clk_get:
- mutex_destroy(&priv->mm_lock);
free_netdev(ndev);
return err;
@@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bf34b5bb1e35..ece3ae28ba82 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
static int enetc_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int *phc_idx;
phc_idx = symbol_get(enetc_phc_index);
@@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 55ba949230ff..4098f01479bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -25,6 +25,7 @@
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -554,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -569,23 +582,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -654,6 +671,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 0eecfc833164..3fd9b0727875 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -101,6 +101,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -109,16 +110,24 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
/* TODO: currently, i.MX95 ENETC driver does not support advanced features */
if (!is_enetc_rev1(si)) {
ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index a5f8ce576b6e..3768752b6008 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -136,6 +136,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -144,11 +145,13 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1cca0425d493..c81f2ea588f2 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -671,8 +671,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..f7c4ce8e9a26 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -840,6 +840,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -913,7 +915,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
@@ -1591,19 +1620,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1730,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1799,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
@@ -2045,14 +2083,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2105,7 +2143,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -3181,7 +3221,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3189,8 +3228,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3198,7 +3235,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3206,8 +3242,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..fe4e7f99b6a3 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -739,8 +739,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb416d60dcd7..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 85617bb94959..b3e2a596ad2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6663c1768089..88510f822759 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
+ if (phy_interface_mode_is_reduced(interface)) {
+
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
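Editor's note: a hedged sketch (assumed, not shown in this hunk) of how such callbacks are conventionally wired together in a phylink_mac_ops table; the table ucc_geth actually registers may name additional ops:

	static const struct phylink_mac_ops ugeth_mac_ops = {
		.mac_config	= ugeth_mac_config,
		.mac_link_up	= ugeth_mac_link_up,
		.mac_link_down	= ugeth_mac_link_down,
	};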
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || dev->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
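+ /* Tell phylink whether the MAC stays active for wake-up; if so,
+ * the link is kept up so the magic packet can be received.
+ */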
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
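+ /* phylink_resume() reverses phylink_suspend() and must be
+ * called under the RTNL lock.
+ */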
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+static const struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- return err;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
+
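+ /* SGMII/TBI/RTBI operation depends on the TBI PHY, so these
+ * modes are only advertised when a tbi-handle was provided.
+ */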
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
}
err = of_get_ethdev_address(np, dev);
if (err == -EPROBE_DEFER)
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
+
return err;
}
@@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
}
static const struct of_device_id ucc_geth_match[] = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..38789faae706 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -921,7 +922,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1075,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1093,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1118,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1128,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1212,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 699f346faf5c..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -343,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
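+ /* Let the PHY handle Wake-on-LAN first; -EOPNOTSUPP means the
+ * PHY has no WoL support, so fall back to the MAC.
+ */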
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+ /* If the PHY isn't handling the WoL and the MAC is asked for
+ * anything beyond WAKE_MAGIC, error out.
+ */
+ if (!ugeth->phy_wol_en &&
+ wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC &&
+ !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 8167cc5fb0df..78d2a19593d1 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1116,6 +1116,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
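+/* ndo_xdp_xmit (the XDP_REDIRECT target) is only implemented for the
+ * GQI-QPL queue format.
+ */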
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 060e0e674938..aa7d723011d0 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1128,20 +1128,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 863683de9694..228217458275 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 533e659b15b3..92237fb0b60c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1903,6 +1903,8 @@ static void gve_turndown(struct gve_priv *priv)
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
@@ -1972,6 +1974,9 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
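+ /* Advertise redirect-target support only while the XDP TX queues
+ * are live; gve_turndown() clears it again.
+ */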
+ if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
@@ -2246,7 +2251,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..f0674a443567 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -109,10 +109,12 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
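+ /* Direct page recycling is only safe from this ring's NAPI
+ * context, so disable it before the NAPI instance goes away.
+ */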
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f879426cb552..394debc62268 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1146,8 +1146,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1191,8 +1190,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..7ea15f9ef849 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..b4300d8ea4ad 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,6 +4,7 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_reg.h"
@@ -33,6 +34,14 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -84,6 +93,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -114,6 +124,22 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
};
struct hbg_priv {
@@ -126,6 +152,9 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..8473c43d171a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_print),
+ info->count);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..4d1f4a33391a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (netif_running(priv->netdev)) {
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ priv->reset_type = type;
+ set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
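+/* Finish a reset started by hbg_reset_prepare(); a type mismatch means
+ * the reset was initiated elsewhere and there is nothing to complete.
+ */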
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ASSERT_RTNL();
+
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+/* must be protected by rtnl lock */
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ASSERT_RTNL();
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ rtnl_lock();
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..d7828e446308
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..00364a438ec2 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,193 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
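+ /* Report the offset relative to its register block base
+ * (SPEC/MDIO/GMAC/PCU) rather than the absolute BAR offset.
+ */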
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..e7798f213645 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -67,6 +68,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +138,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+ /* each mac address is stored as a u64, so table entries are 0x8 apart */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +168,13 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ u32 frame_len;
+
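+ /* Convert the L3 MTU into a maximum on-wire frame length:
+ * Ethernet header plus FCS, plus one VLAN tag per hardware
+ * VLAN layer.
+ */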
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
}
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -207,6 +219,34 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
HBG_REG_DUPLEX_B, duplex);
}
+/* only support uc filter */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
u32 ctrl = 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..bb0f25ac9760 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -6,13 +6,13 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
-
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#include "hbg_debugfs.h"
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
@@ -55,11 +55,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +70,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ /* The MAC filter is enabled only when the table has not
+ * overflowed and IFF_PROMISC is not set in netdev->flags.
+ * Otherwise the filter is disabled.
+ */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* does not exist */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+ /* The host MAC address always lives at index 0.
+ * If the new address already exists elsewhere in the table,
+ * store it at index 0 and clear its old entry.
+ */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,7 +198,7 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
WRITE_ONCE(netdev->mtu, new_mtu);
dev_dbg(&priv->pdev->dev,
@@ -142,8 +234,39 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
};
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
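+ /* The table size comes from the unicast entry count the device
+ * reports at probe time (HBG_REG_UC_MAC_NUM_ADDR).
+ */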
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +283,17 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -216,13 +349,15 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -245,7 +380,27 @@ static struct pci_driver hbg_driver = {
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
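+ /* Create the debugfs root before registering the driver so that
+ * probe() can populate per-device directories under it; drop it
+ * again if registration fails.
+ */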
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..db6bc4cfb971 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -114,6 +114,19 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
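+/* Apply the pause configuration negotiated by the PHY; manual pause
+ * settings (pause_autoneg off) are left untouched.
+ */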
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +153,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +182,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..f12efc12f3c5 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,6 +10,8 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
@@ -28,6 +30,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +39,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +50,42 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -78,11 +107,17 @@
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
@@ -101,6 +136,10 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
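
These additions keep the header's existing convention: each register gets an *_ADDR
offset and each field a GENMASK()/BIT() mask. A minimal sketch of how such masks are
typically consumed with FIELD_GET() from <linux/bitfield.h>; 'base' is a hypothetical
ioremapped BAR, and 0x1045C mirrors HBG_REG_CF_CFF_DATA_NUM_ADDR above:

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/kernel.h>

static void my_read_fifo_levels(void __iomem *base)
{
	u32 val = readl(base + 0x1045C);          /* CF_CFF_DATA_NUM register */
	u32 tx  = FIELD_GET(GENMASK(8, 0), val);  /* ..._TX_M field */
	u32 rx  = FIELD_GET(GENMASK(24, 16), val); /* ..._RX_M field */

	pr_info("tx fifo %u, rx fifo %u\n", tx, rx);
}
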
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
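
hrtimer_setup() folds the old two-step initialization (hrtimer_init() followed by an
open-coded assignment of .function) into a single call, which is exactly the conversion
in the hunk above. A minimal sketch with hypothetical my_* names:

#include <linux/hrtimer.h>

struct my_priv {
	struct hrtimer tx_timer;
};

static enum hrtimer_restart my_tx_done(struct hrtimer *t)
{
	/* reclaim completed descriptors; xmit path re-arms as needed */
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct my_priv *priv)
{
	/* one call both initializes the timer and binds its callback */
	hrtimer_setup(&priv->tx_timer, my_tx_done, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}
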
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 851490346261..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0eb03dff1a8b..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 46af467aa596..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 0f4cc184ef39..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9a63fbc69408..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure the drivers are unloaded one by one */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
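
The new lock serializes module teardown: each dependent module's exit routine takes the
shared mutex before unregistering, so the hns3, hclge, and hclgevf exits below cannot
race one another. A minimal sketch of the pattern, assuming a hypothetical consumer
module:

#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_unload_lock);	/* shared by the co-dependent modules */

static void __exit my_consumer_exit(void)
{
	mutex_lock(&my_unload_lock);	/* hnae3_acquire_unload_lock() in-tree */
	/* unregister clients/algorithms, destroy workqueues */
	mutex_unlock(&my_unload_lock);	/* hnae3_release_unload_lock() in-tree */
}
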
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 12ba380eb701..4e44f28288f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -963,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a7e3b22f641c..9ff797fb36c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -6002,9 +6002,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index db7845009252..3f17b3073e50 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12919,9 +12919,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..181af419b878 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -483,7 +483,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
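
The one-line hclge_ptp fix converts an early return into a jump to the function's common
cleanup label, so whatever was set up earlier in hclge_ptp_init() is unwound on failure.
A minimal sketch of that goto-unwind idiom, with hypothetical my_* helpers:

static int my_init(struct my_dev *hdev)
{
	int ret;

	ret = my_acquire(hdev);
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = my_configure(hdev);
	if (ret)
		goto out;	/* must release what we acquired */

	return 0;
out:
	my_release(hdev);
	return ret;
}
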
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 163c6e59ea4c..9ba767740a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3410,8 +3410,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..f9ba79c1165b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -2408,6 +2412,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
bool use_scrq_send_direct = false;
@@ -2522,6 +2527,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2614,7 +2620,7 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
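
Two independent ibmvnic fixes above: the affinity loop is rebuilt around
for_each_online_cpu_wrap() so IRQ spreading for the next queue resumes from the first
unused CPU, and ibmvnic_xmit() snapshots skb->len into skblen before the descriptor is
handed off, since the completion path may free the skb before the byte counter is
updated. A minimal sketch of the second pattern, assuming a hypothetical my_submit()
that takes ownership of the skb:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_submit(struct net_device *dev, struct sk_buff *skb); /* hypothetical */

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* snapshot before ownership moves */

	if (my_submit(dev, skb))
		return NETDEV_TX_BUSY;

	/* the completion path may already have freed skb; use the snapshot */
	dev->stats.tx_bytes += len;
	dev->stats.tx_packets++;
	return NETDEV_TX_OK;
}
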
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 20bc40eec487..24ec9a4f1ffa 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -292,6 +292,7 @@ config ICE
select DIMLIB
select LIBIE
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..b9dd7b719832 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d4255c2706fa..c67963bfe14e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -88,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -191,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -572,7 +574,7 @@ struct i40e_pf {
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..175c1320c143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e8031f1a9b4f..370b4bddee44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1805,37 +1805,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2436,136 +2405,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -3180,41 +3019,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3335,44 +3139,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -4570,84 +4336,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4663,80 +4351,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -5269,39 +4883,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5524,45 +5105,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..352e957443fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 208c2f0857b6..6cd9da662ae1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bce5b76f1e7a..8a7a83f83ee5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
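
The new mdd-auto-reset-vf entry surfaces through ethtool's private-flags interface;
assuming standard ethtool behavior, it can be toggled with
"ethtool --set-priv-flags <ifname> mdd-auto-reset-vf on" and inspected with
"ethtool --show-priv-flags <ifname>".
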
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0e1d9e2fbf38..65a702668e21 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires that you have already found the exact filter you
+ * will remove, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true - for Tx event, false - for Rx
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print malicious driver detect events for VFs
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate limit and print VF MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
@@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VF MDD event logs are rate limited to one message per second */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
+
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO.
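
The MDD logging rework above batches per-VF event counters and prints them through a
struct ratelimit_state, so a misbehaving VF cannot flood the log while events keep
accumulating for a later summary. A minimal sketch of that kernel ratelimit pattern,
with hypothetical my_* names:

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/ratelimit.h>

static struct ratelimit_state my_rs;

static void my_rs_init(void)
{
	/* at most one message per second, matching the init in the hunk above */
	ratelimit_state_init(&my_rs, 1 * HZ, 1);
}

static void my_report(struct device *dev, u64 count)
{
	if (!__ratelimit(&my_rs))
		return;		/* suppressed; counters keep accumulating */
	dev_err(dev, "%llu events since last report\n", count);
}
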
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5a0699ca7ce5..099bb8ab7d70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfa785e39458..1120f8e4bb67 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..5cf74f16f433 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -64,6 +64,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+ u64 count; /* total count of Rx|Tx events */
+ /* value of @count at the time of the last printed message */
+ u64 last_printed;
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +98,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
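The new mdd_rx_events/mdd_tx_events split pairs a running count with the count at the last printed message, so the detection path can log only when something new has happened. A hedged sketch of that consumer (the struct fields are from this patch; the surrounding flow and the vf->vf_id usage are assumptions):

struct i40e_mdd_vf_events *ev = &vf->mdd_tx_events;

/* Log only if events accumulated since the last message and the
 * per-PF ratelimit window allows another line.
 */
if (ev->count != ev->last_printed &&
    __ratelimit(&pf->mdd_message_rate_limit)) {
        dev_info(&pf->pdev->dev,
                 "%llu Tx MDD events detected on VF %d\n",
                 ev->count, vf->vf_id);
        ev->last_printed = ev->count;
}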
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef..e28f1905a4a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
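The driver-local i40e_add_xsk_frag() is dropped in favor of the core xsk_buff_add_frag(), which, as assumed here, now takes the head buffer plus the new fragment and returns false when the frag table is full, leaving the caller to free the whole chain. Minimal usage sketch:

struct xdp_buff *first = NULL;

/* for each received descriptor buffer 'bi': */
if (!first) {
        first = bi;                     /* head of a multi-buffer frame */
} else if (!xsk_buff_add_frag(first, bi)) {
        xsk_buff_free(first);           /* frees head and linked frags */
        first = NULL;                   /* drop the partial frame */
}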
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index a9e54866ae6b..6faa62bced3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ /* IAVF_VLAN_REMOVE means the VLAN hasn't actually been removed
+ * yet, so it is safe to simply flip the state back to active here.
+ */
+ f->state = IAVF_VLAN_ACTIVE;
}
clearout:
@@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->state = IAVF_VLAN_REMOVE;
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ /* IAVF_VLAN_ADD means the VLAN wasn't sent to the PF yet,
+ * so it can simply be removed from the list.
+ */
+ if (f->state == IAVF_VLAN_ADD) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ } else {
+ f->state = IAVF_VLAN_REMOVE;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
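Taken together, the add and del hunks above imply the following VLAN filter state machine; the transition summary is an editorial sketch, while the state names come from the driver:

/*
 * add_vlan: (no filter)       -> IAVF_VLAN_ADD     queue ADD to the PF
 *           IAVF_VLAN_REMOVE  -> IAVF_VLAN_ACTIVE  cancel pending delete
 * del_vlan: IAVF_VLAN_ADD     -> (freed)           never reached the PF
 *           other states      -> IAVF_VLAN_REMOVE  queue DEL to the PF
 */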
@@ -1180,7 +1195,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1196,7 +1211,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1800,8 +1815,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll);
}
return 0;
@@ -1827,7 +1842,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1968,6 +1983,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool locks_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -1976,7 +1992,7 @@ static void iavf_finish_config(struct work_struct *work)
* The dev->lock is needed to update the queue number
*/
rtnl_lock();
- mutex_lock(&adapter->netdev->lock);
+ netdev_lock(adapter->netdev);
mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -1988,26 +2004,34 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
+ locks_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again. */
+ mutex_lock(&adapter->crit_lock);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ mutex_unlock(&adapter->crit_lock);
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2019,8 +2043,10 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!locks_released) {
+ mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
+ }
rtnl_unlock();
}
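The switch from mutex_lock(&netdev->lock) to the netdev_lock() helpers keeps the same three-level nesting used throughout this file. A sketch of the assumed ordering (not a formal lockdep statement):

rtnl_lock();                            /* 1: RTNL, outermost       */
netdev_lock(netdev);                    /* 2: per-netdev instance   */
mutex_lock(&adapter->crit_lock);        /* 3: driver critical state */

/* ... reconfigure queues and adapter state ... */

mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
rtnl_unlock();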
@@ -2713,12 +2739,16 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_adapter *adapter = container_of(work,
struct iavf_adapter,
watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
+ if (adapter->state == __IAVF_REMOVE) {
+ netdev_unlock(netdev);
return;
+ }
goto restart_watchdog;
}
@@ -2730,30 +2760,35 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
@@ -2765,6 +2800,7 @@ static void iavf_watchdog_task(struct work_struct *work)
* as it can loop forever
*/
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
@@ -2773,6 +2809,7 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
@@ -2780,6 +2817,7 @@ static void iavf_watchdog_task(struct work_struct *work)
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
@@ -2792,6 +2830,7 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
@@ -2811,12 +2850,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
HZ * 2);
return;
@@ -2847,6 +2888,7 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_REMOVE:
default:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
@@ -2858,6 +2900,7 @@ static void iavf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
@@ -2865,6 +2908,7 @@ static void iavf_watchdog_task(struct work_struct *work)
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ netdev_unlock(netdev);
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
@@ -2990,12 +3034,12 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
queue_work(adapter->wq, &adapter->reset_task);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
}
@@ -3043,7 +3087,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3184,7 +3228,7 @@ continue_reset:
wake_up(&adapter->reset_waitqueue);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3195,7 +3239,7 @@ reset_err:
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3667,10 +3711,10 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return ret;
}
@@ -4340,14 +4384,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
+ netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+ netdev_unlock(netdev);
return -EBUSY;
+ }
usleep_range(500, 1000);
}
@@ -4396,6 +4443,7 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
@@ -4408,6 +4456,7 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return err;
}
@@ -4429,10 +4478,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -4466,6 +4517,7 @@ static int iavf_close(struct net_device *netdev)
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -5342,6 +5394,7 @@ static int iavf_suspend(struct device *dev_d)
netif_device_detach(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (netif_running(netdev)) {
@@ -5353,6 +5406,7 @@ static int iavf_suspend(struct device *dev_d)
iavf_reset_interrupt_capability(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -5451,6 +5505,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5487,6 +5542,7 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&hw->aq.asq_mutex);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..9e0d9f710441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..dbdb83567364 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -977,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
@@ -1207,9 +1214,15 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
struct ice_vsi *vsi = ice_get_main_vsi(pf);
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1232,6 +1245,8 @@ err_load:
rtnl_unlock();
err_vsi_cfg:
ice_deinit_dev(pf);
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
return err;
}
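The added ice_init_hw() call makes devlink reload symmetric with probe: each init step gains a matching deinit on the unwind path, released in reverse acquisition order. Sketch of the assumed pairing:

/*
 * ice_init_hw(&pf->hw)   <-- pairs with -->   ice_deinit_hw(&pf->hw)
 * ice_init_dev(pf)       <-- pairs with -->   ice_deinit_dev(pf)
 * (failures unwind bottom-up through the goto labels)
 */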
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..ea40f7941259
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted ascending by code.
+ */
+
+static const char *const ice_common_port_solutions =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char *const ice_port_number_label = "Port Number";
+static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+ /* We do not do automatic recovery, so the return value of the
+ * function below is always 0 and can safely be ignored.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put the hex value of a pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: key name to pair with the value
+ * @ptr: pointer to format as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
+ char buf[sizeof(ptr) * 3];
+
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+ const u64 graceful_period = 0;
+
+ rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health reporter",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
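Because ice_get_health_status() resolves codes with bsearch(), the lookup table must stay sorted ascending by .code, as its preceding comment requires. A hedged usage sketch (the example code value is illustrative):

/* NULL means an internal code that is not relevant to the user */
const struct ice_health_status *hs = ice_get_health_status(0x502);

if (hs) /* 0x502 == ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH */
        pr_info("%s %s\n", hs->description,
                hs->solution ? hs->solution : "");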
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file declares everything needed for the ice driver's
+ * devlink health mechanism.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
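struct_group_tagged() wraps the four Tx-hang members in an anonymous union, so they stay directly addressable on struct ice_health while also being usable as one struct ice_health_tx_hang_buf. Both views share storage, as this sketch shows (field names from the header above):

struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;

buf->head = head;                   /* via the tagged group ...      */
pf->health_reporters.head = head;   /* ... or directly; same storage */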
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..767419a67fef 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f5d6f974185..71e05d30f0fd 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -78,6 +78,7 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -665,6 +666,7 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
u8 num_quanta_prof_used;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ef14cff9a333..bdee499f991a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -1491,7 +1498,6 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
@@ -1665,6 +1671,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@@ -1807,6 +1814,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -2084,10 +2092,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2510,6 +2518,87 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
+};
+
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
+ */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
+};
+
/* Set FW Logging configuration (indirect 0xFF30)
* Register for FW Logging (indirect 0xFF31)
* Query FW Logging (indirect 0xFF32)
@@ -2650,6 +2739,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_set_health_status_cfg set_health_status_cfg;
+ struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
@@ -2852,6 +2943,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
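Wrapping the raw context byte arrays in ice_rxq_ctx_buf_t/ice_txq_ctx_buf_t turns what used to be unchecked u8 * parameters into size-checked types. Illustrative sketch (the BUILD_BUG_ON line is an editorial addition, and tlan_ctx is assumed to be a filled-in struct ice_tlan_ctx):

ice_txq_ctx_buf_t buf = {};

BUILD_BUG_ON(sizeof(buf) != ICE_TXQ_CTX_SZ);    /* 22 bytes, packed */
ice_pack_txq_ctx(&tlan_ctx, &buf);              /* destination is type-checked */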
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..405ddd17de1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 82a9cd4ec7ae..b2af8e3586f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
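The prefena addition in the first hunk above pairs with the ice_common.c change below, where ice_write_rxq_ctx() stops forcing it: descriptor prefetch becomes an explicit caller decision. Caller-side sketch (ring->reg_idx is an assumed queue index):

struct ice_rlan_ctx rlan_ctx = {};

rlan_ctx.prefena = 1;   /* opt in to descriptor prefetch explicitly */
err = ice_write_rxq_ctx(hw, &rlan_ctx, ring->reg_idx);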
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 496d86cbd13f..7a2a2e8da8fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -308,6 +309,42 @@ bool ice_is_e825c(struct ice_hw *hw)
}
/**
+ * ice_is_pf_c827 - check if the PF contains a C827 PHY
+ * @hw: pointer to the hw struct
+ *
+ * Return: true if the device has a C827 PHY.
+ */
+static bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -1025,6 +1062,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware is not loaded yet, keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1173,8 +1237,19 @@ int ice_init_hw(struct ice_hw *hw)
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+ /* Some cards require longer initialization times because they
+ * must load FW from an external source. This can take up to
+ * half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1360,39 +1435,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
-
- if (!ice_rxq_ctx)
- return -EINVAL;
-
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
-
/* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1413,35 +1480,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
}
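A minimal sketch of the resulting call flow from a ring-setup path; the ring_* values are placeholders for illustration, not code from this patch:

	struct ice_rlan_ctx rlan_ctx = {
		.base = ring_dma >> ICE_RLAN_BASE_S, /* 128-byte units */
		.qlen = ring_count,
		.dbuf = ring_buf_len >> ICE_RLAN_CTX_DBUF_S,
		.prefena = 1, /* set by the caller, no longer forced here */
	};
	int err = ice_write_rxq_ctx(hw, &rlan_ctx, ring_reg_idx);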
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1470,10 +1552,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
@@ -2547,6 +2641,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2569,6 +2664,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2709,40 +2806,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -4096,6 +4159,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ kfree(options);
+ return lane;
+ }
+
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
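To make the mapping concrete: on a hypothetical 8-lane device where only lanes 0, 2, 4 and 6 carry an active port option with a valid speed, the PF with pf_id == 1 is the second occupied lane encountered, so the function returns 2.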
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4558,205 +4672,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
-/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_qword, dest, sizeof(dest_qword));
-
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
-}
-
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
@@ -6032,6 +5947,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_health_status_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
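A sketch of how a PF might opt in once support is detected; the mask name here is an assumption for illustration, not taken from this patch:

	if (ice_is_fw_health_report_supported(&pf->hw)) {
		/* assumed event-source mask for PF-specific events */
		err = ice_aq_set_health_status_cfg(&pf->hw,
						   ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK);
		if (err)
			dev_dbg(dev, "enabling FW health events failed: %d\n",
				err);
	}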
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..15ba38543738 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
@@ -113,7 +112,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -143,6 +141,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 38e151c7ea23..8d806d8ad761 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..ed21d7f55ac1 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
@@ -50,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -64,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -276,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3072634bf049..f241493a6ac8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
- { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 8f2ad1c172c0..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool {
int rx_equ_post1;
int rx_equ_bflf;
int rx_equ_bfhf;
- int rx_equ_drate;
int rx_equ_ctle_gainhf;
int rx_equ_ctle_gainlf;
int rx_equ_ctle_gaindc;
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 2702a0da5c3e..70c201f569ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -6,6 +6,7 @@
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -1004,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index f02e8ca55375..b2148dbe49b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..22371011c249 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
*/
@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..1479b45738af 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +393,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +545,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +554,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +562,6 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a7d45a8ce7ac..d0faa087793d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1700,6 +1700,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
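+/**
+ * ice_is_recovery_mode - check if the firmware is in recovery mode
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if GL_MNG_FWSM reports firmware recovery mode.
+ */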
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
@@ -3931,24 +3937,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 10d6fc479a32..b4c9cb28a016 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -90,6 +90,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
@@ -104,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0ab35607e5d5..e13bd5a6cb6c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2355,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2364,9 +2385,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -4741,55 +4764,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4810,7 +4790,7 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
+ return err;
}
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
@@ -4834,7 +4814,7 @@ int ice_init_dev(struct ice_pf *pf)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_scheme;
+ goto unroll_pf_init;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4845,17 +4825,15 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
+ goto unroll_irq_scheme_init;
}
return 0;
-err_req_irq_msix_misc:
+unroll_irq_scheme_init:
ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
+unroll_pf_init:
ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
return err;
}
@@ -5088,12 +5066,14 @@ static int ice_init_devlink(struct ice_pf *pf)
ice_devlink_init_regions(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
+ ice_health_deinit(pf);
ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
@@ -5249,6 +5229,36 @@ void ice_unload(struct ice_pf *pf)
ice_decfg_netdev(vsi);
}
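+/* Bring the device up with just enough infrastructure (control queues,
+ * service task, devlink) to allow flashing new firmware while the NIC is
+ * stuck in recovery mode.
+ */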
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5312,13 +5322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5346,29 +5350,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_adapter;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
return err;
}
@@ -5448,6 +5470,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -6790,7 +6820,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7260,7 +7290,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
@@ -7793,6 +7823,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -8283,16 +8315,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
index 6509d807627c..4f56d53d56b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser.h
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
-#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
-#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
@@ -431,13 +429,13 @@ struct ice_parser_rt {
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
- u8 bst_key[ICE_BST_KEY_SIZE];
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
- u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
index dedf5e854e4b..3995d662e050 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
else
key[idd] = imem->b_kb.prio;
- idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
- key[i] = rt->pkt_buf[ho + idd - i];
+ key[i] = rt->pkt_buf[j];
else
key[i] = 0;
}
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- key[0], key[1], key[2], key[3], key[4],
- key[5], key[6], key[7], key[8], key[9]);
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
}
static u16 ice_bit_rev_u16(u16 v, int len)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..e26320ce52ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
static const char ice_pin_names_nvm[][64] = {
@@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+ /* name, gpio, delay */
+ { GNSS, { 1, -1 }, { 0, 0 }},
+ { SMA1, { 1, 0 }, { 0, 1 }},
+ { UFL1, { -1, 0 }, { 0, 1 }},
+ { SMA2, { 3, 2 }, { 0, 1 }},
+ { UFL2, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -464,7 +464,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +475,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +492,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +513,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -1388,10 +1414,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1424,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -1566,18 +1584,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1772,9 +1801,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
if (rq->flags & ~PTP_PEROUT_PHASE)
@@ -1785,6 +1814,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1816,11 +1846,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
* at the next multiple of period, maintaining phase.
*/
clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
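Worked example of the realignment above: with period = 1 s, phase = 0 and clk = 12.3 s (all in ns), div64_u64(clk + period - 1, period) * period rounds the start up to 13 s; the per-pin propagation delay is then subtracted so the edge appears on the pin exactly on the second boundary.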
@@ -3080,7 +3110,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3164,10 +3194,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int err;
+ int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
+ lane_num = ice_get_phy_lane_number(hw);
+ if (lane_num < 0) {
+ err = lane_num;
+ goto err_exit;
+ }
+
+ ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3225,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..a1d0e988c084 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm {
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
/**
@@ -310,7 +312,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -358,7 +360,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index d75f0eddd631..ac46d1183300 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
@@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
@@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
@@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
@@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
@@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 518893f23372..ec91822e9280 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ /* On a single complex E825, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ */
+ if (port >= hw->ptp.ports_per_phy)
+ return eth56g_phy_1;
+ else
+ return eth56g_phy_0;
+}
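For example, on an E825 with ports_per_phy == 4, ports 0-3 resolve to eth56g_phy_0 and ports 4-7 to eth56g_phy_1.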
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
 * @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact lanes 0..3 on the second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base[0] +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
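
With the per-PHY offset now folded into dest_dev selection, only base[0] is
used for the address itself. A worked example of the new addressing for port 6
on an E825 with ports_per_phy = 4 (values illustrative):

/* Illustration (not driver code): resolving port 6, res_type R, offset O:
 *
 *   dest_dev = eth56g_phy_1;                // port 6 >= ports_per_phy
 *   lane     = 6 % 4;                       // = 2, lane within that PHY
 *   *addr    = eth56g_phy_res[R].base[0]
 *            + 2 * eth56g_phy_res[R].step + O;
 */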
@@ -1008,19 +1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
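
The switch from "val |= blk_port" to "val |= BIT(quad_lane)" also changes the
bit arithmetic: assuming PHY_PTP_1STEP_CONFIG carries one enable bit per quad
lane, OR-ing the raw lane number was a no-op for lane 0 and set composite
patterns for lanes 2 and 3, whereas BIT() targets exactly one bit:

/* Illustration (assumed one enable bit per quad lane):
 *   port 4 -> quad_lane 0: old  val |= 0     (no bit set)
 *                          new  val |= BIT(0)
 *   port 7 -> quad_lane 3: old  val |= 0x3   (bits 0 and 1)
 *                          new  val |= BIT(3)
 */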
/**
@@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
+ u32 phy_rev;
+ int err;
ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+ err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G)
+ ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = rmn_0;
}
/**
@@ -4857,33 +4876,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
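
The wait_event_interruptible_locked_irq() call implies a counterpart in the Tx
timestamp interrupt path that sets ATQBAL_FLAGS_INTR_IN_PROGRESS and wakes
waiters under the same wait-queue lock. That path is not part of this diff; a
sketch of what it plausibly looks like:

/* Assumed counterpart (not in this diff): run once the interrupt-driven
 * low latency read has consumed the timestamp it owned.
 */
spin_lock(&params->atqbal_wq.lock);
params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
wake_up_locked(&params->atqbal_wq);
spin_unlock(&params->atqbal_wq.lock);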
/**
@@ -5066,6 +5098,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout, -ERESTARTSYS if interrupted
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5083,6 +5164,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5106,6 +5190,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout, -ERESTARTSYS if interrupted
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
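
The 40-bit increment value does not fit the 32-bit proxy data register, so it
is split: the low 32 bits go to REG_LL_PROXY_L and bits 39:32 ride in the
TS_HIGH field of the command word. Worked example (value illustrative):

/* Splitting a 40-bit incval, e.g. 0x13b13b13b:
 *   lower_32_bits(incval)     = 0x3b13b13b  -> REG_LL_PROXY_L
 *   (u8)upper_32_bits(incval) = 0x01        -> REG_LL_PROXY_H_TS_HIGH
 */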
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -5120,6 +5254,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5404,6 +5541,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
/* Device agnostic functions
@@ -5478,7 +5617,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
else if (ice_is_e810(hw))
ice_ptp_init_phy_e810(ptp);
else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
+ ice_ptp_init_phy_e825(hw);
else
ptp->phy_model = ICE_PHY_UNSUP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1cee0f1bba2d..6779ce120515 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -689,11 +666,18 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
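
Taken together, the renamed masks describe one 32-bit command register shared
by the timestamp-read and timer-update commands (layout read off the defines
above; TS_IDX and PHY_TMR_IDX overlap because they belong to different
commands):

/* REG_LL_PROXY_H (PF_SB_ATQBAL) layout:
 *   bit  31     EXEC         driver sets, FW clears on completion
 *   bit  30     TS_INTR_ENA  request a completion interrupt
 *   bits 29:24  TS_IDX       Tx timestamp index  (read command)
 *   bit  24     PHY_TMR_IDX  timer index         (timer commands)
 *   bits 23:16  TS_HIGH      TS high byte / incval bits 39:32
 *   bits 7:6    PHY_TMR_CMD  0x1 = adjust, 0x2 = frequency
 */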
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..fb7a1b9a4313 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..8aabf7749aa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..4a91e0aaf0a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..380ba1e8b3b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
+static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -574,7 +573,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
xdp_buff_set_frags_flag(xdp);
}
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
return -ENOMEM;
- }
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
}
/**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
+ *
+ * This function is intended to be called right before running the XDP
+ * program so that the page recycling mechanism can make a correct
+ * decision about the underlying pages; the counts are captured up front
+ * because the XDP program may change the page refcounts
+ */
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+ for (int i = 0; i < nr_frags; i++) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+}
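
The snapshot exists to feed the reuse test applied when the buffers are put
back: a page may only be recycled if, once the bias is discounted, the driver
holds the sole reference. A sketch of that condition (simplified from the
existing page-reuse logic, which this diff does not touch):

static bool rx_page_reusable(const struct ice_rx_buf *rx_buf)
{
	/* only the driver references the page once bias is discounted */
	return rx_buf->pgcnt - rx_buf->pagecnt_bias <= 1;
}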
+
+/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
rx_buf->page_offset + headlen, size,
xdp->frame_sz);
} else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
+ /* buffer is unused, restore biased page count in Rx buffer;
+ * data was copied onto skb's linear part so there's no
* need for adjusting page offset and we can reuse this buffer
* as-is
*/
- rx_buf->act = ICE_SKB_CONSUMED;
+ rx_buf->pagecnt_bias++;
}
if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+ * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @verdict: return code from XDP program execution
+ *
+ * Walk through the gathered fragments and satisfy the internal page
+ * recycle mechanism; the action taken for each buffer depends on the
+ * verdict returned by the XDP program.
+ */
+static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ u32 *xdp_xmit, u32 ntc, u32 verdict)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+ int i;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+ for (i = 0; i < post_xdp_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+
+ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ *xdp_xmit |= verdict;
+ } else if (verdict & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (verdict == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+ /* Handle buffers whose frags were released by the XDP prog; keep
+ * pagecnt_bias for these as-is: the page refcount was already
+ * decremented inside the XDP prog, so there is nothing further to
+ * balance against the bias
+ */
+ for (; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+ ice_put_rx_buf(rx_ring, buf);
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
+}
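
The two loops split the hardware view of the frame from the post-XDP view.
Worked example: a frame that arrived in four buffers (nr_frags = 4) from which
the XDP program freed one frag (post_xdp_frags = 3):

/* buffers 0..2: verdict-based handling above (page-offset flip for
 *               TX/REDIR/PASS, bias restore for CONSUMED);
 * buffer  3   : only ice_put_rx_buf(); its page reference was already
 *               dropped inside the XDP program, so pagecnt_bias must
 *               not be touched again.
 */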
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
break;
}
if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ ice_get_pgcnts(rx_ring);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
+ xdp_verdict = ICE_XDP_CONSUMED;
}
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1257,23 +1330,6 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
@@ -2368,7 +2424,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..806bce701df3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -201,7 +201,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int act;
unsigned int pagecnt_bias;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6cf32b404127 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..33a1a5934c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,7 @@
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
+#include <linux/wait.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -368,6 +369,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +384,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -848,15 +851,23 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
@@ -881,7 +892,6 @@ struct ice_ptp_hw {
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -1216,4 +1226,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..815ad0bfe832 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..8975d2971bc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -801,35 +801,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
break;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index 4849590a5591..b28991dd1870 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
/* strip off FW internal code */
desc_err = le16_to_cpu(desc->ret_val) & 0xff;
@@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
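
Both hunks apply the usual descriptor write-back rule: the DD flag must be
tested before any other descriptor field is read, and dma_rmb() stops the CPU
and compiler from hoisting those reads above the check. The shared pattern, as
a sketch (process_desc() is a stand-in, not a real function):

if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
	break;		/* FW has not written this descriptor back yet */
dma_rmb();		/* later field reads may not pass the DD check */
process_desc(desc);	/* now safe to read ret_val, cookie, ... */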
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index b4fbb99bfad2..a3d6b8f198a8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+
err = idpf_vport_open(vport);
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f71d3182580b..b6c515d14cbf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
- adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ adapter->init_wq = alloc_workqueue("%s-%s-init",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->init_wq) {
@@ -183,7 +184,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
- adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ adapter->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
@@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
@@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mbx_wq_alloc;
}
- adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->stats_wq) {
@@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_stats_wq_alloc;
}
- adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2fa9c36e33c9..977741c41498 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3008,14 +3008,11 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3024,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3033,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
@@ -3072,6 +3071,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
idpf_rx_hash(rxq, skb, rx_desc, decoded);
skb->protocol = eth_type_trans(skb, rxq->netdev);
+ skb_record_rx_queue(skb, rxq->idx);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
@@ -3080,8 +3080,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
idpf_rx_csum(rxq, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rxq->idx);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d46c95f91b0d..3d2413b8684f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -517,8 +517,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
@@ -612,14 +614,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
@@ -3077,12 +3081,21 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 463c0d26b9d4..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..02f340280d20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED
};
#define ring_uses_large_buffer(ring) \
@@ -731,12 +738,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -797,6 +813,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +850,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
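
All of these igb_xsk_* entry points live in the new igb_xsk.o object added to
the Makefile above. The pool lookup that the WRITE_ONCE(ring->xsk_pool, ...)
call sites below rely on plausibly reduces to a qid lookup; a sketch (the
enablement check is simplified and assumed, the real body is in igb_xsk.c):

struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
				   struct igb_ring *ring)
{
	if (!igb_xdp_is_enabled(adapter))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, ring->queue_index);
}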
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 288a4bb2683a..d368b753a467 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,7 +33,6 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
-#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -475,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -500,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -1990,7 +1992,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2911,37 +2917,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-/* This function assumes __netif_tx_lock is held by the caller. */
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2955,7 +2944,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -2988,10 +2978,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3042,6 +3036,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
};
/**
@@ -3338,7 +3333,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4364,6 +4360,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4424,7 +4422,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0);
+ rx_ring->queue_index,
+ rx_ring->q_vector->napi.napi_id);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
@@ -4720,12 +4719,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4757,8 +4761,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4785,9 +4798,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4888,19 +4904,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4931,6 +4952,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4945,6 +4967,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4975,8 +5000,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5007,13 +5037,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5041,6 +5076,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -6467,6 +6503,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6622,7 +6661,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -8195,6 +8234,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8206,7 +8246,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8235,13 +8280,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8272,10 +8322,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8307,6 +8361,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8335,6 +8390,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8397,7 +8467,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8588,9 +8658,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8630,7 +8699,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8756,10 +8825,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8786,9 +8851,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8870,6 +8935,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
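+/* Complete a poll's deferred XDP work: flush redirects, then ring the
+ * XDP Tx tail doorbell.
+ */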
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
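+/* Accumulate one poll's Rx counters into the ring and q_vector stats */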
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8877,12 +8974,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8940,12 +9036,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8964,7 +9058,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -8978,7 +9072,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -9001,24 +9095,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..157d43787fa0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
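+/* Switch the ring's Rx bookkeeping between the page-based layout and the
+ * XSK zero-copy (xdp_buff) layout used while a pool is attached.
+ */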
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
+
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
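+ /* Wait for any in-flight datapath users of the rings to finish */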
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
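+ /* A pool only takes effect while an XDP program is loaded */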
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
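+ /* Hand each allocated buffer's DMA address to its Rx descriptor */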
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
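+ /* Fill to the end of the ring first, then wrap around to index 0 */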
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
+
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
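+ /* A full XSK Rx queue (-ENOBUFS) with need_wakeup enabled means
+ * user space must be woken; exit the poll instead of dropping.
+ */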
+ if (xsk_uses_need_wakeup(xsk_pool) &&
+ err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
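+ /* A zero length means this descriptor has not been written back */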
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
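+ /* Let user space know whether a wakeup is needed to resume Rx */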
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
+
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
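+ /* Turn each frame from the XSK Tx ring into a hw Tx descriptor */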
+ while (nb_pkts-- > 0) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. One nice to have optimization would be to set it
+ * only for the last descriptor in the whole batch. See Intel
+ * ice driver for an example on how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ i++;
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
+
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
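+ /* If NAPI is already scheduled, just mark a missed run; otherwise
+ * raise a software interrupt to get it scheduled.
+ */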
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index eac0f966e0e4..b8111ad9a9a8 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -337,6 +337,8 @@ struct igc_adapter {
struct igc_led_classdev *leds;
};
+void igc_set_queue_napi(struct igc_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_open(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index d9d1a1a11daf..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 27872bdea9bd..733820a0c350 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -2123,10 +2124,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2515,8 +2512,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2530,7 +2526,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
@@ -2585,6 +2581,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2630,12 +2627,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2657,7 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
@@ -2672,7 +2667,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2707,8 +2702,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
@@ -2727,27 +2723,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2783,7 +2780,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2813,6 +2809,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
@@ -2821,7 +2819,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
@@ -4948,6 +4946,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
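+/* Attach, or with a NULL napi detach, the per-queue NAPI mapping for a vector's rings */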
+void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4955,6 +4969,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4962,8 +4977,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5192,6 +5210,7 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -5576,6 +5595,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -6018,6 +6040,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -6053,8 +6076,11 @@ static int __igc_open(struct net_device *netdev, bool resuming)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -6779,45 +6805,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_get_tstamp = igc_get_tstamp,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -7103,8 +7090,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
@@ -7338,7 +7325,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7381,7 +7368,11 @@ static int igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
+ if (!rpm)
+ rtnl_unlock();
if (!err)
netif_device_attach(netdev);
}
@@ -7389,9 +7380,14 @@ static int igc_resume(struct device *dev)
return err;
}
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
static int igc_suspend(struct device *dev)
@@ -7436,14 +7432,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7454,7 +7454,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7493,7 +7493,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..efd121c03967 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..13bbd3346e01 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,7 +23,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update)
igc_close(dev);
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -34,7 +36,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
+ if (if_running && need_update)
igc_open(dev);
return 0;
@@ -84,6 +86,7 @@ static int igc_xdp_enable_pool(struct igc_adapter *adapter,
napi_disable(napi);
}
+ igc_set_queue_napi(adapter, queue_id, NULL);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
@@ -133,6 +136,7 @@ static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+ igc_set_queue_napi(adapter, queue_id, napi);
if (needs_reset) {
napi_enable(napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 965e5ce1b326..b456d102655a 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
@@ -9,7 +9,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ ixgbe_xsk.o ixgbe_e610.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..e6a380d4929b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -20,6 +20,7 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +174,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -654,6 +656,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +664,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
/* Tx fast path data */
int num_tx_queues;
@@ -793,6 +799,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
@@ -849,6 +856,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +882,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +905,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +916,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..964988b4d58b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..7beaf6ea57f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -469,9 +472,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +668,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -2918,6 +2930,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3382,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f2709b10c2e5..19d6b6fa8fb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..cb07ecd8937d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,2658 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ * It may happen when CSR is busy during link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * An Admin Command is sent using CSR by writing the descriptor and buffer
+ * into specific registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big or
+ * buf or buf_size is an invalid argument.
+ * * - -ETIME - Admin Command X command timeout.
+ * * - -EIO - Admin Command X invalid state of HICR register or
+ * Admin Command failed because a bad opcode was returned or
+ * Admin Command failed with error Y.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if the mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && !buf_size) ||
+ (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
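+ /* Buffers above IXGBE_ACI_LG_BUF (size rounded up to a full dword)
+ * must be flagged as large buffers.
+ */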
+ if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event, which is expected to contain a
+ * different opcode than the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum ixgbe_aci_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+	/* Write response values back to the caller's buffer. */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
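+		/* The loop leaves 'i' at the dword holding the tail bytes. */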
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+			memcpy(buf_cpy, buf, buf_size);
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
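+	/* Retry EBUSY completions, restoring the caller's descriptor and
+	 * buffer before each attempt, until the retry count or the overall
+	 * timeout is exhausted.
+	 */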
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != IXGBE_ACI_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
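+/* Typical caller pattern (sketch): fill a descriptor with
+ * ixgbe_fill_dflt_direct_cmd_desc() and send it; direct commands pass a
+ * NULL buffer and zero length:
+ *
+ *	struct ixgbe_aci_desc desc;
+ *
+ *	ixgbe_fill_dflt_direct_cmd_desc(&desc, opcode);
+ *	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ */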
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events,
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
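+/* Event-drain sketch (handle_event() is a hypothetical consumer; 'buf'
+ * is a caller-allocated buffer):
+ *
+ *	struct ixgbe_aci_event e = { .msg_buf = buf,
+ *				     .buf_len = IXGBE_ACI_MAX_BUFFER_SIZE };
+ *	bool pending = true;
+ *
+ *	while (pending && !ixgbe_aci_get_event(hw, &e, &pending))
+ *		handle_event(&e);
+ */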
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
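+	/* Zero the caller's timeout; it is refreshed below only from the
+	 * value reported in the FW response.
+	 */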
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_ids res, u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * the ixgbe_aci_req_res to utilize ACI.
+ * If another driver has previously acquired the resource and performed
+ * any necessary updates, -EALREADY is returned; the caller does not
+ * obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+	/* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
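+/* Callers bracket resource use with acquire/release (sketch; the read
+ * access enumerator name is assumed):
+ *
+ *	err = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
+ *				IXGBE_NVM_TIMEOUT);
+ *	if (!err) {
+ *		// ... use the resource ...
+ *		ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+ *	}
+ */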
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+	/* The Shadow RAM size is 2^sr_size KB; convert it to words. */
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+	/* The cage node type can be used to determine if a cage is present.
+	 * If the ACI returns an error (ENOENT), no cage is present, and the
+	 * connection type is backplane or BASE-T.
+	 */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * phy_type_low is checked first, then phy_type_high.
+ * If more than one media type is set, or none can be identified,
+ * ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+		 * in a backplane. Try to detect this case and handle it.
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down,
+ * and is invalid if the return status is non-zero. As a result of this
+ * call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+	/* Clear the flag so calling functions don't query the ACI again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable link status events, false to disable them
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+	/* Enable or disable link status event generation by the FW. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * To get the media type, the function gets the PHY capabilities and
+ * then uses them to identify the PHY type by checking phy_type_high
+ * and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+	/* If there is no link but a PHY (dongle) is available, SW should use
+	 * the Get PHY Caps admin command instead of Get Link Status, find the
+	 * most significant bit set in the PHY types reported by the command,
+	 * and use it to discover the media type.
+	 */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
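+		/* fls64() is 1-based (bit N set yields N + 1), hence the
+		 * BIT_ULL(highest_bit - 1) conversion.
+		 */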
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit)
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
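+		/* Poll in 100 ms steps for at most max_link_up_time tries. */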
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+	/* Get the current link status.
+	 * The current FC mode is stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable RX DMA unit on E610 with use of ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
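+	/* Save and clear the VT loopback enable bit so that a later RX
+	 * enable can restore it.
+	 */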
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+		/* Lenient mode: module compliance is not strictly enforced,
+		 * so re-read the caps ignoring the attached media.
+		 */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+	 * exist for the E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to the provided handle using
+ * ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
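
The descriptor carries the 24-bit offset split across a 16-bit low field and an 8-bit high field; the split used above, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t offset = 0x12abcd; /* example 24-bit NVM offset */
        uint16_t offset_low = offset & 0xffff;
        uint8_t offset_high = (offset >> 16) & 0xff;

        printf("low=0x%04x high=0x%02x\n", offset_low, offset_high);
        return 0;
}
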
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification fails, -EIO is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns an error code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
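
The chunking loop is the subtle part: each command may move at most 4 KB and, for Shadow RAM, must not cross a 4 KB sector boundary. A userspace model of the same arithmetic (MAX_BUF mirrors IXGBE_ACI_MAX_BUFFER_SIZE, which the comments above put at 4 KB):

#include <stdint.h>
#include <stdio.h>

#define MAX_BUF 4096u /* mirrors IXGBE_ACI_MAX_BUFFER_SIZE */

int main(void)
{
        uint32_t offset = 4000, len = 9000, done = 0;

        while (done < len) {
                uint32_t sector_off = offset % MAX_BUF;
                uint32_t rest = len - done;
                uint32_t chunk = MAX_BUF - sector_off;

                if (chunk > rest)
                        chunk = rest;

                /* Prints chunks of 96, 4096, 4096 and 712 bytes. */
                printf("read %u bytes at offset %u%s\n", chunk, offset,
                       done + chunk == len ? " (last)" : "");
                done += chunk;
                offset += chunk;
        }
        return 0;
}
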
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
+ * ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
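
The flash stores little-endian 16-bit words, which the loop above converts to host order in place; the decode itself, modeled standalone:

#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_host(const uint8_t *p)
{
        return (uint16_t)(p[0] | (uint16_t)p[1] << 8);
}

int main(void)
{
        uint8_t buf[4] = { 0x34, 0x12, 0x78, 0x56 }; /* LE words on flash */
        uint16_t words[2];

        for (int i = 0; i < 2; i++)
                words[i] = le16_to_host(&buf[2 * i]);

        printf("0x%04x 0x%04x\n", words[0], words[1]); /* 0x1234 0x5678 */
        return 0;
}
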
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Reset polling failed to complete.");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_X540,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..ba8c06b73810
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9482e0cca8b7..da91c582d439 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -690,6 +690,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -1613,6 +1614,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1874,6 +1876,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1938,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 16fa621ce0ff..336d47ffb95a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2e38e8f6fac1..467f81239e12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
@@ -73,6 +74,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -131,6 +133,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -173,6 +180,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -236,8 +245,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -876,6 +888,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -915,6 +928,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1025,7 +1039,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1909,10 +1923,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2095,7 +2105,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2220,9 +2230,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2272,7 +2282,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2330,6 +2340,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2375,12 +2386,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2400,7 +2409,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2414,7 +2423,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2515,6 +2524,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2528,6 +2538,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2744,6 +2757,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2966,6 +2980,218 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+ /* Update the link info structures and re-enable link events,
+ * don't bail on failure since other bookkeeping still needs to happen.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in a down, removing or resetting state,
+ * a service event is scheduled.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Free the memory allocated for the event's msg_buf. Used via __cleanup()
+ * to implement automatic cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and process it according to the type of the
+ * event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
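
The __cleanup() annotation is the kernel wrapper around the compiler's cleanup attribute: the named function runs whenever the variable leaves scope, which is why ixgbe_handle_fw_event needs no explicit kfree() on any exit path. A minimal userspace model of the same pattern:

#include <stdio.h>
#include <stdlib.h>

struct event {
        void *msg_buf;
};

/* Runs automatically when the annotated variable goes out of scope. */
static void event_cleanup(struct event *e)
{
        free(e->msg_buf); /* free(NULL) is a safe no-op */
}

int main(void)
{
        struct event e __attribute__((cleanup(event_cleanup))) = {
                .msg_buf = malloc(4096),
        };

        if (!e.msg_buf)
                return 1;
        /* ... use e.msg_buf; no explicit free on any return path ... */
        return 0;
}
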
+
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2982,6 +3208,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3035,6 +3262,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3091,12 +3321,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3334,6 +3568,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3342,6 +3579,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3442,6 +3680,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -4359,6 +4598,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4526,6 +4766,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4564,6 +4805,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -5148,6 +5390,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5208,6 +5451,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5510,6 +5754,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5532,13 +5818,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5561,13 +5855,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
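
The inverted event mask built in ixgbe_non_sfp_link_config above deserves a note: in the set-event-mask command a set bit appears to mask an event off, so the four events of interest are enabled by clearing their bits and setting all the rest. A sketch with hypothetical bit values (the real IXGBE_ACI_LINK_EVENT_* definitions live in ixgbe_type_e610.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions, for illustration only. */
#define EV_UPDOWN           0x0001
#define EV_MEDIA_NA         0x0002
#define EV_MODULE_QUAL_FAIL 0x0004
#define EV_PHY_FW_LOAD_FAIL 0x0008

int main(void)
{
        /* A set bit masks an event off, so the events to be delivered
         * are the cleared bits; hence the inversion.
         */
        uint16_t mask = ~(uint16_t)(EV_UPDOWN | EV_MEDIA_NA |
                                    EV_MODULE_QUAL_FAIL |
                                    EV_PHY_FW_LOAD_FAIL);

        printf("event mask = 0x%04x\n", mask); /* 0xfff0 here */
        return 0;
}
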
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (!ixgbe_non_sfp_link_config(&adapter->hw))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5630,6 +5964,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5980,6 +6315,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6224,6 +6560,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6279,6 +6617,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6342,6 +6681,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6909,6 +7250,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -7062,6 +7416,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7209,6 +7564,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7221,11 +7577,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7251,6 +7608,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7551,6 +7909,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -8052,6 +8411,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -10771,6 +11135,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to format the FW version for
+ * display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
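
The resulting string concatenates the NVM version, the EETRACK id and the option ROM version. With illustrative values, the format produces something like this:

#include <stdio.h>

int main(void)
{
        /* Illustrative values only */
        unsigned int nvm_major = 0x3, nvm_minor = 0x10;
        unsigned int eetrack = 0x805bc3a5;
        int orom_major = 1, orom_build = 25, orom_patch = 0;
        char eeprom_id[32];

        snprintf(eeprom_id, sizeof(eeprom_id), "%x.%02x 0x%x %d.%d.%d",
                 nvm_major, nvm_minor, eetrack, orom_major, orom_build,
                 orom_patch);
        printf("%s\n", eeprom_id); /* "3.10 0x805bc3a5 1.25.0" */
        return 0;
}
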
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10782,6 +11164,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10868,6 +11255,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
}
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
@@ -10933,12 +11322,19 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10959,6 +11355,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11130,6 +11527,8 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -11275,6 +11674,8 @@ err_netdev:
err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -11321,6 +11722,11 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_disable_link_status_events(adapter);
+ mutex_destroy(&adapter->hw.aci.lock);
+ }
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11452,6 +11858,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..0a03a8bb5f88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9baccacd02a1..5fdf32d79d82 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +72,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -1600,7 +1608,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1644,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1663,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1683,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2068,6 +2079,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2180,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2288,6 +2301,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena */
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena */
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2351,6 +2365,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3145,6 +3183,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3224,7 +3264,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3533,6 +3576,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3575,6 +3621,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3599,6 +3646,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3643,6 +3694,19 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..8d06ade3c7cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1074 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
+/* [ms] timeout while waiting for a sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout while waiting for an async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout while waiting for a resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */
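+
+/* Flag composition sketch. The helper below is hypothetical and for
+ * illustration only: a command that carries an external buffer sets BUF,
+ * and additionally sets RD when the buffer holds data for the command
+ * rather than room for the response.
+ */
+static inline __le16 ixgbe_aci_example_buf_flags(bool buf_is_cmd_data)
+{
+	u16 flags = IXGBE_ACI_FLAG_BUF;	/* external buffer attached */
+
+	if (buf_is_cmd_data)
+		flags |= IXGBE_ACI_FLAG_RD;	/* FW reads the buffer */
+
+	return cpu_to_le16(flags);
+}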
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
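+
+/* A minimal conversion sketch, assuming callers want standard errno
+ * values; the helper is hypothetical and maps only the most common
+ * codes, folding everything else into -EIO.
+ */
+static inline int ixgbe_aci_example_err_to_errno(enum ixgbe_aci_err err)
+{
+	switch (err) {
+	case IXGBE_ACI_RC_OK:
+		return 0;
+	case IXGBE_ACI_RC_EPERM:
+		return -EPERM;
+	case IXGBE_ACI_RC_ENOENT:
+		return -ENOENT;
+	case IXGBE_ACI_RC_EAGAIN:
+		return -EAGAIN;
+	case IXGBE_ACI_RC_ENOMEM:
+		return -ENOMEM;
+	case IXGBE_ACI_RC_EBUSY:
+		return -EBUSY;
+	case IXGBE_ACI_RC_EINVAL:
+		return -EINVAL;
+	default:
+		return -EIO;	/* anything unrecognized */
+	}
+}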
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+	/* Upon successful completion, FW writes this value; the driver is
+	 * expected to release the resource before the timeout expires. The
+	 * value is provided in milliseconds.
+	 */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
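+
+/* Ownership-request sketch. The helper name is hypothetical; it only
+ * shows how the request fields are laid out. The same command layout is
+ * reused with opcode ixgbe_aci_opc_release_res to drop ownership, which
+ * the caller must do before the FW-reported timeout elapses.
+ */
+static inline void
+ixgbe_aci_example_fill_req_res(struct ixgbe_aci_cmd_req_res *cmd,
+			       enum ixgbe_aci_res_ids res_id,
+			       enum ixgbe_aci_res_access_type access)
+{
+	cmd->res_id = cpu_to_le16(res_id);
+	cmd->access_type = cpu_to_le16(access);
+	/* timeout, res_number and status are written back by firmware */
+}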
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_1588 0x0046
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049
+#define IXGBE_ACI_CAPS_OROM_VER 0x004A
+#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B
+#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Extended PHY type defines.
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	/* Get link status version 2 link partner data */
+	__le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
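+
+/* Speed decoding sketch (hypothetical helper): FW reports link_speed as
+ * a bitfield with a single bit set, so a switch on the decoded value
+ * maps it to Mb/s. Only the speeds this device family negotiates are
+ * listed; anything else yields 0.
+ */
+static inline u32 ixgbe_aci_example_speed_to_mbps(__le16 link_speed)
+{
+	switch (le16_to_cpu(link_speed)) {
+	case IXGBE_ACI_LINK_SPEED_10MB:
+		return 10;
+	case IXGBE_ACI_LINK_SPEED_100MB:
+		return 100;
+	case IXGBE_ACI_LINK_SPEED_1000MB:
+		return 1000;
+	case IXGBE_ACI_LINK_SPEED_2500MB:
+		return 2500;
+	case IXGBE_ACI_LINK_SPEED_5GB:
+		return 5000;
+	case IXGBE_ACI_LINK_SPEED_10GB:
+		return 10000;
+	default:
+		return 0;	/* unknown or unsupported speed */
+	}
+}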
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+	/* For Write Activate, several flags are sent as part of a separate
+	 * flags2 field using a separate byte. For simplicity of the software
+	 * interface, we pass the flags as a 16-bit value so these flags are
+	 * all offset by 8 bits.
+	 */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
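+
+/* The 24-bit flash offset is split across offset_low (bits 15:0) and
+ * offset_high (bits 23:16). A minimal sketch of the split; the helper
+ * name is hypothetical and the caller is assumed to have validated
+ * offset against IXGBE_ACI_NVM_MAX_OFFSET.
+ */
+static inline void
+ixgbe_aci_example_nvm_set_offset(struct ixgbe_aci_cmd_nvm *cmd, u32 offset)
+{
+	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);	/* bits 15:0 */
+	cmd->offset_high = (offset >> 16) & 0xFF;	/* bits 23:16 */
+}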
+
+/* NVM Module_Type ID, needed offset and read_len for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/**
+ * struct ixgbe_aci_desc - Admin Command (AC) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the
+ * Admin Command Interface (ACI).
+ * The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Command Interface (ACI) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ } params;
+};
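+
+/* Descriptor preparation sketch. The accompanying driver sources are
+ * expected to provide their own helper; the hypothetical one below only
+ * illustrates the pattern: a direct command zeroes the descriptor, sets
+ * the opcode and requests a solicited interrupt, while an indirect
+ * command would additionally set datalen, the BUF flag and the buffer
+ * address words.
+ */
+static inline void
+ixgbe_aci_example_fill_direct_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+}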
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+	/* Refer to the IXGBE_PHY_TYPE_* defines for the bit definitions */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+	/* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]
+	 * defines in struct ixgbe_aci_cmd_get_phy_caps_data.
+	 */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+};
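+
+/* The lock serializes all submissions over the single ACI so that
+ * last_status always refers to the most recently completed command.
+ * A sketch of the intended pattern (the register names are the ACI
+ * registers defined above; the exact polling logic belongs to the
+ * driver sources, not this header):
+ *
+ *	mutex_lock(&aci->lock);
+ *	... write the descriptor to IXGBE_PF_HIDA()/IXGBE_PF_HIBA() ...
+ *	... set IXGBE_PF_HICR_C and poll until the bit clears ...
+ *	aci->last_status = <retval taken from the descriptor>;
+ *	mutex_unlock(&aci->lock);
+ */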
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 81e1df83f136..1fc821fb351a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -133,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index d9a8cf018d3b..277ceaf8a793 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
@@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
 * semaphore, -EIO when command fails or -EINVAL when incorrect
 * params are passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -3505,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -4047,7 +4048,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..3e4092f8da3e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..a9bc96f6399d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..4384e892f967 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -418,6 +418,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..6442f115a262 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1080,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -4693,6 +4695,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..da7a72ecce7a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..2d791bc26ae4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -54,6 +54,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 660dff5426e7..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,7 +90,6 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
@@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev)
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
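For reference, devm_platform_ioremap_resource() folds the get-resource, request-region and ioremap steps removed above into one managed call that is undone automatically on driver detach; it also logs its own failure reason, which is why callers rarely need a dev_err of their own. A hedged probe sketch, with foo_probe() and its driver purely hypothetical:

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* one call: platform_get_resource() + request_mem_region() +
	 * ioremap(), all devres-managed */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base); /* e.g. -EBUSY or -EINVAL */

	/* ... use base for MMIO accesses ... */
	return 0;
}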
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1fb285fa0bdb..4fe121b9f94b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -284,8 +284,12 @@
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_0_TS (0xff << 8)
#define MVNETA_LPI_CTRL_1 0x2cc4
-#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1)
+#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2)
+#define MVNETA_LPI_CTRL_1_TW (0xfff << 4)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc
@@ -541,10 +545,6 @@ struct mvneta_port {
struct mvneta_bm_pool *pool_short;
int bm_win_id;
- bool eee_enabled;
- bool eee_active;
- bool tx_lpi_enabled;
-
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -3960,23 +3960,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4071,7 +4078,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
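The new pcs_inband_caps() hook answers a per-interface capability query up front instead of vetoing configurations in pcs_validate() after the fact. A hedged sketch of how the returned bitmask reads, assuming the LINK_INBAND_* flags are individual bits as the combined return above implies:

/* decode a pcs_inband_caps() result; caps of 0 means "no information" */
static const char *inband_caps_str(unsigned int caps)
{
	if (caps == LINK_INBAND_ENABLE)
		return "in-band AN required";   /* the 802.3z case above */
	if (caps & LINK_INBAND_ENABLE)
		return "in-band AN optional";   /* SGMII/QSGMII/RGMII */
	if (caps & LINK_INBAND_DISABLE)
		return "in-band AN not supported";
	return "unknown";
}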
@@ -4206,18 +4213,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
-{
- u32 lpi_ctl1;
-
- lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
- if (enable)
- lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
- else
- lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
- mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
-}
-
static void mvneta_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -4233,9 +4228,6 @@ static void mvneta_mac_link_down(struct phylink_config *config,
val |= MVNETA_GMAC_FORCE_LINK_DOWN;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
-
- pp->eee_active = false;
- mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
@@ -4284,11 +4276,56 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
mvneta_port_up(pp);
+}
+
+static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 lpi1;
+
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
+ MVNETA_LPI_CTRL_1_REQUEST_FORCE |
+ MVNETA_LPI_CTRL_1_MANUAL_MODE);
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+}
+
+static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 ts, tw, lpi0, lpi1, status;
- if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, false) >= 0;
- mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ status = mvreg_read(pp, MVNETA_GMAC_STATUS);
+ if (status & MVNETA_GMAC_SPEED_1000) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
}
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
+
+ /* Configure tw and enable LPI generation */
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
+ lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+
+ return 0;
}
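The ts/tw scaling above is easy to sanity-check in isolation. A small self-contained C model; the 250us input matches the lpi_timer_default chosen later in this patch, and the helper name is made up:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* mirror the ts/tw derivation: timer is the LPI idle time in microseconds */
static void lpi_params(unsigned int timer, int is_1g,
		       unsigned int *ts, unsigned int *tw)
{
	if (is_1g) {
		*tw = 17;                      /* 16.5us wake time, rounded up */
		*ts = timer;                   /* 1us resolution */
	} else {
		*tw = 3;                       /* 30us wake time in 10us units */
		*ts = DIV_ROUND_UP(timer, 10); /* 10us resolution */
	}
	if (*ts > 255)                         /* TS is an 8-bit field */
		*ts = 255;
}

int main(void)
{
	unsigned int ts, tw;

	lpi_params(250, 1, &ts, &tw);
	printf("1G:   ts=%u tw=%u\n", ts, tw);   /* ts=250 tw=17 */
	lpi_params(250, 0, &ts, &tw);
	printf("100M: ts=%u tw=%u\n", ts, tw);   /* ts=25  tw=3 */
	return 0;
}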
static const struct phylink_mac_ops mvneta_phylink_ops = {
@@ -4298,6 +4335,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = {
.mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
+ .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
@@ -4385,6 +4424,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4392,6 +4432,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
*/
if (pp->is_stopped) {
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
return 0;
}
netif_tx_stop_all_queues(pp->dev);
@@ -4411,7 +4452,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4432,6 +4473,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
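netdev_lock()/napi_enable_locked() is the pattern for paths that already need the netdev instance lock: plain napi_enable() would try to take the same lock again. A hedged kernel-style sketch, not compilable outside the tree, with example_reenable_napi() invented:

static void example_reenable_napi(struct napi_struct *napi)
{
	netdev_lock(napi->dev);
	/* ... state changes that must happen under the instance lock ... */
	napi_enable_locked(napi);   /* caller already holds the netdev lock */
	netdev_unlock(napi->dev);
}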
@@ -5099,14 +5142,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
-
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
-
- eee->eee_enabled = pp->eee_enabled;
- eee->eee_active = pp->eee_active;
- eee->tx_lpi_enabled = pp->tx_lpi_enabled;
- eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
return phylink_ethtool_get_eee(pp->phylink, eee);
}
@@ -5115,7 +5150,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register.
@@ -5123,16 +5157,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
- lpi_ctl0 &= ~(0xff << 8);
- lpi_ctl0 |= eee->tx_lpi_timer << 8;
- mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
-
- pp->eee_enabled = eee->eee_enabled;
- pp->tx_lpi_enabled = eee->tx_lpi_enabled;
-
- mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
-
return phylink_ethtool_set_eee(pp->phylink, eee);
}
@@ -5446,6 +5470,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
+ /* Ensure LPI is disabled */
+ mvneta_mac_disable_tx_lpi(&pp->phylink_config);
+
return 0;
}
@@ -5537,6 +5564,13 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
+ /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
+ pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ pp->phylink_config.lpi_timer_default = 250;
+ pp->phylink_config.eee_enabled_default = true;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 9e02e4367bec..44fe9b68d1c2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -481,6 +481,11 @@
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+#define MVPP2_GMAC_LPI_CTRL0 0xc0
+#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8)
+#define MVPP2_GMAC_LPI_CTRL1 0xc4
+#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0)
+#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4)
/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..8ed83fb98862 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 571631a30320..3c7b43712d25 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5757,6 +5757,28 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+static int mvpp2_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5802,6 +5824,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
+ .get_eee = mvpp2_ethtool_get_eee,
+ .set_eee = mvpp2_ethtool_set_eee,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6188,6 +6212,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6224,22 +6249,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6343,7 +6376,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
@@ -6665,6 +6698,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
mvpp2_port_disable(port);
}
+static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
+ MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
+}
+
+static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 ts, tw, lpi1, status;
+
+ status = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
+ }
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
+ MVPP2_GMAC_LPI_CTRL0_TS_MASK,
+ FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
+
+ lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ /* Configure tw */
+ lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
+
+ /* Enable LPI generation */
+ writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
+ port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ return 0;
+}
+
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
@@ -6672,6 +6754,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
+ .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
+ .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
};
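The read-modify-write of the TW field above relies on u32_replace_bits()/FIELD_PREP() semantics: insert a value into a masked field without disturbing neighbouring bits. A self-contained C model with local stand-ins for the kernel macros:

#include <stdio.h>
#include <stdint.h>

/* local stand-ins for GENMASK() and u32_replace_bits() */
#define GENMASK32(h, l) ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
#define TW_MASK  GENMASK32(15, 4)   /* like MVPP2_GMAC_LPI_CTRL1_TW_MASK */
#define REQ_EN   (1u << 0)          /* like MVPP2_GMAC_LPI_CTRL1_REQ_EN */

static uint32_t replace_bits(uint32_t old, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask); /* field's lowest set bit */

	return (old & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t lpi1 = 0x0000abcd;   /* pretend register readback */

	lpi1 = replace_bits(lpi1, 17, TW_MASK);   /* set TW = 17 */
	lpi1 |= REQ_EN;                           /* enable LPI requests */
	printf("LPI_CTRL1 = 0x%08x\n", lpi1);     /* prints 0x0000011d */
	return 0;
}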
/* Work-around for ACPI */
@@ -6901,9 +6985,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -6950,6 +7033,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.mac_capabilities =
MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.lpi_interfaces);
+
+ port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;
+
+ /* Setup EEE. Choose 250us idle. */
+ port->phylink_config.lpi_timer_default = 250;
+ port->phylink_config.eee_enabled_default = true;
+
if (port->priv->global_tx_fc)
port->phylink_config.mac_capabilities |=
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
@@ -7024,6 +7116,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_port_pcpu;
}
port->phylink = phylink;
+
+ mvpp2_mac_disable_tx_lpi(&port->phylink_config);
} else {
dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 4f4d58189118..a88c006ea65b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
iface_rx_stats,
iface_tx_stats);
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- tx_busy_errors += iq->stats.tx_busy;
-
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_packets;
@@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 549436efc204..0a679e95196f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
return 0;
}
@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
wmb();
/* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
@@ -991,37 +991,24 @@ dma_map_err:
static void octep_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
struct octep_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
- if (netif_running(netdev))
- octep_ctrl_net_get_if_stats(oct,
- OCTEP_CTRL_NET_INVALID_VFID,
- &oct->iface_rx_stats,
- &oct->iface_tx_stats);
-
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->collisions = oct->iface_tx_stats.xscol;
- stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}
/**
@@ -1137,6 +1124,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1170,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..81ac4267811c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
@@ -257,11 +258,17 @@ struct octep_device {
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
+ /* Per iq stats */
+ struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_oq *oq[OCTEP_MAX_OQ];
+ /* Per oq stats */
+ struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
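The motivation for moving the counters out of the queue structs is that queue structs are freed and reallocated on reconfiguration, wiping the statistics; an array sized for the maximum queue count in the long-lived device struct survives, and each queue keeps only a pointer into it. A self-contained C model of the arrangement, with all names here illustrative:

#include <stdio.h>
#include <stdint.h>

#define MAX_QUEUES 4

struct iq_stats { uint64_t instr_completed, bytes_sent; };

struct device_ctx {
	/* lives as long as the device, so counters survive queue teardown */
	struct iq_stats stats_iq[MAX_QUEUES];
};

struct iq {
	int q_no;
	struct iq_stats *stats;  /* points into device_ctx.stats_iq[q_no] */
};

int main(void)
{
	struct device_ctx dev = { 0 };
	struct iq q = { .q_no = 0, .stats = &dev.stats_iq[0] };
	uint64_t total = 0;

	q.stats->instr_completed += 10;  /* datapath updates via the pointer */

	/* the ethtool/get_stats64 path sums the device-side array directly */
	for (int i = 0; i < MAX_QUEUES; i++)
		total += dev.stats_iq[i].instr_completed;
	printf("completed: %llu\n", (unsigned long long)total);
	return 0;
}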
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..ebecdd29f3bd 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 8af75cb37c3e..82b6b19e76b4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
if (!skb) {
octep_oq_drop_rx(oq, buff_info,
&read_idx, &desc_used);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
continue;
}
skb_reserve(skb, data_offset);
@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 3b08e2d560dc..b4696c93d0e6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -186,8 +186,8 @@ struct octep_oq {
*/
u8 __iomem *pkts_sent_reg;
- /* Statistics for this OQ. */
- struct octep_oq_stats stats;
+ /* Pointer to statistics for this OQ. */
+ struct octep_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 06851b78aa28..08ee90013fef 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 875a2c34091f..58fb39dda977 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -170,8 +170,8 @@ struct octep_iq {
*/
u16 flush_index;
- /* Statistics for this input queue. */
- struct octep_iq_stats stats;
+ /* Pointer to statistics for this input queue. */
+ struct octep_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index 7b21439a315f..d60441928ba9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_busy_errors += iq->stats.tx_busy;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_alloc_errors;
@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 7e6771c9cdbb..18c922dd5fc6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
* caused queues to get re-enabled after
* being stopped
*/
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
@@ -731,7 +731,7 @@ ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
@@ -786,27 +786,16 @@ static void octep_vf_get_stats64(struct net_device *netdev,
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- if (!octep_vf_get_if_stats(oct)) {
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
- oct->iface_rx_stats.err_pkts;
- stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
- stats->tx_dropped = oct->iface_tx_stats.dropped;
- }
}
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 5769f62545cd..1a352f41f823 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -246,11 +246,17 @@ struct octep_vf_device {
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ /* Per iq stats */
+ struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ /* Per oq stats */
+ struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index 82821bc28634..d70c8be3cfc4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
index fe46838b5200..9e296b7d7e34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -187,7 +187,7 @@ struct octep_vf_oq {
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
- struct octep_vf_oq_stats stats;
+ struct octep_vf_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
index 47a5c054fdb6..8180e5ce3d7e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
index f338b975103c..1cede90e3a5f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -129,7 +129,7 @@ struct octep_vf_iq {
u16 flush_index;
/* Statistics for this input queue. */
- struct octep_vf_iq_stats stats;
+ struct octep_vf_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 62c07407eb94..005ca8a056c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
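hrtimer_setup() merges the hrtimer_init() call and the open-coded .function assignment into one step, which also lets the core verify that a callback is always set. A hedged kernel-style sketch; struct my_ctx and the callback are invented, and this compiles only in-tree:

static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
{
	/* ... periodic work ... */
	return HRTIMER_NORESTART;
}

static void example_timer_start(struct my_ctx *ctx)
{
	/* init + callback wiring in one call */
	hrtimer_setup(&ctx->timer, my_timer_cb, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&ctx->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}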
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 148144f5b61d..a1f9ec03c2ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -917,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -945,32 +944,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -996,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -2704,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2854,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2933,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
- int err, lmac_id;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
-
- return err;
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
@@ -2980,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
}
}
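The debugfs aux-number API stores a small integer alongside the usual private pointer at file-creation time, so handlers no longer have to reverse-engineer identifiers from the parent directory name as the removed strrchr()/kstrtoint() code did. A hedged sketch of the pairing; my_show() and my_fops are invented:

/* creation: keep cgx as private data, stash lmac_id as the aux number */
debugfs_create_file_aux_num("stats", 0600, parent, cgx, lmac_id, &my_fops);

/* handler: recover the integer directly from the file */
static int my_show(struct seq_file *s, void *unused)
{
	int lmac_id = debugfs_get_aux_num(s->file);

	/* ... print stats for this lmac ... */
	return 0;
}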
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5d1e2bddd58..613655fcd34f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* The CPT channel for a given link channel is assumed
+ * to be the link channel with BIT(11) set.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
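The channel aliasing used by the new CPT handlers is a one-liner worth seeing with concrete numbers. A self-contained C check of nix_get_channel() as defined above:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* same computation as nix_get_channel() in the hunk above */
static uint16_t nix_get_channel(uint16_t chan, int cpt_link)
{
	return cpt_link ? chan | BIT(11) : chan;
}

int main(void)
{
	uint16_t chan = 0x100;

	printf("nix: 0x%03x cpt: 0x%03x\n",
	       nix_get_channel(chan, 0), nix_get_channel(chan, 1));
	/* prints: nix: 0x100 cpt: 0x900 */
	return 0;
}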
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index dbc971266865..cb6513ab35e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -15,5 +15,6 @@ rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..09a5b5268205
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
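The cpt_state tri-state above is a small compare-and-swap state machine: claim the hardware only when AVAILABLE, wait out another claimant, bail if it has gone UNAVAILABLE. A self-contained C11 model of the same logic; names are shortened, and the kernel version additionally mdelay()s between retries:

#include <stdatomic.h>
#include <stdio.h>

enum cpt_state { HW_AVAILABLE, HW_IN_USE, HW_UNAVAILABLE };

static int try_claim(_Atomic int *state)
{
	for (;;) {
		int expected = HW_AVAILABLE;

		/* AVAILABLE -> IN_USE, atomically; on failure 'expected'
		 * holds the state we actually observed */
		if (atomic_compare_exchange_strong(state, &expected,
						   HW_IN_USE))
			return 1;
		if (expected == HW_UNAVAILABLE)
			return 0;            /* device is gone, give up */
		/* else: another context holds it IN_USE, retry */
	}
}

int main(void)
{
	_Atomic int state = HW_AVAILABLE;

	printf("claim #1: %d\n", try_claim(&state)); /* 1: claimed */
	atomic_store(&state, HW_UNAVAILABLE);
	printf("claim #2: %d\n", try_claim(&state)); /* 0: unavailable */
	return 0;
}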
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for the instruction queue to become empty, i.e. until
+ * the CPT_LF_INPROG.INFLIGHT count reads zero.
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | × 40 × 64 bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
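+
+/* Illustrative sizing for the allocation above, assuming the macro values
+ * from cn10k_ipsec.h and an OTX2_ALIGN of 128 bytes:
+ * CN10K_CPT_SIZE_DIV40 = 8200 / 40 = 205, so
+ * CN10K_CPT_INST_QLEN_BYTES = (205 * 40 * 64) + (320 * 64) = 545280
+ * CN10K_CPT_INST_GRP_QLEN_BYTES = (205 + 8) * 16 = 3408
+ * CN10K_CPT_Q_FC_LEN = 128
+ * giving iq->size = 545280 + 128 + 3408 + 128 = 548944 bytes.
+ */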
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] number of LMTSTs - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except the first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* The target address for the LMTST flush tells the HW how many
+ * 128-bit words are present:
+ * tar_addr[6:4] = size of the first LMTST - 1, in units of 128 bits.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
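+
+/* Worked example of the encoding above: for a full 64-byte CPT_INST_S,
+ * (size / 16) - 1 = 3, so tar_addr[6:4] = 3 and the single LMTST
+ * carries four 128-bit words of instruction data.
+ */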
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context within the SA, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size, rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
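+
+/* Illustrative arithmetic, assuming the cn10k_tx_sa_s layout in
+ * cn10k_ipsec.h: hw_ctx sits at word W31, so its byte offset is
+ * 31 * 8 = 248 and both helpers above return 248 / 8 = 31, i.e. the
+ * hardware pushes words W0-W30 of the SA to the engine.
+ */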
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size in units of 128-byte blocks, rounded up */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound ESP, transport/tunnel mode, AES-GCM */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
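+
+/* A state that passes the checks above can be created with, for example
+ * (an illustrative iproute2 command; addresses, SPI and key are made up,
+ * and the 36-byte hex key is a 32-byte AES-256 key plus 4-byte salt):
+ *   ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
+ *      proto esp spi 0x100 mode tunnel \
+ *      aead 'rfc4106(gcm(aes))' 0x<36-byte-hex-key> 128 \
+ *      offload dev eth0 dir out
+ */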
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(netdev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable static branch when first SA setup */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(netdev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no SAs remain, update netdev features for a potential
+ * change in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+ .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload is supported only on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = ALIGN(sizeof(struct cn10k_tx_sa_s), OTX2_ALIGN);
+ pf->ipsec.sa_size = sa_size;
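+
+ /* For example, with the SA layout in cn10k_ipsec.h and an OTX2_ALIGN
+ * of 128: sizeof(struct cn10k_tx_sa_s) is 296 bytes (words W0-W36),
+ * so sa_size rounds up to 384, i.e. three 128-byte blocks
+ * (ctx_size = 3 in cn10k_outb_prepare_sa()).
+ */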
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
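+
+/* For example, a 1500-byte IPv4 datagram reports tot_len = 1500 directly,
+ * while an IPv6 packet with payload_len = 1460 yields 1460 + 40 = 1500;
+ * both counts exclude the Ethernet header.
+ */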
+
+/* Prepare the CPT and NIX SQE scatter/gather subdescriptor structures.
+ * NIX and CPT SG entries are the same size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdc always starts at a 16-byte boundary, so a
+ * SG holding 2 or 3 segments occupies a full 32 bytes and
+ * only a single-segment SG advances the offset by 16 bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
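+
+/* A sketch of the offset bookkeeping above: with num_segs = 4, the first
+ * SG subdescriptor takes 3 segments and advances *offset by 8 + 24 = 32
+ * bytes, and the second takes the remaining segment and advances it by
+ * 8 + 8 = 16 bytes; each NIX SG is mirrored into the CPT gather list
+ * placed one SQE size in front of the NIX SQE.
+ */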
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Disable L3/L4 checksum offload */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
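+
+/* Example encoding (a sketch): for ESP over IPv4, auth_offset = 20 and
+ * iv_offset = 20 + sizeof(struct ip_esp_hdr) = 28, so
+ * param1 = 0x3 | (28 << 8) = 0x1c03.
+ */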
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for IPSEC offload enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0) {
+ if (netif_msg_tx_err(pf))
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
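+
+ /* Worked example (a sketch): num_segs = 4 needs ceil(4 / 3) = 2 CPT
+ * gather headers, so gthr_size = 2 is packed into dptr[63:60] above
+ * the IOVA of the shared SG area.
+ */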
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..9965df0faa3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_BASE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif /* CN10K_IPSEC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..f3b9daffaec3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 523ecb798a7a..2b49bfec7869 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,12 +10,19 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
+
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
@@ -964,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for the NIX SQE (which includes the NIX SG) and the
+ * CPT SG. NIX and CPT SG entries are the same size; the CPT SG region
+ * is allocated with the NIX SQE size for base address alignment.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -1722,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1947,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write permission for ipsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 566848663fea..65814e3dc93f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -30,6 +30,7 @@
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -40,6 +41,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
@@ -55,6 +57,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -448,6 +453,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -499,9 +505,9 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -522,6 +528,9 @@ struct otx2_nic {
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
@@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Default disable backpressure on NIX-CPT */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e310f99b1736..e1dde93e8af8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,7 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Default disable backpressure on NIX-CPT */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3206,6 +3220,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..224cef938927 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -26,12 +27,25 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
+
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
{
@@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
@@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
@@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index e1db5f961877..d23810963fdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -101,6 +101,9 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 839fc77c11b2..e926c6ce96cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->bus->number, n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -730,6 +735,8 @@ err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 22ca6ee9665e..440a4c42b405 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 25bf6ec44289..a1bada9eaaf6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs)
- skge->debugfs = debugfs_rename(skge_debug,
- skge->debugfs,
- skge_debug, dev->name);
+ debugfs_change_name(skge->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 988fa28cfb5f..d7121c836508 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4494,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (sky2->debugfs) {
- sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
- sky2_debug, dev->name);
- }
+ debugfs_change_name(sky2->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 6c683a12d5aa..09f448f29124 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -15,6 +15,7 @@
#include <linux/u64_stats_sync.h>
#include <net/dsa.h>
#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
@@ -23,8 +24,12 @@
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
#define AIROHA_FE_MC_MAX_VLAN_PORT 16
#define AIROHA_NUM_TX_IRQ 2
@@ -40,6 +45,9 @@
#define PSE_RSV_PAGES 128
#define PSE_QUEUE_RSV_PAGES 64
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
/* FE */
#define PSE_BASE 0x0100
#define CSR_IFC_BASE 0x0200
@@ -258,11 +266,11 @@
#define REG_GDM3_FWD_CFG GDM3_BASE
#define GDM3_PAD_EN_MASK BIT(28)
-#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
+#define REG_GDM4_FWD_CFG GDM4_BASE
#define GDM4_PAD_EN_MASK BIT(28)
#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
@@ -541,9 +549,24 @@
#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
#define REG_LMGR_INIT_CFG 0x1000
#define LMGR_INIT_START BIT(31)
#define LMGR_SRAM_MODE_MASK BIT(30)
@@ -565,13 +588,34 @@
#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
#define REG_PSE_BUF_USAGE_CFG 0x1028
#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
#define REG_GLB_TRTCM_CFG 0x1080
#define GLB_TRTCM_EN_MASK BIT(31)
#define GLB_TRTCM_MODE_MASK BIT(30)
@@ -720,6 +764,40 @@ enum {
FE_PSE_PORT_DROP = 0xf,
};
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
struct airoha_queue_entry {
union {
void *buf;
@@ -810,6 +888,12 @@ struct airoha_gdm_port {
int id;
struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
};
struct airoha_eth {
@@ -1789,6 +1873,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -1955,6 +2043,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
@@ -2005,6 +2114,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
return 0;
}
@@ -2138,17 +2248,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&qdma->q_rx[i].napi);
netif_napi_del(&qdma->q_rx[i].napi);
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
if (qdma->q_rx[i].page_pool)
page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- napi_disable(&qdma->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
netif_napi_del(&qdma->q_tx_irq[i].napi);
- }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -2173,6 +2280,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
}
}
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
@@ -2413,21 +2535,44 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For dsa device select QoS channel according to the dsa user port
+ * index, rely on port id otherwise. Select QoS queue based on the
+ * skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
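+
+/* e.g. (an illustrative mapping, assuming 4 QoS channels x 8 queues):
+ * a non-DSA port with id 1 sending an skb with priority 5 selects
+ * channel 1 and queue (5 - 1) % 8 = 4, i.e. tx queue 1 * 8 + 4 = 12.
+ */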
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
+ u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
void *data = skb->data;
+ int i, qid;
u16 index;
u8 fport;
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2597,13 +2742,399 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0, nwrr, qidx;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* The EN7581 SoC supports a fixed QoS band priority layout where WRR
+ * queues have lower priority than SP ones,
+ * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ nwrr = p->bands - nstrict;
+ qidx = nstrict && nwrr ? nstrict : 0;
+ for (i = 1; i <= p->bands; i++) {
+ if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
+ return -EINVAL;
+
+ qidx = i == nwrr ? 0 : qidx + 1;
+ }
+
+ for (i = 0; i < nwrr; i++)
+ w[i] = p->weights[nstrict + i];
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
+
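The priomap check above accepts exactly one band layout per (bands, nstrict) pair. A small sketch that replays the validation loop and prints the only accepted layout (AIROHA_NUM_QOS_QUEUES assumed to be 8):

#include <stdio.h>

#define NUM_QOS_QUEUES 8 /* assumption: stands in for AIROHA_NUM_QOS_QUEUES */

/* Replays the validation loop: WRR bands come first in priority order,
 * followed by the SP bands.
 */
static void expected_priomap(int bands, int nstrict)
{
	int nwrr = bands - nstrict;
	int qidx = nstrict && nwrr ? nstrict : 0;
	int i;

	printf("bands=%d nstrict=%d:", bands, nstrict);
	for (i = 1; i <= bands; i++) {
		printf(" priomap[%d]=%d", i % NUM_QOS_QUEUES, qidx);
		qidx = i == nwrr ? 0 : qidx + 1;
	}
	printf("\n");
}

int main(void)
{
	expected_priomap(4, 0); /* pure WRR */
	expected_priomap(4, 2); /* two SP bands on top of two WRR bands */
	return 0;
}

For four bands with two strict ones, this prints priomap[1]=2 priomap[2]=3 priomap[3]=0 priomap[4]=1, i.e. the WRR bands must sit below the SP bands.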
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is the default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
+
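The token-rate programming above is plain fixed-point arithmetic: the configured rate (apparently kbit/s, per the /* kbps */ note in the HTB code below) is split into an integer number of hardware units plus a binary fraction scaled by the width of the fraction field. A sketch with an assumed 16-bit fraction field; the field width and tick value are assumptions, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define RATE_FRAC_BITS 16 /* assumption: width of TRTCM_TOKEN_RATE_FRACTION_MASK */

/* Split rate_val into an integer multiple of 'unit' plus a binary
 * fraction, mirroring the FIELD_PREP() arithmetic in
 * airoha_qdma_set_trtcm_token_bucket().
 */
static void token_rate(uint32_t rate_val, uint32_t tick, int pkt_mode)
{
	uint32_t unit = (pkt_mode ? 1000000 : 8000) / tick;
	uint32_t rate = rate_val / unit;
	uint32_t frac = (uint32_t)(((uint64_t)(rate_val % unit) << RATE_FRAC_BITS) / unit);

	printf("rate_val=%u unit=%u -> rate=%u frac=%u/%u\n",
	       rate_val, unit, rate, frac, 1u << RATE_FRAC_BITS);
}

int main(void)
{
	token_rate(100300, 16, 0); /* ~100 Mbit/s in kbit/s, assumed 16-cycle tick */
	return 0;
}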
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
};
static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2640,7 +3171,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
- AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
if (!dev) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -2653,12 +3185,18 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
dev->watchdog_timeo = 5 * HZ;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
err = of_get_ethdev_address(np, dev);
if (err) {
if (err == -EPROBE_DEFER)
@@ -2738,7 +3276,7 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_hw_init(pdev, eth);
if (err)
- goto error;
+ goto error_hw_cleanup;
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
@@ -2753,13 +3291,16 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_alloc_gdm_port(eth, np);
if (err) {
of_node_put(np);
- goto error;
+ goto error_napi_stop;
}
}
return 0;
-error:
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
@@ -2780,8 +3321,10 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
airoha_hw_cleanup(&eth->qdma[i]);
+ }
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index be3d0876c521..568bbe5f83f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
#
# Netdev basic
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -123,6 +124,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
steering/sws/dr_ste_v0.o \
steering/sws/dr_ste_v1.o \
steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
steering/sws/dr_cmd.o \
steering/sws/dr_fw.o \
steering/sws/dr_action.o \
@@ -150,8 +152,9 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/bwc.o \
steering/hws/debug.o \
steering/hws/vport.o \
- steering/hws/bwc_complex.o
-
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..a2cf3e79693d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..0f5d7ea8956f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index 62b3f7ff5562..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..a4263137fef5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
+
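The index above packs the three supported protocols into the (tcp, gre) flag pair — udp is (0, 0), tcp is (1, 0), gre is (0, 1) — so the six combinations map to the slots 0..NUM_MATCHERS-1. A quick check of the encoding:

#include <stdio.h>
#include <stdbool.h>

static unsigned int get_matcher_idx(bool ipv4, bool tcp, bool gre)
{
	return ipv4 * 3 + tcp * 2 + gre;
}

int main(void)
{
	const char *l3[] = { "ipv6", "ipv4" };
	int ipv4;

	/* prints indexes 0..5, one per {ipv4, ipv6} x {udp, gre, tcp} pair */
	for (ipv4 = 0; ipv4 < 2; ipv4++) {
		printf("%s/udp -> %u\n", l3[ipv4], get_matcher_idx(ipv4, false, false));
		printf("%s/gre -> %u\n", l3[ipv4], get_matcher_idx(ipv4, false, true));
		printf("%s/tcp -> %u\n", l3[ipv4], get_matcher_idx(ipv4, true, false));
	}
	return 0;
}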
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+ /* Retry under the lock, as the matcher might already have been created by another CPU. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
+
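The get path above is the usual kernel lazy-init pattern: a lock-free refcount_inc_not_zero() fast path, then a mutex-serialized slow path that re-checks before creating. A userspace analogue with C11 atomics (names hypothetical, object creation elided):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct lazy_obj {
	atomic_int ref;         /* 0 means "not created yet" */
	pthread_mutex_t lock;
};

/* Equivalent of refcount_inc_not_zero(): take a reference only if one
 * already exists.
 */
static bool ref_inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	return false;
}

static struct lazy_obj *lazy_get(struct lazy_obj *o)
{
	if (ref_inc_not_zero(&o->ref))
		return o;       /* fast path, no lock taken */

	pthread_mutex_lock(&o->lock);
	/* Re-check under the lock: another thread may have created the
	 * object while we were waiting.
	 */
	if (!ref_inc_not_zero(&o->ref)) {
		/* we won the race: create the object here, then publish it */
		atomic_store(&o->ref, 1);
	}
	pthread_mutex_unlock(&o->lock);

	return o;
}

int main(void)
{
	struct lazy_obj o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	lazy_get(&o);   /* first call creates, ref becomes 1 */
	lazy_get(&o);   /* second call takes the fast path, ref becomes 2 */
	return 0;
}

In the driver, the creation step is the mlx5hws_bwc_matcher_create() call, and the put side pairs with refcount_dec_and_mutex_lock() before destroying.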
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ /* Modify header is special; it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 45737d039252..0c97c5899904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a84ebac2f011..a065e8fafb1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2065,10 +2065,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2421,3 +2430,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..501709ac310f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -94,25 +94,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -121,11 +110,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
@@ -724,6 +717,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +767,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -797,7 +799,6 @@ err_xfrm:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,12 +807,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..e7b64679f121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
rx->ft.status = ft;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ dest[1].counter = rx->fc->cnt;
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -1762,7 +1760,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1835,7 +1833,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1913,7 +1911,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0ec17c276bdd..8fcaee381b0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2087,7 +2087,7 @@ static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
struct mlx5e_xdpsq *xdpsq;
int err;
- xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
if (!xdpsq)
return ERR_PTR(-ENOMEM);
@@ -3946,6 +3946,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -5131,11 +5132,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..9ba99609999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2b229b6226c6..dfb079e59d85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..7dd1dc3f77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..5f647358a05c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b7c843446e1..823c1ba456cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -564,6 +564,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
return err;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport,
+ vport->qos.sched_node->max_rate,
+ vport->qos.sched_node->bw_share);
return 0;
}
@@ -591,8 +594,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, parent, extack);
- if (err)
+ if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 06076dd9ec64..20cc01ceee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -721,7 +721,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..ae20c061e0fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -718,7 +719,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..22dc23d991d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_destroy(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
@@ -2709,6 +2716,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
@@ -3528,35 +3536,42 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Moving to %s is not supported when eswitch offloads enabled.",
+ value);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
@@ -3568,6 +3583,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3580,10 +3597,17 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
- else
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3658,8 +3682,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -4003,6 +4026,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index bad2df0715ec..20837e526679 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -38,6 +38,7 @@
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -64,6 +65,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -74,6 +76,7 @@ struct mlx5_pkt_reformat {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -126,7 +129,8 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
@@ -190,7 +194,10 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
unsigned int max_fte;
@@ -247,7 +254,10 @@ struct fs_fte_dup {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
@@ -280,7 +290,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -293,7 +306,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -303,6 +319,42 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..492775d3d193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,33 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +54,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +438,141 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
-
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which
+ * was already acquired using its counter id and bulk data.
+ *
+ * @counter_id: id of the already-acquired counter
+ * @offset: offset of the counter from the bulk's base id
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
-
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
-
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
-}
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
}
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ counter->bulk = fc_bulk;
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
-
- mutex_lock(&fc_pool->pool_lock);
-
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
-
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
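A short usage sketch, assuming a caller (such as the hardware-steering code this series enables) that already owns a firmware counter range; the function name and the example ids are hypothetical, the API is the one added above.

static int example_wrap_hw_counter(void)
{
        /* hypothetical values: counter id 0x108 sits at offset 8 of a
         * 512-counter bulk, so the wrapper records base id 0x100 */
        struct mlx5_fc *fc = mlx5_fc_local_create(0x108, 8, 512);

        if (IS_ERR(fc))
                return PTR_ERR(fc);

        /* ... use fc as a flow destination via dest->counter ... */

        mlx5_fc_local_destroy(fc);      /* frees only the local wrapper */
        return 0;
}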
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index;
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TODO: could return void; the error only preserves the original code's warning on double release */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
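A minimal consumer sketch for the new generic pool, not part of the patch and mirroring how fs_counters.c wires mlx5_fc_pool_ops; every my_/example_ name and the bulk size are hypothetical. Note the ops contract visible in fs_pool.c: bulk_create reports failure with NULL, not ERR_PTR.

struct my_bulk {
        struct mlx5_fs_bulk fs_bulk;    /* generic part must be embedded */
        u32 base_id;
};

static struct mlx5_fs_bulk *my_bulk_create(struct mlx5_core_dev *dev,
                                           void *pool_ctx)
{
        struct my_bulk *b = kvzalloc(sizeof(*b), GFP_KERNEL);

        if (!b || mlx5_fs_bulk_init(dev, &b->fs_bulk, 64)) {
                kvfree(b);              /* kvfree(NULL) is a no-op */
                return NULL;            /* ops contract: NULL on failure */
        }
        return &b->fs_bulk;
}

static int my_bulk_destroy(struct mlx5_core_dev *dev,
                           struct mlx5_fs_bulk *fs_bulk)
{
        mlx5_fs_bulk_cleanup(fs_bulk);
        kvfree(container_of(fs_bulk, struct my_bulk, fs_bulk));
        return 0;
}

static void my_update_threshold(struct mlx5_fs_pool *pool)
{
        pool->threshold = pool->used_units / 10;        /* arbitrary policy */
}

static const struct mlx5_fs_pool_ops my_ops = {
        .bulk_create = my_bulk_create,
        .bulk_destroy = my_bulk_destroy,
        .update_threshold = my_update_threshold,
};

static void example_use(struct mlx5_core_dev *dev)
{
        struct mlx5_fs_pool_index idx = {};
        struct mlx5_fs_pool pool;

        mlx5_fs_pool_init(&pool, dev, &my_ops, NULL);
        if (!mlx5_fs_pool_acquire_index(&pool, &idx)) {
                /* idx.fs_bulk + idx.index name one unit; map back with
                 * container_of(idx.fs_bulk, struct my_bulk, fs_bulk) */
                mlx5_fs_pool_release_index(&pool, &idx);
        }
        mlx5_fs_pool_cleanup(&pool);
}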
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 76ad46bf477d..b253d1673398 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -281,6 +281,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..2691d88cdee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -175,7 +175,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 7f68468c2e75..ed2ba272946b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -43,10 +43,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i; /* slot index unused; only counting populated entries */
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
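To make the seq-versus-slot distinction concrete (a hedged illustration, not part of the patch): with only pf[1] and pf[3] populated, the helpers above behave as follows.

/*
 *   mlx5_get_next_ldev_func(ldev, 0)        == 1
 *   mlx5_lag_get_dev_index_by_seq(ldev, 0)  == 1
 *   mlx5_lag_get_dev_index_by_seq(ldev, 1)  == 3
 *   mlx5_lag_get_dev_index_by_seq(ldev, 2)  == -ENOENT
 *   mlx5_lag_num_devs(ldev)                 == 2
 */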
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
@@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,11 +513,15 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
if (ndev)
dev_hold(ndev);
@@ -476,16 +535,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -523,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -544,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct lag_func *dev0;
- if (mode == MLX5_LAG_MODE_MPESW)
+ if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
return;
+ dev0 = &ldev->pf[first_idx];
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
if (ldev->ports > 2)
@@ -593,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -608,9 +685,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -620,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -661,17 +743,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -709,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -754,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -761,30 +855,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -795,10 +888,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -812,10 +902,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -828,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -842,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -854,17 +946,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -876,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -892,11 +988,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -917,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -937,7 +1038,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -956,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -966,7 +1067,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -977,7 +1078,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
@@ -1010,12 +1111,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1068,7 +1166,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1083,8 +1181,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1234,15 +1337,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1257,7 +1357,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1267,13 +1367,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1281,16 +1378,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1398,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1406,17 +1500,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1467,12 +1577,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1555,7 +1666,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1594,13 +1705,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1621,10 +1732,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1643,8 +1754,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..c2f256bb2bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
@@ -103,7 +104,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +120,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
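For reference, a hedged expansion of the two iterators defined above (not part of the patch): both visit only populated pf[] slots, forward from start_index or backward from start_index down to end_index. The wrapper function name is hypothetical.

static void example_visit(struct mlx5_lag *ldev)
{
        int i;

        /* forward: equivalent of mlx5_ldev_for_each(i, 0, ldev) */
        for (i = mlx5_get_next_ldev_func(ldev, 0);
             i < MLX5_MAX_PORTS;
             i = mlx5_get_next_ldev_func(ldev, i + 1)) {
                /* ldev->pf[i].dev is non-NULL for every visited i */
        }

        /* backward: equivalent of
         * mlx5_ldev_for_each_reverse(i, MLX5_MAX_PORTS - 1, 0, ldev) */
        for (i = mlx5_get_pre_ldev_func(ldev, MLX5_MAX_PORTS - 1, 0);
             i >= 0;
             i = mlx5_get_pre_ldev_func(ldev, i - 1, 0)) {
                /* same guarantee, reverse order */
        }
}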
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
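
Editorial note: in the FIB_EVENT_NH_DEL path above, dev_idx ends up as the zero-based ordinal of the nexthop's device among the populated slots, and the peer-port affinity is then derived arithmetically. A worked example of the expression:

	/* dev_idx = (dev_idx + 1) % 2 + 1:
	 *   ordinal 0 (first port's nexthop removed)  -> (0 + 1) % 2 + 1 == 2,
	 *     i.e. MLX5_LAG_P2_AFFINITY (traffic moves to the second port);
	 *   ordinal 1 (second port's nexthop removed) -> (1 + 1) % 2 + 1 == 1,
	 *     i.e. MLX5_LAG_P1_AFFINITY (traffic moves to the first port).
	 */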
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..1770297a112e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -68,20 +68,23 @@ err_metadata:
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
+ dev0 = ldev->pf[idx].dev;
if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
return -EOPNOTSUPP;
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
@@ -98,7 +101,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +115,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..bde79cac33a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+			}
goto destroy_fg;
}
}
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
- for (i = 0; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return;
+
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +570,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
@@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
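
Editorial note: port_sel.c addresses its rule array as a ports-by-buckets matrix stored row-major, which is the idx = i * ldev->buckets + j computation seen throughout the hunks above. A trivial sketch of the indexing (illustrative helper, not part of the driver):

	/* rules[] holds ports * buckets entries; (port, bucket) maps to: */
	static inline int rule_index(int port, int bucket, int buckets)
	{
		return port * buckets + bucket;
	}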
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4822d01123b4..d61a1a9297c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -517,6 +516,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
return 0;
}
@@ -852,6 +852,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -1052,12 +1053,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -1172,6 +1172,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -1188,7 +1191,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
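
Editorial note: the clock.c hunks above move the timestamp-overflow check from a self-rescheduling delayed_work onto the PTP core's auxiliary worker: the .do_aux_work callback returns the delay in jiffies until its next invocation, and ptp_schedule_worker() kicks the first run (here after clock init and again at adjfine time). A minimal sketch of the pattern, with illustrative names (my_overflow_check, my_ptp_info):

	static long my_overflow_check(struct ptp_clock_info *info)
	{
		/* ... refresh the timecounter so the cyclecounter never wraps ... */
		return HZ;	/* run again in one second; a negative value stops rescheduling */
	}

	static const struct ptp_clock_info my_ptp_info = {
		.owner	     = THIS_MODULE,
		.do_aux_work = my_overflow_check,
	};

	/* after ptp_clock_register(): ptp_schedule_worker(ptp, 0); */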
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..711d14dea248 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -196,6 +196,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
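
Editorial note: mlx5_get_flow_namespace() reports failure by returning NULL rather than an encoded error pointer, so the new check above must translate that into ERR_PTR() form for callers that test with IS_ERR(). The shape of the guard:

	ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
	if (!ns)				/* NULL, not ERR_PTR, on failure */
		return ERR_PTR(-EOPNOTSUPP);	/* callers check with IS_ERR() */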
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..762d55ba9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
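
Editorial note: the macsec_fs.c hunks track a flow-API change in which counter destinations carry the struct mlx5_fc object directly instead of its firmware id. A before/after sketch (fc stands for an already-allocated struct mlx5_fc *):

	/* old API: resolve the HW counter id at rule-creation time */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(fc);

	/* new API: hand the counter object itself to the flow layer */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;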
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..ec956c4bcebd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -664,6 +668,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +949,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
@@ -1788,6 +1794,7 @@ static const int types[] = {
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
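
Editorial note: the WARN_ON added to mlx5_core_get_caps_mode() above turns a NULL dereference into an early -EINVAL when a capability type is queried without a backing buffer; the companion hunk extends types[] with MLX5_CAP_SHAMPO, which is exactly the registration the warning's comment demands. In sketch form:

	/* Every MLX5_CAP_* that will ever be queried must appear in types[]
	 * so mlx5_hca_caps_alloc() allocates its buffer up front:
	 */
	if (WARN_ON(!dev->caps.hca[cap_type]))
		return -EINVAL;	/* cap_type was never added to types[] */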
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 7db9cab9bedf..2c5f850c31f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
@@ -572,7 +579,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
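
Editorial note: irq_pool_alloc() can legitimately be called with name == NULL (the PF pool case); the kernel's vsnprintf prints "(null)" for a NULL %s argument, so the hunk above substitutes the pool's effective default label instead, keeping the debug line meaningful:

	const char *label = name ? name : "mlx5_pcif_pool";	/* PF pool default */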
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e393391966e0..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar
return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
case TSAR_ELEMENT_TSAR_TYPE_ETS:
return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
}
return false;
@@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie
return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
}
return false;
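
Editorial note: both helpers above answer "does firmware support scheduling element or TSAR type X in hierarchy Y" by testing one bit of a queried capability mask; the hunks simply teach them two new types (TC_ARB TSARs and RATE_LIMIT elements). A hypothetical caller would gate element creation like this (illustrative):

	if (!mlx5_qos_tsar_type_supported(dev, TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
					  SCHEDULING_HIERARCHY_E_SWITCH))
		return -EOPNOTSUPP;	/* firmware lacks TC arbiter TSARs */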
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b96909fbeb12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
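
Editorial note: the one-line fix above restores symmetric unwinding in mlx5_sf_add(): the SF is registered in the function-id table before the eswitch call, so the error path must erase that entry before freeing, or a later lookup could hit freed memory. The resulting unwind order:

	esw_err:
		mlx5_sf_function_id_erase(table, sf);	/* undo the earlier insert */
		mlx5_sf_free(table, sf);		/* only then release the SF */
		return err;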
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index a897cdc60fdb..b5332c54d4fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -11,31 +11,29 @@
/* This is the longest supported action sequence for FDB table:
* DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
*/
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
- [MLX5HWS_TABLE_TYPE_FDB] = {
- BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
- BIT(MLX5HWS_ACTION_TYP_CTR),
- BIT(MLX5HWS_ACTION_TYP_TAG),
- BIT(MLX5HWS_ACTION_TYP_ASO_METER),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_TBL) |
- BIT(MLX5HWS_ACTION_TYP_VPORT) |
- BIT(MLX5HWS_ACTION_TYP_DROP) |
- BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
- BIT(MLX5HWS_ACTION_TYP_RANGE) |
- BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
- BIT(MLX5HWS_ACTION_TYP_LAST),
- },
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
};
static const char * const mlx5hws_action_type_str[] = {
@@ -83,8 +81,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
int ret;
mutex_lock(&ctx->ctrl_lock);
- if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
mutex_unlock(&ctx->ctrl_lock);
return 0;
}
@@ -124,8 +122,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
goto free_shared_stc;
}
- ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
mutex_unlock(&ctx->ctrl_lock);
@@ -178,16 +176,16 @@ static void hws_action_put_shared_stc(struct mlx5hws_action *action,
}
mutex_lock(&ctx->ctrl_lock);
- if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
mutex_unlock(&ctx->ctrl_lock);
return;
}
- shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+ shared_stc = ctx->common_res.shared_stc[stc_type];
mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
kfree(shared_stc);
- ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ ctx->common_res.shared_stc[stc_type] = NULL;
mutex_unlock(&ctx->ctrl_lock);
}
@@ -206,10 +204,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
enum mlx5hws_action_type *user_actions,
enum mlx5hws_table_type table_type)
{
- const u32 *order_arr = action_order_arr[table_type];
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
u8 order_idx = 0;
u8 user_idx = 0;
- bool valid_combo;
if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
mlx5hws_err(ctx, "Invalid table_type %d", table_type);
@@ -321,8 +319,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
__must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
bool use_fixup;
u32 obj_0_id;
int ret;
@@ -387,8 +385,8 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
struct mlx5hws_pool_chunk *stc)
__must_hold(&ctx->ctrl_lock)
{
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
u32 obj_id;
/* Modify the STC not to point to an object */
@@ -473,6 +471,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
break;
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
attr->dest_table_id = obj_id;
@@ -561,7 +560,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto out_err;
}
@@ -585,7 +584,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
mutex_unlock(&ctx->ctrl_lock);
}
@@ -1639,8 +1638,8 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
- default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -1731,7 +1730,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
ste_attr.used_id_rtc_0 = &used_rtc_0_id;
ste_attr.used_id_rtc_1 = &used_rtc_1_id;
- common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+ common_res = &ctx->common_res;
/* init an empty match STE which will always hit */
ste_attr.wqe_ctrl = &wqe_ctrl;
@@ -1750,7 +1749,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
- htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+ htonl(hit_ft_action->stc.offset);
wqe_data_arr = (__force __be32 *)&range_wqe_data;
@@ -1843,7 +1842,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto error_unlock;
@@ -1875,7 +1874,50 @@ struct mlx5hws_action *
mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
u32 sampler_id, u32 flags)
{
- mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
return NULL;
}
@@ -1914,6 +1956,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action)
}
kfree(action->dest_array.dest_list);
break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5HWS_ACTION_TYP_MODIFY_HDR:
shared_arg = false;
@@ -1970,8 +2017,8 @@ __must_hold(&ctx->ctrl_lock)
struct mlx5hws_action_default_stc *default_stc;
int ret;
- if (ctx->common_res[tbl_type].default_stc) {
- ctx->common_res[tbl_type].default_stc->refcount++;
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
return 0;
}
@@ -2023,8 +2070,8 @@ __must_hold(&ctx->ctrl_lock)
goto free_nop_dw7;
}
- ctx->common_res[tbl_type].default_stc = default_stc;
- ctx->common_res[tbl_type].default_stc->refcount++;
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
return 0;
@@ -2046,9 +2093,7 @@ __must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_action_default_stc *default_stc;
- default_stc = ctx->common_res[tbl_type].default_stc;
-
- default_stc = ctx->common_res[tbl_type].default_stc;
+ default_stc = ctx->common_res.default_stc;
if (--default_stc->refcount)
return;
@@ -2058,7 +2103,7 @@ __must_hold(&ctx->ctrl_lock)
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
kfree(default_stc);
- ctx->common_res[tbl_type].default_stc = NULL;
+ ctx->common_res.default_stc = NULL;
}
static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
@@ -2150,8 +2195,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
{
struct mlx5hws_action *action = apply->rule_action[action_idx].action;
- apply->wqe_ctrl->stc_ix[stc_idx] =
- htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
}
static void
@@ -2181,7 +2225,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
rule_action = &apply->rule_action[setter->idx_double];
action = rule_action->action;
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2240,7 +2284,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2272,7 +2316,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2434,6 +2478,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
case MLX5HWS_ACTION_TYP_DROP:
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
case MLX5HWS_ACTION_TYP_VPORT:
case MLX5HWS_ACTION_TYP_MISS:
/* Hit action */
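
Editorial note: mlx5hws_action_create_flow_sampler() above goes from a stub error to a real implementation: it builds a one-FTE forwarding table (the "fw island") whose destination is the sampler object, then points the action's STC at that table. A hedged usage sketch (ctx and sampler_id are assumed to come from the caller):

	struct mlx5hws_action *act;

	act = mlx5hws_action_create_flow_sampler(ctx, sampler_id,
						 MLX5HWS_ACTION_FLAG_HWS_FDB |
						 MLX5HWS_ACTION_FLAG_SHARED);
	if (!act)
		return -EINVAL;	/* only the FDB + SHARED flag combination is accepted */
	/* ... use as a hit action in rules; release with mlx5hws_action_destroy(act) */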
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index e8f562c31826..64b76075f7f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc {
struct mlx5hws_pool_chunk nop_dw6;
struct mlx5hws_pool_chunk nop_dw7;
struct mlx5hws_pool_chunk default_hit;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_action_shared_stc {
struct mlx5hws_pool_chunk stc_chunk;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_actions_apply_data {
@@ -124,7 +124,7 @@ struct mlx5hws_action {
struct mlx5hws_context *ctx;
union {
struct {
- struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool_chunk stc;
union {
struct {
u32 pat_id;
@@ -166,6 +166,9 @@ struct mlx5hws_action {
struct mlx5hws_cmd_set_fte_dest *dest_list;
} dest_array;
struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
u8 type;
u8 start_anchor;
u8 end_anchor;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index baacf662c0ab..3dbd4efa21a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -152,6 +152,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
+ atomic_set(&bwc_matcher->num_of_rules, 0);
+
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
*/
@@ -199,10 +201,12 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (bwc_matcher->num_of_rules)
+ u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+
+ if (num_of_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
"BWC matcher destroy: matcher still has %d rules\n",
- bwc_matcher->num_of_rules);
+ num_of_rules);
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -215,6 +219,8 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
u32 *pending_rules,
bool drain)
{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
@@ -250,6 +256,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
}
got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
}
return err;
@@ -309,7 +320,7 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules++;
+ atomic_inc(&bwc_matcher->num_of_rules);
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
@@ -318,7 +329,7 @@ static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules--;
+ atomic_dec(&bwc_matcher->num_of_rules);
list_del_init(&bwc_rule->list_node);
}
@@ -334,22 +345,21 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_attr *rule_attr)
{
struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_flow_op_result completion;
+ u32 expected_completions = 1;
int ret;
ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
if (unlikely(ret))
return ret;
- do {
- ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
- } while (ret != 1);
-
- if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
- (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
- bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
- mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
- completion.status, bwc_rule->rule->status);
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
return -EINVAL;
}
@@ -458,8 +468,22 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
- return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
- caps->ste_alloc_log_max - 1;
+ /* check the match RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ /* check the action RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP +
+ ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ return false;
}
static bool
@@ -615,8 +639,12 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
&pending_rules[i], false);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
goto free_bwc_rules;
+ }
}
}
} while (!all_done);
@@ -629,8 +657,11 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
ret = hws_bwc_queue_poll(ctx, queue_id,
&pending_rules[i], true);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n", ret);
goto free_bwc_rules;
+ }
}
}
@@ -704,7 +735,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* Need to check again if we really need rehash.
* If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ atomic_read(&bwc_matcher->num_of_rules)))
return 0;
/* Now we're done all the checking - do the rehash:
@@ -797,7 +829,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
}
/* check if number of rules require rehash */
- num_of_rules = bwc_matcher->num_of_rules;
+ num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
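
Editorial note: hws_bwc_queue_poll() above gains a watchdog built from the standard jiffies idiom: compute the deadline once on entry, then declare -ETIMEDOUT only when completions stop arriving past it (time_after() handles jiffies wraparound). The bare pattern, with placeholder helpers (poll_once, got_completion, done are illustrative):

	unsigned long timeout = jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC);

	while (!done) {
		got_completion = poll_once();
		if (!got_completion && time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* queue is stuck; stop spinning */
	}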
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 0b745968e21e..47f7ed141553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -8,18 +8,26 @@
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, the next attempt to attach a new AT
+ * will result in creation of a new matcher, with all the rules
+ * moved to this new matcher.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
#define MLX5HWS_BWC_MAX_ACTS 16
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u32 priority;
u8 num_of_at;
- u16 priority;
u8 size_log;
- u32 num_of_rules; /* atomically accessed */
+ atomic_t num_of_rules;
struct list_head *rules;
};
@@ -60,9 +68,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
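
Editorial worked example for mlx5hws_bwc_queues() above: one queue is always reserved for control and the remainder split evenly between regular HWS queues and BWC queues, and a context without BWC support now reports zero instead of claiming half the queues:

	/* ctx->queues == 9:  1 control + 4 regular HWS + 4 BWC -> (9 - 1) / 2 == 4
	 * !mlx5hws_context_bwc_supported(ctx):                 -> 0
	 */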
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index c00c138c3366..487e75476b0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -257,6 +257,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
dest->ext_reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -359,7 +365,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
- default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
if (!default_miss_tbl) {
pr_warn("HWS: no flow table ID for default miss\n");
return;
@@ -622,12 +628,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
u32 pd,
u32 *arg_id)
{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
void *attr;
int ret;
- attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
@@ -635,8 +641,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
MLX5_SET(general_obj_in_cmd_hdr,
attr, op_param.create.log_obj_range, log_obj_range);
- attr = MLX5_ADDR_OF(create_arg_in, in, arg);
- MLX5_SET(arg, attr, access_pd, pd);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (ret) {
@@ -812,7 +818,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_packet_reformat_create_attr *attr,
u32 *reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
size_t insz, cmd_data_sz, cmd_total_sz;
void *prctx;
void *pdata;
@@ -845,7 +851,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
goto out;
}
- *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
out:
kfree(in);
return ret;
@@ -854,13 +860,13 @@ out:
int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
int ret;
- MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_in, in,
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
packet_reformat_id, reformat_id);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -889,73 +895,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
return ret;
}
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
- u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
- void *key;
- int ret;
-
- MLX5_SET(allow_other_vhca_access_in,
- in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_type_to_be_accessed, attr->obj_type);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_id_to_be_accessed, attr->obj_id);
-
- key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
- memcpy(key, attr->access_key, sizeof(attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret)
- mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id)
-{
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
- void *attr;
- void *key;
- int ret;
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, obj_type, alias_attr->obj_type);
- MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
- MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
- MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
- key = MLX5_ADDR_OF(alias_context, attr, access_key);
- memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
- goto out;
- }
-
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id)
-{
- return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_generate_wqe_attr *attr,
struct mlx5_cqe64 *ret_cqe)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 434f62b0904e..610c63d81ad9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -63,7 +63,7 @@ struct mlx5hws_cmd_forward_tbl {
u8 type;
u32 ft_id;
u32 fg_id;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_cmd_rtc_create_attr {
@@ -334,14 +334,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_forward_tbl *tbl);
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id);
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id);
-
int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
@@ -352,9 +344,6 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
enum mlx5hws_table_type type,
struct mlx5hws_cmd_ft_modify_attr *ft_attr);
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
-
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index fd48b05e91e0..9cda2774fd64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
struct mlx5hws_pool_attr pool_attr = {0};
u8 max_log_sz;
int ret;
- int i;
ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
if (ret)
@@ -39,23 +38,17 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- pool_attr.table_type = i;
- ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
- if (!ctx->stc_pool[i]) {
- mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
- ret = -ENOMEM;
- goto free_stc_pools;
- }
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
}
return 0;
-free_stc_pools:
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
-
+uninit_cache:
mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -64,12 +57,8 @@ uninit_pat_cache:
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
- int i;
-
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
- }
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
mlx5hws_definer_uninit_cache(ctx->definer_cache);
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -161,8 +150,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+	/* Context has support for the backward-compatible API,
+	 * and does not support the native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 47f5cc8de73f..38c3647444ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -37,8 +38,8 @@ struct mlx5hws_context {
struct mlx5_core_dev *mdev;
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
- struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
- struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
struct mutex ctrl_lock; /* control lock to protect the whole context */
@@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
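A minimal sketch, assuming a hypothetical caller, of how the two flag helpers above are meant to be consumed: with this change BWC support is always set and native support is opt-in, so entry points gate on the helpers rather than on attr->bwc.

static int example_rule_insert(struct mlx5hws_context *ctx, bool native_api)
{
	if (native_api && !mlx5hws_context_native_supported(ctx))
		return -EOPNOTSUPP;
	if (!native_api && !mlx5hws_context_bwc_supported(ctx))
		return -EOPNOTSUPP;
	/* ... proceed with rule insertion on the chosen API ... */
	return 0;
}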
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 5b200b4bc1a8..696275fd0ce2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -148,8 +148,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste[0].ste;
- ste_pool = matcher->action_ste[0].pool;
+ ste = &matcher->action_ste.ste;
+ ste_pool = matcher->action_ste.pool;
if (ste_pool) {
ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
@@ -171,10 +171,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
return ret;
seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste[0].rtc_0_id,
- (int)ste_0_id,
- matcher->action_ste[0].rtc_1_id,
- (int)ste_1_id,
+ matcher->action_ste.rtc_0_id, (int)ste_0_id,
+ matcher->action_ste.rtc_1_id, (int)ste_1_id,
0,
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -368,9 +366,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex
static int hws_debug_dump_context_stc_resource(struct seq_file *f,
struct mlx5hws_context *ctx,
- u32 tbl_type,
struct mlx5hws_pool_resource *resource)
{
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
seq_printf(f, "%d,0x%llx,%u,%u\n",
MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
HWS_PTR_TO_ID(ctx),
@@ -382,31 +381,22 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f,
static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
{
- struct mlx5hws_pool *stc_pool;
- u32 table_type;
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
int ret;
- int i;
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- stc_pool = ctx->stc_pool[i];
- table_type = MLX5HWS_TABLE_TYPE_BASE + i;
-
- if (!stc_pool)
- continue;
+ if (!stc_pool)
+ return 0;
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
- if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->mirror_resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 8fe96eb76baf..10ece7df1cfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -70,7 +70,7 @@
u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
- (bit_off) % BITS_IN_DW, second_dw_mask); \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
} else { \
_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
} \
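The sign bug this hunk fixes is easiest to see in isolation: in C, `%` keeps the sign of the dividend, so a negative bit offset produced a negative bit position for the second dword. A standalone demo (illustrative only, not part of the patch):

#include <stdio.h>

#define BITS_IN_DW 32

int main(void)
{
	int bit_off = -3;	/* field spills into the following dword */

	/* old expression: C's % preserves the dividend's sign */
	printf("%d\n", bit_off % BITS_IN_DW);			/* prints -3 */
	/* fixed expression: bias by one dword, result is non-negative */
	printf("%d\n", (bit_off + BITS_IN_DW) % BITS_IN_DW);	/* prints 29 */
	return 0;
}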
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 9432d5084def..5c1a2086efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -785,7 +785,7 @@ struct mlx5hws_definer_cache {
struct mlx5hws_definer_cache_item {
struct mlx5hws_definer definer;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
struct list_head list_node;
};
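A minimal sketch of the locking contract the new comment documents, using a hypothetical release helper (not part of the patch): the refcount is only read or written while holding the context's ctrl_lock.

static void example_definer_cache_put(struct mlx5hws_context *ctx,
				      struct mlx5hws_definer_cache_item *item)
{
	mutex_lock(&ctx->ctrl_lock);
	if (--item->refcount == 0) {
		list_del(&item->list_node);
		kfree(item);
	}
	mutex_unlock(&ctx->ctrl_lock);
}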
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..f34bbbbba1c2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
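As a worked example of the sizing above: a host with 8 online CPUs opens 8 send queues of MLX5HWS_CTX_QUEUE_SIZE = 256 entries each, while any host with 16 or more CPUs is capped at MLX5HWS_CTX_MAX_NUM_OF_QUEUES = 16 queues.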
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
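The function above uses a lock-free get-or-create idiom: xa_insert() fails with -EBUSY when another thread stored an entry first, in which case the local copy is destroyed and the stored entry is reloaded. A generic sketch of the same pattern, with a hypothetical object type and constructor:

#include <linux/xarray.h>
#include <linux/slab.h>

struct obj { int dummy; };	/* placeholder payload for the sketch */

static struct obj *example_get_or_create(struct xarray *xa, unsigned long idx)
{
	struct obj *o;
	int err;

load:
	o = xa_load(xa, idx);
	if (o)
		return o;

	o = kzalloc(sizeof(*o), GFP_KERNEL);	/* hypothetical constructor */
	if (!o)
		return NULL;

	err = xa_insert(xa, idx, o, GFP_KERNEL);
	if (err) {
		kfree(o);
		if (err == -EBUSY)
			/* another thread stored an entry first; reuse it */
			goto load;
		return NULL;
	}
	return o;
}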
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests, bool ignore_flow_level,
+ u32 flow_source)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ ignore_flow_level,
+ flow_source, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+{
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(*fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits the FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the FTE's hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with up to 32 destinations, which means we need
+ * to account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table: 32 + 1 + 1 = 34.
+ * 32 is a SW limitation on the array size; the HWS limitation is 16M STEs
+ * per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+ /* The order of the actions must be kept; only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
+ bool ignore_flow_level;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ ignore_flow_level =
+ !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions,
+ ignore_flow_level,
+ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+ * once rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
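As a concrete example of the assembly above: an FTE with COUNT | DROP and a non-zero flow tag yields rule actions in the order counter, tag, drop destination, last. The counter and the trailing "last" action are also recorded in hws_fs_actions so they are released when the rule is deleted, while the shared drop and tag actions are not.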
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ /* Packet reformat on termination table not supported yet */
+ if (fte->act_dests.action.action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EOPNOTSUPP;
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+ }
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
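A worked example of the anchor translation above: the FW-style request (param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START, param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto) = 12, size = sizeof(struct vlan_hdr) = 4) is matched by mlx5_fs_get_action_remove_header_vlan() and served by the cached action, which expresses the same removal as 4 bytes at offset 0 from the VLAN anchor.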
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
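A minimal sketch, assuming a hypothetical selection helper in fs_core: a root namespace in HMFS steering mode would be wired to this HWS command set, with FW-terminating tables already falling back to the FW commands inside the callbacks above.

static const struct mlx5_flow_cmds *
example_select_cmds(struct mlx5_core_dev *dev,
		    enum mlx5_flow_steering_mode mode)
{
	/* hypothetical policy: prefer HWS when the device supports it */
	if (mode == MLX5_FLOW_STEERING_MODE_HMFS && mlx5_fs_hws_is_supported(dev))
		return mlx5_fs_cmd_get_hws_cmds();
	return mlx5_fs_cmd_get_fw_cmds();
}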
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..cbddb72d4362
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif /* _MLX5_FS_HWS_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..2ae4ac62b0e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
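Worked example of the threshold update: a pool with used_units = 1,000,000 gets a threshold of 100,000 (one tenth of the used units), while used_units = 5,000,000 would be capped at MLX5_FS_HWS_POOL_MAX_THRESHOLD = BIT(18) = 262,144.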
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
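A minimal usage sketch of the acquire/release pair above, assuming a hypothetical caller whose pool was already initialized with mlx5_fs_hws_pr_pool_init():

static int example_use_pr(struct mlx5_fs_pool *pr_pool)
{
	struct mlx5_fs_hws_pr *pr_data;
	struct mlx5hws_action *action;

	pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
	if (IS_ERR_OR_NULL(pr_data))
		return pr_data ? PTR_ERR(pr_data) : -EINVAL;

	/* one bulk-shared action; the per-entry offset selects the slot */
	action = mlx5_fs_hws_pr_get_action(pr_data);
	(void)action;	/* a real caller would program a rule with it */

	mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
	return 0;
}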
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all modify headers were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a modify header that was not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
+
+ fc_bulk_hws = &fc_bulk->hws_data;
+ /* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
+ return fc_bulk_hws->hws_action;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return fc_bulk_hws->hws_action;
+ }
+ fc_bulk_hws->hws_action =
+ mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
+ if (!fc_bulk_hws->hws_action) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return NULL;
+ }
+ refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
+ mutex_unlock(&fc_bulk_hws->lock);
+
+ return fc_bulk_hws->hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
+
+ /* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
+ return;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fc_bulk_hws->hws_action);
+ fc_bulk_hws->hws_action = NULL;
+ mutex_unlock(&fc_bulk_hws->lock);
+}
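+/*
+ * The get/put pair above follows a common kernel pattern: lock-free
+ * fast paths via refcount_inc_not_zero()/refcount_dec_not_one(), with
+ * the mutex taken only for the 0 <-> 1 transitions. A generic sketch
+ * (the struct and the create/destroy hooks are illustrative, not
+ * driver API; <linux/mutex.h> and <linux/refcount.h> are assumed):
+ */
+static void *create_resource(void);		/* assumed constructor */
+static void destroy_resource(void *res);	/* assumed destructor */
+
+struct shared_obj {
+	struct mutex lock;
+	refcount_t refcount;	/* 0 means "not created yet" */
+	void *resource;
+};
+
+static void *shared_obj_get(struct shared_obj *obj)
+{
+	/* Fast path: already created, just bump the count */
+	if (refcount_inc_not_zero(&obj->refcount))
+		return obj->resource;
+
+	mutex_lock(&obj->lock);
+	/* Re-check under the lock: another thread may have created it */
+	if (refcount_inc_not_zero(&obj->refcount)) {
+		mutex_unlock(&obj->lock);
+		return obj->resource;
+	}
+	obj->resource = create_resource();
+	if (obj->resource)
+		refcount_set(&obj->refcount, 1);
+	mutex_unlock(&obj->lock);
+	return obj->resource;
+}
+
+static void shared_obj_put(struct shared_obj *obj)
+{
+	/* Fast path: count stays >= 1, no teardown possible */
+	if (refcount_dec_not_one(&obj->refcount))
+		return;
+
+	mutex_lock(&obj->lock);
+	if (!refcount_dec_and_test(&obj->refcount)) {
+		mutex_unlock(&obj->lock);
+		return;
+	}
+	destroy_resource(obj->resource);
+	obj->resource = NULL;
+	mutex_unlock(&obj->lock);
+}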
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
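+/*
+ * Both bulk structs above end in a flexible array member, so a whole
+ * bulk comes from a single allocation sized with struct_size(). A
+ * minimal sketch (the function name and @n are illustrative):
+ */
+static struct mlx5_fs_hws_pr_bulk *example_alloc_pr_bulk(int n)
+{
+	struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+	/*
+	 * struct_size() computes sizeof(*pr_bulk) +
+	 * n * sizeof(pr_bulk->prs_data[0]) with overflow checking.
+	 */
+	pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, n), GFP_KERNEL);
+	return pr_bulk;
+}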
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 3c8635f286ce..30ccd635b505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -39,7 +39,6 @@
#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
static inline bool is_mem_zero(const u8 *mem, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 1bb3a6f8c3cd..b61864b32053 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
next->match_ste.rtc_0_id,
next->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
}
} else {
ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
}
}
@@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
if (prev_ft_id == tbl->ft_id) {
ret = mlx5hws_table_update_connected_miss_tables(tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
}
}
ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
- goto matcher_reconnect;
+ return ret;
}
return 0;
-
-matcher_reconnect:
- if (list_empty(&tbl->matchers_list) || !prev)
- list_add(&matcher->list_node, &tbl->matchers_list);
- else
- /* insert after prev matcher */
- list_add(&matcher->list_node, &prev->list_node);
-
- return ret;
}
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
@@ -208,7 +200,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
@@ -225,8 +217,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
}
static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
@@ -286,14 +277,20 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = &action_ste->rtc_0_id;
rtc_1_id = &action_ste->rtc_1_id;
ste_pool = action_ste->pool;
ste = &action_ste->ste;
+ /* Action RTC size calculation:
+ * log((max number of rules in matcher) *
+ * (max number of action STEs per rule) *
+ * (2 to support writing new STEs for rule update))
+ */
ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log;
+ attr->table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
rtc_attr.log_size = ste->order;
rtc_attr.log_depth = 0;
rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
@@ -318,8 +315,8 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[tbl->type];
- default_stc = ctx->common_res[tbl->type].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -358,8 +355,7 @@ free_ste:
}
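/*
 * Worked example of the action-RTC sizing above (numbers illustrative):
 * with max_stes = 3 and a matcher of 2^10 rules (sz_row_log = 10):
 *
 *   ilog2(roundup_pow_of_two(3)) = ilog2(4) = 2
 *   ste->order = 2 + 10 + MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT (= 1) = 13
 *
 * i.e. 2^13 = 8192 STE slots: 4 per rule, 1024 rules, doubled so a rule
 * update can write a fresh STE set before the old one is released.
 */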
static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
@@ -375,7 +371,7 @@ static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
ste = &matcher->match_ste.ste;
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = action_ste->rtc_0_id;
rtc_1_id = action_ste->rtc_1_id;
ste_pool = action_ste->pool;
@@ -466,20 +462,13 @@ static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
if (!resize_data)
return -ENOMEM;
- resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
- resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
- resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
- resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
- resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
- src_matcher->action_ste[0].pool :
- NULL;
- resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
- resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
- resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
- resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
- src_matcher->action_ste[1].pool :
- NULL;
+ resize_data->max_stes = src_matcher->action_ste.max_stes;
+
+ resize_data->stc = src_matcher->action_ste.stc;
+ resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id;
+ resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id;
+ resize_data->pool = src_matcher->action_ste.max_stes ?
+ src_matcher->action_ste.pool : NULL;
/* Place the new resized matcher on the dst matcher's list */
list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
@@ -512,49 +501,69 @@ static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
if (resize_data->max_stes) {
mlx5hws_action_free_single_stc(matcher->tbl->ctx,
matcher->tbl->type,
- &resize_data->action_ste[1].stc);
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[0].stc);
+ &resize_data->stc);
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_1_id);
- }
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_0_id);
+ resize_data->rtc_1_id);
+
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_0_id);
- if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
- mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
- mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
- }
+ resize_data->rtc_0_id);
+
+ if (resize_data->pool)
+ mlx5hws_pool_destroy(resize_data->pool);
}
kfree(resize_data);
}
}
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_context *ctx = tbl->ctx;
- int ret;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* No additional STEs are required for the matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste.max_stes = max_stes;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
/* Allocate action STE mempool */
pool_attr.table_type = tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ /* Pool size is similar to action RTC size */
pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log;
+ matcher->attr.table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
hws_matcher_set_pool_attr(&pool_attr, matcher);
action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!action_ste->pool) {
@@ -563,7 +572,7 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
}
/* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
if (ret) {
mlx5hws_err(ctx, "Failed to create action RTC\n");
goto free_ste_pool;
@@ -587,18 +596,18 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
return 0;
free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
free_ste_pool:
mlx5hws_pool_destroy(action_ste->pool);
return ret;
}
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
if (!action_ste->max_stes ||
matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
@@ -606,65 +615,10 @@ static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action
return;
mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
mlx5hws_pool_destroy(action_ste->pool);
}
-static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
-{
- bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
- int i, ret;
-
- if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
- return 0;
-
- for (i = 0; i < matcher->num_of_at; i++) {
- struct mlx5hws_action_template *at = &matcher->at[i];
-
- ret = hws_matcher_check_and_process_at(matcher, at);
- if (ret) {
- mlx5hws_err(ctx, "Invalid at %d", i);
- return ret;
- }
-
- required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- max_stes = max(max_stes, required_stes);
-
- /* Future: Optimize reparse */
- }
-
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste[0].max_stes = max_stes;
- matcher->action_ste[1].max_stes = max_stes;
-
- ret = hws_matcher_bind_at_idx(matcher, 0);
- if (ret)
- return ret;
-
- ret = hws_matcher_bind_at_idx(matcher, 1);
- if (ret)
- goto free_at_0;
-
- return 0;
-
-free_at_0:
- hws_matcher_unbind_at_idx(matcher, 0);
- return ret;
-}
-
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
-{
- hws_matcher_unbind_at_idx(matcher, 1);
- hws_matcher_unbind_at_idx(matcher, 0);
-}
-
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_context *ctx = matcher->tbl->ctx;
@@ -810,7 +764,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
goto unbind_at;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
if (ret)
goto destroy_end_ft;
@@ -822,7 +776,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
unbind_at:
@@ -836,7 +790,7 @@ static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
hws_matcher_destroy_end_ft(matcher);
hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
@@ -970,10 +924,9 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ if (matcher->action_ste.max_stes < required_stes) {
mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes,
- matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ required_stes, matcher->action_ste.max_stes);
return -ENOMEM;
}
@@ -1007,9 +960,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
- GFP_KERNEL);
+ matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
@@ -1035,7 +988,7 @@ free_mt:
static void
hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
{
- kfree(matcher->at);
+ kvfree(matcher->at);
kfree(matcher->mt);
}
@@ -1157,8 +1110,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
- dst_matcher->action_ste[0].max_stes) {
+ if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 81ff487f57be..020de70270c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -18,6 +18,11 @@
/* Required depth of the main large table */
#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+/* Action RTC size multiplier that is required in order
+ * to support rule update for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -52,15 +57,11 @@ struct mlx5hws_matcher_action_ste {
u8 max_stes;
};
-struct mlx5hws_matcher_resize_data_node {
+struct mlx5hws_matcher_resize_data {
struct mlx5hws_pool_chunk stc;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_matcher_resize_data_node action_ste[2];
u8 max_stes;
struct list_head list_node;
};
@@ -78,7 +79,7 @@ struct mlx5hws_matcher {
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste[2];
+ struct mlx5hws_matcher_action_ste action_ste;
struct list_head list_node;
struct list_head resize_data;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..5121951f2778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 06db5e4726ae..d9dc4f2d0dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -344,7 +344,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
- memcpy(wqe_arg, arg_data, wqe_len);
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
send_attr.id = arg_idx++;
mlx5hws_send_engine_post_end(&ctrl, &send_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..8ddb51980044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item {
u8 *data;
u16 num_of_actions;
} mh_data;
- u32 refcount;
+ u32 refcount; /* protected by pattern_cache lock */
struct list_head ptrn_list_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index fed2d913f3b8..50a81d360bb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -183,7 +183,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
*seg = -1;
/* Find the next free place from the buddy array */
- while (*seg == -1) {
+ while (*seg < 0) {
for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
buddy = hws_pool_buddy_get_next_buddy(pool, i,
order,
@@ -194,7 +194,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
}
*seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg != -1)
+ if (*seg >= 0)
goto found;
if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..271490a51b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits {
u8 match_mask[0x160];
};
-struct mlx5_ifc_arg_bits {
- u8 rsvd0[0x88];
- u8 access_pd[0x18];
-};
-
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
@@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_definer_bits definer;
};
-struct mlx5_ifc_create_arg_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_arg_bits arg;
-};
-
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
@@ -479,36 +469,4 @@ enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index e20c67a04203..a27a2d5ffc7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
- struct mlx5hws_send_ste_attr *ste_attr,
- bool is_update)
+ struct mlx5hws_send_ste_attr *ste_attr)
{
if (!mlx5hws_matcher_is_resizable(rule->matcher))
return;
- if (likely(!is_update)) {
+ /* resize_info might already exist (if we're in the update flow) */
+ if (likely(!rule->resize_info)) {
rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
if (unlikely(!rule->resize_info)) {
pr_warn("HWS: resize info isn't allocated for rule\n");
return;
}
-
- rule->resize_info->max_stes =
- rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
- rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
- rule->matcher->action_ste[0].pool :
- NULL;
- rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
- rule->matcher->action_ste[1].pool :
- NULL;
}
memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
@@ -204,15 +195,14 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
{
struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_pool_chunk ste = {0};
int ret;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
if (unlikely(ret)) {
@@ -220,68 +210,29 @@ static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
"Failed to allocate STE for rule actions");
return ret;
}
- rule->action_ste_idx = ste.offset;
+
+ rule->action_ste.pool = matcher->action_ste.pool;
+ rule->action_ste.num_stes = matcher->action_ste.max_stes;
+ rule->action_ste.index = ste.offset;
return 0;
}
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
{
- struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_pool_chunk ste = {0};
- struct mlx5hws_pool *pool;
- u8 max_stes;
-
- if (mlx5hws_matcher_is_resizable(matcher)) {
- /* Free the original action pool if rule was resized */
- max_stes = rule->resize_info->max_stes;
- pool = rule->resize_info->action_ste_pool[action_ste_selector];
- } else {
- max_stes = matcher->action_ste[action_ste_selector].max_stes;
- pool = matcher->action_ste[action_ste_selector].pool;
- }
-
- /* This release is safe only when the rule match part was deleted */
- ste.order = ilog2(roundup_pow_of_two(max_stes));
- ste.offset = rule->action_ste_idx;
-
- mlx5hws_pool_chunk_free(pool, &ste);
-}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
- struct mlx5hws_rule_attr *attr)
-{
- int action_ste_idx;
- int ret;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 0);
- if (unlikely(ret))
- return ret;
-
- action_ste_idx = rule->action_ste_idx;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 1);
- if (unlikely(ret)) {
- hws_rule_free_action_ste_idx(rule, 0);
- return ret;
- }
-
- /* Both pools have to return the same index */
- if (unlikely(rule->action_ste_idx != action_ste_idx)) {
- pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
- return -EINVAL;
- }
+ if (!action_ste->num_stes)
+ return;
- return 0;
-}
+ ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
+ ste.offset = action_ste->index;
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
- if (rule->action_ste_idx > -1) {
- hws_rule_free_action_ste_idx(rule, 1);
- hws_rule_free_action_ste_idx(rule, 0);
- }
+ /* This release is safe only when the rule's match STE was deleted
+ * (when the rule is being deleted) or replaced with a new STE that
+ * no longer points to the old action STEs (when the rule is being
+ * updated).
+ */
+ mlx5hws_pool_chunk_free(action_ste->pool, &ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -298,14 +249,24 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* In update we use these rtc's */
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste_selector = 0;
+
+ rule->action_ste.pool = NULL;
+ rule->action_ste.num_stes = 0;
+ rule->action_ste.index = -1;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
- rule->action_ste_selector = !rule->action_ste_selector;
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
}
+ /* Initialize the old action STE info - shallow-copy action_ste.
+ * In the create flow this sets old_action_ste fields to their initial
+ * values. In the update flow this saves the existing action STE info,
+ * so that it can later be used to free the old STEs.
+ */
+ rule->old_action_ste = rule->action_ste;
+
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->status = MLX5HWS_RULE_STATUS_CREATING;
/* Init default send STE attributes */
ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
@@ -315,8 +276,8 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
- apply->common_res = &ctx->common_res[tbl->type];
- apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->common_res = &ctx->common_res;
+ apply->jump_to_action_stc = matcher->action_ste.stc.offset;
apply->require_dep = 0;
}
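/*
 * Illustrative timeline of action_ste vs. old_action_ste across the two
 * flows handled above (pool P and indexes A, B are placeholders):
 *
 *   create: action_ste = {pool = NULL, num_stes = 0, index = -1}
 *           old_action_ste = action_ste    (nothing to free later)
 *           alloc -> action_ste = {P, 3, A}
 *
 *   update: old_action_ste = action_ste    (= {P, 3, A})
 *           alloc -> action_ste = {P, 3, B}
 *           on completion (UPDATED): free the old chunk A
 */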
@@ -332,8 +293,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule,
rule->rtc_1 = 0;
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->action_ste_selector = 0;
rule->status = MLX5HWS_RULE_STATUS_CREATING;
rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}
@@ -394,21 +353,17 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- if (!is_update) {
- ret = hws_rule_alloc_action_ste(rule, attr);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
- mlx5hws_send_abort_new_dep_wqe(queue);
- return ret;
- }
+ ret = hws_rule_alloc_action_ste(rule);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
}
/* Skip RX/TX based on the dep_wqe init */
- ste_attr.rtc_0 = dep_wqe->rtc_0 ?
- matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
- ste_attr.rtc_1 = dep_wqe->rtc_1 ?
- matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ ste_attr.direct_index = rule->action_ste.index + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -459,7 +414,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (!is_update)
hws_rule_save_delete_info(rule, &ste_attr);
- hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ hws_rule_save_resize_info(rule, &ste_attr);
mlx5hws_send_engine_inc_rule(queue);
if (!attr->burst)
@@ -480,7 +435,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
/* Rule failed now we can safely release action STEs */
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* The rule may have failed during update - release old action STEs as well */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
/* Clear complex tag */
hws_rule_clear_delete_info(rule);
@@ -517,7 +475,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
}
/* Rule is not completed yet */
- if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
return -EBUSY;
/* Rule failed and doesn't require cleanup */
@@ -534,7 +493,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
hws_rule_gen_comp(queue, rule, false,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
mlx5hws_rule_clear_resize_info(rule);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..b5ee94ac449b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -15,6 +15,8 @@ enum mlx5hws_rule_status {
MLX5HWS_RULE_STATUS_UNKNOWN,
MLX5HWS_RULE_STATUS_CREATING,
MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
MLX5HWS_RULE_STATUS_DELETING,
MLX5HWS_RULE_STATUS_DELETED,
MLX5HWS_RULE_STATUS_FAILING,
@@ -41,13 +43,17 @@ struct mlx5hws_rule_match_tag {
};
};
+struct mlx5hws_rule_action_ste_info {
+ struct mlx5hws_pool *pool;
+ int index; /* STE array index */
+ u8 num_stes;
+};
+
struct mlx5hws_rule_resize_info {
- struct mlx5hws_pool *action_ste_pool[2];
u32 rtc_0;
u32 rtc_1;
u32 rule_idx;
u8 state;
- u8 max_stes;
u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
};
@@ -58,18 +64,18 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
+ struct mlx5hws_rule_action_ste_info action_ste;
+ struct mlx5hws_rule_action_ste_info old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
- int action_ste_idx; /* STE array index */
u8 status; /* enum mlx5hws_rule_status */
- u8 action_ste_selector; /* For rule update - which action STE is in use */
u8 pending_wqes;
bool skip_delete; /* For complex rules - another rule with same tag
* still exists, so don't actually delete this rule.
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 883b4ed30892..cb6abc4ab7df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -377,17 +377,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
*status = MLX5HWS_FLOW_OP_ERROR;
} else {
- /* Increase the status, this only works on good flow as the enum
- * is arrange it away creating -> created -> deleting -> deleted
+ /* Increase the status; this only works on the good flow, as
+ * the enum is arranged this way:
+ * - creating -> created
+ * - updating -> updated
+ * - deleting -> deleted
*/
priv->rule->status++;
*status = MLX5HWS_FLOW_OP_SUCCESS;
- /* Rule was deleted now we can safely release action STEs
- * and clear resize info
- */
if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
- mlx5hws_rule_free_action_ste(priv->rule);
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
}
}
}
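/*
 * The "rule->status++" above relies on the enum order added in rule.h,
 * where every in-flight state is immediately followed by its completed
 * state (numbering assumes UNKNOWN = 0 and no explicit values):
 *
 *   CREATING (1) -> CREATED (2)
 *   UPDATING (3) -> UPDATED (4)
 *   DELETING (5) -> DELETED (6)
 */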
@@ -633,6 +641,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ MLX5_SET(sqc, sqc, non_wire, 1);
ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
@@ -896,15 +905,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -1005,7 +1017,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1021,7 +1033,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+ /* If the native API isn't supported, skip the unused native queues:
+ * initialize only the BWC queues and the control queue.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..f833092235c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 9576e02d00c3..ab1297531232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -37,6 +37,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
}
static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
@@ -48,8 +49,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return 0;
- if (ctx->common_res[tbl_type].default_miss) {
- ctx->common_res[tbl_type].default_miss->refcount++;
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
return 0;
}
@@ -70,29 +71,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
return -EINVAL;
}
- /* ctx->ctrl_lock must be held here */
- ctx->common_res[tbl_type].default_miss = default_miss;
- ctx->common_res[tbl_type].default_miss->refcount++;
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
return 0;
}
/* Called under ctx->ctrl_lock */
static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_forward_tbl *default_miss;
struct mlx5hws_context *ctx = tbl->ctx;
- u8 tbl_type = tbl->type;
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return;
- default_miss = ctx->common_res[tbl_type].default_miss;
+ default_miss = ctx->common_res.default_miss;
if (--default_miss->refcount)
return;
mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
- ctx->common_res[tbl_type].default_miss = NULL;
+ ctx->common_res.default_miss = NULL;
}
static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
@@ -478,15 +478,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
if (old_miss_tbl)
list_del_init(&tbl->default_miss.next);
- old_miss_tbl = tbl->default_miss.miss_tbl;
- if (old_miss_tbl)
- list_del_init(&old_miss_tbl->default_miss.head);
-
if (miss_tbl)
list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
- mutex_unlock(&ctx->ctrl_lock);
- return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 49f22cad92bf..60cb4527588a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,21 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +221,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -397,10 +266,10 @@ static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -417,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -718,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -731,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -925,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -941,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
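The ops wired in above convert the v1 action setters from direct calls into indirect calls through struct mlx5dr_ste_ctx, letting the shared v1 TX/RX builders serve every STE version. A minimal sketch of the resulting dispatch (context selection and call sites simplified, wiring assumed):

/* Sketch only: a later hunk in this patch adds mlx5dr_ste_get_ctx_v3(). */
struct mlx5dr_ste_ctx *ctx = mlx5dr_ste_get_ctx_v3(); /* or the v1/v2 getters */

/* Inside dr_ste_v1_set_actions_tx()/_rx(), hardware-specific setters
 * are now reached through the context instead of being called directly:
 */
ctx->set_push_vlan(last_ste, action, attr->vlans.headers[i]);
ctx->set_pop_vlan(last_ste, action, attr->vlans.count);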
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..591c20c95a6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -7,6 +7,138 @@
#include "dr_types.h"
#include "dr_ste.h"
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
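As the comment above spells out, a lookup type packs the definer mode into its high byte and the definer index into its low byte, which is what lets DR_STE_CALC_DFNR_TYPE pick the _I/_O variant by token pasting. A hypothetical helper showing the same composition numerically:

/* Illustrative only; the driver uses the enum constants directly. */
static inline u16 dr_ste_v1_lu_type(u8 definer_mode, u8 definer_index)
{
	return ((u16)definer_mode << 8) | definer_index;
}
/* dr_ste_v1_lu_type(0x01, 0x05) == DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O (0x0105) */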
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
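These are byte sizes of action slots within an STE: a freshly chained match STE exposes a 12-byte (triple) budget, and the TX/RX builders subtract a single (4B) or double (8B) slot per action until another STE must be added. Illustrative accounting, mirroring the builders:

int action_sz = DR_STE_ACTION_TRIPLE_SZ;   /* 12 bytes free          */
action_sz -= DR_STE_ACTION_DOUBLE_SZ;      /* e.g. push vlan: 4 left */
action_sz -= DR_STE_ACTION_SINGLE_SZ;      /* e.g. pop vlan:  0 left */
/* the next action no longer fits -> chain a new match STE */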
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
@@ -17,11 +149,22 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
u8 length, u32 data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..d0ebaf820d42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -2,167 +2,7 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "dr_ste_v1.h"
-
-enum {
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
- },
-};
+#include "dr_ste_v2.h"
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
/* Builders */
@@ -223,7 +63,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
new file mode 100644
index 000000000000..d853fde49cfc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+#endif /* _DR_STE_V2_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..e468a9ae44e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
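The size and offset fields programmed by these v3 setters are in 2-byte words, hence the recurring / 2 and >> 1 conversions. With assumed sizes:

/* Illustrative values only. */
int encap_hdr_bytes = 50;              /* e.g. outer L2 + IPv4 + UDP + VXLAN */
int size_field = encap_hdr_bytes / 2;  /* the hardware is given 25 words     */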
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the vlan header here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ uint16_t *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using inline actions, 4 bytes at a time. The
+ * header is added in reverse order at the beginning of the packet to
+ * avoid incorrect parsing by the HW. Since the header is 14B or 18B,
+ * an extra two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy bytes one by one to avoid endianness problems */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
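For the common case of a plain 14B Ethernet header and a 4B inline_data field, the arithmetic above works out as follows (a sketch, not driver output):

/* data_sz = 14, inline_data_sz = 4:
 *   padding offset    = 14 % 4 = 2      -> header lands at padded_data + 2
 *   inline iterations = 14 / 4 + 1 = 4  -> 16B written, last dword first
 *   trailing remove   = 1 word (2B)     -> strips the alignment padding
 * Total: 1 remove + 4 inserts + 1 remove = 6 of the 8 budgeted actions.
 */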
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..8007d3f523c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..0bb3724c10c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -160,7 +160,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 46245e0b2462..43c84900369a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -14,7 +14,6 @@
#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
#define MLXFW_FSM_STATE_WAIT_ROUNDS \
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
static const int mlxfw_fsm_state_errno[] = {
[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
}
- comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
MLXFW_ERR_MSG(mlxfw_dev, extack,
"Component size is bigger than limit", -EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4a79c0d7e7ad..2bb2b77351bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -35,6 +35,7 @@
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"
+#include "txheader.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -677,7 +678,7 @@ struct mlxsw_reg_trans {
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
- struct mlxsw_tx_info tx_info;
+ struct mlxsw_txhdr_info txhdr_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
@@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
- trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
- skb->data + mlxsw_core->driver->txhdr_len,
- skb->len - mlxsw_core->driver->txhdr_len);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data,
+ skb->len);
atomic_set(&trans->active, 1);
- err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info);
if (err) {
dev_kfree_skb(skb);
return err;
@@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
- sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ sizeof(u32) + MLXSW_TXHDR_LEN);
if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (mlxsw_core->emad.enable_latency_tlv)
@@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
- trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
- trans->tx_info.is_emad = true;
+ trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->txhdr_info.tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
@@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->type = type;
mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
- mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
@@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
- tx_info);
+ txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..1a871397a6df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -72,7 +72,14 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_txhdr_info {
+ struct mlxsw_tx_info tx_info;
+ bool data;
+ u16 max_fid; /* Used for PTP packets which are sent as data. */
+};
+
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
@@ -94,7 +101,7 @@ struct mlxsw_rx_md_info {
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
@@ -425,8 +432,6 @@ struct mlxsw_driver {
int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
- void (*txhdr_construct)(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
@@ -439,7 +444,6 @@ struct mlxsw_driver {
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
- u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool sdq_supports_cqe_v2;
};
@@ -486,7 +490,7 @@ struct mlxsw_bus {
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 1e150ce1c73a..f9f565c1036d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb31..5b44c931b660 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -21,6 +21,7 @@
#include "cmd.h"
#include "port.h"
#include "resources.h"
+#include "txheader.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
@@ -737,6 +738,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +809,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -869,7 +872,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
continue;
}
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
wqe_counter, q->u.cq.v, cqe);
if (++work_done == budget)
@@ -2093,6 +2096,39 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
+static int mlxsw_pci_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_txhdr_info *txhdr_info)
+{
+ const struct mlxsw_tx_info tx_info = txhdr_info->tx_info;
+ char *txhdr;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ return -ENOMEM;
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+
+ if (unlikely(txhdr_info->data)) {
+ u16 fid = txhdr_info->max_fid + tx_info.local_port - 1;
+
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, fid);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ } else {
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+ }
+
+ return 0;
+}
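For the rare data-path case (PTP packets), the FID is derived from the resource-reported FID count plus the ingress port, as the removed mlxsw_sp_txhdr_ptp_data_construct() did before. With assumed values:

/* Sketch with assumed numbers. */
u16 max_fid    = 4000;                     /* from MLXSW_CORE_RES_GET(core, FID) */
u16 local_port = 5;
u16 fid        = max_fid + local_port - 1; /* 4004, a per-port FID               */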
+
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
@@ -2120,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct mlxsw_pci_queue *q;
@@ -2129,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
int i;
int err;
+ err = mlxsw_pci_txhdr_construct(skb, txhdr_info);
+ if (err)
+ return err;
+
if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
err = skb_linearize(skb);
if (err)
return err;
}
- q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
spin_lock_bh(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
if (!elem_info) {
@@ -2143,7 +2183,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
- mlxsw_skb_cb(skb)->tx_info = *tx_info;
+ mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info;
elem_info->sdq.skb = skb;
wqe = elem_info->elem;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b..d714311fd884 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_rx_is_router
- * Packet is sent from the router. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
-
-/* tx_hdr_fid_valid
- * Indicates if the 'fid' field is valid and should be used for
- * forwarding lookup. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
-
-/* tx_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_fid
- * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
- * set, otherwise calculated based on the packet's VID using VID to FID mapping.
- * Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, bool clear,
u64 *packets, u64 *bytes)
@@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-int
-mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr;
- u16 max_fid;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- err = -ENOMEM;
- goto err_skb_cow_head;
- }
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
- err = -EIO;
- goto err_res_valid;
- }
- max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
-
- txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
- mlxsw_tx_hdr_fid_valid_set(txhdr, true);
- mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
- return 0;
-
-err_res_valid:
-err_skb_cow_head:
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return err;
-}
-
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
unsigned int type;
@@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
return !!ptp_parse_header(skb, type);
}
-static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ /* Resource validation was done as part of PTP init. */
+ u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr_info->data = true;
+ txhdr_info->max_fid = max_fid;
+}
- /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
- * need special handling and cannot be transmitted as regular control
- * packets.
+static struct sk_buff *
+mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
+{
+ /* In some Spectrum ASICs, in order for PTP event packets to have their
+ * correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
*/
- if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
- return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
- mlxsw_sp_port, skb,
- tx_info);
+ if (skb_vlan_tagged(skb))
+ return skb;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
+ return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+}
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
+static struct sk_buff *
+mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
+{
+ if (likely(!mlxsw_sp_skb_requires_ts(skb)))
+ return skb;
+
+ if (!mlxsw_sp->ptp_ops->tx_as_data)
+ return skb;
+
+ /* Special handling for PTP events that require a time stamp and cannot
+ * be transmitted as regular control packets.
+ */
+ mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
+ return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
@@ -721,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sp_port->local_port,
- .is_emad = false,
+ struct mlxsw_txhdr_info txhdr_info = {
+ .tx_info.local_port = mlxsw_sp_port->local_port,
+ .tx_info.is_emad = false,
};
u64 len;
int err;
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
return NETDEV_TX_BUSY;
if (eth_skb_pad(skb)) {
@@ -738,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
- &tx_info);
- if (err)
+ skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
+ }
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
@@ -751,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -2449,7 +2346,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
@@ -2792,7 +2689,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2811,7 +2707,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+ .tx_as_data = true,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
@@ -2830,7 +2726,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3992,11 +3887,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.sdq_supports_cqe_v2 = false,
};
@@ -4030,10 +3923,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4067,10 +3958,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4102,10 +3991,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
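
For PTP-as-data packets the FID is derived from the port rather than looked up; a worked example of the arithmetic used above, with a hypothetical resource value:

	static u16 example_ptp_data_fid(u16 max_fid, u16 local_port)
	{
		/* Hypothetical MLXSW_CORE_RES_GET(core, FID) of 4000 and
		 * local port 7: 4000 + 7 - 1 = 4006, so each local port
		 * maps to a distinct FID just above the bridging range.
		 */
		return max_fid + local_port - 1;
	}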
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8d3c61287696..b10f80fc651b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -243,10 +243,7 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
- int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ bool tx_as_data;
};
struct mlxsw_sp_fid_core_ops {
@@ -711,12 +708,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2bed8c86b7cf..3f64cdbabfa3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index f07955b5439f..6a4a81c63451 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
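
The two guards make the mirror/sample restriction symmetric; together they reject any rule that carries both actions, regardless of order:

	/* With both checks in place:
	 *   ... action sample ... action mirror ...  -> -EOPNOTSUPP
	 *   ... action mirror ... action sample ...  -> -EOPNOTSUPP
	 * Rules with only one of the two actions are unaffected.
	 */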
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
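
Switching the mfc result counters to atomic_long accessors implies the shared fields are now atomic_long_t; a sketch of the lock-free reader this pairs with, assumed from the accessors used above rather than taken from the routing core:

	static void example_read_mr_stats(struct mr_mfc *mfc,
					  unsigned long *packets,
					  unsigned long *bytes,
					  unsigned long *lastuse)
	{
		/* The writer above uses atomic_long_set()/WRITE_ONCE(),
		 * so a reader can sample without holding the table lock.
		 */
		*packets = atomic_long_read(&mfc->mfc_un.res.pkt);
		*bytes = atomic_long_read(&mfc->mfc_un.res.bytes);
		*lastuse = READ_ONCE(mfc->mfc_un.res.lastuse);
	}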
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index d94081c7658e..ca8b9d18fbb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -1353,6 +1353,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
+ /* Max FID will be used in data path, check validity as part of init. */
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID))
+ return ERR_PTR(-EIO);
+
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
@@ -1679,43 +1683,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
-}
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
- * their correction field correctly set on the egress port they must be
- * transmitted as data packets. Such packets ingress the ASIC via the
- * CPU port and must have a VLAN tag, as the CPU port is not configured
- * with a PVID. Push the default VLAN (4095), which is configured as
- * egress untagged on all the ports.
- */
- if (!skb_vlan_tagged(skb)) {
- skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
- MLXSW_SP_DEFAULT_VID);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- return -ENOMEM;
- }
- }
-
- return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
- tx_info);
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c8aa1452fbb9..102db9060135 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -49,11 +49,6 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
@@ -78,11 +73,6 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -157,15 +147,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-static inline int
-mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
@@ -211,15 +192,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
{
return -EOPNOTSUPP;
}
-
-static inline int
-mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
#endif
static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..1f9c1c86839f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
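
Both RX listeners now depend on the NAPI pointer that mlxsw_pci_cqe_rdq_handle() stashed in the skb control block earlier in this patch; the contract in miniature:

	static void example_deliver(struct sk_buff *skb)
	{
		/* Valid only in NAPI poll context: the PCI driver sets
		 * rx_md_info.napi before dispatching to listeners, which
		 * lets them feed GRO instead of netif_receive_skb().
		 */
		napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
	}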
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index da51dd9d5e44..e78cba5821b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -4,6 +4,69 @@
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
#define MLXSW_TXHDR_VERSION_1 1
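
Each MLXSW_ITEM32() line generates typed set/get helpers for one bit-field of the big-endian header words; a standalone C sketch of what, say, mlxsw_tx_hdr_version_set() boils down to (the real macro lives in item.h and is generic over offset, shift and width):

	#include <arpa/inet.h>	/* htonl()/ntohl(): words are big endian */
	#include <stdint.h>
	#include <string.h>

	/* Assumed equivalent of MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4) */
	static void example_tx_hdr_version_set(char *txhdr, uint32_t version)
	{
		uint32_t mask = ((1u << 4) - 1) << 28; /* 4-bit field at bit 28 */
		uint32_t word;

		memcpy(&word, txhdr + 0x00, sizeof(word)); /* byte offset 0x00 */
		word = ntohl(word);
		word = (word & ~mask) | ((version << 28) & mask);
		word = htonl(word);
		memcpy(txhdr + 0x00, &word, sizeof(word));
	}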
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index ea6214ca48e7..239b2258ec65 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -13,6 +13,7 @@ fbnic-y := fbnic_csr.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
+ fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 744eb0d95449..14751f16e125 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -16,10 +16,15 @@
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
struct dentry *dbg_fbd;
+ struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -28,10 +33,16 @@ struct fbnic_dev {
unsigned int pcs_msix_vector;
unsigned short num_irqs;
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
struct delayed_work service_task;
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
+ struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -140,9 +151,18 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+void fbnic_hwmon_register(struct fbnic_dev *fbd);
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
+
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data);
void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
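
The odd-looking IFNAMSIZ + 9 is sized exactly for the "%s-TxRx-%u" format that fbnic_napi_name_irqs() uses later in this patch:

	/* Worst case for "%s-TxRx-%u" with FBNIC_MAX_NAPI_VECTORS == 128:
	 *
	 *	netdev name	IFNAMSIZ - 1 bytes
	 *	"-TxRx-"	6 bytes
	 *	index < 128	up to 3 digits
	 *	terminating NUL	1 byte
	 *	---------------------------------
	 *	total		IFNAMSIZ + 9
	 */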
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index cc8ca94529ca..20cd9f5f89e2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -40,49 +40,99 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
-static int
-fbnic_get_ts_info(struct net_device *netdev,
- struct kernel_ethtool_ts_info *tsinfo)
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
- tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
- tsinfo->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- tsinfo->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
- tsinfo->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_ALL);
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- return 0;
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}
-static void
-fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
- fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
- sizeof(drvinfo->fw_version));
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
}
-static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
{
- if (counter->reported)
- *stat = counter->value;
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
}
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -97,6 +147,21 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
}
}
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_stat *stat;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
+ stat = &fbnic_gstrings_hw_stats[i];
+ data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ }
+}
+
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -107,19 +172,375 @@ static int fbnic_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fbnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static int fbnic_get_rss_hash_idx(u32 flow_type)
{
- struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
- int i;
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
- fbnic_get_hw_stats(fbn->fbd);
+ return -1;
+}
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+static int
+fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from the rss_flow_hash table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = fbnic_get_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = fbnic_set_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
}
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+ /* Limits for standalone queues:
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
+ * - combining queues (if combined is not 0, rx or tx must be 0)
+ */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.ts_packets;
+ ts_stats->lost = fbn->tx_stats.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.ts_packets;
+ ts_lost = ring->stats.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
}
static void
@@ -164,44 +585,6 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
-static void fbnic_get_ts_stats(struct net_device *netdev,
- struct ethtool_ts_stats *ts_stats)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
- u64 ts_packets, ts_lost;
- struct fbnic_ring *ring;
- unsigned int start;
- int i;
-
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
- for (i = 0; i < fbn->num_tx_queues; i++) {
- ring = fbn->tx[i];
- do {
- start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
- } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
- ts_stats->pkts += ts_packets;
- ts_stats->lost += ts_lost;
- }
-}
-
-static void fbnic_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *data)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
-}
-
-static int fbnic_get_regs_len(struct net_device *netdev)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
-}
-
static const struct ethtool_ops fbnic_ethtool_ops = {
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
@@ -209,6 +592,14 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
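
The RSS key lives in host-order u32 words but is exposed to ethtool as bytes, most significant byte first; a self-contained check of the pack/unpack round trip used by fbnic_get_rxfh()/fbnic_set_rxfh() above (plain userspace C, not driver code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t rss_key[1] = { 0x11223344 };
		uint32_t repack = 0;
		uint8_t key[4];
		int i;

		/* get_rxfh packing: shift word i/4 left by (i % 4) * 8,
		 * then take the top byte.
		 */
		for (i = 0; i < 4; i++)
			key[i] = (rss_key[i / 4] << ((i % 4) * 8)) >> 24;

		/* set_rxfh unpacking walks the bytes in reverse */
		for (i = 4; i--;) {
			repack >>= 8;
			repack |= (uint32_t)key[i] << 24;
		}

		assert(key[0] == 0x11 && key[3] == 0x44);
		assert(repack == rss_key[0]);
		return 0;
	}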
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 8f7a2a19ddf8..bbc7c1c0c37e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -228,6 +228,63 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ /* If we are already waiting on a completion then abort */
+ if (cmpl_data && fbd->cmpl_data) {
+ err = -EBUSY;
+ goto unlock_mbx;
+ }
+
+ /* Record completion location and submit request */
+ if (cmpl_data)
+ fbd->cmpl_data = cmpl_data;
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ /* If msg failed then clear completion data for next caller */
+ if (err && cmpl_data)
+ fbd->cmpl_data = NULL;
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data;
+ kref_get(&fbd->cmpl_data->ref_count);
+ }
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
/**
* fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
* @fbd: FBNIC device structure
@@ -651,6 +708,84 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ int err = 0;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -EINVAL;
+
+ if (results[FBNIC_TSENE_ERROR]) {
+ err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
+ if (err)
+ goto exit_complete;
+ }
+
+ if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto exit_complete;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ cmpl_data->u.tsene.millivolts =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+
+exit_complete:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -658,6 +793,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
FBNIC_TLV_MSG_ERROR
};
@@ -802,3 +940,25 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
fw_version, str_sz);
}
+
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
+ u32 msg_type)
+{
+ fw_cmpl->msg_type = msg_type;
+ init_completion(&fw_cmpl->done);
+ kref_init(&fw_cmpl->ref_count);
+}
+
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbd->cmpl_data = NULL;
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
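
The new completion helpers compose into an init, transmit, wait, put lifecycle; the sketch below mirrors what fbnic_mac_get_sensor_asic() does later in this patch, trimmed to the essentials:

	static int example_tsene_read(struct fbnic_dev *fbd, long *millidegrees)
	{
		struct fbnic_fw_completion *cmpl;
		int err;

		cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
		if (!cmpl)
			return -ENOMEM;

		fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);

		err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl); /* claims cmpl_data */
		if (err)
			goto put;

		if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ))
			err = -ETIMEDOUT;
		else
			err = cmpl->result; /* filled in by the RESP parser */

		if (!err)
			*millidegrees = cmpl->u.tsene.millidegrees;

		fbnic_fw_clear_compl(fbd); /* release the single in-flight slot */
	put:
		fbnic_fw_put_cmpl(cmpl); /* drop this caller's kref */
		return err;
	}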
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 221faf8c6756..fe68333d51b1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,6 +44,19 @@ struct fbnic_fw_cap {
u8 link_fec;
};
+struct fbnic_fw_completion {
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
+};
+
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
@@ -52,6 +65,12 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data,
+ u32 msg_type);
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
@@ -76,6 +95,8 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -118,6 +139,13 @@ enum {
};
enum {
+ FBNIC_TSENE_THERM = 0x0,
+ FBNIC_TSENE_VOLT = 0x1,
+ FBNIC_TSENE_ERROR = 0x2,
+ FBNIC_TSENE_MSG_MAX
+};
+
+enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
new file mode 100644
index 000000000000..def8598aceec
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/hwmon.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+
+static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_temp)
+ return FBNIC_SENSOR_TEMP;
+ if (type == hwmon_in)
+ return FBNIC_SENSOR_VOLTAGE;
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t fbnic_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+ if (type == hwmon_in && attr == hwmon_in_input)
+ return 0444;
+
+ return 0;
+}
+
+static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ const struct fbnic_mac *mac = fbd->mac;
+ int id;
+
+ id = fbnic_hwmon_sensor_id(type);
+ return id < 0 ? id : mac->get_sensor(fbd, id, val);
+}
+
+static const struct hwmon_ops fbnic_hwmon_ops = {
+ .is_visible = fbnic_hwmon_is_visible,
+ .read = fbnic_hwmon_read,
+};
+
+static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info fbnic_chip_info = {
+ .ops = &fbnic_hwmon_ops,
+ .info = fbnic_hwmon_info,
+};
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
+ fbd, &fbnic_chip_info,
+ NULL);
+ if (IS_ERR(fbd->hwmon)) {
+ dev_notice(fbd->dev,
+ "Failed to register hwmon device %pe\n",
+ fbd->hwmon);
+ fbd->hwmon = NULL;
+ }
+}
+
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
+ return;
+
+ hwmon_device_unregister(fbd->hwmon);
+ fbd->hwmon = NULL;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 914362195920..1bbc0e56f3a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -146,6 +146,17 @@ void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
free_irq(fbd->pcs_msix_vector, fbd);
}
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
+}
+
int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data)
{
@@ -169,6 +180,48 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
free_irq(irq, data);
}
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7b654d0a6dac..14291401f463 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,6 +686,77 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
+{
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
+ s32 *sensor;
+
+ fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+
+ switch (id) {
+ case FBNIC_SENSOR_TEMP:
+ sensor = &fw_cmpl->u.tsene.millidegrees;
+ break;
+ case FBNIC_SENSOR_VOLTAGE:
+ sensor = &fw_cmpl->u.tsene.millivolts;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds per wait; on timeout resend the request, giving up after the fifth wait */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
+ }
+
+ *val = *sensor;
+exit_cleanup:
+ fbnic_fw_clear_compl(fbd);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -695,6 +766,7 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
+ .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 476239a9d381..05a591653e09 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,6 +47,11 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+enum fbnic_sensor_id {
+ FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
+ FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
+};
+
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -83,6 +88,8 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+
+ int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index fc7d80db5fa6..7a96b6ee773f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto free_napi_vectors;
- err = netif_set_real_num_tx_queues(fbn->netdev,
- fbn->num_tx_queues);
- if (err)
- goto free_resources;
-
- err = netif_set_real_num_rx_queues(fbn->netdev,
- fbn->num_rx_queues);
+ err = fbnic_set_netif_queues(fbn);
if (err)
goto free_resources;
@@ -74,6 +68,8 @@ static int fbnic_open(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
int err;
+ fbnic_napi_name_irqs(fbn->fbd);
+
err = __fbnic_open(fbn);
if (!err)
fbnic_up(fbn);
@@ -91,6 +87,7 @@ static int fbnic_stop(struct net_device *netdev)
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+ fbnic_reset_netif_queues(fbn);
fbnic_free_resources(fbn);
fbnic_free_napi_vectors(fbn);
@@ -615,7 +612,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->netdev = netdev;
fbn->fbd = fbd;
- INIT_LIST_HEAD(&fbn->napis);
fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b8417b300778..a392ac1cc4f2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -11,10 +11,14 @@
#include "fbnic_rpc.h"
#include "fbnic_txrx.h"
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
struct net_device *netdev;
struct fbnic_dev *fbd;
@@ -56,13 +60,12 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
-
- struct list_head napis;
};
int __fbnic_open(struct fbnic_net *fbn);
void fbnic_up(struct fbnic_net *fbn);
void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
void fbnic_netdev_free(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 7ccf192f13d5..6cbbc2ee3e1f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -145,7 +145,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_service_task_start(fbn);
}
-static void fbnic_down_noidle(struct fbnic_net *fbn)
+void fbnic_down_noidle(struct fbnic_net *fbn)
{
fbnic_service_task_stop(fbn);
@@ -296,6 +296,8 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
+ fbnic_hwmon_register(fbd);
+
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
goto init_failure_mode;
@@ -358,6 +360,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_free(fbd);
}
+ fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 1a5e1e719b30..bb11fc83367d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -15,7 +15,7 @@ fbnic_pcs_to_net(struct phylink_pcs *pcs)
}
static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 908c098cd59e..c25bd300b902 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -13,10 +13,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
unsigned int num_rx = fbn->num_rx_queues;
unsigned int i;
- for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
- fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
- }
}
void fbnic_rss_key_fill(u32 *buffer)
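
The early return added above is the whole policy change; a minimal sketch of the idea, reduced to the table and ring counts:

/* Only (re)write the default spread when the user has not installed
 * an RSS indirection table of their own via ethtool.
 */
static void example_reset_indir(struct net_device *dev, u32 *tbl,
                                unsigned int tbl_size, unsigned int num_rx)
{
        unsigned int i;

        if (netif_is_rxfh_configured(dev))
                return;         /* preserve the user-provided mapping */

        for (i = 0; i < tbl_size; i++)
                tbl[i] = ethtool_rxfh_indir_default(i, num_rx); /* i % num_rx */
}
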
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index b5050fabe8fe..d4d7027df9a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -1033,20 +1033,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
fbnic_nv_irq_rearm(nv);
- return 0;
+ return work_done;
}
-static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
- struct fbnic_napi_vector *nv = data;
+ struct fbnic_napi_vector *nv = *(void **)data;
napi_schedule_irqoff(&nv->napi);
return IRQ_HANDLED;
}
-static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *rxr)
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
{
struct fbnic_queue_stats *stats = &rxr->stats;
@@ -1056,8 +1056,8 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.dropped += stats->dropped;
}
-static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *txr)
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
{
struct fbnic_queue_stats *stats = &txr->stats;
@@ -1099,7 +1099,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
struct fbnic_dev *fbd = nv->fbd;
- u32 v_idx = nv->v_idx;
int i, j;
for (i = 0; i < nv->txt_count; i++) {
@@ -1113,31 +1112,20 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
- fbnic_free_irq(fbd, v_idx, nv);
+ fbnic_napi_free_irq(fbd, nv);
page_pool_destroy(nv->page_pool);
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
void fbnic_free_napi_vectors(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv, *temp;
-
- list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
- fbnic_free_napi_vector(fbn, nv);
-}
-
-static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
-{
- unsigned char *dev_name = nv->napi.dev->name;
+ int i;
- if (!nv->rxt_count)
- snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
- else
- snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
#define FBNIC_PAGE_POOL_FLAGS \
@@ -1222,7 +1210,7 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->v_idx = v_idx;
/* Tie napi to netdev */
- list_add(&nv->napis, &fbn->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
/* Record IRQ to NAPI struct */
@@ -1239,12 +1227,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
goto napi_del;
}
- /* Initialize vector name */
- fbnic_name_napi_vector(nv);
-
/* Request the IRQ for napi vector */
- err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
- IRQF_SHARED, nv->name, nv);
+ err = fbnic_napi_request_irq(fbd, nv);
if (err)
goto pp_destroy;
@@ -1307,7 +1291,7 @@ pp_destroy:
page_pool_destroy(nv->page_pool);
napi_del:
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
}
@@ -1612,19 +1596,18 @@ free_resources:
void fbnic_free_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
}
int fbnic_alloc_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
- int err = -ENODEV;
+ int i, err = -ENODEV;
- list_for_each_entry(nv, &fbn->napis, napis) {
- err = fbnic_alloc_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
if (err)
goto free_resources;
}
@@ -1632,12 +1615,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn)
return 0;
free_resources:
- list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
return err;
}
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
static void fbnic_disable_twq0(struct fbnic_ring *txr)
{
u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
@@ -1670,33 +1718,34 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr)
void fbnic_napi_disable(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_disable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable(&fbn->napi[i]->napi);
- fbnic_nv_irq_disable(nv);
+ fbnic_nv_irq_disable(fbn->napi[i]);
}
}
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Disable Tx queue triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_twq0(&qt->sub0);
fbnic_disable_tcq(&qt->cmpl);
}
/* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_bdq(&qt->sub0, &qt->sub1);
fbnic_disable_rcq(&qt->cmpl);
@@ -1792,14 +1841,15 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
void fbnic_flush(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Flush any processed Tx Queue Triads and drop the rest */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
struct netdev_queue *tx_queue;
/* Clean the work queues of unprocessed work */
@@ -1816,15 +1866,11 @@ void fbnic_flush(struct fbnic_net *fbn)
tx_queue = netdev_get_tx_queue(nv->napi.dev,
qt->sub0.q_idx);
netdev_tx_reset_queue(tx_queue);
-
- /* Disassociate Tx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, NULL);
}
/* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Clean the work queues of unprocessed work */
fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
@@ -1835,43 +1881,23 @@ void fbnic_flush(struct fbnic_net *fbn)
fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
qt->cmpl.pkt->buff.data_hard_start = NULL;
-
- /* Disassociate Rx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, NULL);
}
}
}
void fbnic_fill(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
-
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
-
- /* Configure NAPI mapping for Tx */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Nothing to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+ int i;
- /* Associate Tx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, &nv->napi);
- }
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Configure NAPI mapping and populate pages
* in the BDQ rings to use for Rx
*/
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Associate Rx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Populate the header and payload BDQs */
fbnic_fill_bdq(nv, &qt->sub0);
@@ -2025,21 +2051,23 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
void fbnic_enable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Setup Tx Queue Triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_twq0(&qt->sub0);
fbnic_enable_tcq(nv, &qt->cmpl);
}
/* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
@@ -2064,10 +2092,11 @@ void fbnic_napi_enable(struct fbnic_net *fbn)
{
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
napi_enable(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2096,17 +2125,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Find RQs which are completely out of pages */
- for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
/* Assume 4 pages is always enough to fit a packet
* and therefore generate a completion and an IRQ.
*/
- if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
- fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
}
}
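
The fbnic_poll() change above restores the usual NAPI contract; a minimal sketch of that contract, with example_irq_rearm() standing in as an assumed helper:

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... clean Tx, then process up to 'budget' Rx frames,
         * incrementing work_done once per frame ...
         */

        if (work_done < budget && napi_complete_done(napi, work_done))
                example_irq_rearm(napi);        /* assumed helper */

        /* Returning less than budget lets the core account the work
         * accurately; returning the full budget keeps polling.
         */
        return work_done;
}
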
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 8d626287c3f4..c2a94f31f71b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -104,14 +104,11 @@ struct fbnic_napi_vector {
struct device *dev; /* Device for DMA unmapping */
struct page_pool *page_pool;
struct fbnic_dev *fbd;
- char name[IFNAMSIZ + 9];
u16 v_idx;
u8 txt_count;
u8 rxt_count;
- struct list_head napis;
-
struct fbnic_q_triad qt[];
};
@@ -123,10 +120,18 @@ netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features);
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
int fbnic_alloc_resources(struct fbnic_net *fbn);
void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_enable(struct fbnic_net *fbn);
@@ -137,4 +142,9 @@ void fbnic_fill(struct fbnic_net *fbn);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
#endif /* _FBNIC_TXRX_H_ */
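
With the list replaced by a fixed array indexed via fbnic_napi_idx(), per-vector iteration becomes a bounded loop; a sketch of the resulting pattern (the callback indirection is illustrative only):

static void example_for_each_nv(struct fbnic_net *fbn,
                                void (*fn)(struct fbnic_napi_vector *nv))
{
        int i;

        /* Slots may be NULL while vectors are being torn down. */
        for (i = 0; i < fbn->num_napi; i++)
                if (fbn->napi[i])
                        fn(fbn->napi[i]);
}
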
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1a1cbd034eda..1459acfb1e61 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,9 +1055,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- eee->tx_lpi_timer = lan743x_csr_read(adapter,
- MAC_EEE_TX_LPI_REQ_DLY_CNT);
-
return phylink_ethtool_get_eee(adapter->phylink, eee);
}
@@ -1065,24 +1062,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- u32 tx_lpi_timer;
-
- tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- if (tx_lpi_timer != eee->tx_lpi_timer) {
- u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
-
- /* Software should only change this field when Energy Efficient
- * Ethernet Enable (EEEEN) is cleared.
- * This function will trigger an autonegotiation restart and
- * eee will be reenabled during link up if eee was negotiated.
- */
- lan743x_mac_eee_enable(adapter, false);
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
- eee->tx_lpi_timer);
-
- if (mac_cr & MAC_CR_EEE_EN_)
- lan743x_mac_eee_enable(adapter, true);
- }
return phylink_ethtool_set_eee(adapter->phylink, eee);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4dc5adcda6a3..23760b613d3e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2966,7 +2966,7 @@ static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
return lan743x_pcs_power_reset(adapter);
}
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
{
u32 mac_cr;
@@ -3027,10 +3027,8 @@ static void lan743x_phylink_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct net_device *netdev = to_net_dev(config->dev);
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- netif_tx_stop_all_queues(to_net_dev(config->dev));
- lan743x_mac_eee_enable(adapter, false);
+ netif_tx_stop_all_queues(netdev);
}
static void lan743x_phylink_mac_link_up(struct phylink_config *config,
@@ -3072,16 +3070,40 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config,
cap & FLOW_CTRL_TX,
cap & FLOW_CTRL_RX);
- if (phydev)
- lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
-
netif_tx_wake_all_queues(netdev);
}
+static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
+ lan743x_mac_eee_enable(adapter, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
.mac_config = lan743x_phylink_mac_config,
.mac_link_down = lan743x_phylink_mac_link_down,
.mac_link_up = lan743x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
};
static int lan743x_phylink_create(struct lan743x_adapter *adapter)
@@ -3095,6 +3117,9 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ adapter->phylink_config.lpi_timer_default =
+ lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
lan743x_phy_interface_select(adapter);
@@ -3120,6 +3145,10 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
}
+ memcpy(adapter->phylink_config.lpi_interfaces,
+ adapter->phylink_config.supported_interfaces,
+ sizeof(adapter->phylink_config.lpi_interfaces));
+
pl = phylink_create(&adapter->phylink_config, NULL,
adapter->phy_interface, &lan743x_phylink_mac_ops);
@@ -3517,6 +3546,9 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&tx->ring_lock);
}
+ /* Ensure EEEEN is clear */
+ lan743x_mac_eee_enable(adapter, false);
+
return 0;
}
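
A condensed sketch of the phylink LPI callback pattern adopted above: phylink guarantees mac_disable_tx_lpi() has run before a subsequent mac_enable_tx_lpi(), so the timer register can be written while EEE is off. my_eee_enable() and my_write_lpi_timer() are assumed stand-ins for the driver's register accessors:

static void my_mac_disable_tx_lpi(struct phylink_config *config)
{
        my_eee_enable(netdev_priv(to_net_dev(config->dev)), false);
}

static int my_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
                                bool tx_clk_stop)
{
        void *priv = netdev_priv(to_net_dev(config->dev));

        my_write_lpi_timer(priv, timer);        /* EEEEN is clear here */
        my_eee_enable(priv, true);

        return 0;
}

static const struct phylink_mac_ops my_mac_ops = {
        /* .mac_config / .mac_link_up / .mac_link_down as usual */
        .mac_disable_tx_lpi     = my_mac_disable_tx_lpi,
        .mac_enable_tx_lpi      = my_mac_enable_tx_lpi,
};
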
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8ef897c114d3..7f73d66854be 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1206,6 +1206,5 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 25cb2f61986f..1efa584e7107 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -443,7 +443,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index fdfa4040d9ee..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
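
The lan966x and fbnic hunks above follow the same phylink PCS API change; a minimal sketch of the new callback shape, with my_read_bmsr()/my_read_lpa() as assumed register readers:

static void my_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
                             struct phylink_link_state *state)
{
        u16 bmsr = my_read_bmsr(pcs);
        u16 lp_adv = my_read_lpa(pcs);

        /* neg_mode is forwarded so the helper can distinguish in-band
         * autoneg results from fixed/out-of-band configurations.
         */
        phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
}
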
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 35b057c9d0cb..35e1c0cf345e 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -28,5 +28,6 @@ config SPARX5_DCB
config LAN969X_SWITCH
bool "Lan969x switch driver"
depends on SPARX5_SWITCH
+ select PAGE_POOL
help
This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 4bf2a885a9da..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -20,7 +20,9 @@ sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
lan969x/lan969x.o \
lan969x/lan969x_calendar.o \
lan969x/lan969x_vcap_ag_api.o \
- lan969x/lan969x_vcap_impl.o
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
index c2afa2176b08..f3a9c71bea36 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
{ TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
{ TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
{ TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
{ TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
{ TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
{ TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
};
static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
@@ -329,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = {
.is_port_5g = &lan969x_port_is_5g,
.is_port_10g = &lan969x_port_is_10g,
.is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
.get_port_dev_index = &lan969x_port_dev_mapping,
.get_port_dev_bit = &lan969x_get_dev_mode_bit,
.get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
@@ -336,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = {
.set_port_mux = &lan969x_port_mux_set,
.ptp_irq_handler = &lan969x_ptp_irq_handler,
.dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
};
const struct sparx5_match_data lan969x_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
index 2489d0d32dfd..529fde3d4deb 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -59,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno)
return false;
}
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
/* lan969x_calendar.c */
int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
#endif
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use; any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask\n");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
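
A minimal sketch of the Rx recycling scheme used by lan969x_fdma_rx_get_frame() above, reduced to the page_pool mechanics (frame_len/db_size are assumed to come from the DCB):

static struct sk_buff *example_rx_frame(struct page_pool *pool,
                                        struct page *page, u32 frame_len,
                                        u32 db_size)
{
        struct sk_buff *skb = build_skb(page_address(page), db_size);

        if (unlikely(!skb)) {
                /* Error path: hand the page straight back to the pool. */
                page_pool_recycle_direct(pool, page);
                return NULL;
        }

        /* Let the skb free path return the page to the pool later. */
        skb_mark_for_recycle(skb);
        skb_put(skb, frame_len);

        return skb;
}
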
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the "rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}), and ignored by the MAC side
+ * entirely.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable the RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
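
A small sketch of the delay-property semantics relied on above: of_property_read_u32() leaves the output untouched when the property is missing, so the delay defaults to 0, and a zero delay keeps the DLL clock output gated off (DLL_CLK_ENA is set from !!delay_ps), meaning the MAC adds no delay unless the devicetree asks for one:

static u32 example_read_delay_ps(struct device_node *np, const char *prop)
{
        u32 delay_ps = 0;       /* default: no MAC-side delay */

        of_property_read_u32(np, prop, &delay_ps);

        return delay_ps;
}
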
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 0027144a2af2..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,9 +18,6 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
@@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
@@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
@@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
return counter;
}
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- static bool first_time = true;
void *virt_addr;
fdma_dcb_advance(fdma);
@@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_reload(sparx5, fdma);
- }
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
@@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
return 0;
}
@@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
- fdma_free_phys(&sparx5->rx.fdma);
- fdma_free_phys(&sparx5->tx.fdma);
+
return 0;
}
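
A sketch of the lifecycle split introduced above, assuming the ops-based dispatch from this series: allocation (fdma_init) is now separate from activation (sparx5_fdma_start), and teardown mirrors it, with fdma_deinit stopping the channels before freeing DMA memory:

static int example_fdma_bringup(struct sparx5 *sparx5)
{
        const struct sparx5_ops *ops = sparx5->data->ops;
        int err;

        err = ops->fdma_init(sparx5);   /* allocate DCBs, buffers, pools */
        if (err)
                return err;

        /* Add and enable NAPI, then activate the channels. */
        return sparx5_fdma_start(sparx5);
}

static void example_fdma_teardown(struct sparx5 *sparx5)
{
        sparx5->data->ops->fdma_deinit(sparx5); /* stop, then free */
}
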
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index f61aa15beab7..6a0e5b83ecd0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -313,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -357,6 +360,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -778,15 +784,19 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
err = devm_request_irq(sparx5->dev,
sparx5->fdma_irq,
sparx5_fdma_handler,
0,
"sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -830,6 +840,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -851,6 +862,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
return -EINVAL;
regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
@@ -880,7 +892,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -910,13 +922,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -1014,6 +1030,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -1025,7 +1042,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -1072,6 +1089,7 @@ static const struct sparx5_ops sparx5_ops = {
.is_port_5g = &sparx5_port_is_5g,
.is_port_10g = &sparx5_port_is_10g,
.is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
.get_port_dev_index = &sparx5_port_dev_mapping,
.get_port_dev_bit = &sparx5_port_dev_mapping,
.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
@@ -1079,6 +1097,10 @@ static const struct sparx5_ops sparx5_ops = {
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
};
static const struct sparx5_match_data sparx5_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index d5dd953b0a71..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -112,6 +112,8 @@ enum sparx5_feature {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -157,11 +159,25 @@ struct sparx5_calendar_data {
*/
struct sparx5_rx {
struct fdma fdma;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
@@ -169,6 +185,7 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -313,6 +330,7 @@ struct sparx5_ops {
bool (*is_port_5g)(int portno);
bool (*is_port_10g)(int portno);
bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
u32 (*get_hsch_max_group_rate)(int grp);
@@ -323,6 +341,13 @@ struct sparx5_ops {
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
};
struct sparx5_main_io_resource {
@@ -433,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 561344f19062..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -37,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -54,6 +55,7 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
+ TARGET_DEVRGMII = 392,
NUM_TARGETS = 517
};
@@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs;
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
/* LRN:COMMON:COMMON_ACCESS_CTRL */
#define LRN_COMMON_ACCESS_CTRL \
__REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
@@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs;
#define XQS_CNT(g) \
__REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+
#endif /* _SPARX5_MAIN_REGS_H_ */
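The accessor macros above all follow one pattern: GENMASK() names a bit range, and FIELD_PREP()/FIELD_GET() shift values into and out of it. A standalone sketch of that pattern, with the kernel helpers reimplemented via GCC/Clang builtins so it compiles in userspace; the field names borrow from RGMII_CFG above, the values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define RGMII_CFG_TX_CLK_CFG	GENMASK(4, 2)
#define RGMII_CFG_TX_RST	(1u << 1)

int main(void)
{
	uint32_t reg = 0;

	reg |= FIELD_PREP(RGMII_CFG_TX_CLK_CFG, 3);	/* pack clock cfg 3 */
	reg |= RGMII_CFG_TX_RST;			/* hold TX in reset */

	printf("reg=0x%08x tx_clk=%u\n", reg,
	       FIELD_GET(RGMII_CFG_TX_CLK_CFG, reg));
	return 0;
}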
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index b6f635d85820..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(sparx5, ifh, port->portno);
@@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+ if (!is_sparx5(sparx5))
+ /* On lan969x, stats and SKB consumption are handled in the TX
+ * completion loop when TX_OK is returned, so don't go any further.
+ */
+ return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
@@ -366,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
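The hrtimer change above is a mechanical conversion: hrtimer_setup() folds the old two-step init-then-assign into a single call, so a timer can never be armed with its callback pointer still unset. A kernel-context fragment (not buildable standalone) showing the before/after shape, using the names from the hunk above:

/* before: two steps, function assigned separately */
hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->inj_timer.function = sparx5_injection_timeout;

/* after: one call, callback passed up front */
hrtimer_setup(&port->inj_timer, sparx5_injection_timeout,
	      CLOCK_MONOTONIC, HRTIMER_MODE_REL);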
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
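With mac_select_pcs() filtering by interface mode, phylink only attaches the port's PCS for the serdes-backed modes listed above; anything else (notably the RGMII variants added later in this series) gets NULL and is driven by the MAC alone. A kernel-context sketch of the same idea, condensed; my_pcs is a hypothetical placeholder:

static struct phylink_pcs *
my_mac_select_pcs(struct phylink_config *config, phy_interface_t interface)
{
	/* RGMII has no PCS; let the MAC handle the link directly */
	return phy_interface_mode_is_rgmii(interface) ? NULL : &my_pcs;
}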
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index f9d1a6bb9bff..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
@@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
- if (err)
- return err;
-
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 9b9bcc6834bc..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
+static inline bool sparx5_port_is_rgmii(int portno)
+{
+ return false;
+}
+
static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
const struct sparx5_ops *ops = sparx5->data->ops;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 2dc0c6ad54be..638ef64d639f 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -1547,6 +1553,7 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1569,6 +1576,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
@@ -1622,6 +1631,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1648,17 +1659,21 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
static void __exit mana_driver_exit(void)
{
+ pci_unregister_driver(&mana_driver);
+
debugfs_remove(mana_debugfs_root);
- pci_unregister_driver(&mana_driver);
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
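The detection rewrite above stops trusting resp.num_of_devs as a loop bound: real entries may be scattered through the fixed-size list with empty slots between them, so the loop now walks the whole array and counts non-empty entries instead. A userspace sketch of that scan; the list size and entry layout are stand-ins, not the real GDMA ABI.

#include <stdint.h>
#include <stdio.h>

#define DEV_LIST_SIZE 64

int main(void)
{
	uint32_t devs[DEV_LIST_SIZE] = { 0 };
	uint32_t num_of_devs = 2, found_dev = 0;

	devs[3] = 0x00010002;	/* real entries can sit anywhere */
	devs[9] = 0x00020005;

	for (uint32_t i = 0; i < DEV_LIST_SIZE && found_dev < num_of_devs; i++) {
		if (devs[i] == 0)
			continue;	/* skip empty slots */
		found_dev++;
		printf("slot %u: id 0x%08x\n", i, devs[i]);
	}
	return 0;
}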
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index aa1e47233fe5..ae76ecc7a5d3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -738,12 +738,11 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+ * Make sure subsequent cleanup attempts don't end up removing an
+ * already-removed dentry pointer.
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -1254,6 +1253,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1914,6 +1914,7 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
@@ -2099,6 +2100,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
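All of the mana debugfs fixes above are the same idiom: debugfs_remove() and debugfs_remove_recursive() treat a NULL dentry as a no-op, so clearing the saved pointer right after removal makes any later cleanup path harmless instead of a double remove. A kernel-context fragment (not buildable standalone):

debugfs_remove_recursive(gc->mana_pci_debugfs);
gc->mana_pci_debugfs = NULL;

/* a later error or teardown path may run the same cleanup again: */
debugfs_remove_recursive(gc->mana_pci_debugfs);	/* NULL: no-op */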
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 558e03301aa8..7663d196eaf8 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -992,6 +993,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
return ocelot_get_ts_info(ocelot, port, info);
}
+static void ocelot_port_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
@@ -999,6 +1010,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
+ .get_ts_stats = ocelot_port_ts_stats,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 808ce8e68d39..cc1088988da0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -680,9 +680,14 @@ static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
- dev_warn_ratelimited(ocelot->dev,
- "port %d invalidating stale timestamp ID %u which seems lost\n",
- port, OCELOT_SKB_CB(skb)->ts_id);
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->lost++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d invalidating stale timestamp ID %u which seems lost\n",
+ port, OCELOT_SKB_CB(skb)->ts_id);
+
__skb_unlink(skb, &ocelot_port->tx_skbs);
kfree_skb(skb);
ocelot->ptp_skbs_in_flight--;
@@ -748,13 +753,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
return 0;
ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
+ if (ptp_class == PTP_CLASS_NONE) {
+ err = -EINVAL;
+ goto error;
+ }
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->onestep_pkts_unconfirmed++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
return 0;
}
@@ -764,14 +776,16 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
+ if (!(*clone)) {
+ err = -ENOMEM;
+ goto error;
+ }
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
if (err) {
kfree_skb(*clone);
- return err;
+ goto error;
}
skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -780,6 +794,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
}
return 0;
+
+error:
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+ return err;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
@@ -816,6 +836,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *ocelot_port;
u32 val, id, seqid, txport;
struct sk_buff *skb_match;
struct timespec64 ts;
@@ -832,17 +853,27 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+ ocelot_port = ocelot->ports[txport];
/* Retrieve its associated skb */
skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
seqid);
if (!skb_match) {
- dev_warn_ratelimited(ocelot->dev,
- "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
- txport, seqid, id);
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
+ txport, seqid, id);
+
goto next_ts;
}
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->pkts++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
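The new ts_stats counters lean on u64_stats_sync: writers bracket every update with u64_stats_update_begin()/_end(), and readers (see ocelot_port_get_ts_stats() below) retry until they observe a consistent snapshot. A kernel-context fragment of the pairing:

/* writer side */
u64_stats_update_begin(&stats->syncp);
stats->pkts++;
u64_stats_update_end(&stats->syncp);

/* reader side */
unsigned int start;
do {
	start = u64_stats_fetch_begin(&stats->syncp);
	pkts = stats->pkts;
} while (u64_stats_fetch_retry(&stats->syncp, start));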
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index c018783757fb..545710dadcf5 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_ts_stats *stats = ocelot_port->ts_stats;
+ unsigned int start;
+
+ if (!ocelot->ptp)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ ts_stats->pkts = stats->pkts;
+ ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed;
+ ts_stats->lost = stats->lost;
+ ts_stats->err = stats->err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats);
+
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
@@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot)
if (!ocelot->stats)
return -ENOMEM;
+ if (ocelot->ptp) {
+ for (int port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ ocelot_port->ts_stats = devm_kzalloc(ocelot->dev,
+ sizeof(*ocelot_port->ts_stats),
+ GFP_KERNEL);
+ if (!ocelot_port->ts_stats)
+ return -ENOMEM;
+
+ u64_stats_init(&ocelot_port->ts_stats->syncp);
+ }
+ }
+
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
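The nfp check above works because size_add() from <linux/overflow.h> saturates: on overflow it returns SIZE_MAX, so a single "> INT_MAX" comparison rejects both a genuinely oversized sum and a wrapped one. A userspace sketch of the same saturating add, built on a GCC/Clang builtin:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static size_t size_add(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;	/* saturate instead of wrapping */
	return sum;
}

int main(void)
{
	size_t pkt_size = SIZE_MAX - 4, data_size = 16;

	if (size_add(pkt_size, data_size) > INT_MAX)
		puts("rejected: length would overflow");
	return 0;
}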
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 98e098c09c03..abba165738a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 720f577929db..499e5e39d513 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
}
}
-static void nv_napi_enable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_enable(&np->napi);
-}
-
-static void nv_napi_disable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_disable(&np->napi);
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev,
if (netif_running(dev)) {
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev,
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
@@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev)
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
+ netdev_lock(dev);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
@@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev)
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable_locked(&np->napi);
if (ret) {
netif_carrier_on(dev);
@@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev)
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
+ netdev_unlock(dev);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
@@ -5632,7 +5620,7 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
synchronize_irq(np->pci_dev->irq);
del_timer_sync(&np->oom_kick);
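The forcedeth open-path change follows the netdev instance-locking conversion: plain napi_enable() takes the instance lock itself, so inside a netdev_lock() section the _locked variant is required to avoid self-deadlock. A kernel-context fragment of the pattern (not buildable standalone):

netdev_lock(dev);
/* ... bring-up that must be atomic with respect to the instance lock ... */
napi_enable_locked(&np->napi);
netdev_unlock(dev);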
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 1c61390677f7..04f00ea94230 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,8 +18,6 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_ASIC_TYPE_ELBA 2
-
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -59,7 +57,6 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
cpumask_var_t *affinity_masks;
struct delayed_work doorbell_check_dwork;
- struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 9b7f78b6cdb1..a2d4336d2766 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -929,6 +968,7 @@ static int ionic_get_module_info(struct net_device *netdev,
break;
case SFF8024_ID_QSFP_8436_8636:
case SFF8024_ID_QSFP28_8636:
+ case SFF8024_ID_QSFP_PLUS_CMIS:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9c85c0706c6e..830c8adbfbee 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1404,6 +1416,8 @@ struct ionic_xcvr_status {
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -3209,7 +3223,11 @@ union ionic_adminq_comp {
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 3d3f936779f7..7707a9e53c43 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3804,10 +3804,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3858,8 +3854,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3885,7 +3879,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0f817c3f92d8..daf1e82cb76b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
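Splitting IONIC_RC_ENOSUPP out of the EINVAL group matters to callers: -EOPNOTSUPP signals a missing feature they can fall back from, while -EINVAL signals a malformed request. A trivial userspace sketch of the distinction; the status codes here are illustrative, not the device ABI.

#include <errno.h>
#include <stdio.h>

enum status { RC_EINVAL = 1, RC_ENOSUPP = 2 };

static int status_to_errno(enum status code)
{
	switch (code) {
	case RC_EINVAL:
		return -EINVAL;		/* bad request: fix the arguments */
	case RC_ENOSUPP:
		return -EOPNOTSUPP;	/* missing feature: fall back */
	}
	return -EIO;
}

int main(void)
{
	printf("%d %d\n", status_to_errno(RC_EINVAL),
	       status_to_errno(RC_ENOSUPP));
	return 0;
}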
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9cff0a8ffb2c..3383ee1dad14 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2946,20 +2946,20 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_crb,
- .write = netxen_sysfs_write_crb,
+ .read_new = netxen_sysfs_read_crb,
+ .write_new = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_mem,
- .write = netxen_sysfs_write_mem,
+ .read_new = netxen_sysfs_read_mem,
+ .write_new = netxen_sysfs_write_mem,
};
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3082,7 +3082,7 @@ out:
static const struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
- .read = netxen_sysfs_read_dimm,
+ .read_new = netxen_sysfs_read_dimm,
};
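The netxen (and, below, qlcnic) sysfs changes are part of the bin_attribute constification: attributes move to the transitional .read_new/.write_new members, whose callbacks take a const struct bin_attribute *. A kernel-context fragment of the converted shape; the names are hypothetical:

static ssize_t my_read(struct file *filp, struct kobject *kobj,
		       const struct bin_attribute *attr,
		       char *buf, loff_t offset, size_t size);

static const struct bin_attribute bin_attr_example = {
	.attr = { .name = "example", .mode = 0444 },
	.read_new = my_read,
};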
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..28d24d59efb8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..c0f20464fd1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1195,64 +1195,63 @@ static const struct device_attribute dev_attr_beacon = {
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_crb,
- .write = qlcnic_sysfs_write_crb,
+ .read_new = qlcnic_sysfs_read_crb,
+ .write_new = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_mem,
- .write = qlcnic_sysfs_write_mem,
+ .read_new = qlcnic_sysfs_read_mem,
+ .write_new = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
.attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_npar_config,
- .write = qlcnic_sysfs_write_npar_config,
+ .read_new = qlcnic_sysfs_read_npar_config,
+ .write_new = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
+ .read_new = qlcnic_sysfs_read_pci_config,
};
static const struct bin_attribute bin_attr_port_stats = {
.attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_port_stats,
- .write = qlcnic_sysfs_clear_port_stats,
+ .read_new = qlcnic_sysfs_get_port_stats,
+ .write_new = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
.attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_esw_stats,
- .write = qlcnic_sysfs_clear_esw_stats,
+ .read_new = qlcnic_sysfs_get_esw_stats,
+ .write_new = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
.attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_esw_config,
- .write = qlcnic_sysfs_write_esw_config,
+ .read_new = qlcnic_sysfs_read_esw_config,
+ .write_new = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
.attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pm_config,
- .write = qlcnic_sysfs_write_pm_config,
+ .read_new = qlcnic_sysfs_read_pm_config,
+ .write_new = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
.attr = { .name = "flash", .mode = 0644 },
.size = 0,
- .read = qlcnic_83xx_sysfs_flash_read_handler,
- .write = qlcnic_83xx_sysfs_flash_write_handler,
+ .read_new = qlcnic_83xx_sysfs_flash_read_handler,
+ .write_new = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9ce0e8a64ba8..a73dcaffa8c5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
+ netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
@@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- napi_enable(&tp->napi);
+ napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);
spin_unlock_bh(&tp->rx_lock);
+ netdev_unlock(dev);
}
static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index be4c9622618d..7a194a8ab989 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -71,6 +71,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_64,
RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_71,
RTL_GIGA_MAC_NONE
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..5a5eba49c651 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -57,6 +56,8 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
@@ -104,7 +105,6 @@ static const struct {
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
[RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
@@ -141,8 +141,10 @@ static const struct {
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
[RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2},
+ [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2},
+ [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2},
+ [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -623,7 +625,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -632,6 +633,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -708,6 +710,8 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
@@ -1230,7 +1234,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1245,7 +1249,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1360,10 +1364,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1384,10 +1397,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1410,6 +1432,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1424,6 +1447,8 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
return RTL_DASH_DP;
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1576,7 +1601,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2049,7 +2074,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2257,10 +2282,14 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
enum mac_version ver;
} mac_info[] = {
/* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 },
/* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 },
{ 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
/* 8125B family. */
@@ -2336,7 +2365,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168B family. */
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, let's see if anybody complains.
+ /* This one is very old and rare, support has been removed.
* { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
*/
@@ -2528,7 +2557,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2660,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2903,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2917,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2943,8 +2972,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2955,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2967,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2975,8 +3004,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3696,12 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3719,8 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3804,7 +3833,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3840,8 +3868,10 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3858,12 +3888,14 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4095,7 +4127,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4252,7 +4284,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4680,12 +4712,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
rtl_irq_disable(tp);
napi_schedule(&tp->napi);
out:
@@ -4723,8 +4749,6 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
}
@@ -5103,9 +5127,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5281,7 +5302,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
rtl_hw_init_8125(tp);
break;
default:
@@ -5300,7 +5321,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
@@ -5347,43 +5367,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5563,12 +5546,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 5307c6ff4e25..cf95e579c65d 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1111,6 +1102,28 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
rtl8125_config_eee_phy(phydev);
}
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x8088);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x16, 0x808f);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
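The 0x1f/0x16/0x17 writes above are Realtek's indirect extension-page access:
register 0x1f selects the page (0x0b87 here), 0x16 latches the target address
inside that page, and 0x17 is the data window. A hypothetical helper capturing
the pattern, names assumed for illustration only:

    /* Sketch: read-modify-write one word on a Realtek extension page. */
    static void rtl_ext_page_modify(struct phy_device *phydev, u16 page,
    				    u16 addr, u16 mask, u16 set)
    {
    	phy_write(phydev, 0x1f, page);		/* enter extension page */
    	phy_write(phydev, 0x16, addr);		/* latch target address */
    	phy_modify(phydev, 0x17, mask, set);	/* RMW the data window */
    	phy_write(phydev, 0x1f, 0x0000);	/* back to page 0 */
    }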
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1136,7 +1149,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1172,8 +1184,10 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index dbc3f92eebc4..2bbfcad613ab 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -13,6 +13,7 @@
#define RTASE_HW_VER_906X_7XA 0x00800000
#define RTASE_HW_VER_906X_7XC 0x04000000
#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
#define RTASE_RX_DMA_BURST_256 4
#define RTASE_TX_DMA_BURST_UNLIMITED 7
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index c42c0516656b..2aacc1996796 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
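The unlink loop added above is what makes the reset idempotent: rtase_init_ring()
re-adds every ring to its vector's ring_list, and list_add() on a node that is
still linked corrupts the list. A minimal sketch of the safe pattern, assuming
only <linux/list.h>:

    #include <linux/list.h>

    /* Sketch: a node must be unlinked before it can be re-added.
     * list_del() poisons the stale links; a second list_add() on a
     * still-linked node would splice the list into itself.
     */
    static void ring_requeue(struct list_head *head, struct list_head *node)
    {
    	list_del(node);
    	list_add(node, head);
    }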
@@ -1725,6 +1735,7 @@ static int rtase_get_settings(struct net_device *dev,
cmd->base.speed = SPEED_5000;
break;
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
cmd->base.speed = SPEED_10000;
break;
}
@@ -1993,6 +2004,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp)
case RTASE_HW_VER_906X_7XA:
case RTASE_HW_VER_906X_7XC:
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
ret = 0;
break;
}
@@ -2016,7 +2028,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret)
goto err_out_free_dev;
/* make sure PCI base addr 1 is MMIO */
@@ -2032,7 +2044,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret < 0)
+ if (ret)
goto err_out_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2108,7 +2120,7 @@ static int rtase_init_one(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
ret = rtase_init_board(pdev, &dev, &ioaddr);
- if (ret != 0)
+ if (ret)
return ret;
tp = netdev_priv(dev);
@@ -2118,7 +2130,7 @@ static int rtase_init_one(struct pci_dev *pdev,
/* identify chip attached to board */
ret = rtase_check_mac_version_valid(tp);
- if (ret != 0) {
+ if (ret) {
dev_err(&pdev->dev,
"unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
tp->hw_ver);
@@ -2129,7 +2141,7 @@ static int rtase_init_one(struct pci_dev *pdev,
rtase_init_hardware(tp);
ret = rtase_alloc_interrupt(pdev, tp);
- if (ret < 0) {
+ if (ret) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
goto err_out_del_napi;
}
@@ -2174,7 +2186,7 @@ static int rtase_init_one(struct pci_dev *pdev,
netif_carrier_off(dev);
ret = register_netdev(dev);
- if (ret != 0)
+ if (ret)
goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..c9f4976a3527 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -3216,10 +3217,15 @@ static int ravb_suspend(struct device *dev)
netif_device_detach(ndev);
- if (priv->wol_enabled)
- return ravb_wol_setup(ndev);
+ rtnl_lock();
+ if (priv->wol_enabled) {
+ ret = ravb_wol_setup(ndev);
+ rtnl_unlock();
+ return ret;
+ }
ret = ravb_close(ndev);
+ rtnl_unlock();
if (ret)
return ret;
@@ -3244,19 +3250,20 @@ static int ravb_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
/* If WoL is enabled restore the interface. */
- if (priv->wol_enabled) {
+ if (priv->wol_enabled)
ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- } else {
+ else
ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
/* Reopening the interface will restore the device to the working state. */
ret = ravb_open(ndev);
+ rtnl_unlock();
if (ret < 0)
goto out_rpm_put;
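The new rtnl_lock()/rtnl_unlock() bracketing reflects that the open/close and
WoL paths invoked here reach code that expects RTNL to be held, while PM
callbacks themselves run without it. The resulting shape, as a hedged sketch
with hypothetical helpers:

    /* Sketch: take RTNL only around the open/close work and release it on
     * every exit path, including the error returns.
     */
    static int sketch_resume(struct device *dev)
    {
    	struct net_device *ndev = dev_get_drvdata(dev);
    	int ret;

    	if (!netif_running(ndev))
    		return 0;

    	rtnl_lock();
    	ret = sketch_restore(ndev);		/* hypothetical */
    	if (!ret)
    		ret = sketch_open(ndev);	/* hypothetical */
    	rtnl_unlock();

    	return ret;
    }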
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 9ac6e2aad18f..84d09a8973b7 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWPC1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWPC2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* For enabled ETHA ports, setup port based forwarding */
+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
/* Gateway CPU agent block (GWCA) */
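The rework above makes MFWD configuration deny-by-default: every agent starts
with all features off and all forwarding blocked, and only enabled ETHA ports
then get a single port-based route to the GWCA agent. A worked example of the
mask arithmetic, assuming RSWITCH_NUM_AGENTS == 5 and priv->gwca.index == 3:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* FWPBFC_PBDV == GENMASK(4, 0): a 5-bit destination vector, one bit
     * per agent.  FIELD_PREP(FWPBFC_PBDV, BIT(3)) == 0x08, so the only
     * permitted destination for this port is agent 3 (the GWCA).
     */
    u32 pbdv = FIELD_PREP(FWPBFC_PBDV, BIT(3));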
@@ -1159,9 +1169,9 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
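FIELD_PREP() derives the shift from the mask itself, which is why the *_SHIFT
helper macros can be deleted in this series. Checking the arithmetic for the
two MPIC fields as now defined:

    #include <linux/bitfield.h>

    /* MPIC_PSMCS == GENMASK(22, 16) and MPIC_PSMHT == GENMASK(26, 24), so
     *   FIELD_PREP(MPIC_PSMCS, csr) == csr << 16
     *   FIELD_PREP(MPIC_PSMHT, 0x6) == 0x6 << 24
     * and FIELD_GET() is the inverse: mask, then shift back down.
     */
    u32 mpic_val = FIELD_PREP(MPIC_PSMCS, 0x21) | FIELD_PREP(MPIC_PSMHT, 0x06);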
@@ -1190,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1235,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
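Clause 45 MDIO is a two-cycle protocol, which is what the paired
rswitch_etha_mpsm_op() calls above encode: an ADDRESS cycle latches the
register number, then a READ or WRITE cycle moves the data; C22 needs only one
cycle because the register number is part of the frame. A condensed sketch of
the read path, using the POP opcodes defined later in this patch:

    /* Sketch: C45 read = ADDRESS op (latch regad) + READ op (fetch data). */
    static int c45_read_sketch(struct rswitch_etha *etha, int phyad,
    			       int devad, int regad)
    {
    	int ret;

    	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, phyad, devad,
    				   MPSM_POP_ADDRESS, regad);
    	if (ret)
    		return ret;

    	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, phyad, devad,
    				    MPSM_POP_READ_C45, 0);
    }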
@@ -1329,6 +1357,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1549,7 +1579,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1924,9 +1954,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index d8d4ed7d7f8b..532192cbca4b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -12,6 +12,7 @@
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
@@ -731,28 +732,21 @@ enum rswitch_etha_mode {
#define MPIC_LSC_100M 1
#define MPIC_LSC_1G 2
#define MPIC_LSC_2_5G 3
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +800,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -819,15 +814,15 @@ enum rswitch_gwca_mode {
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWPC1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWPC2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
@@ -984,7 +979,6 @@ struct rswitch_gwca {
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8887b8921009..5fc8027c92c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 620ba6ef3514..f70a7b7d6345 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -831,6 +831,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -838,6 +839,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ab358fe13e1d..4cc83203e188 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -897,7 +897,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -989,7 +989,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
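netdev_hold()/netdev_put() are the tracked equivalents of dev_hold()/dev_put():
each reference is registered against a netdevice_tracker embedded in its owner,
so CONFIG_NET_DEV_REFCNT_TRACKER builds can name the holder that leaked. The
pairing, as a minimal sketch with a hypothetical owner struct:

    #include <linux/netdevice.h>

    struct req_sketch {
    	struct net_device *net_dev;
    	netdevice_tracker net_dev_tracker;
    };

    /* take a tracked reference (the GFP covers tracker bookkeeping) */
    static void req_take(struct req_sketch *req, struct net_device *dev)
    {
    	req->net_dev = dev;
    	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
    }

    /* release it: the same tracker must be passed back */
    static void req_drop(struct req_sketch *req)
    {
    	netdev_put(req->net_dev, &req->net_dev_tracker);
    }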
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 9785eff10607..2be3bad3c993 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -753,6 +753,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -760,6 +761,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 082e35c6caaa..2839d0e0a9c1 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -888,7 +888,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -980,7 +980,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6658536a4e17..4cc85a36a1ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -154,6 +154,18 @@ config DWMAC_RZN1
the stmmac device driver. This support can make use of a custom MII
converter PCS device.
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on NXP S32CC SoCs.
+
+ This selects the NXP SoC glue layer support for the stmmac
+ device driver. It is used for the GMAC Ethernet controller
+ on S32CC series SoCs, i.e. S32G2xx, S32G3xx and S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 2389fd261344..b26f0e79c2b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1367fa5c9b8e..e25db747a81a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -257,6 +257,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -543,18 +545,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_hwtimestamp dwmac1000_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
-extern const struct ptp_clock_info stmmac_ptp_clock_ops;
-extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -641,8 +633,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 83290e707df5..b5a7e05ab7a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -46,7 +46,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -181,24 +183,19 @@ static void dwc_qos_remove(struct platform_device *pdev)
static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
bool needs_calibration = false;
+ long rate = 125000000;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
+ rate = rgmii_clock(speed);
break;
default:
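This and the following glue-driver hunks all replace the same open-coded
speed-to-rate switch with rgmii_clock(), introduced in <linux/phy.h> as part
of this series. The helper presumably reduces to the table the deleted code
spelled out, returning a negative errno for anything else; that is why the
callers switch to `long rate` and check `rate < 0`:

    #include <linux/errno.h>
    #include <linux/ethtool.h>	/* SPEED_10/100/1000 */

    /* Sketch of the helper these conversions rely on. */
    static inline long rgmii_clock_sketch(int speed)
    {
    	switch (speed) {
    	case SPEED_10:
    		return 2500000;
    	case SPEED_100:
    		return 25000000;
    	case SPEED_1000:
    		return 125000000;
    	default:
    		return -EINVAL;
    	}
    }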
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 641f3cd019a3..20d3a202bb8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -36,6 +36,8 @@
#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -108,13 +110,21 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
{
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
+ int val, ret;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
case PHY_INTERFACE_MODE_RMII:
+ if (dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
+ }
val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -186,7 +196,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
@@ -196,17 +206,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
(plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
return;
}
@@ -301,15 +302,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
* is required by i.MX8MP, i.MX93.
* is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
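syscon_regmap_lookup_by_phandle_args() folds the old two-step lookup (fetch the
regmap, then read the cell after the phandle with of_property_read_u32_index())
into one call; dwmac-sti and dwmac-stm32 get the same conversion below. A
minimal sketch, assuming a property of the form `intf_mode = <&gpr 0x34>;`:

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    struct regmap *gpr;
    u32 off;

    /* regmap from the phandle, plus its single trailing argument cell */
    gpr = syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1, &off);
    if (IS_ERR(gpr))
    	return PTR_ERR(gpr);
    /* off == 0x34 for the example property above */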
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d94f0a150e93..ddee6154d40b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -31,27 +31,13 @@ struct intel_dwmac_data {
static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct intel_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int ret;
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "Invalid speed\n");
- break;
+ return;
}
ret = clk_set_rate(dwmac->tx_clk, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..ab7c2750c104 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -11,6 +11,8 @@
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -516,6 +518,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
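For reference, readl_poll_timeout(addr, val, cond, sleep_us, timeout_us)
re-reads the register into val until cond becomes true, sleeping sleep_us
between reads and returning -ETIMEDOUT after timeout_us. The arguments above
therefore poll DMA_BUS_MODE every 10 ms for at most 2 s, matching the "nearly
two seconds" comment. A roughly equivalent open-coded loop, as a sketch:

    unsigned long deadline = jiffies + msecs_to_jiffies(2000);
    u32 value;

    do {
    	value = readl(ioaddr + DMA_BUS_MODE);
    	if (!(value & DMA_BUS_MODE_SFT_RESET))
    		return 0;
    	msleep(10);
    } while (time_before(jiffies, deadline));

    return -ETIMEDOUT;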
@@ -555,7 +570,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
if (ret)
goto err_disable_device;
break;
@@ -566,6 +581,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -673,7 +689,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 901a3c1959fa..2a5b38723635 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -777,7 +777,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8cb374668b74..a4dc89e23a68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1079,20 +1079,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
return;
}
@@ -1540,20 +1531,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
return;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..9cc0e5817416
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define PHY_INTF_SEL_MII 0x00
+#define PHY_INTF_SEL_SGMII 0x01
+#define PHY_INTF_SEL_RGMII 0x02
+#define PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+{
+ struct s32_priv_data *gmac = priv;
+ long tx_clk_rate;
+ int ret;
+
+ tx_clk_rate = rgmii_clock(speed);
+ if (tx_clk_rate < 0) {
+ dev_err(gmac->dev, "Unsupported/Invalid speed: %u\n", speed);
+ return;
+ }
+
+ dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
+ ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
+ if (ret)
+ dev_err(gmac->dev, "Can't set tx clock\n");
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->has_gmac4 = true;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+ plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 421666279dd3..0a0a363d3730 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -34,24 +34,13 @@ struct starfive_dwmac {
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
+ return;
}
err = clk_set_rate(dwmac->clk_tx, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a6ff02d905a9..f25461c292fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,10 +21,7 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
iface == PHY_INTERFACE_MODE_RGMII_ID || \
@@ -140,7 +137,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -153,19 +150,14 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
}
} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
@@ -207,16 +199,11 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
err = of_get_phy_mode(np, &dwmac->interface);
if (err && err != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
@@ -321,7 +308,6 @@ static void sti_dwmac_remove(struct platform_device *pdev)
clk_disable_unprepare(dwmac->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int sti_dwmac_suspend(struct device *dev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
@@ -341,10 +327,9 @@ static int sti_dwmac_resume(struct device *dev)
return stmmac_resume(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
+ sti_dwmac_resume);
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
@@ -361,7 +346,7 @@ static struct platform_driver sti_dwmac_driver = {
.remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = pm_sleep_ptr(&sti_dwmac_pm_ops),
.of_match_table = sti_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1e8bac665cc9..1fcb74e9e3ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -419,16 +419,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
- return err;
- }
-
if (dwmac->ops->is_mp2)
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25781874aa7..9ed8620580a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -27,7 +27,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
+ unsigned long clk_rate;
value |= GMAC_CORE_INIT;
@@ -420,10 +420,10 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
+ u32 value = et & STMMAC_ET_MAX;
int regval;
/* Program LPI entry timer value into register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 1ce6f43d545a..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a04a79003692..20027d3c25a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -493,4 +493,9 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index a72d336a8350..31bdbab9a46c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,8 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -265,7 +267,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -288,7 +290,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 64f8ed67dcc4..0f200b72c225 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -363,7 +363,7 @@ struct stmmac_ops {
void (*set_eee_mode)(struct mac_device_info *hw,
bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -665,6 +665,15 @@ struct stmmac_regs_off {
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -676,14 +685,6 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_tc_ops dwxgmac_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1d86439b8a14..f05cae103d83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -126,7 +126,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -266,7 +266,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -307,11 +306,9 @@ struct stmmac_priv {
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
@@ -407,8 +404,6 @@ void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
@@ -418,14 +413,6 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..d247fa383a6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -62,3 +62,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1d77389ce953..918a32f8fda8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -654,7 +654,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -898,9 +898,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -908,29 +905,11 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
-
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1227,43 +1206,6 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
@@ -1390,8 +1332,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c81ea8cdfe6e..554d2c0a8fde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -77,7 +77,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -107,15 +106,13 @@ static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -197,8 +194,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -301,7 +296,7 @@ static void stmmac_global_err(struct stmmac_priv *priv)
*/
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
- u32 clk_rate;
+ unsigned long clk_rate;
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
@@ -325,6 +320,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
+ else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
+ priv->clk_csr = STMMAC_CSR_300_500M;
+ else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
+ priv->clk_csr = STMMAC_CSR_500_800M;
}
if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
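The two new ranges above extend the CSR clock mapping to buses clocked between 300 and 800 MHz. A minimal standalone sketch of the selection logic; the CSR_F_* thresholds and the STMMAC_CSR_* register values used here are stand-ins for the driver's constants, not taken from the source:

#include <stdio.h>

#define CSR_F_300M 300000000UL
#define CSR_F_500M 500000000UL
#define CSR_F_800M 800000000UL

enum { STMMAC_CSR_300_500M = 0x6, STMMAC_CSR_500_800M = 0x7 };

static int clk_to_csr(unsigned long clk_rate)
{
        /* The new upper ranges added by this hunk; note the unsigned
         * long rate, matching clk_get_rate()'s return type. */
        if (clk_rate >= CSR_F_300M && clk_rate < CSR_F_500M)
                return STMMAC_CSR_300_500M;
        if (clk_rate >= CSR_F_500M && clk_rate < CSR_F_800M)
                return STMMAC_CSR_500_800M;
        return -1; /* outside the ranges shown here */
}

int main(void)
{
        printf("%d\n", clk_to_csr(400000000UL)); /* prints 6 */
        return 0;
}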
@@ -391,23 +390,17 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
{
- int tx_lpi_timer;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
+}
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
+{
+ stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
}
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -417,29 +410,43 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
+ }
+
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
+ * @priv: driver private structure
+ * Description: this function is to verify and enter in LPI mode in case of
+ * EEE.
+ */
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
+{
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
+ return;
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_stop_sw_lpi - stop transmitting LPI
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
- return;
- }
-
stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -456,25 +463,27 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ stmmac_try_to_start_sw_lpi(priv);
}
/**
* stmmac_eee_init - init EEE
* @priv: driver private structure
+ * @active: indicates whether EEE should be enabled.
* Description:
* if the GMAC supports the EEE (from the HW cap reg) and the phy device
* can also manage EEE, this function enable the LPI state and start related
* timer.
*/
-bool stmmac_eee_init(struct stmmac_priv *priv)
+static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
{
- int eee_tw_timer = priv->eee_tw_timer;
+ priv->eee_active = active;
/* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
+ if (!priv->dma_cap.eee) {
+ priv->eee_enabled = false;
+ return;
+ }
mutex_lock(&priv->lock);
@@ -482,22 +491,24 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (!priv->eee_active) {
if (priv->eee_enabled) {
netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
+ priv->eee_sw_timer_en = false;
+ stmmac_disable_hw_lpi_timer(priv);
del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0,
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
false);
}
+ priv->eee_enabled = false;
mutex_unlock(&priv->lock);
- return false;
+ return;
}
if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
@@ -505,18 +516,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
}
if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ /* Use hardware LPI mode */
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
+ priv->eee_sw_timer_en = false;
+ stmmac_enable_hw_lpi_timer(priv);
} else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
+ /* Use software LPI mode */
+ priv->eee_sw_timer_en = true;
+ stmmac_disable_hw_lpi_timer(priv);
+ stmmac_restart_sw_lpi_timer(priv);
}
+ priv->eee_enabled = true;
+
mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
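The reworked stmmac_eee_init() above picks between the GMAC4 hardware LPI entry timer and the software eee_ctrl_timer. A sketch of that decision, assuming STMMAC_ET_MAX is the largest value the hardware entry-timer register accepts (the value below is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define STMMAC_ET_MAX 0xFFFFF /* assumed hardware entry-timer limit */

static bool use_hw_lpi_timer(bool has_gmac4, unsigned int tx_lpi_timer)
{
        /* Only GMAC4 has an LPI entry-timer register, and only values
         * that fit in it can be offloaded; anything else falls back to
         * the software eee_ctrl_timer. */
        return has_gmac4 && tx_lpi_timer <= STMMAC_ET_MAX;
}

int main(void)
{
        printf("%d %d\n", use_hw_lpi_timer(true, 2000000),
               use_hw_lpi_timer(true, 100000)); /* prints 0 1 */
        return 0;
}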
@@ -973,10 +988,8 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, false);
@@ -1083,14 +1096,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, true);
@@ -1099,12 +1106,32 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_eee_init(priv, false);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->tx_lpi_timer = timer;
+ stmmac_eee_init(priv, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
};
/**
@@ -1177,15 +1204,27 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
- if (priv->dma_cap.eee)
- phy_support_eee(phydev);
-
ret = phylink_connect_phy(priv->phylink, phydev);
} else {
fwnode_handle_put(phy_fwnode);
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
+ if (ret == 0) {
+ struct ethtool_keee eee;
+
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is
+ * a chance that userspace could change the eee_timer setting
+ * via sysfs before the first open. Thus, preserve existing
+ * behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
+ }
+ }
+
if (!priv->plat->pmt) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -1202,6 +1241,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
@@ -1211,6 +1251,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
/* Stmmac always requires an RX clock for hardware initialization */
priv->phylink_config.mac_requires_rxc = true;
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
+ priv->phylink_config.eee_rx_clk_stop_enable = true;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.default_an_inband =
@@ -1223,8 +1266,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
+
+ if (pcs)
+ phy_interface_or(priv->phylink_config.supported_interfaces,
+ priv->phylink_config.supported_interfaces,
+ pcs->supported_interfaces);
+
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(priv->phylink_config.lpi_interfaces,
+ priv->phylink_config.supported_interfaces,
+ sizeof(priv->phylink_config.lpi_interfaces));
+
+ /* All full duplex speeds above 100Mbps are supported */
+ priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
+ MAC_100FD;
+ priv->phylink_config.lpi_timer_default = eee_timer * 1000;
+ priv->phylink_config.eee_enabled_default = true;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
@@ -1307,6 +1369,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -2003,22 +2073,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
+
+ if (priv->sph) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
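The buffer sizing above pads the DMA buffer with the RX offset and the skb_shared_info tail, rounds up to whole pages, and derives the page-pool order from the page count. A runnable sketch of the arithmetic; SKB_DATA_ALIGN and the concrete sizes are simplified assumptions:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* order_base_2(n) == ceil(log2(n)); minimal loop version */
static unsigned int order_base_2(unsigned int n)
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned int rx_offset = 64;    /* NET_SKB_PAD, assumed */
        unsigned int dma_buf_sz = 4096; /* typical buffer size */
        unsigned int shinfo = 320;      /* aligned skb_shared_info, assumed */
        unsigned int pad = rx_offset + dma_buf_sz + shinfo;
        unsigned int num_pages = DIV_ROUND_UP(pad, PAGE_SIZE);

        /* order_base_2 rounds up where ilog2 rounds down, so e.g. a
         * 3-page requirement gets an order-2 (4-page) allocation
         * instead of an undersized order-1 one. */
        printf("pages=%u order=%u frag=%u\n",
               num_pages, order_base_2(num_pages), num_pages * PAGE_SIZE);
        return 0;
}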
@@ -2757,11 +2836,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -3123,8 +3199,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3436,12 +3511,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
else if (ptp_register)
stmmac_ptp_register(priv);
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3908,6 +3977,10 @@ static int __stmmac_open(struct net_device *dev,
u32 chan;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
return ret;
@@ -3923,8 +3996,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
@@ -4024,11 +4095,6 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4117,11 +4183,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4167,17 +4229,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
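The rewritten descriptor diagram above says DES1 always carries the upper bits of the buffer1 address on 40/48-bit DMA. A sketch of that split; the helper name is ours, only the 32-bit boundary comes from the layout:

#include <stdint.h>
#include <stdio.h>

static void set_desc_addr(uint32_t *des0, uint32_t *des1, uint64_t addr)
{
        *des0 = (uint32_t)addr;         /* low 32 bits of buffer1 */
        *des1 = (uint32_t)(addr >> 32); /* upper 8/16 bits on 40/48-bit DMA */
}

int main(void)
{
        uint32_t des0, des1;

        set_desc_addr(&des0, &des1, 0x12345678ABCDull);
        printf("des0=%08x des1=%08x\n", des0, des1); /* 5678abcd 00001234 */
        return 0;
}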
@@ -4187,15 +4259,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
- dma_addr_t tso_des, des;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
+ dma_addr_t des;
bool set_ic;
- int i;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4280,24 +4351,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- tso_des = des;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- tso_des = des + proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
@@ -4401,11 +4457,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4492,7 +4546,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
@@ -5473,7 +5527,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5491,10 +5545,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5516,6 +5566,8 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
@@ -5569,22 +5621,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5592,9 +5648,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
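The napi_build_skb() conversion above turns the page itself into the skb head instead of allocating and copying, so whatever headroom the XDP program left is simply reserved. A pointer-arithmetic sketch with illustrative offsets:

#include <stdio.h>

int main(void)
{
        char frag[4096];
        char *data_hard_start = frag;       /* page_address(buf->page) */
        char *data = frag + 256;            /* xdp.data after the program ran */
        char *data_end = frag + 256 + 1500; /* xdp.data_end */

        unsigned long head_pad_len = data - data_hard_start; /* skb_reserve */
        unsigned long buf1_len = data_end - data;            /* skb_put */

        printf("reserve %lu bytes, put %lu bytes\n", head_pad_len, buf1_len);
        return 0;
}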
@@ -5604,9 +5657,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -6489,11 +6539,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -6923,8 +6969,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7175,6 +7220,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
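The four checks added above share one pattern: cap a platform-requested value at what the DMA capability register reports, warning when it is lowered. A generic sketch with simplified types and names:

#include <stdio.h>

static unsigned int clamp_to_cap(const char *what, unsigned int want,
                                 unsigned int cap)
{
        /* cap == 0 means the capability register reports no limit, so
         * the platform value is trusted as-is. */
        if (cap && want > cap) {
                fprintf(stderr, "%s (%u) exceeds dma capability\n", what, want);
                return cap;
        }
        return want;
}

int main(void)
{
        unsigned int rx_queues = clamp_to_cap("Rx queues", 8, 4);

        printf("using %u rx queues\n", rx_queues); /* prints 4 */
        return 0;
}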
@@ -7407,6 +7482,8 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7714,7 +7791,7 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dc9884130b91..d0e61aa1a495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -626,7 +626,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4cc70480ce0f..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6a79e6a111ed..694d6ee14381 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1284,14 +1284,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.query_caps = tc_query_caps,
.setup_mqprio = tc_setup_dwmac510_mqprio,
};
-
-const struct stmmac_tc_ops dwxgmac_tc_ops = {
- .init = tc_init,
- .setup_cls_u32 = tc_setup_cls_u32,
- .setup_cbs = tc_setup_cbs,
- .setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio,
- .setup_etf = tc_setup_etf,
- .query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_dwmac510_mqprio,
-};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df6d35d41b97..72177fea1cfb 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
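The niu changes above move the DMA base address from page->index into page->private; the hash-chain structure itself is unchanged. A sketch of that rx page hash, with a plain struct standing in for struct page:

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16

struct fake_page {
        uint64_t private;        /* DMA base address, as page->private above */
        struct fake_page *next;  /* chain link, as niu_next_page() above */
};

static struct fake_page *rxhash[HASH_SIZE];

static unsigned int hash_rxaddr(uint64_t addr)
{
        return (addr >> 12) & (HASH_SIZE - 1); /* page-granular bucket */
}

static void hash_page(struct fake_page *page, uint64_t base)
{
        unsigned int h = hash_rxaddr(base);

        page->private = base;
        page->next = rxhash[h];
        rxhash[h] = page;
}

int main(void)
{
        struct fake_page p = { 0 };

        hash_page(&p, 0x1000);
        printf("bucket %u holds base %#lx\n", hash_rxaddr(0x1000),
               (unsigned long)rxhash[hash_rxaddr(0x1000)]->private);
        return 0;
}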
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np)
int i;
for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
+ napi_enable_locked(&np->ldg[i].napi);
}
static void niu_disable_napi(struct niu *np)
@@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev)
if (err)
goto out_free_channels;
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
spin_lock_irq(&np->lock);
@@ -6460,7 +6462,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
@@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work)
niu_reset_buffers(np);
+ netdev_lock(np->dev);
spin_lock_irqsave(&np->lock, flags);
err = niu_init_hw(np);
@@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(np->dev);
}
static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -6761,7 +6765,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
niu_free_channels(np);
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
err = niu_alloc_channels(np);
if (err)
@@ -9937,6 +9943,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
spin_lock_irqsave(&np->lock, flags);
+ netdev_lock(dev);
err = niu_init_hw(np);
if (!err) {
np->timer.expires = jiffies + HZ;
@@ -9945,6 +9952,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(dev);
return err;
}
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 721d8ed3f302..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..3a13d60a947a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 5465bf872734..4a8e6b9413e3 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -32,6 +32,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -497,35 +498,62 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int id, port;
+ int port;
- for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
+ flow = &rx_chn->flows[id];
+ napi_disable(&flow->napi_rx);
+ hrtimer_cancel(&flow->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+ am65_cpsw_nuss_rx_cleanup, !!id);
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[port].xdp_rxq[id];
+ rxq = &common->ports[port].xdp_rxq[id];
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (flow->page_pool) {
- page_pool_destroy(flow->page_pool);
- flow->page_pool = NULL;
- }
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
}
}
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ int id;
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
+ for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct page_pool_params pp_params = {
@@ -540,45 +568,162 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int id, port, ret;
+ struct page *page;
+ int port, ret, i;
+
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ return ret;
+ }
+
+ flow->page_pool = pool;
+
+ /* using same page pool is allowed as no running rx handlers
+ * simultaneously for both ndevs
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ /* FIXME should we BUG here? */
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, page, id);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ id, ret);
+ am65_cpsw_put_page(flow, page, false);
+ goto err;
+ }
+ }
+
+ napi_enable(&flow->napi_rx);
+ return 0;
+
+err:
+ am65_cpsw_destroy_rxq(common, id);
+ return ret;
+}
+
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
- pp_params.napi = &flow->napi_rx;
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool)) {
- ret = PTR_ERR(pool);
+ ret = am65_cpsw_create_rxq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create rxq %d: %d\n",
+ id, ret);
goto err;
}
+ }
+
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto err;
+ }
- flow->page_pool = pool;
+ return 0;
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
- rxq = &common->ports[port].xdp_rxq[id];
+ return ret;
+}
+
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
- ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
- id, flow->napi_rx.napi_id);
- if (ret)
- goto err;
+ napi_disable(&tx_chn->napi_tx);
+ hrtimer_cancel(&tx_chn->tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
+
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+ int id;
- ret = xdp_rxq_info_reg_mem_model(rxq,
- MEM_TYPE_PAGE_POOL,
- pool);
- if (ret)
- goto err;
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (id = 0; id < common->tx_ch_num; id++)
+ k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
+
+ id = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "tx teardown timeout\n");
+
+ for (id = common->tx_ch_num - 1; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+ int ret;
+
+ ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+ if (ret)
+ return ret;
+
+ napi_enable(&tx_chn->napi_tx);
+
+ return 0;
+}
+
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->tx_ch_num; id++) {
+ ret = am65_cpsw_create_txq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create txq %d: %d\n",
+ id, ret);
+ goto err;
}
}
return 0;
err:
- am65_cpsw_destroy_xdp_rxqs(common);
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+
return ret;
}
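The create/destroy split above follows the usual construct-forward, unwind-backward error-handling pattern: on failure, tear down only what was already built, newest first. A self-contained sketch:

#include <stdio.h>

static int create_one(int id)   { return id == 3 ? -1 : 0; } /* fail at 3 */
static void destroy_one(int id) { printf("destroy %d\n", id); }

static int create_all(int count)
{
        int id, ret = 0;

        for (id = 0; id < count; id++) {
                ret = create_one(id);
                if (ret)
                        goto err;
        }
        return 0;

err:
        for (--id; id >= 0; id--) /* unwind newest first */
                destroy_one(id);
        return ret;
}

int main(void)
{
        create_all(5); /* prints destroy 2, 1, 0 */
        return 0;
}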
@@ -642,7 +787,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
@@ -684,21 +828,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = *(swdata);
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = *(swdata);
+ xdp_return_frame(xdpf);
+ }
- dev_kfree_skb_any(skb);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
}
static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
struct net_device *ndev,
- unsigned int len)
+ unsigned int len,
+ unsigned int headroom)
{
struct sk_buff *skb;
@@ -708,7 +861,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb_reserve(skb, headroom);
skb->dev = ndev;
return skb;
@@ -717,12 +870,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx, flow_idx;
- struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
- struct page *page;
+ int port_idx, ret;
if (common->usage_count)
return 0;
@@ -762,7 +911,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -782,151 +931,38 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- ret = am65_cpsw_create_xdp_rxqs(common);
- if (ret) {
- dev_err(common->dev, "Failed to create XDP rx queues\n");
+ ret = am65_cpsw_create_rxqs(common);
+ if (ret)
return ret;
- }
-
- for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
- page = page_pool_dev_alloc_pages(flow->page_pool);
- if (!page) {
- dev_err(common->dev, "cannot allocate page in flow %d\n",
- flow_idx);
- ret = -ENOMEM;
- goto fail_rx;
- }
-
- ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to rx channel flow %d, error %d\n",
- flow_idx, ret);
- am65_cpsw_put_page(flow, page, false);
- goto fail_rx;
- }
- }
- }
- ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
- goto fail_rx;
- }
-
- for (i = 0; i < common->rx_ch_num_flows ; i++) {
- napi_enable(&rx_chn->flows[i].napi_rx);
- if (rx_chn->flows[i].irq_disabled) {
- rx_chn->flows[i].irq_disabled = false;
- enable_irq(rx_chn->flows[i].irq);
- }
- }
-
- for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
- tx, ret);
- tx--;
- goto fail_tx;
- }
- napi_enable(&tx_chn[tx].napi_tx);
- }
+ ret = am65_cpsw_create_txqs(common);
+ if (ret)
+ goto cleanup_rx;
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-fail_tx:
- while (tx >= 0) {
- napi_disable(&tx_chn[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
- tx--;
- }
-
- for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- if (!flow->irq_disabled) {
- disable_irq(flow->irq);
- flow->irq_disabled = true;
- }
- napi_disable(&flow->napi_rx);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
- for (i = 0; i < common->rx_ch_num_flows; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
-
- am65_cpsw_destroy_xdp_rxqs(common);
+cleanup_rx:
+ am65_cpsw_destroy_rxqs(common);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int i;
-
if (common->usage_count != 1)
return 0;
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- /* shutdown tx channels */
- atomic_set(&common->tdown_cnt, common->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- reinit_completion(&common->tdown_complete);
-
- for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
-
- i = wait_for_completion_timeout(&common->tdown_complete,
- msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&tx_chn[i].napi_tx);
- hrtimer_cancel(&tx_chn[i].tx_hrtimer);
- }
-
- for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
- am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
- }
-
- reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
- if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
- i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "rx teardown timeout\n");
- }
-
- for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
- napi_disable(&rx_chn->flows[i].napi_rx);
- hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+ am65_cpsw_destroy_txqs(common);
+ am65_cpsw_destroy_rxqs(common);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- am65_cpsw_destroy_xdp_rxqs(common);
-
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -1014,6 +1050,15 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
am65_cpsw_port_enable_dscp_map(port);
@@ -1133,9 +1178,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct xdp_frame *xdpf;
struct bpf_prog *prog;
struct page *page;
+ int pkt_len;
u32 act;
int err;
+ pkt_len = *len;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
return AM65_CPSW_XDP_PASS;
@@ -1153,8 +1200,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
__netif_tx_lock(netif_txq, cpu);
err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1163,14 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- dev_sw_netstats_tx_add(ndev, 1, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_CONSUMED;
goto out;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- dev_sw_netstats_rx_add(ndev, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_REDIRECT;
goto out;
default:
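The stats fix above snapshots the packet length into pkt_len before the XDP program runs, so the rx byte accounting reflects the received length even if the program later adjusts the buffer. A sketch of why the snapshot matters:

#include <stdio.h>

struct stats { unsigned long rx_packets, rx_bytes; };

static void sw_netstats_rx_add(struct stats *s, unsigned int len)
{
        s->rx_packets++;
        s->rx_bytes += len;
}

int main(void)
{
        struct stats s = { 0 };
        unsigned int len = 1500;
        unsigned int pkt_len = len; /* snapshot before the program runs */

        len = 1400;                 /* program trimmed the buffer */
        sw_netstats_rx_add(&s, pkt_len);
        printf("%lu pkts %lu bytes\n", s.rx_packets, s.rx_bytes); /* 1 1500 */
        return 0;
}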
@@ -1279,16 +1328,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE);
- if (unlikely(!skb)) {
- new_page = page;
- goto requeue;
- }
-
if (port->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1298,9 +1339,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
- /* Compute additional headroom to be reserved */
- headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
- skb_reserve(skb, headroom);
+ headroom = xdp.data - xdp.data_hard_start;
+ } else {
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
ndev_priv = netdev_priv(ndev);
@@ -2242,13 +2290,11 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
struct device *dev = common->dev;
int i;
- devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
-
common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq > 0)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
@@ -2260,15 +2306,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2281,7 +2329,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
}
}
+ return 0;
+
err:
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+
return ret;
}
@@ -2362,12 +2419,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ return 0;
+
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
@@ -2395,7 +2450,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
rx_chn = &common->rx_chns;
flows = rx_chn->flows;
- devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
for (i = 0; i < common->rx_ch_num_flows; i++) {
if (!(flows[i].irq < 0))
@@ -2494,7 +2548,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
i, &rx_flow_cfg);
if (ret) {
dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
- goto err;
+ goto err_flow;
}
if (!i)
fdqring_id =
@@ -2506,17 +2560,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "Failed to get rx dma irq %d\n",
flow->irq);
ret = flow->irq;
- goto err;
+ goto err_flow;
}
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
netif_napi_add(common->dma_ndev, &flow->napi_rx,
am65_cpsw_nuss_rx_poll);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2526,20 +2580,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err;
+ goto err_request_irq;
}
}
/* setup classifier to route priorities to flows */
cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
-err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
- return i;
+ return 0;
+
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
+err_flow:
+ for (--i; i >= 0; i--) {
+ flow = &rx_chn->flows[i];
+ devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
+err:
+ am65_cpsw_nuss_free_rx_chns(common);
+
return ret;
}
@@ -2559,20 +2621,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
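
syscon_regmap_lookup_by_phandle_args() resolves the phandle and its trailing argument cells in a single call, which is what lets the separate of_property_read_u32_index() step be dropped. A minimal caller sketch, assuming the same one-argument-cell binding:

	unsigned int offset;
	struct regmap *map;

	map = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-efuse",
						   1, &offset);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* covers lookup and argument errors */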
@@ -3349,7 +3406,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
- return ret;
+ goto err_remove_tx;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
@@ -3370,7 +3427,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
- return ret;
+ goto err_remove_rx;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -3401,6 +3458,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+err_remove_rx:
+ am65_cpsw_nuss_remove_rx_chns(common);
+err_remove_tx:
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3420,6 +3481,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3678,6 +3741,8 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
@@ -3739,8 +3804,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
if (ret)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
+ if (ret) {
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
+ }
/* If RX IRQ was disabled before suspend, keep it disabled */
for (i = 0; i < common->rx_ch_num_flows; i++) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4ef8cf6ea135..0cb6fa6e5b7d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -635,6 +635,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +686,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1225,7 +1227,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
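
cpsw cannot honor EEE in the MAC, so the ethtool setter is dropped (see cpsw_ethtool.c below) and phy_disable_eee() keeps the PHY from advertising EEE at attach time. For contrast, a hedged sketch of what a MAC that does implement EEE would keep, with hypothetical driver names:

static int foo_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* phylib negotiates the advertisement; a real MAC driver would
	 * also program its own LPI timers here.
	 */
	return phy_ethtool_set_eee(priv->phydev, edata);
}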
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..5cc72a91f220 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
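
The subtlety these two hunks fix: ALE entry words are stored most-significant-word-first (array slot = 2 - word number), but the shift distance must come from the logical word number, not the flipped array slot. A worked case for the straddling path, assuming start = 62 and bits = 4:

/*
 * idx = 62 / 32 = 1, idx2 = 65 / 32 = 2: the field straddles words 1 and 2.
 *
 *   old: idx2 = 2 - idx2 = 0;  hi_val = ale_entry[0] << ((0 * 32) - 62);
 *        -> shift count of -62, undefined behaviour
 *   new: index = 2 - idx2 = 0; hi_val = ale_entry[0] << ((2 * 32) - 62);
 *        -> shift by 2, word 2's bits land just above the low fragment
 */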
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 21d55a180ef6..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a98bcc5eb566..cec0a90659d9 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -554,7 +554,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +778,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1209,7 +1211,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 1f448290b9f4..f2fc55d9295d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -497,7 +497,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 768578c0d958..d59c1744840a 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- int ret = 0;
-
- mutex_lock(&iep->ptp_clk_mutex);
-
- if (iep->pps_enabled) {
- ret = -EBUSY;
- goto exit;
- }
-
- if (iep->perout_enabled == !!on)
- goto exit;
-
- ret = icss_iep_perout_enable_hw(iep, req, on);
- if (!ret)
- iep->perout_enabled = !!on;
-
-exit:
- mutex_unlock(&iep->ptp_clk_mutex);
-
- return ret;
+ return -EOPNOTSUPP;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..6c1b8ff563e0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -249,9 +249,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index d76fe6d05e10..4d496c8f479d 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -561,61 +561,134 @@ const struct icss_iep_clockops prueth_iep_clockops = {
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
- port_mask |= icssg_fdb_lookup(emac, addr, 0);
- icssg_fdb_add_del(emac, addr, 0, port_mask, true);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
return 0;
}
static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
- icssg_fdb_add_del(emac, addr, 0, port_mask, false);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
if (other_port_mask) {
- icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
- icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
}
return 0;
}
-static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
-
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ icssg_fdb_add_del(emac, addr, vid,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, true);
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
- icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
- BIT(emac->port_id), true);
return 0;
}
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, false);
+ return 0;
+}
+
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
return 0;
}
@@ -857,12 +930,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
__dev_mc_sync(ndev, icssg_prueth_add_mcast,
icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
}
/**
@@ -907,19 +990,19 @@ static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
- port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
- untag_mask = 0;
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);
- netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
- icssg_set_pvid(emac->prueth, vid, emac->port_id);
- }
return 0;
}
@@ -928,18 +1011,16 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
+ if (prueth->is_hsr_offload_mode)
port_mask = BIT(PRUETH_PORT_HOST);
- untag_mask = 0;
- netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- }
return 0;
}
@@ -1088,9 +1169,8 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1254,7 +1334,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
prueth->br_members & BIT(PRUETH_PORT_MII1)) {
prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
emac->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
}
@@ -1312,7 +1392,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
return -EOPNOTSUPP;
prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
emac0->port_vlan = prueth->default_vlan;
emac1->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
@@ -1598,6 +1678,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
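
vlan_for_each() walks a device's VLAN uppers and requires the RTNL, which is why the rx-mode worker above only takes the walk opportunistically via rtnl_trylock() rather than blocking in a workqueue. A minimal sketch of the callback shape (names hypothetical):

static int count_vlan_uppers(struct net_device *vdev, int vid, void *arg)
{
	(*(int *)arg)++;	/* invoked once per VLAN upper, RTNL held */
	return 0;		/* a non-zero return aborts the walk */
}

static int foo_count_uppers(struct net_device *ndev)
{
	int n = 0;

	if (rtnl_trylock()) {
		vlan_for_each(ndev, count_vlan_uppers, &n);
		rtnl_unlock();
	}
	return n;
}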
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 5473315ea204..f41786b05741 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -83,6 +83,12 @@
#define ICSS_CMD_ADD_FILTER 0x7
#define ICSS_CMD_ADD_MAC 0x8
+/* VLAN filtering related macros */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
/* In switch mode there are 3 real ports i.e. 3 mac addrs.
* however Linux sees only the host side port. The other 2 ports
* are the switch ports.
@@ -200,6 +206,8 @@ struct prueth_emac {
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
};
/**
@@ -297,6 +305,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 3dc86397c367..64a19ff39562 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -1031,8 +1031,6 @@ static int prueth_probe(struct platform_device *pdev)
(unsigned long)prueth->msmcram.va);
prueth->msmcram.size = msmc_ram_size;
memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);
prueth->iep0 = icss_iep_get_idx(np, 0);
if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..6f0edae38ea2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 894911f3d560..e56ebbdd428d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1568,7 +1568,7 @@ static void init_registers(struct net_device *dev)
if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
- napi_enable(&rp->napi);
+ napi_enable_locked(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
@@ -1696,7 +1696,10 @@ static int rhine_open(struct net_device *dev)
rhine_power_init(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
init_registers(dev);
+ netdev_unlock(dev);
netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
__func__, ioread16(ioaddr + ChipCmd),
@@ -1727,6 +1730,8 @@ static void rhine_reset_task(struct work_struct *work)
napi_disable(&rp->napi);
netif_tx_disable(dev);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1740,6 +1745,7 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
@@ -2541,9 +2547,12 @@ static int rhine_resume(struct device *device)
alloc_tbufs(dev);
rhine_reset_rbufs(rp);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_device_attach(dev);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
- napi_disable(&vptr->napi);
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);
spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);
velocity_free_rings(tmp_vptr);
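
Both VIA conversions apply the same rule: napi_enable()/napi_disable() now take the netdev instance lock internally, so a caller that must hold that lock across a wider critical section switches to the _locked variants. The idiom, as a sketch rather than driver code:

	netdev_lock(dev);			/* instance lock, not RTNL */
	napi_disable_locked(&priv->napi);
	/* ... reprogram rings/registers consistently with NAPI stopped ... */
	napi_enable_locked(&priv->napi);
	netdev_unlock(dev);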
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d64b8abcf018..a3f4f3e42587 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -120,6 +120,9 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE (125ULL * USEC_PER_SEC)
+
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..f33178f90c42 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,11 +238,8 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
/* 1 Timeout Interval = 125 * (clock period of SG clock) */
result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
-
- return result;
+ XAXIDMA_DELAY_SCALE);
+ return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
}
/**
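
The new constant makes the conversion explicit: one timeout interval is 125 SG-clock periods, so the tick count is usecs * clk_rate / (125 * USEC_PER_SEC), clamped to the 8-bit delay field. A worked example, assuming a 150 MHz SG clock:

/*
 * coalesce_usec = 50, clk_rate = 150000000:
 *
 *   50 * 150000000 / (125 * 1000000) = 60 timer ticks
 *
 * FIELD_MAX(XAXIDMA_DELAY_MASK) evaluates to 255, replacing the
 * open-coded cap.
 */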
@@ -2056,14 +2053,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
+ NL_SET_ERR_MSG(extack,
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
+ lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
@@ -2331,11 +2345,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2882,6 +2897,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc658bc60885..dbb3960126ee 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -235,7 +235,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
/* Update tunnel dst according to Geneve options. */
@@ -322,7 +322,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(geneve->dev, len);
+ dev_dstats_rx_add(geneve->dev, len);
return;
drop:
@@ -387,14 +387,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely((!geneve->cfg.inner_proto_inherit &&
inner_proto != htons(ETH_P_TEB)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
@@ -1023,7 +1023,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
dev_kfree_skb(skb);
- DEV_STATS_INC(dev, tx_dropped);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
} else {
@@ -1202,7 +1202,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
- struct net_device *dev, *aux;
- /* gather any geneve devices that were moved into this ns */
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, head);
-
- /* now gather any other geneve devices that were created in this ns */
- list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
- /* If geneve->dev is in the same netns, it was already added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, head);
- }
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
+ geneve_dellink(geneve->dev, head);
}
static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
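
Switching pcpu_stat_type to NETDEV_PCPU_STAT_DSTATS makes the core allocate per-CPU counters that include drops, so the datapath no longer mixes per-CPU byte accounting with the atomic DEV_STATS_INC() fallback. The resulting pattern, in sketch form:

	/* at setup time */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

	/* rx path: both counters are lockless per-CPU updates */
	if (unlikely(bad_packet))
		dev_dstats_rx_dropped(dev);
	else
		dev_dstats_rx_add(dev, skb->len);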
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89a996ad8cd0..b7b46c5e6399 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -23,6 +23,8 @@
#include <net/net_namespace.h>
#include <net/protocol.h>
+#include <net/inet_dscp.h>
+#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
@@ -350,7 +352,7 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = ip_sock_rt_tos(sk);
+ fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk)));
fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -1524,8 +1526,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1551,7 +1553,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -2271,16 +2273,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2475,9 +2480,9 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_dev *gtp, *gtp_next;
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e690b95b1bbb..234db693cefa 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -464,7 +464,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* LargeOffset SmallOffset
*/
- struct nvsp_1_receive_buffer_section sections[1];
+ struct nvsp_1_receive_buffer_section sections[];
} __packed;
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9afb08dbc350..d6f5b9ea3109 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -866,7 +866,8 @@ static void netvsc_send_completion(struct net_device *ndev,
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
- sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ struct_size_t(struct nvsp_1_message_send_receive_buffer_complete,
+ sections, 1)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
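
With the [1]-sized array turned into a true flexible array, sizeof() no longer accounts for a section, so the length check moves to struct_size_t(), which computes the size of a type plus N trailing elements without needing an instance. In sketch form:

	/* minimum valid message: header plus exactly one section */
	size_t min_len = sizeof(struct nvsp_message_header) +
			 struct_size_t(struct nvsp_1_message_send_receive_buffer_complete,
				       sections, 1);

	if (msglen < min_len)
		return;		/* reject truncated buffer-complete messages */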
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f632b0cfd5ae..fd91f8a45bce 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -776,8 +776,8 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp,
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
- hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- state->timer.function = at86rf230_async_state_timer;
+ hrtimer_setup(&state->timer, at86rf230_async_state_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static irqreturn_t at86rf230_isr(int irq, void *data)
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index c8c23d9be961..41f212209993 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -28,20 +28,18 @@ enum ipa_resource_type {
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
- IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
- IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
- IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_UL_DL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
- .max_writes = 8,
- .max_reads = 0, /* no limit (hardware max) */
+ .max_writes = 12,
+ .max_reads = 13,
.max_reads_beats = 120,
},
};
@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 2, .max = 2,
},
},
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fd591ddb3884..ca62188a317a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
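
The fix puts every ip_hdr()/ipv6_hdr() dereference behind pskb_network_may_pull(), which guarantees the requested bytes sit in the linear skb area (pulling them in if necessary). The rule it enforces, as a sketch:

	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		goto err;	/* short or non-linear header: drop and count */

	ip4h = ip_hdr(skb);	/* only now is reading daddr/saddr/dscp safe */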
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ee2c3cf4df36..da3a97a65507 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -799,6 +799,12 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ call_netdevice_notifiers(event, ipvlan->dev);
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1993b90b1a5f..f1d68153987e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
@@ -264,13 +278,12 @@ static int __init blackhole_netdev_init(void)
if (!blackhole_netdev)
return -ENOMEM;
- rtnl_lock();
+ rtnl_net_lock(&init_net);
dev_init_scheduler(blackhole_netdev);
dev_activate(blackhole_netdev);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
- dev_net_set(blackhole_netdev, &init_net);
return 0;
}
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index d2b3f5a59141..d74d47dd6e04 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
return mcli;
err:
if (mcli) {
- if (mcli->client)
- i2c_unregister_device(mcli->client);
+ i2c_unregister_device(mcli->client);
kfree(mcli);
}
return ERR_PTR(rc);
@@ -584,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
+ int rc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
@@ -594,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index d247fe483c58..c678f79aa356 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -506,6 +506,14 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned int len)
{
struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
+
+ if (!daddr || !saddr)
+ return -EINVAL;
+
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 2beb83154d39..cb53dccbde1a 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -17,37 +17,20 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
{
struct cavium_mdiobus *bus;
struct mii_bus *mii_bus;
- struct resource *res_mem;
- resource_size_t mdio_phys;
- resource_size_t regsize;
union cvmx_smix_en smi_en;
- int err = -ENOENT;
+ int err;
mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
if (!mii_bus)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -ENXIO;
- }
-
bus = mii_bus->priv;
bus->mii_bus = mii_bus;
- mdio_phys = res_mem->start;
- regsize = resource_size(res_mem);
- if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
- res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- return -ENXIO;
- }
-
- bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
- if (!bus->register_base) {
+ bus->register_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bus->register_base)) {
dev_err(&pdev->dev, "dev_ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(bus->register_base);
}
smi_en.u64 = 0;
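
devm_platform_ioremap_resource() folds the get-resource / request-region / ioremap triple into one devres-managed call and reports failure via ERR_PTR (logging its own diagnostics), so the whole probe-side error handling reduces to:

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);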
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 22680f47385d..37bc3131d31a 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -213,6 +213,9 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
lp_advertising = 0;
}
+ if (!(bmsr & BMSR_LSTATUS))
+ cmd->base.speed = SPEED_UNKNOWN;
+
mii->full_duplex = cmd->base.duplex;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4ea44a2f48f7..86ab4a42769a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>
@@ -90,6 +91,12 @@ static DEFINE_MUTEX(target_cleanup_list_lock);
*/
static struct console netconsole_ext;
+struct netconsole_target_stats {
+ u64_stats_t xmit_drop_count;
+ u64_stats_t enomem_count;
+ struct u64_stats_sync syncp;
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
@@ -97,6 +104,7 @@ static struct console netconsole_ext;
* @userdata_group: Links to the userdata configfs hierarchy
* @userdata_complete: Cached, formatted string of append
* @userdata_length: String length of userdata_complete
+ * @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -124,6 +132,7 @@ struct netconsole_target {
char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
size_t userdata_length;
#endif
+ struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
@@ -262,6 +271,7 @@ static void netconsole_process_cleanups_core(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | transmit_errors
* | userdata/
* | <key>/
* | value
@@ -371,6 +381,21 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}
+static ssize_t transmit_errors_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item);
+ u64 xmit_drop_count, enomem_count;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&nt->stats.syncp);
+ xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count);
+ enomem_count = u64_stats_read(&nt->stats.enomem_count);
+ } while (u64_stats_fetch_retry(&nt->stats.syncp, start));
+
+ return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -705,7 +730,7 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS))
break;
child_count++;
@@ -842,6 +867,7 @@ CONFIGFS_ATTR(, remote_ip);
CONFIGFS_ATTR_RO(, local_mac);
CONFIGFS_ATTR(, remote_mac);
CONFIGFS_ATTR(, release);
+CONFIGFS_ATTR_RO(, transmit_errors);
static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_enabled,
@@ -854,6 +880,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_remote_ip,
&attr_local_mac,
&attr_remote_mac,
+ &attr_transmit_errors,
NULL,
};
@@ -1058,6 +1085,33 @@ static struct notifier_block netconsole_netdev_notifier = {
.notifier_call = netconsole_netdev_event,
};
+/**
+ * send_udp - Wrapper for netpoll_send_udp that counts errors
+ * @nt: target to send message to
+ * @msg: message to send
+ * @len: length of message
+ *
+ * Calls netpoll_send_udp and classifies the return value. If an error
+ * occurred it increments statistics in nt->stats accordingly.
+ * netpoll_send_udp is always called; the statistics are only recorded
+ * when CONFIG_NETCONSOLE_DYNAMIC is enabled.
+ */
+static void send_udp(struct netconsole_target *nt, const char *msg, int len)
+{
+ int result = netpoll_send_udp(&nt->np, msg, len);
+
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
+ if (result == NET_XMIT_DROP) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.xmit_drop_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ } else if (result == -ENOMEM) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.enomem_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ }
+ }
+}
+
static void send_msg_no_fragmentation(struct netconsole_target *nt,
const char *msg,
int msg_len,
@@ -1085,7 +1139,7 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
MAX_PRINT_CHUNK - msg_len,
"%s", userdata);
- netpoll_send_udp(&nt->np, buf, msg_len);
+ send_udp(nt, buf, msg_len);
}
static void append_release(char *buf)
@@ -1178,7 +1232,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
this_offset += this_chunk;
}
- netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ send_udp(nt, buf, this_header + this_offset);
offset += this_offset;
}
}
@@ -1288,7 +1342,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
tmp = msg;
for (left = len; left;) {
frag = min(left, MAX_PRINT_CHUNK);
- netpoll_send_udp(&nt->np, tmp, frag);
+ send_udp(nt, tmp, frag);
tmp += frag;
left -= frag;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5fe1eaef99b5..7ab358616e03 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -2,8 +2,8 @@
// Copyright (c) 2020 Facebook
#include <linux/debugfs.h>
-#include <linux/ethtool.h>
#include <linux/random.h>
+#include <net/netdev_queues.h>
#include "netdevsim.h"
@@ -72,6 +72,10 @@ static void nsim_get_ringparam(struct net_device *dev,
struct netdevsim *ns = netdev_priv(dev);
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
+ kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX;
+
+ if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
}
static int nsim_set_ringparam(struct net_device *dev,
@@ -103,10 +107,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct netdevsim *ns = netdev_priv(dev);
int err;
- mutex_lock(&dev->lock);
+ netdev_lock(dev);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- mutex_unlock(&dev->lock);
+ netdev_unlock(dev);
if (err)
return err;
@@ -161,6 +165,8 @@ static int nsim_get_ts_info(struct net_device *dev,
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_pause_stats = nsim_get_pause_stats,
.get_pauseparam = nsim_get_pauseparam,
.set_pauseparam = nsim_set_pauseparam,
@@ -178,9 +184,11 @@ static const struct ethtool_ops nsim_ethtool_ops = {
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
+ ns->ethtool.ring.rx_pending = 512;
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_pending = 512;
ns->ethtool.ring.tx_max_pending = 4096;
}
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 0e58aa7f0374..66b3215db3ac 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -331,7 +331,6 @@ enum nsim_dev_hwstats_do {
};
struct nsim_dev_hwstats_fops {
- const struct file_operations fops;
enum nsim_dev_hwstats_do action;
enum netdev_offload_xstats_type type;
};
@@ -342,13 +341,12 @@ nsim_dev_hwstats_do_write(struct file *file,
size_t count, loff_t *ppos)
{
struct nsim_dev_hwstats *hwstats = file->private_data;
- struct nsim_dev_hwstats_fops *hwsfops;
+ const struct nsim_dev_hwstats_fops *hwsfops;
struct list_head *hwsdev_list;
int ifindex;
int err;
- hwsfops = container_of(debugfs_real_fops(file),
- struct nsim_dev_hwstats_fops, fops);
+ hwsfops = debugfs_get_aux(file);
err = kstrtoint_from_user(data, count, 0, &ifindex);
if (err)
@@ -381,14 +379,13 @@ nsim_dev_hwstats_do_write(struct file *file,
return count;
}
+static struct debugfs_short_fops debugfs_ops = {
+ .write = nsim_dev_hwstats_do_write,
+ .llseek = generic_file_llseek,
+};
+
#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
{ \
- .fops = { \
- .open = simple_open, \
- .write = nsim_dev_hwstats_do_write, \
- .llseek = generic_file_llseek, \
- .owner = THIS_MODULE, \
- }, \
.action = ACTION, \
.type = TYPE, \
}
@@ -433,12 +430,12 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_fail_fops.fops);
+ debugfs_create_file_aux("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops, &debugfs_ops);
+ debugfs_create_file_aux("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops, &debugfs_ops);
+ debugfs_create_file_aux("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops, &debugfs_ops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
&nsim_dev_hwstats_traffic_work);
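
debugfs_short_fops drops the open/owner boilerplate (debugfs supplies both), and the payload that used to be recovered with container_of() on the fops now rides in a dedicated aux slot. A sketch of the pairing, with hypothetical names:

static const struct debugfs_short_fops my_ops = {
	.write	= my_write,
	.llseek	= generic_file_llseek,
};

	/* data lands in file->private_data, aux in debugfs_get_aux(file) */
	debugfs_create_file_aux("ctl", 0200, dir, priv, &my_aux_desc, &my_ops);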
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e068a9761c09..42f247cbdcee 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -15,11 +15,13 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
@@ -29,6 +31,8 @@
#include "netdevsim.h"
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
+
#define NSIM_RING_SIZE 256
static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
@@ -54,6 +58,7 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct netdev_config *cfg;
struct nsim_rq *rq;
int rxq;
@@ -69,7 +74,14 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq >= peer_dev->num_rx_queues)
rxq = rxq % peer_dev->num_rx_queues;
- rq = &peer_ns->rq[rxq];
+ rq = peer_ns->rq[rxq];
+
+ cfg = peer_dev->cfg;
+ if (skb_is_nonlinear(skb) &&
+ (cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED ||
+ (cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ cfg->hds_thresh > len)))
+ skb_linearize(skb);
skb_tx_timestamp(skb);
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
@@ -359,25 +371,24 @@ static int nsim_poll(struct napi_struct *napi, int budget)
return done;
}
-static int nsim_create_page_pool(struct nsim_rq *rq)
+static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
{
- struct page_pool_params p = {
+ struct page_pool_params params = {
.order = 0,
.pool_size = NSIM_RING_SIZE,
.nid = NUMA_NO_NODE,
- .dev = &rq->napi.dev->dev,
- .napi = &rq->napi,
+ .dev = &napi->dev->dev,
+ .napi = napi,
.dma_dir = DMA_BIDIRECTIONAL,
- .netdev = rq->napi.dev,
+ .netdev = napi->dev,
};
+ struct page_pool *pool;
- rq->page_pool = page_pool_create(&p);
- if (IS_ERR(rq->page_pool)) {
- int err = PTR_ERR(rq->page_pool);
+ pool = page_pool_create(&params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- rq->page_pool = NULL;
- return err;
- }
+ *p = pool;
return 0;
}
@@ -388,15 +399,15 @@ static int nsim_init_napi(struct netdevsim *ns)
int err, i;
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- netif_napi_add(dev, &rq->napi, nsim_poll);
+ netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- err = nsim_create_page_pool(rq);
+ err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
if (err)
goto err_pp_destroy;
}
@@ -405,12 +416,12 @@ static int nsim_init_napi(struct netdevsim *ns)
err_pp_destroy:
while (i--) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i].napi);
+ __netif_napi_del(&ns->rq[i]->napi);
return err;
}
@@ -421,7 +432,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
napi_enable(&rq->napi);
@@ -448,7 +459,7 @@ static void nsim_del_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
napi_disable(&rq->napi);
__netif_napi_del(&rq->napi);
@@ -456,8 +467,8 @@ static void nsim_del_napi(struct netdevsim *ns)
synchronize_net();
for (i = 0; i < dev->num_rx_queues; i++) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
}
@@ -595,6 +606,182 @@ static const struct netdev_stat_ops nsim_stat_ops = {
.get_base_stats = nsim_get_base_stats,
};
+static struct nsim_rq *nsim_queue_alloc(void)
+{
+ struct nsim_rq *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT);
+ if (!rq)
+ return NULL;
+
+ skb_queue_head_init(&rq->skb_queue);
+ return rq;
+}
+
+static void nsim_queue_free(struct nsim_rq *rq)
+{
+ skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
+ kfree(rq);
+}
+
+/* Queue reset mode is controlled by ns->rq_reset_mode.
+ * - normal - new NAPI new pool (old NAPI enabled when new added)
+ * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
+ * - mode 2 - new NAPI new pool (old NAPI removed before new added)
+ * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
+ */
+struct nsim_queue_mem {
+ struct nsim_rq *rq;
+ struct page_pool *pp;
+};
+
+static int
+nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ if (ns->rq_reset_mode > 3)
+ return -EINVAL;
+
+ if (ns->rq_reset_mode == 1)
+ return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+
+ qmem->rq = nsim_queue_alloc();
+ if (!qmem->rq)
+ return -ENOMEM;
+
+ err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
+ if (err)
+ goto err_free;
+
+ if (!ns->rq_reset_mode)
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+
+ return 0;
+
+err_free:
+ nsim_queue_free(qmem->rq);
+ return err;
+}
+
+static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ page_pool_destroy(qmem->pp);
+ if (qmem->rq) {
+ if (!ns->rq_reset_mode)
+ netif_napi_del(&qmem->rq->napi);
+ page_pool_destroy(qmem->rq->page_pool);
+ nsim_queue_free(qmem->rq);
+ }
+}
+
+static int
+nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->rq_reset_mode == 1) {
+ ns->rq[idx]->page_pool = qmem->pp;
+ napi_enable(&ns->rq[idx]->napi);
+ return 0;
+ }
+
+ /* netif_napi_add()/_del() should normally be called from alloc/free,
+ * here we want to test various call orders.
+ */
+ if (ns->rq_reset_mode == 2) {
+ netif_napi_del(&ns->rq[idx]->napi);
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ } else if (ns->rq_reset_mode == 3) {
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_del(&ns->rq[idx]->napi);
+ }
+
+ ns->rq[idx] = qmem->rq;
+ napi_enable(&ns->rq[idx]->napi);
+
+ return 0;
+}
+
+static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ napi_disable(&ns->rq[idx]->napi);
+
+ if (ns->rq_reset_mode == 1) {
+ qmem->pp = ns->rq[idx]->page_pool;
+ page_pool_disable_direct_recycling(qmem->pp);
+ } else {
+ qmem->rq = ns->rq[idx];
+ }
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct nsim_queue_mem),
+ .ndo_queue_mem_alloc = nsim_queue_mem_alloc,
+ .ndo_queue_mem_free = nsim_queue_mem_free,
+ .ndo_queue_start = nsim_queue_start,
+ .ndo_queue_stop = nsim_queue_stop,
+};
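
For context, a simplified sketch of the ordering that netdev_rx_queue_restart() imposes on the callbacks above — this is not the in-tree implementation, only the contract the netdevsim ops are written against: allocate replacement memory, quiesce the old queue into its memory blob, swap in the new one, then reclaim the old memory, restoring the old queue if the swap fails.

static int sketch_rx_queue_restart(struct net_device *dev, int idx,
				   void *new_mem, void *old_mem)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	int err;

	/* new_mem/old_mem are blobs of ndo_queue_mem_size bytes each */
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		return err;

	err = ops->ndo_queue_stop(dev, old_mem, idx);
	if (err)
		goto err_free_new;

	err = ops->ndo_queue_start(dev, new_mem, idx);
	if (err)
		goto err_restart_old;

	ops->ndo_queue_mem_free(dev, old_mem);
	return 0;

err_restart_old:
	ops->ndo_queue_start(dev, old_mem, idx);
err_free_new:
	ops->ndo_queue_mem_free(dev, new_mem);
	return err;
}
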
+
+static ssize_t
+nsim_qreset_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ unsigned int queue, mode;
+ char buf[32];
+ ssize_t ret;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(buf, data, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ ret = sscanf(buf, "%u %u", &queue, &mode);
+ if (ret != 2)
+ return -EINVAL;
+
+ rtnl_lock();
+ if (!netif_running(ns->netdev)) {
+ ret = -ENETDOWN;
+ goto exit_unlock;
+ }
+
+ if (queue >= ns->netdev->real_num_rx_queues) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ns->rq_reset_mode = mode;
+ ret = netdev_rx_queue_restart(ns->netdev, queue);
+ ns->rq_reset_mode = 0;
+ if (ret)
+ goto exit_unlock;
+
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+ return ret;
+}
+
+static const struct file_operations nsim_qreset_fops = {
+ .open = simple_open,
+ .write = nsim_qreset_write,
+ .owner = THIS_MODULE,
+};
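
With this file registered as "queue_reset" under the port's debugfs directory (see nsim_create() below), a reset is driven from userspace by writing "<queue> <mode>", e.g. echo "0 2" > .../queue_reset to restart RX queue 0 in mode 2; the exact path depends on the instance, typically /sys/kernel/debug/netdevsim/netdevsim<ID>/ports/<PORT>/queue_reset. Per the handler, an out-of-range queue returns -EINVAL and a downed interface -ENETDOWN.
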
+
static ssize_t
nsim_pp_hold_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
@@ -628,7 +815,7 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!netif_running(ns->netdev) && val) {
ret = -ENETDOWN;
} else if (val) {
- ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
if (!ns->page)
ret = -ENOMEM;
} else {
@@ -677,27 +864,35 @@ static int nsim_queue_init(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
int i;
- ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
- GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT);
if (!ns->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_head_init(&ns->rq[i].skb_queue);
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ ns->rq[i] = nsim_queue_alloc();
+ if (!ns->rq[i])
+ goto err_free_prev;
+ }
return 0;
+
+err_free_prev:
+ while (i--)
+ kfree(ns->rq[i]);
+ kfree(ns->rq);
+ return -ENOMEM;
}
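
Switching ns->rq from an array of struct nsim_rq to an array of pointers is what makes the queue_mgmt_ops above workable: nsim_queue_start() swaps a single queue by replacing one pointer, without touching its neighbours. The kvcalloc()/__GFP_RETRY_MAYFAIL pair is dropped because the array now only holds pointers and is small enough for a plain kcalloc().
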
-static void nsim_queue_free(struct netdevsim *ns)
+static void nsim_queue_uninit(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
int i;
for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_purge_reason(&ns->rq[i].skb_queue,
- SKB_DROP_REASON_QUEUE_PURGE);
+ nsim_queue_free(ns->rq[i]);
- kvfree(ns->rq);
+ kfree(ns->rq);
ns->rq = NULL;
}
@@ -713,6 +908,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
ns->netdev->stat_ops = &nsim_stat_ops;
+ ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
@@ -741,7 +937,7 @@ err_ipsec_teardown:
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_rq_destroy:
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -798,6 +994,9 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
ns, &nsim_pp_hold_fops);
+ ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
+ nsim_dev_port->ddir, ns,
+ &nsim_qreset_fops);
return ns;
@@ -811,6 +1010,7 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->qr_dfs);
debugfs_remove(ns->pp_dfs);
rtnl_lock();
@@ -823,7 +1023,7 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bf02efa10956..96d54c08043d 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -36,6 +37,8 @@
#define NSIM_IPSEC_VALID BIT(31)
#define NSIM_UDP_TUNNEL_N_PORTS 4
+#define NSIM_HDS_THRESHOLD_MAX 1024
+
struct nsim_sa {
struct xfrm_state *xs;
__be32 ipaddr[4];
@@ -101,7 +104,9 @@ struct netdevsim {
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
- struct nsim_rq *rq;
+ struct nsim_rq **rq;
+
+ int rq_reset_mode;
u64 tx_packets;
u64 tx_bytes;
@@ -129,11 +134,13 @@ struct netdevsim {
u32 sleep;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
+ struct dentry *ddir;
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
struct page *page;
struct dentry *pp_dfs;
+ struct dentry *qr_dfs;
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 02dc3123eb6c..640b4983a9a0 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -112,9 +112,11 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
rtnl_lock();
- udp_tunnel_nic_reset_ntf(dev);
+ if (dev->reg_state == NETREG_REGISTERED) {
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ udp_tunnel_nic_reset_ntf(dev);
+ }
rtnl_unlock();
return count;
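
Moving the table wipe under the rtnl lock and gating it on NETREG_REGISTERED makes the debugfs reset a no-op once unregistration has begun, rather than zeroing the port tables and notifying a device that is being torn down.
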
@@ -144,23 +146,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
else
ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
- debugfs_create_u32("udp_ports_inject_error", 0600,
- ns->nsim_dev_port->ddir,
+ ns->udp_ports.ddir = debugfs_create_dir("udp_ports",
+ ns->nsim_dev_port->ddir);
+
+ debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir,
&ns->udp_ports.inject_error);
ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table0", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[0]);
ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table1", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[1]);
- debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
+ debugfs_create_file("reset", 0200, ns->udp_ports.ddir,
dev, &nsim_udp_tunnels_info_reset_fops);
/* Note: it's not normal to allocate the info struct like this!
@@ -196,6 +198,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
void nsim_udp_tunnels_info_destroy(struct net_device *dev)
{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ debugfs_remove_recursive(ns->udp_ports.ddir);
kfree(dev->udp_tunnel_nic_info);
dev->udp_tunnel_nic_info = NULL;
}
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index c1d881dc6409..1e1b00756be7 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -338,6 +338,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
char ifname[IFNAMSIZ];
@@ -371,6 +372,10 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
if (err < 0)
return err;
}
+ if (data[IFLA_NETKIT_HEADROOM])
+ headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
+ if (data[IFLA_NETKIT_TAILROOM])
+ tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
}
if (ifmp && tbp[IFLA_IFNAME]) {
@@ -390,6 +395,14 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
return PTR_ERR(peer);
netif_inherit_tso_max(peer, dev);
+ if (headroom) {
+ peer->needed_headroom = headroom;
+ dev->needed_headroom = headroom;
+ }
+ if (tailroom) {
+ peer->needed_tailroom = tailroom;
+ dev->needed_tailroom = tailroom;
+ }
if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
@@ -401,6 +414,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_peer;
nk->scrub = scrub_peer;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(peer);
@@ -426,6 +440,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_prim;
nk->scrub = scrub_prim;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(dev);
@@ -850,7 +865,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
struct net_device *peer = rtnl_dereference(nk->peer);
enum netkit_action policy;
struct nlattr *attr;
- int err;
+ int err, i;
+ static const struct {
+ u32 attr;
+ char *name;
+ } fixed_params[] = {
+ { IFLA_NETKIT_MODE, "operating mode" },
+ { IFLA_NETKIT_SCRUB, "scrubbing" },
+ { IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
+ { IFLA_NETKIT_PEER_INFO, "peer info" },
+ { IFLA_NETKIT_HEADROOM, "headroom" },
+ { IFLA_NETKIT_TAILROOM, "tailroom" },
+ };
if (!nk->primary) {
NL_SET_ERR_MSG(extack,
@@ -858,28 +884,14 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
- if (data[IFLA_NETKIT_MODE]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
- "netkit link operating mode cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_INFO]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
- "netkit peer info cannot be changed after device creation");
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
+ attr = data[fixed_params[i].attr];
+ if (attr) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
+ "netkit link %s cannot be changed after device creation",
+ fixed_params[i].name);
+ return -EACCES;
+ }
}
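
Folding the per-attribute rejections into the fixed_params table keeps the extack strings uniform and makes the two new immutable attributes (headroom/tailroom) a one-line addition each. One visible change: IFLA_NETKIT_PEER_INFO previously returned -EINVAL here and now returns -EACCES like the other immutable attributes.
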
if (data[IFLA_NETKIT_POLICY]) {
@@ -914,6 +926,8 @@ static size_t netkit_get_size(const struct net_device *dev)
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
0;
}
@@ -930,6 +944,10 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
+ return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
+ return -EMSGSIZE;
if (peer) {
nk = netkit_priv(peer);
@@ -947,6 +965,8 @@ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
[IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
[IFLA_NETKIT_POLICY] = { .type = NLA_U32 },
[IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 },
+ [IFLA_NETKIT_HEADROOM] = { .type = NLA_U16 },
+ [IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
[IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index b79aedad855b..e46f588cae7d 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -35,6 +35,27 @@ enum sgmii_speed {
#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
+static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ return LINK_INBAND_ENABLE;
+
+ default:
+ return 0;
+ }
+}
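
The returned mask advertises which in-band negotiation settings the PCS can honour for a given interface mode: LINK_INBAND_DISABLE | LINK_INBAND_ENABLE means both out-of-band and in-band operation are selectable, while a single bit means the mode is fixed. A hypothetical helper illustrating how a caller might test such a mask (pcs_inband_mode_ok() is not a real kernel function):

/* Illustrative only: check whether a capability mask, as returned by
 * .pcs_inband_caps, can satisfy a request for in-band (or out-of-band)
 * operation in the current interface mode.
 */
static bool pcs_inband_mode_ok(unsigned int caps, bool want_inband)
{
	return caps & (want_inband ? LINK_INBAND_ENABLE : LINK_INBAND_DISABLE);
}
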
+
static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
struct phylink_link_state *state)
{
@@ -79,7 +100,7 @@ static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
state->duplex = DUPLEX_FULL;
}
-static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
@@ -88,7 +109,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state);
break;
case PHY_INTERFACE_MODE_2500BASEX:
lynx_pcs_get_state_2500basex(lynx->mdio, state);
@@ -306,15 +327,26 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_inband_caps = lynx_pcs_inband_caps,
.pcs_get_state = lynx_pcs_get_state,
.pcs_config = lynx_pcs_config,
.pcs_an_restart = lynx_pcs_an_restart,
.pcs_link_up = lynx_pcs_link_up,
};
+static const phy_interface_t lynx_interfaces[] = {
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+};
+
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
{
struct lynx_pcs *lynx;
+ int i;
lynx = kzalloc(sizeof(*lynx), GFP_KERNEL);
if (!lynx)
@@ -326,6 +358,9 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
+ for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
+ __set_bit(lynx_interfaces[i], lynx->pcs.supported_interfaces);
+
return lynx_to_phylink_pcs(lynx);
}
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 4f63abe638c4..7d6261dee534 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -88,7 +88,24 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
return container_of(pcs, struct mtk_pcs_lynxi, pcs);
}
+static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
@@ -98,7 +115,8 @@ static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
- phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ phylink_mii_c22_pcs_decode_state(state, neg_mode,
+ FIELD_GET(SGMII_BMSR, bm),
FIELD_GET(SGMII_LPA, adv));
}
@@ -241,6 +259,7 @@ static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
+ .pcs_inband_caps = mtk_pcs_lynxi_inband_caps,
.pcs_get_state = mtk_pcs_lynxi_get_state,
.pcs_config = mtk_pcs_lynxi_config,
.pcs_an_restart = mtk_pcs_lynxi_restart_an,
@@ -290,6 +309,10 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mpcs->pcs.supported_interfaces);
+
return &mpcs->pcs;
}
EXPORT_SYMBOL(mtk_pcs_lynxi_create);
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..1faa37f0e7b9 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -567,14 +567,40 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
return 0;
}
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ const struct dw_xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs, interface);
+ if (!compat)
+ return 0;
+
+ switch (compat->an_mode) {
+ case DW_AN_C73:
+ return LINK_INBAND_ENABLE;
+
+ case DW_AN_C37_SGMII:
+ case DW_AN_C37_1000BASEX:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case DW_10GBASER:
+ case DW_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
+static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
{
const struct dw_xpcs_compat *compat;
for (compat = xpcs->desc->compat; compat->supported; compat++)
__set_bit(compat->interface, interfaces);
}
-EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -684,7 +710,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
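
Widening the modify mask to include DW_VR_MII_DIG_CTRL1_2G5_EN (with val starting from 0) means any stale 2.5G-mode enable left over from a previous 2500base-x configuration is cleared when the PCS is reprogrammed for SGMII.
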
@@ -1004,6 +1032,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
}
static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int lpa, bmsr;
@@ -1032,7 +1061,7 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
}
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
return 0;
@@ -1060,7 +1089,7 @@ static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs,
return 0;
}
-static void xpcs_get_state(struct phylink_pcs *pcs,
+static void xpcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
@@ -1088,7 +1117,7 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
"xpcs_get_state_c37_sgmii", ERR_PTR(ret));
break;
case DW_AN_C37_1000BASEX:
- ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ ret = xpcs_get_state_c37_1000basex(xpcs, neg_mode, state);
if (ret)
dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
"xpcs_get_state_c37_1000basex", ERR_PTR(ret));
@@ -1306,6 +1335,7 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = {
static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_validate = xpcs_validate,
+ .pcs_inband_caps = xpcs_inband_caps,
.pcs_pre_config = xpcs_pre_config,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
@@ -1418,6 +1448,8 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev)
if (ret)
goto out_clear_clks;
+ xpcs_get_interfaces(xpcs, xpcs->pcs.supported_interfaces);
+
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs->pcs.poll = false;
else
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..68d0d9e92a22 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
+ struct pfcp_dev *pfcp, *pfcp_next;
+ struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &pfcp_link_ops)
+ pfcp_dellink(dev, &list);
+
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
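
pfcp_newlink() now links the device into the pfcp_net of the link netns (net) rather than dev_net(dev), and pfcp_net_exit() sweeps both ways: for_each_netdev() unregisters any PFCP device still present in the dying namespace, while the _safe list walk handles devices linked here from elsewhere. The RCU list primitives become plain list_add()/list_del() since the list appears to be traversed only under the rtnl lock held here.
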
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 15828f4710a9..41c15a2c2037 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -287,8 +287,15 @@ config MICROCHIP_PHY
config MICROCHIP_T1_PHY
tristate "Microchip T1 PHYs"
+ select MICROCHIP_PHY_RDS_PTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
- Supports the LAN87XX PHYs.
+ Supports the LAN8XXX PHYs.
+
+config MICROCHIP_PHY_RDS_PTP
+ tristate
+ help
+	  Currently supports the LAN887X T1 PHY.
config MICROSEMI_PHY
tristate "Microsemi PHYs"
@@ -343,10 +350,7 @@ config QSEMI_PHY
help
Currently supports the qs6612
-config REALTEK_PHY
- tristate "Realtek PHYs"
- help
- Supports the Realtek 821x PHY.
+source "drivers/net/phy/realtek/Kconfig"
config RENESAS_PHY
tristate "Renesas PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e6145153e837..c8dac6e92278 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
+obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
@@ -94,7 +95,7 @@ obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
-obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_REALTEK_PHY) += realtek/
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index a2a862bae2ed..7fa713ca8d45 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -1038,7 +1038,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
{ }
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 85f910e2d4fb..6bb469429b9d 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -340,7 +340,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index 8d076b9609fd..e9fd24cb7270 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -1075,7 +1075,7 @@ static struct phy_driver en8811h_driver[] = {
module_phy_driver(en8811h_driver);
-static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+static const struct mdio_device_id __maybe_unused en8811h_tbl[] = {
{ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 930b15fa6ce9..75b5fe65500a 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -111,7 +111,7 @@ static struct phy_driver am79c_drivers[] = {
module_phy_driver(am79c_drivers);
-static struct mdio_device_id __maybe_unused amd_tbl[] = {
+static const struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AC101L, 0xfffffff0 },
{ PHY_ID_AM79C874, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index bb56a66d2a48..e42ace4e682a 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -1200,7 +1200,7 @@ static struct phy_driver aqr_driver[] = {
module_phy_driver(aqr_driver);
-static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index eb74a8cf8df1..694df1401aa2 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -121,7 +121,7 @@ static struct phy_driver asix_driver[] = {
module_phy_driver(asix_driver);
-static struct mdio_device_id __maybe_unused asix_tbl[] = {
+static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
{ PHY_ID_ASIX_AX88796B, 0xfffffff0 },
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index da8f7cb41b44..15cbef8202bc 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -278,7 +278,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
}
};
-static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
{ PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 2eea3d09b1e6..7969345f6b35 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -883,7 +883,7 @@ static struct phy_driver bcm54140_drivers[] = {
};
module_phy_driver(bcm54140_drivers);
-static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
{ PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 0eb33be824f1..b46a736a3130 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -93,7 +93,7 @@ static struct phy_driver bcm63xx_driver[] = {
module_phy_driver(bcm63xx_driver);
-static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
{ 0x002bdc00, 0xfffffc00 },
{ }
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 97638ba7ae85..00e8fa14aa77 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -929,7 +929,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"),
};
-static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM72165, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 97da3aee4942..d7f7cc44c532 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -235,11 +235,21 @@ static int bcm84881_read_status(struct phy_device *phydev)
return genphy_c45_read_mdix(phydev);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static unsigned int bcm84881_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static struct phy_driver bcm84881_drivers[] = {
{
.phy_id = 0xae025150,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM84881",
+ .inband_caps = bcm84881_inband_caps,
.config_init = bcm84881_config_init,
.probe = bcm84881_probe,
.get_features = bcm84881_get_features,
@@ -252,7 +262,7 @@ static struct phy_driver bcm84881_drivers[] = {
module_phy_driver(bcm84881_drivers);
/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
-static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
{ 0xae025150, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index ddded162c44c..22edb7e4c1a1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1717,7 +1717,7 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
-static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM54210E, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index ef5f412e101f..d87cf8b94cf8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -145,7 +145,7 @@ static struct phy_driver cis820x_driver[] = {
module_phy_driver(cis820x_driver);
-static struct mdio_device_id __maybe_unused cicada_tbl[] = {
+static const struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
{ 0x000fc440, 0x000fffc0 },
{ }
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 40514a94e6ff..3b65f37f1c57 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -87,7 +87,7 @@ static struct phy_driver cortina_driver[] = {
module_phy_driver(cortina_driver);
-static struct mdio_device_id __maybe_unused cortina_tbl[] = {
+static const struct mdio_device_id __maybe_unused cortina_tbl[] = {
{ PHY_ID_CS4340, 0xffffffff},
{},
};
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 4ac4bce1bf32..fa3692508f16 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -209,7 +209,7 @@ static struct phy_driver dm91xx_driver[] = {
module_phy_driver(dm91xx_driver);
-static struct mdio_device_id __maybe_unused davicom_tbl[] = {
+static const struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
{ 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 075d2beea716..85e231451093 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1548,7 +1548,7 @@ MODULE_LICENSE("GPL");
module_init(dp83640_init);
module_exit(dp83640_exit);
-static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index cf8b6d0bfaa9..6599feca1967 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -22,8 +22,6 @@
#define DP83826C_PHY_ID 0x2000a130
#define DP83826NC_PHY_ID 0x2000a110
-#define DP83822_DEVADDR 0x1f
-
#define MII_DP83822_CTRL_2 0x0a
#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
@@ -32,6 +30,10 @@
#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
+#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL1 0x462
+#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
@@ -106,6 +108,50 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* MLEDCR bits */
+#define DP83822_MLEDCR_CFG GENMASK(6, 3)
+#define DP83822_MLEDCR_ROUTE GENMASK(1, 0)
+#define DP83822_MLEDCR_ROUTE_LED_0 DP83822_MLEDCR_ROUTE
+
+/* LEDCFG1 bits */
+#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
+#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+
+/* IOCTRL1 bits */
+#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
+#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
+#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+
+/* IOCTRL2 bits */
+#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
+#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF GENMASK(1, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_MLED BIT(0)
+
+#define DP83822_CLK_SRC_MAC_IF 0x0
+#define DP83822_CLK_SRC_XI 0x1
+#define DP83822_CLK_SRC_INT_REF 0x2
+#define DP83822_CLK_SRC_RMII_MASTER_MODE_REF 0x4
+#define DP83822_CLK_SRC_FREE_RUNNING 0x6
+#define DP83822_CLK_SRC_RECOVERED 0x7
+
+#define DP83822_LED_FN_LINK 0x0 /* Link established */
+#define DP83822_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */
+#define DP83822_LED_FN_TX 0x2 /* Transmit activity */
+#define DP83822_LED_FN_RX 0x3 /* Receive activity */
+#define DP83822_LED_FN_COLLISION 0x4 /* Collision detected */
+#define DP83822_LED_FN_LINK_100_BTX 0x5 /* 100 BTX link established */
+#define DP83822_LED_FN_LINK_10_BT 0x6 /* 10BT link established */
+#define DP83822_LED_FN_FULL_DUPLEX 0x7 /* Full duplex */
+#define DP83822_LED_FN_LINK_RX_TX 0x8 /* Link established, blink for rx or tx activity */
+#define DP83822_LED_FN_ACTIVE_STRETCH 0x9 /* Active Stretch Signal */
+#define DP83822_LED_FN_MII_LINK 0xa /* MII LINK (100BT+FD) */
+#define DP83822_LED_FN_LPI_MODE 0xb /* LPI Mode (EEE) */
+#define DP83822_LED_FN_RX_TX_ERR 0xc /* TX/RX MII Error */
+#define DP83822_LED_FN_LINK_LOST 0xd /* Link Lost */
+#define DP83822_LED_FN_PRBS_ERR 0xe /* Blink for PRBS error */
+
/* SOR1 mode */
#define DP83822_STRAP_MODE1 0
#define DP83822_STRAP_MODE2 BIT(0)
@@ -134,6 +180,13 @@
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+#define DP83822_MAX_LED_PINS 4
+
+#define DP83822_LED_INDEX_LED_0 0
+#define DP83822_LED_INDEX_LED_1_GPIO1 1
+#define DP83822_LED_INDEX_COL_GPIO2 2
+#define DP83822_LED_INDEX_RX_D3_GPIO3 3
+
struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
@@ -141,6 +194,9 @@ struct dp83822_private {
u8 cfg_dac_minus;
u8 cfg_dac_plus;
struct ethtool_wolinfo wol;
+ bool set_gpio2_clk_out;
+ u32 gpio2_clk_out;
+ bool led_pin_enable[DP83822_MAX_LED_PINS];
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -159,14 +215,14 @@ static int dp83822_config_wol(struct phy_device *phydev,
/* MAC addresses start with byte 5, but stored in mac[0].
* 822 PHYs store bytes 4|5, 2|3, 0|1
*/
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA1,
(mac[1] << 8) | mac[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA2,
(mac[3] << 8) | mac[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA3,
(mac[5] << 8) | mac[4]);
- value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG);
if (wol->wolopts & WAKE_MAGIC)
value |= DP83822_WOL_MAGIC_EN;
@@ -174,13 +230,13 @@ static int dp83822_config_wol(struct phy_device *phydev,
value &= ~DP83822_WOL_MAGIC_EN;
if (wol->wolopts & WAKE_MAGICSECURE) {
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
value |= DP83822_WOL_SECURE_ON;
@@ -194,10 +250,10 @@ static int dp83822_config_wol(struct phy_device *phydev,
value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
DP83822_WOL_CLR_INDICATION;
- return phy_write_mmd(phydev, DP83822_DEVADDR,
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG, value);
} else {
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG,
DP83822_WOL_EN |
DP83822_WOL_MAGIC_EN |
@@ -226,23 +282,23 @@ static void dp83822_get_wol(struct phy_device *phydev,
wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
wol->wolopts = 0;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (value & DP83822_WOL_MAGIC_EN)
wol->wolopts |= WAKE_MAGIC;
if (value & DP83822_WOL_SECURE_ON) {
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1);
wol->sopass[0] = (sopass_val & 0xff);
wol->sopass[1] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2);
wol->sopass[2] = (sopass_val & 0xff);
wol->sopass[3] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3);
wol->sopass[4] = (sopass_val & 0xff);
wol->sopass[5] = (sopass_val >> 8);
@@ -405,6 +461,48 @@ static int dp83822_read_status(struct phy_device *phydev)
return 0;
}
+static int dp83822_config_init_leds(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR,
+ DP83822_MLEDCR_ROUTE,
+ FIELD_PREP(DP83822_MLEDCR_ROUTE,
+ DP83822_MLEDCR_ROUTE_LED_0));
+ if (ret)
+ return ret;
+ } else if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_MLED));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_1_GPIO1]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO1_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO1_CTRL,
+ DP83822_IOCTRL1_GPIO1_CTRL_LED_1));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO3_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO3_CTRL,
+ DP83822_IOCTRL1_GPIO3_CTRL_LED3));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
@@ -415,6 +513,19 @@ static int dp83822_config_init(struct phy_device *phydev)
int err = 0;
int bmcr;
+ if (dp83822->set_gpio2_clk_out)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL |
+ DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF) |
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ dp83822->gpio2_clk_out));
+
+ err = dp83822_config_init_leds(phydev);
+ if (err)
+ return err;
+
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
@@ -430,18 +541,18 @@ static int dp83822_config_init(struct phy_device *phydev)
if (tx_int_delay <= 0)
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
if (err)
return err;
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
return err;
} else {
- err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
@@ -496,7 +607,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
if (dp83822->fx_signal_det_low) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_GENCFG,
DP83822_SIG_DET_LOW);
if (err)
@@ -514,10 +625,10 @@ static int dp8382x_config_rmii_mode(struct phy_device *phydev)
if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
if (strcmp(of_val, "master") == 0) {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else if (strcmp(of_val, "slave") == 0) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else {
phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
@@ -539,7 +650,7 @@ static int dp83826_config_init(struct phy_device *phydev)
int ret;
if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -548,7 +659,7 @@ static int dp83826_config_init(struct phy_device *phydev)
if (ret)
return ret;
} else {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -560,7 +671,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG1, mask, val);
if (ret)
return ret;
@@ -568,7 +679,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -577,7 +688,7 @@ static int dp83826_config_init(struct phy_device *phydev)
val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -609,10 +720,66 @@ static int dp83822_phy_reset(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init_leds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device_node *leds;
+ u32 index;
+ int err;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ err = of_property_read_u32(led, "reg", &index);
+ if (err) {
+ of_node_put(leds);
+ return err;
+ }
+
+ if (index <= DP83822_LED_INDEX_RX_D3_GPIO3) {
+ dp83822->led_pin_enable[index] = true;
+ } else {
+ of_node_put(leds);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(leds);
+ /* LED_0 and COL(GPIO2) use the MLED function. MLED can be routed to
+ * only one of these two pins at a time.
+ */
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0] &&
+ dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ phydev_err(phydev, "LED_0 and COL(GPIO2) cannot be used as LED output at the same time\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2] &&
+ dp83822->set_gpio2_clk_out) {
+ phydev_err(phydev, "COL(GPIO2) cannot be used as LED output, already used as clock output\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3] &&
+ phydev->interface != PHY_INTERFACE_MODE_RMII) {
+ phydev_err(phydev, "RX_D3 can only be used as LED output when in RMII mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
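
The reg value of each child of the leds node selects the pin: 0 = LED_0, 1 = LED_1/GPIO1, 2 = COL/GPIO2, 3 = RX_D3/GPIO3, per the DP83822_LED_INDEX_* constants above. A devicetree fragment along the lines of leds { led@1 { reg = <1>; }; } would thus enable hardware LED control on GPIO1 only.
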
+
static int dp83822_of_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only be enabled if FX_EN
@@ -625,7 +792,30 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->fx_enabled = device_property_present(dev,
"ti,fiber-mode");
- return 0;
+ if (!device_property_read_string(dev, "ti,gpio2-clk-out", &of_val)) {
+ if (strcmp(of_val, "mac-if") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_MAC_IF;
+ } else if (strcmp(of_val, "xi") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_XI;
+ } else if (strcmp(of_val, "int-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_INT_REF;
+ } else if (strcmp(of_val, "rmii-master-mode-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RMII_MASTER_MODE_REF;
+ } else if (strcmp(of_val, "free-running") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_FREE_RUNNING;
+ } else if (strcmp(of_val, "recovered") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RECOVERED;
+ } else {
+ phydev_err(phydev,
+ "Invalid value for ti,gpio2-clk-out property (%s)\n",
+ of_val);
+ return -EINVAL;
+ }
+
+ dp83822->set_gpio2_clk_out = true;
+ }
+
+ return dp83822_of_init_leds(phydev);
}
static int dp83826_to_dac_minus_one_regval(int percent)
@@ -673,7 +863,7 @@ static int dp83822_read_straps(struct phy_device *phydev)
int fx_enabled, fx_sd_enable;
int val;
- val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_SOR1);
if (val < 0)
return val;
@@ -723,7 +913,9 @@ static int dp83822_probe(struct phy_device *phydev)
if (ret)
return ret;
- dp83822_of_init(phydev);
+ ret = dp83822_of_init(phydev);
+ if (ret)
+ return ret;
if (dp83822->fx_enabled)
phydev->port = PORT_FIBRE;
@@ -748,7 +940,7 @@ static int dp83822_suspend(struct phy_device *phydev)
{
int value;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (!(value & DP83822_WOL_EN))
genphy_suspend(phydev);
@@ -762,14 +954,138 @@ static int dp83822_resume(struct phy_device *phydev)
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value |
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value |
DP83822_WOL_CLR_INDICATION);
return 0;
}
+static int dp83822_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83822_LED_FN_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83822_LED_FN_LINK_10_BT;
+ case BIT(TRIGGER_NETDEV_LINK_100):
+ return DP83822_LED_FN_LINK_100_BTX;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83822_LED_FN_FULL_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83822_LED_FN_TX;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX_TX;
+ case BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR):
+ return DP83822_LED_FN_RX_TX_ERR;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83822_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int dp83822_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_MLEDCR, DP83822_MLEDCR_CFG,
+ FIELD_PREP(DP83822_MLEDCR_CFG, mode));
+ else if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED1_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED1_CTRL,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED3_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED3_CTRL,
+ mode));
+}
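
Because LED_0 and COL/GPIO2 are both driven by the single MLED function, they share the one DP83822_MLEDCR_CFG field here; this is also why dp83822_of_init_leds() above refuses to enable both pins at the same time.
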
+
+static int dp83822_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR);
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(DP83822_MLEDCR_CFG, val);
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LEDCFG1);
+ if (val < 0)
+ return val;
+
+ if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ val = FIELD_GET(DP83822_LEDCFG1_LED1_CTRL, val);
+ else
+ val = FIELD_GET(DP83822_LEDCFG1_LED3_CTRL, val);
+ }
+
+ switch (val) {
+ case DP83822_LED_FN_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case DP83822_LED_FN_LINK_10_BT:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83822_LED_FN_LINK_100_BTX:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case DP83822_LED_FN_FULL_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83822_LED_FN_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83822_LED_FN_RX:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX_ERR:
+ *rules = BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR);
+ break;
+ case DP83822_LED_FN_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
#define DP83822_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -785,6 +1101,9 @@ static int dp83822_resume(struct phy_device *phydev)
.handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
+ .led_hw_is_supported = dp83822_led_hw_is_supported, \
+ .led_hw_control_set = dp83822_led_hw_control_set, \
+ .led_hw_control_get = dp83822_led_hw_control_get, \
}
#define DP83825_PHY_DRIVER(_id, _name) \
@@ -830,7 +1149,7 @@ static struct phy_driver dp83822_driver[] = {
};
module_phy_driver(dp83822_driver);
-static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
{ DP83826C_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 351411f0aa6f..d88b1999d596 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -123,7 +123,7 @@ static int dp83848_config_init(struct phy_device *phydev)
return 0;
}
-static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4120385c5a79..c1451df430ac 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1210,7 +1210,7 @@ static struct phy_driver dp83867_driver[] = {
};
module_phy_driver(dp83867_driver);
-static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83867_tbl[] = {
{ DP83867_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index b6b38caf9c0e..a62cd838a9ea 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -928,7 +928,7 @@ static struct phy_driver dp83869_driver[] = {
};
module_phy_driver(dp83869_driver);
-static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
{ PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 7ea32fb77190..e480c2a07450 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -403,7 +403,7 @@ static struct phy_driver dp83811_driver[] = {
};
module_phy_driver(dp83811_driver);
-static struct mdio_device_id __maybe_unused dp83811_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83811_tbl[] = {
{ DP83TC811_PHY_ID, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 92aa3a2b9744..a42af9c168ec 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -34,6 +34,29 @@
#define DP83TD510E_CTRL_HW_RESET BIT(15)
#define DP83TD510E_CTRL_SW_RESET BIT(14)
+/*
+ * DP83TD510E_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TD510E_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TD510E_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TD510E_PKT_STAT_1 0x12b
+#define DP83TD510E_PKT_STAT_2 0x12c
+#define DP83TD510E_PKT_STAT_3 0x12d
+#define DP83TD510E_PKT_STAT_4 0x12e
+#define DP83TD510E_PKT_STAT_5 0x12f
+#define DP83TD510E_PKT_STAT_6 0x130
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
@@ -58,8 +81,16 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_stats {
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
struct dp83td510_priv {
bool alcd_test_active;
+ struct dp83td510_stats stats;
};
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
@@ -177,6 +208,85 @@ struct dp83td510_priv {
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+/**
+ * dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_update_stats(struct phy_device *phydev)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* The DP83TD510E_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TD510E_PKT_STAT_1 to DP83TD510E_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TD510E_PKT_STAT_4 to DP83TD510E_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
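+/* Report the accumulated software counters; they are refreshed
+ * beforehand via the driver's update_stats callback.
+ */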
+static void dp83td510_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -599,13 +709,15 @@ static struct phy_driver dp83td510_driver[] = {
.get_sqi_max = dp83td510_get_sqi_max,
.cable_test_start = dp83td510_cable_test_start,
.cable_test_get_status = dp83td510_cable_test_get_status,
+ .get_phy_stats = dp83td510_get_phy_stats,
+ .update_stats = dp83td510_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83td510_driver);
-static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 0ef4d7dba065..050f4537d140 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -51,6 +51,9 @@
/* Register 0x0405: Unknown Register */
#define DP83TG720S_UNKNOWN_0405 0x405
+#define DP83TG720S_LINK_QUAL_3 0x547
+#define DP83TG720S_LINK_LOSS_CNT_MASK GENMASK(15, 10)
+
/* Register 0x0576: TDR Master Link Down Control */
#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
@@ -60,6 +63,29 @@
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/*
+ * DP83TG720S_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). The packet counters
+ * are 32 bits wide, split across two 16-bit registers, while the error
+ * counters are 16 bits wide. The registers must be read in sequence to
+ * ensure the counters are cleared correctly.
+ *
+ * - DP83TG720S_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TG720S_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_6: Contains RX error packet count.
+ *
+ * The register names are kept as defined in the datasheet to stay
+ * aligned with the documentation.
+ */
+#define DP83TG720S_PKT_STAT_1 0x639
+#define DP83TG720S_PKT_STAT_2 0x63a
+#define DP83TG720S_PKT_STAT_3 0x63b
+#define DP83TG720S_PKT_STAT_4 0x63c
+#define DP83TG720S_PKT_STAT_5 0x63d
+#define DP83TG720S_PKT_STAT_6 0x63e
+
/* Register 0x083F: Unknown Register */
#define DP83TG720S_UNKNOWN_083F 0x83f
@@ -69,6 +95,113 @@
#define DP83TG720_SQI_MAX 7
+struct dp83tg720_stats {
+ u64 link_loss_cnt;
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
+struct dp83tg720_priv {
+ struct dp83tg720_stats stats;
+};
+
+/**
+ * dp83tg720_update_stats - Update the PHY statistics for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_update_stats(struct phy_device *phydev)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* Read the link loss count */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LINK_QUAL_3);
+ if (ret < 0)
+ return ret;
+ /* link_loss_cnt */
+ count = FIELD_GET(DP83TG720S_LINK_LOSS_CNT_MASK, ret);
+ priv->stats.link_loss_cnt += count;
+
+ /* The DP83TG720S_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TG720S_PKT_STAT_1 to DP83TG720S_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TG720S_PKT_STAT_4 to DP83TG720S_PKT_STAT_6
+ *
+ * The registers in each group are cleared only after being read in
+ * strict order (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from this order, such as reading 1, 2, 1, 2, 3,
+ * prevents the group from being cleared. Additionally, the counters
+ * of a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void dp83tg720_get_link_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ link_stats->link_down_events = priv->stats.link_loss_cnt;
+}
+
+static void dp83tg720_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
/**
* dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -182,6 +315,11 @@ static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
return phy_init_hw(phydev);
}
@@ -217,6 +355,11 @@ static int dp83tg720_read_status(struct phy_device *phydev)
phy_sts = phy_read(phydev, DP83TG720S_MII_REG_10);
phydev->link = !!(phy_sts & DP83TG720S_LINK_STATUS);
if (!phydev->link) {
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
/* According to the "DP83TC81x, DP83TG72x Software
* Implementation Guide", the PHY needs to be reset after a
* link loss or if no link is created after at least 100ms.
@@ -341,12 +484,27 @@ static int dp83tg720_config_init(struct phy_device *phydev)
return genphy_c45_pma_baset1_read_master_slave(phydev);
}
+static int dp83tg720_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83tg720_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83tg720_probe,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
@@ -355,13 +513,16 @@ static struct phy_driver dp83tg720_driver[] = {
.get_sqi_max = dp83tg720_get_sqi_max,
.cable_test_start = dp83tg720_cable_test_start,
.cable_test_get_status = dp83tg720_cable_test_get_status,
+ .get_link_stats = dp83tg720_get_link_stats,
+ .get_phy_stats = dp83tg720_get_phy_stats,
+ .update_stats = dp83tg720_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83tg720_driver);
-static struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index be1b71d7cab7..6cd8d77586fd 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -94,7 +94,7 @@ static struct phy_driver et1011c_driver[] = { {
module_phy_driver(et1011c_driver);
-static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
+static const struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ee438b71a0b4..bbcc7d2b54cd 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -623,7 +623,7 @@ static struct phy_driver icplus_driver[] = {
module_phy_driver(icplus_driver);
-static struct mdio_device_id __maybe_unused icplus_tbl[] = {
+static const struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ PHY_ID_MATCH_MODEL(IP175C_PHY_ID) },
{ PHY_ID_MATCH_MODEL(IP1001_PHY_ID) },
{ PHY_ID_MATCH_EXACT(IP101A_PHY_ID) },
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index e6ed2413e514..a44771e8acdc 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -691,7 +691,7 @@ static struct phy_driver xway_gphy[] = {
};
module_phy_driver(xway_gphy);
-static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
{ PHY_ID_PHY11G_1_3, 0xffffffff },
{ PHY_ID_PHY22F_1_3, 0xffffffff },
{ PHY_ID_PHY11G_1_4, 0xffffffff },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index e3bf827b7959..5251a61c8b0f 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -348,7 +348,7 @@ static struct phy_driver lxt97x_driver[] = {
module_phy_driver(lxt97x_driver);
-static struct mdio_device_id __maybe_unused lxt_tbl[] = {
+static const struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 5107f58338af..a3996471a1c9 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -95,6 +95,10 @@
#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+struct mv88q2xxx_priv {
+ bool enable_temp;
+};
+
struct mmd_val {
int devad;
u32 regnum;
@@ -710,17 +714,12 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
char *hwmon_name;
- int ret;
-
- /* Enable temperature sense */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
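+ /* Only record that the sensor should be enabled; the actual
+ * register write is deferred to config_init, after the PHY has
+ * been configured.
+ */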
+ priv->enable_temp = true;
hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
if (IS_ERR(hwmon_name))
return PTR_ERR(hwmon_name);
@@ -743,6 +742,14 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
static int mv88q2xxx_probe(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
return mv88q2xxx_hwmon_probe(phydev);
}
@@ -810,6 +817,18 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
static int mv88q222x_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ int ret;
+
+ /* Enable temperature sense */
+ if (priv->enable_temp) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
return mv88q222x_revb0_config_init(phydev);
else
@@ -939,7 +958,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
module_phy_driver(mv88q2xxx_driver);
-static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index 0b777cdd7078..fad2f54c1eac 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -613,7 +613,7 @@ static struct phy_driver mv2222_drivers[] = {
};
module_phy_driver(mv2222_drivers);
-static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv2222_tbl[] = {
{ MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cd50cd6a7f75..44e1927de499 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -717,6 +717,48 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static unsigned int m88e1111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* In 1000base-X and SGMII modes, the inband mode can be changed
+ * through the Fibre page BMCR ANENABLE bit.
+ */
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+
+ return 0;
+}
+
+static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 extsr, bmcr;
+ int err;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ return -EINVAL;
+
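+ /* AN bypass is controlled through the extended status register,
+ * while AN enable lives in the fibre page BMCR; derive both
+ * settings from the requested mode.
+ */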
+ if (modes == LINK_INBAND_BYPASS)
+ extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS;
+ else
+ extsr = 0;
+
+ if (modes == LINK_INBAND_DISABLE)
+ bmcr = 0;
+ else
+ bmcr = BMCR_ANENABLE;
+
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr);
+ if (err < 0)
+ return err;
+
+ return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR,
+ BMCR_ANENABLE, bmcr);
+}
+
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
@@ -1508,7 +1550,6 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
@@ -1518,8 +1559,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = genphy_c45_ethtool_get_eee(phydev, &eee);
- if (!ret && eee.eee_enabled) {
+ if (phydev->eee_cfg.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
}
@@ -3677,6 +3717,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1112",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1112_config_init,
.config_aneg = marvell_config_aneg,
.config_intr = marvell_config_intr,
@@ -3698,6 +3740,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -3721,6 +3765,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1111 (Finisar)",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -4143,7 +4189,7 @@ static struct phy_driver marvell_drivers[] = {
module_phy_driver(marvell_drivers);
-static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+static const struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 6642eb642d4b..623bdb8466b8 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1484,7 +1484,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
-static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv3310_tbl[] = {
{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 38dc898eaf7b..bdf99b327029 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1356,7 +1356,7 @@ static struct phy_driver mtk_socphy_driver[] = {
module_phy_driver(mtk_socphy_driver);
-static struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
{ }
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
index ed2617bc20f4..b517ca8573e7 100644
--- a/drivers/net/phy/mediatek/mtk-ge.c
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -93,7 +93,7 @@ static struct phy_driver mtk_gephy_driver[] = {
module_phy_driver(mtk_gephy_driver);
-static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531) },
{ }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index bb9b33b6bce2..962ebbbc1348 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -221,7 +221,7 @@ static struct phy_driver meson_gxl_phy[] = {
},
};
-static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
+static const struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
{ PHY_ID_MATCH_VENDOR(0x01814400) },
{ PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index eeb33eb181ac..9c0b1c229af6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2006,7 +2006,7 @@ static int ksz9477_config_init(struct phy_device *phydev)
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
- linkmode_fill(phydev->eee_broken_modes);
+ phy_disable_eee(phydev);
return kszphy_config_init(phydev);
}
@@ -5689,7 +5689,7 @@ MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 691969a4910f..0e17cc458efd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -548,7 +548,7 @@ static struct phy_driver microchip_phy_driver[] = {
module_phy_driver(microchip_phy_driver);
-static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+static const struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
new file mode 100644
index 000000000000..3e6bf10cdeed
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Microchip Technology
+
+#include "microchip_rds_ptp.h"
+
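+/* All RDS PTP registers are reached through a single MMD; a register
+ * is addressed as its offset plus either the port or the clock base
+ * address of the PHY, selected by the 'base' argument of the helpers
+ * below.
+ */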
+static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_read_mmd(phydev, PTP_MMD(clock), addr);
+}
+
+static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 mask, u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
+}
+
+static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_get_pulsewidth(struct phy_device *phydev,
+ struct ptp_perout_request *perout_request,
+ int *pulse_width)
+{
+ struct timespec64 ts_period;
+ s64 ts_on_nsec, period_nsec;
+ struct timespec64 ts_on;
+ static const s64 sup_on_nsecs[] = {
+ 100, /* 100ns */
+ 500, /* 500ns */
+ 1000, /* 1us */
+ 5000, /* 5us */
+ 10000, /* 10us */
+ 50000, /* 50us */
+ 100000, /* 100us */
+ 500000, /* 500us */
+ 1000000, /* 1ms */
+ 5000000, /* 5ms */
+ 10000000, /* 10ms */
+ 50000000, /* 50ms */
+ 100000000, /* 100ms */
+ 200000000, /* 200ms */
+ };
+
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ ts_on_nsec = timespec64_to_ns(&ts_on);
+ period_nsec = timespec64_to_ns(&ts_period);
+
+ if (period_nsec < 200) {
+ phydev_warn(phydev, "perout period small, minimum is 200ns\n");
+ return -EOPNOTSUPP;
+ }
+
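+ /* Select the smallest supported pulse width that still covers
+ * the requested on-time, e.g. a 3us request maps to the 5us
+ * setting.
+ */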
+ for (int i = 0; i < ARRAY_SIZE(sup_on_nsecs); i++) {
+ if (ts_on_nsec <= sup_on_nsecs[i]) {
+ *pulse_width = i;
+ break;
+ }
+ }
+
+ phydev_info(phydev, "pulse width is %d\n", *pulse_width);
+ return 0;
+}
+
+static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
+ int pulse_width)
+{
+ int general_config;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ if (general_config < 0)
+ return general_config;
+
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+}
+
+static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
+ s64 period_sec, u32 period_nsec)
+{
+ int rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_nsec) & 0x3fff);
+}
+
+static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
+ s64 start_sec, u32 start_nsec)
+{
+ int rc;
+
+ /* Set the start time */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_nsec) & 0x3fff);
+}
+
+static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
+{
+ int general_config;
+ int rc;
+
+ /* Set the target far enough into the future to effectively disable it */
+ rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
+ if (rc < 0)
+ return rc;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ if (general_config < 0)
+ return general_config;
+
+ general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+ if (rc < 0)
+ return rc;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ return 0;
+}
+
+static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
+{
+ if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
+ clock->mchp_rds_ptp_event = pin;
+ return true;
+ }
+
+ return false;
+}
+
+static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
+ struct ptp_perout_request *perout, int on)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct phy_device *phydev = clock->phydev;
+ int ret, event_pin, pulsewidth;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
+ perout->index);
+ if (event_pin != clock->event_pin)
+ return -EINVAL;
+
+ if (!on) {
+ ret = mchp_rds_ptp_perout_off(clock);
+ return ret;
+ }
+
+ if (!mchp_get_event(clock, event_pin))
+ return -EINVAL;
+
+ ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ /* Configure to pulse every period */
+ ret = mchp_general_event_config(clock, pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ ret = mchp_set_clock_target(clock, perout->start.sec,
+ perout->start.nsec);
+ if (ret < 0)
+ return ret;
+
+ return mchp_set_clock_reload(clock, perout->period.sec,
+ perout->period.nsec);
+}
+
+static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ switch (request->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return mchp_rds_ptp_perout(ptpci, &request->perout, on);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+
+ if (!(pin == clock->event_pin && chan == 0))
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
+ enum mchp_rds_ptp_fifo_dir dir)
+{
+ int rc;
+
+ if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
+ skb_queue_purge(&clock->tx_queue);
+ else
+ skb_queue_purge(&clock->rx_queue);
+
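+ /* Drain the HW FIFO by reading the final header word once per
+ * entry, then read back the interrupt status so stale timestamp
+ * indications are discarded.
+ */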
+ for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
+ rc = mchp_rds_phy_read_mmd(clock,
+ dir == MCHP_RDS_PTP_EGRESS_FIFO ?
+ MCHP_RDS_PTP_TX_MSG_HDR2 :
+ MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return rc;
+ }
+ return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+}
+
+static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
+ bool enable)
+{
+ /* Enable or disable ptp interrupts */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
+ MCHP_RDS_PTP_PORT,
+ enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
+}
+
+static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ switch (clock->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&clock->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
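+/* The PTP header sequence id is used as the signature for matching
+ * hardware timestamps against queued frames.
+ */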
+static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->rx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->rx_queue);
+
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return rc;
+}
+
+static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+ /* If no queued skb matches this timestamp, keep the timestamp
+ * on a list until the corresponding frame arrives
+ */
+ if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &clock->rx_ts_list);
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
+static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
+ struct sk_buff *skb)
+{
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned long flags;
+ u16 skb_sig;
+
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ return;
+
+ /* Iterate over all pending RX timestamps and match them against the received skb */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+
+ rx_ts_var = rx_ts;
+
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ if (rx_ts_var) {
+ list_del(&rx_ts_var->list);
+ kfree(rx_ts_var);
+ } else {
+ skb_queue_tail(&clock->rx_queue, skb);
+ }
+}
+
+static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & clock->version) == 0 || (type & clock->layer) == 0)
+ return false;
+
+ /* If a matching timestamp is already available, the skb is passed
+ * to the stack right away. Otherwise it is queued and delivered
+ * from the interrupt handler once its timestamp arrives. Either
+ * way the skb reaches the application, so do not return false
+ * here even when no match is found.
+ */
+ mchp_rds_ptp_match_rx_skb(clock, skb);
+
+ return true;
+}
+
+static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mchp_rds_ptp_clock *clock =
+ container_of(mii_ts, struct mchp_rds_ptp_clock,
+ mii_ts);
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
+ int txcfg = 0, rxcfg = 0;
+ unsigned long flags;
+ int rc;
+
+ clock->hwts_tx_type = config->tx_type;
+ clock->rx_filter = config->rx_filter;
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clock->layer = 0;
+ clock->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Setup parsing of the frames and enable the timestamping for ptp
+ * frames
+ */
+ if (clock->layer & PTP_CLASS_L2) {
+ rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ }
+ if (clock->layer & PTP_CLASS_L4) {
+ rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ }
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, rxcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, txcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ /* Enable / disable of the TX timestamp in the SYNC frames */
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+ else
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+
+ if (rc < 0)
+ return rc;
+
+ /* In case of multiple starts and stops, stale RX timestamps need to be cleared */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ /* Now enable the timestamping interrupts */
+ rc = mchp_rds_ptp_config_intr(clock,
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ info->phc_index = ptp_clock_index(clock->ptp_clock);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct timespec64 ts;
+ bool add = true;
+ int rc = 0;
+ u32 nsec;
+ s32 sec;
+
+ /* The HW allows step adjustments of up to 15 seconds, but limit it
+ * to 10 seconds here. Otherwise an adjustment of, e.g., 14 sec and
+ * 999999999 nsec plus the 8 ns added below to compensate for the
+ * normal increment could exceed the 15 second limit. Restricting
+ * the range avoids these corner cases.
+ */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+ /* The timeadjustment is too big, so fall back using set time */
+ u64 now;
+
+ info->gettime64(info, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ info->settime64(info, &ts);
+ return 0;
+ }
+ sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+ /* The nsec part cannot be adjusted downwards, so borrow from
+ * the seconds part and convert the remainder into a positive
+ * nsec value to add instead
+ */
+ sec--;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the adjustments and the direction */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* carry into seconds */
+ sec++;
+ nsec -= NSEC_PER_SEC;
+ }
+ }
+
+ mutex_lock(&clock->ptp_lock);
+ if (sec) {
+ sec = abs(sec);
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, sec);
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ ((add ?
+ MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
+ 0) | ((sec >> 16) &
+ GENMASK(13, 0))));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
+ if (rc < 0)
+ goto out_unlock;
+ }
+
+ if (nsec) {
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK,
+ nsec & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ (nsec >> 16) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
+ }
+
+ mutex_unlock(&clock->ptp_lock);
+ info->gettime64(info, &ts);
+ mutex_lock(&clock->ptp_lock);
+
+ /* Target update is required for pulse generation on events that
+ * are enabled
+ */
+ if (clock->mchp_rds_ptp_event >= 0)
+ mchp_set_clock_target(clock,
+ ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
+ long scaled_ppm)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ u16 rate_lo, rate_hi;
+ bool faster = true;
+ u32 rate;
+ int rc;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
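+ /* scaled_ppm carries ppm in 16.16 fixed point; multiply the upper
+ * and lower halves separately so the intermediate products stay
+ * within 32 bits.
+ */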
+ rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ rate_lo = rate & GENMASK(15, 0);
+ rate_hi = (rate >> 16) & GENMASK(13, 0);
+
+ if (faster)
+ rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK, rate_hi);
+ if (rc < 0)
+ goto error;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, rate_lo);
+ if (rc > 0)
+ rc = 0;
+error:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
+ struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ time64_t secs;
+ int rc = 0;
+ s64 nsecs;
+
+ mutex_lock(&clock->ptp_lock);
+ /* Set read bit to 1 to save current values of 1588 local time counter
+ * into PTP LTC seconds and nanoseconds registers.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Get LTC clock values */
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+ secs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs = (rc & GENMASK(13, 0));
+ nsecs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs |= rc;
+
+ set_normalized_timespec64(ts, secs, nsecs);
+
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ int rc;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_nsec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Set load bit to 1 to write PTP LTC seconds and nanoseconds
+ * registers to 1588 local time counter.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
+ u32 seconds, u32 nsec, u16 seq_id)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->tx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->tx_queue);
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->tx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static struct mchp_rds_ptp_rx_ts
+ *mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ struct phy_device *phydev = clock->phydev;
+ struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
+ u32 sec, nsec;
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
+ phydev_err(phydev, "RX Timestamp is not valid!\n");
+ goto error;
+ }
+ nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ nsec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+
+ rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = rc;
+
+error:
+ return rx_ts;
+}
+
+static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ struct mchp_rds_ptp_rx_ts *rx_ts;
+
+ rx_ts = mchp_rds_ptp_get_rx_ts(clock);
+ if (rx_ts)
+ mchp_rds_ptp_match_rx_ts(clock, rx_ts);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
+}
+
+static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
+ return false;
+ *nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *nsec = *nsec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = *sec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+
+ *seq = rc;
+
+ return true;
+}
+
+static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ u32 sec, nsec;
+ u16 seq;
+
+ if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
+ mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
+}
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear)
+{
+ if (clear)
+ return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+ else
+ return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ int irq_sts;
+
+ /* To handle rogue interrupt scenarios */
+ if (!clock)
+ return IRQ_NONE;
+
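+ /* New timestamps may arrive while earlier ones are being
+ * processed, so loop until the status register reports no
+ * further timestamp work.
+ */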
+ do {
+ irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+ if (irq_sts < 0)
+ return IRQ_NONE;
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
+ mchp_rds_ptp_process_rx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
+ mchp_rds_ptp_process_tx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_EGRESS_FIFO);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_INGRESS_FIFO);
+ } while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
+
+static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
+{
+ int rc;
+
+ /* Disable PTP */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_DIS);
+ if (rc < 0)
+ return rc;
+
+ /* Disable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Clear PTP interrupt status registers */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_HARDRESET);
+ if (rc < 0)
+ return rc;
+
+ /* Predictor enable */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_LATENCY_SETTING);
+ if (rc < 0)
+ return rc;
+
+ /* Configure PTP operational mode */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_OP_MODE_STANDALONE);
+ if (rc < 0)
+ return rc;
+
+ /* Reference clock configuration */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_REF_CLK_CFG_SET);
+ if (rc < 0)
+ return rc;
+
+ /* Classifier configurations */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ /* Enable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Enable PTP */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_EN);
+}
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base_addr,
+ u16 port_base_addr)
+{
+ struct mchp_rds_ptp_clock *clock;
+ int rc;
+
+ clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->port_base_addr = port_base_addr;
+ clock->clk_base_addr = clk_base_addr;
+ clock->mmd = mmd;
+
+ mutex_init(&clock->ptp_lock);
+ clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
+ MCHP_RDS_PTP_N_PIN,
+ sizeof(*clock->pin_config),
+ GFP_KERNEL);
+ if (!clock->pin_config)
+ return ERR_PTR(-ENOMEM);
+
+ for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
+ struct ptp_pin_desc *p = &clock->pin_config[i];
+
+ memset(p, 0, sizeof(*p));
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+ /* Register PTP clock */
+ clock->caps.owner = THIS_MODULE;
+ snprintf(clock->caps.name, sizeof(clock->caps.name), "%s",
+ phydev->drv->name);
+ clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
+ clock->caps.n_ext_ts = 0;
+ clock->caps.pps = 0;
+ clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
+ clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
+ clock->caps.pin_config = clock->pin_config;
+ clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
+ clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
+ clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
+ clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
+ clock->caps.enable = mchp_rds_ptpci_enable;
+ clock->caps.verify = mchp_rds_ptpci_verify;
+ clock->caps.getcrosststamp = NULL;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock))
+ return ERR_PTR(-EINVAL);
+
+ /* Check if PHC support is missing at the configuration level */
+ if (!clock->ptp_clock)
+ return NULL;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&clock->tx_queue);
+ skb_queue_head_init(&clock->rx_queue);
+ INIT_LIST_HEAD(&clock->rx_ts_list);
+ spin_lock_init(&clock->rx_ts_lock);
+
+ clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
+ clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
+ clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
+ clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
+
+ phydev->mii_ts = &clock->mii_ts;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
+ clock->phydev = phydev;
+
+ rc = mchp_rds_ptp_init(clock);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ return clock;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
+MODULE_AUTHOR("Divya Koppera");
diff --git a/drivers/net/phy/microchip_rds_ptp.h b/drivers/net/phy/microchip_rds_ptp.h
new file mode 100644
index 000000000000..25af68337b94
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Microchip Technology
+ */
+
+#ifndef _MICROCHIP_RDS_PTP_H
+#define _MICROCHIP_RDS_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define MCHP_RDS_PTP_CMD_CTL 0x0
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC BIT(6)
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC BIT(5)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD BIT(4)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_READ BIT(3)
+#define MCHP_RDS_PTP_CMD_CTL_EN BIT(1)
+#define MCHP_RDS_PTP_CMD_CTL_DIS BIT(0)
+
+#define MCHP_RDS_PTP_REF_CLK_CFG 0x2
+#define MCHP_RDS_PTP_REF_CLK_SRC_250MHZ 0x0
+#define MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE BIT(9)
+#define MCHP_RDS_PTP_REF_CLK_PERIOD 4
+#define MCHP_RDS_PTP_REF_CLK_CFG_SET (MCHP_RDS_PTP_REF_CLK_SRC_250MHZ |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD)
+
+#define MCHP_RDS_PTP_LTC_SEC_HI 0x5
+#define MCHP_RDS_PTP_LTC_SEC_MID 0x6
+#define MCHP_RDS_PTP_LTC_SEC_LO 0x7
+#define MCHP_RDS_PTP_LTC_NS_HI 0x8
+#define MCHP_RDS_PTP_LTC_NS_LO 0x9
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI 0xc
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_LO 0xd
+#define MCHP_RDS_PTP_STEP_ADJ_HI 0x12
+#define MCHP_RDS_PTP_STEP_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_STEP_ADJ_LO 0x13
+#define MCHP_RDS_PTP_LTC_READ_SEC_HI 0x29
+#define MCHP_RDS_PTP_LTC_READ_SEC_MID 0x2a
+#define MCHP_RDS_PTP_LTC_READ_SEC_LO 0x2b
+#define MCHP_RDS_PTP_LTC_READ_NS_HI 0x2c
+#define MCHP_RDS_PTP_LTC_READ_NS_LO 0x2d
+#define MCHP_RDS_PTP_OP_MODE 0x41
+#define MCHP_RDS_PTP_OP_MODE_DIS 0
+#define MCHP_RDS_PTP_OP_MODE_STANDALONE 1
+#define MCHP_RDS_PTP_LATENCY_CORRECTION_CTL 0x44
+#define MCHP_RDS_PTP_PREDICTOR_EN BIT(6)
+#define MCHP_RDS_PTP_TX_PRED_DIS BIT(1)
+#define MCHP_RDS_PTP_RX_PRED_DIS BIT(0)
+#define MCHP_RDS_PTP_LATENCY_SETTING (MCHP_RDS_PTP_PREDICTOR_EN | \
+ MCHP_RDS_PTP_TX_PRED_DIS | \
+ MCHP_RDS_PTP_RX_PRED_DIS)
+
+#define MCHP_RDS_PTP_INT_EN 0x0
+#define MCHP_RDS_PTP_INT_STS 0x01
+#define MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN BIT(3)
+#define MCHP_RDS_PTP_INT_TX_TS_EN BIT(2)
+#define MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN BIT(1)
+#define MCHP_RDS_PTP_INT_RX_TS_EN BIT(0)
+#define MCHP_RDS_PTP_INT_ALL_MSK (MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | \
+ MCHP_RDS_PTP_INT_TX_TS_EN | \
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN |\
+ MCHP_RDS_PTP_INT_RX_TS_EN)
+
+#define MCHP_RDS_PTP_CAP_INFO 0x2e
+#define MCHP_RDS_PTP_TX_TS_CNT(v) (((v) & GENMASK(11, 8)) >> 8)
+#define MCHP_RDS_PTP_RX_TS_CNT(v) ((v) & GENMASK(3, 0))
+
+#define MCHP_RDS_PTP_RX_PARSE_CONFIG 0x42
+#define MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN 0x44
+#define MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN 0x45
+
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG 0x4e
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_RX_VERSION 0x48
+#define MCHP_RDS_PTP_RX_TIMESTAMP_EN 0x4d
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI 0x54
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_LO 0x55
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_HI 0x56
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_LO 0x57
+#define MCHP_RDS_PTP_RX_MSG_HDR2 0x59
+
+#define MCHP_RDS_PTP_TX_PARSE_CONFIG 0x82
+#define MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN BIT(0)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN BIT(1)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN BIT(2)
+
+#define MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN 0x84
+#define MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN 0x85
+
+#define MCHP_RDS_PTP_TX_VERSION 0x88
+#define MCHP_RDS_PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define MCHP_RDS_PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_EN 0x8d
+#define MCHP_RDS_PTP_TIMESTAMP_EN_SYNC BIT(0)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_DREQ BIT(1)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ BIT(2)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDRES BIT(3)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_ALL (MCHP_RDS_PTP_TIMESTAMP_EN_SYNC |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_DREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDRES)
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG 0x8e
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_TX_MOD 0x8f
+#define MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT BIT(12)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI 0x94
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_LO 0x95
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_HI 0x96
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_LO 0x97
+#define MCHP_RDS_PTP_TX_MSG_HDR2 0x99
+
+#define MCHP_RDS_PTP_TSU_GEN_CONFIG 0xc0
+#define MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN BIT(0)
+
+#define MCHP_RDS_PTP_TSU_HARD_RESET 0xc1
+#define MCHP_RDS_PTP_TSU_HARDRESET BIT(0)
+
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_HI 0x15
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_LO 0x16
+#define MCHP_RDS_PTP_CLK_TRGT_NS_HI 0x17
+#define MCHP_RDS_PTP_CLK_TRGT_NS_LO 0x18
+
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI 0x19
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO 0x1a
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI 0x1b
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO 0x1c
+
+#define MCHP_RDS_PTP_GEN_CFG 0x01
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK GENMASK(11, 8)
+
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(value) (((value) & 0xF) << 4)
+#define MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD BIT(0)
+#define MCHP_RDS_PTP_GEN_CFG_POLARITY BIT(1)
+
+/* Represents a 1 ppm adjustment in 2^32 format, where each nanosecond
+ * contains 4 clock cycles of the 250 MHz reference clock.
+ * The value is calculated as follows:
+ * (1/1000000) / ((2^-32)/4) = 2^34/10^6 = 17179.87, truncated to 17179.
+ */
+#define MCHP_RDS_PTP_1PPM_FORMAT 17179
+#define MCHP_RDS_PTP_FIFO_SIZE 8
+#define MCHP_RDS_PTP_MAX_ADJ 31249999
+
+#define MCHP_RDS_PTP_BUFFER_TIME 2
+#define MCHP_RDS_PTP_N_PIN 4
+#define MCHP_RDS_PTP_N_PEROUT 1
+
+#define BASE_CLK(p) ((p)->clk_base_addr)
+#define BASE_PORT(p) ((p)->port_base_addr)
+#define PTP_MMD(p) ((p)->mmd)
+
+enum mchp_rds_ptp_base {
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_CLOCK
+};
+
+enum mchp_rds_ptp_fifo_dir {
+ MCHP_RDS_PTP_INGRESS_FIFO,
+ MCHP_RDS_PTP_EGRESS_FIFO
+};
+
+struct mchp_rds_ptp_clock {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ struct list_head rx_ts_list;
+
+ struct ptp_clock_info caps;
+
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+ int hwts_tx_type;
+
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+ u16 port_base_addr;
+ u16 clk_base_addr;
+
+ /* Lock for phc */
+ struct mutex ptp_lock;
+ u8 mmd;
+ int mchp_rds_ptp_event;
+ int event_pin;
+ struct ptp_pin_desc *pin_config;
+};
+
+struct mchp_rds_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+#if IS_ENABLED(CONFIG_MICROCHIP_PHY_RDS_PTP)
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base, u16 port_base);
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool enable);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock);
+
+#else
+
+static inline struct mchp_rds_ptp_clock *
+mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, u16 clk_base,
+ u16 port_base)
+{
+ return NULL;
+}
+
+static inline int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool enable)
+{
+ return 0;
+}
+
+static inline irqreturn_t
+mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ return IRQ_NONE;
+}
+
+#endif /* CONFIG_MICROCHIP_PHY_RDS_PTP */
+
+#endif /* _MICROCHIP_RDS_PTP_H */
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index b17bf6708003..62b36a318100 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -10,11 +10,15 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/bitfield.h>
+#include "microchip_rds_ptp.h"
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
#define PHY_ID_LAN887X 0x0007c1f0
+#define MCHP_RDS_PTP_LTC_BASE_ADDR 0xe000
+#define MCHP_RDS_PTP_PORT_BASE_ADDR (MCHP_RDS_PTP_LTC_BASE_ADDR + 0x800)
+
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
@@ -229,10 +233,14 @@
#define LAN887X_INT_STS 0xf000
#define LAN887X_INT_MSK 0xf001
+#define LAN887X_INT_MSK_P1588_MOD_INT_MSK BIT(3)
#define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2)
#define LAN887X_INT_MSK_LINK_UP_MSK BIT(1)
#define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0)
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1 0xF002
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN BIT(8)
+
#define LAN887X_MX_CHIP_TOP_LINK_MSK (LAN887X_INT_MSK_LINK_UP_MSK |\
LAN887X_INT_MSK_LINK_DOWN_MSK)
@@ -319,6 +327,8 @@ struct lan887x_regwr_map {
struct lan887x_priv {
u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+ struct mchp_rds_ptp_clock *clock;
+ bool init_done;
};
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
@@ -1269,8 +1279,28 @@ static int lan887x_get_features(struct phy_device *phydev)
static int lan887x_phy_init(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int ret;
+ if (!priv->init_done && phy_interrupt_is_valid(phydev)) {
+ priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
+ MCHP_RDS_PTP_LTC_BASE_ADDR,
+ MCHP_RDS_PTP_PORT_BASE_ADDR);
+ if (IS_ERR(priv->clock))
+ return PTR_ERR(priv->clock);
+
+ /* Enable pin mux for EVT */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN);
+
+ /* Initialize pin numbers specific to PEROUT */
+ priv->clock->event_pin = 3;
+
+ priv->init_done = true;
+ }
+
/* Clear loopback */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
LAN887X_MIS_CFG_REG2,
@@ -1470,6 +1500,7 @@ static int lan887x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
+ priv->init_done = false;
phydev->priv = priv;
return lan887x_phy_setup(phydev);
@@ -1518,6 +1549,7 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
static int lan887x_config_intr(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -1537,12 +1569,24 @@ static int lan887x_config_intr(struct phy_device *phydev)
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
}
+ if (rc < 0)
+ return rc;
- return rc < 0 ? rc : 0;
+ if (phy_is_default_hwtstamp(phydev)) {
+ return mchp_rds_ptp_top_config_intr(priv->clock,
+ LAN887X_INT_MSK,
+ LAN887X_INT_MSK_P1588_MOD_INT_MSK,
+ (phydev->interrupts ==
+ PHY_INTERRUPT_ENABLED));
+ }
+
+ return 0;
}
static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
+ int rc = IRQ_NONE;
int irq_status;
irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
@@ -1553,10 +1597,13 @@ static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) {
phy_trigger_machine(phydev);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
}
- return IRQ_NONE;
+ if (irq_status & LAN887X_INT_MSK_P1588_MOD_INT_MSK)
+ rc = mchp_rds_ptp_handle_interrupt(priv->clock);
+
+ return rc;
}
static int lan887x_cd_reset(struct phy_device *phydev,
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 75d291154b4c..e50a0c102a86 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -497,7 +497,7 @@ static struct phy_driver microchip_t1s_driver[] = {
module_phy_driver(microchip_t1s_driver);
-static struct mdio_device_id __maybe_unused tbl[] = {
+static const struct mdio_device_id __maybe_unused tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) },
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index bee381200ab8..19cf12ee8990 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2699,7 +2699,7 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
-static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_MATCH_VENDOR(PHY_VENDOR_MSCC) },
{ }
};
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index a8ccf257c109..94d9cb727121 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -1274,7 +1274,7 @@ static struct phy_driver gpy_drivers[] = {
};
module_phy_driver(gpy_drivers);
-static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+static const struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
{PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9ae9cc6b23c2..7f3ff322892e 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -173,7 +173,7 @@ MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused ns_tbl[] = {
+static const struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
index 5680584f659e..cabdd83c614f 100644
--- a/drivers/net/phy/ncn26000.c
+++ b/drivers/net/phy/ncn26000.c
@@ -159,7 +159,7 @@ static struct phy_driver ncn26000_driver[] = {
module_phy_driver(ncn26000_driver);
-static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+static const struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
{ }
};
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index ade544bc007d..e9fc54517449 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -22,6 +22,11 @@
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
+#define VEND1_DEVICE_ID3 0x0004
+#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
+#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
+#define DEVICE_ID3_SAMPLE_TYPE_R 0x9
+
#define VEND1_DEVICE_CONTROL 0x0040
#define DEVICE_CONTROL_RESET BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
@@ -109,6 +114,9 @@
#define MII_BASIC_CONFIG_RMII 0x5
#define MII_BASIC_CONFIG_MII 0x4
+#define VEND1_SGMII_BASIC_CONTROL 0xB000
+#define SGMII_LPM BIT(11)
+
#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
#define EXTENDED_CNT_EN BIT(15)
#define VEND1_MONITOR_STATUS 0xAC80
@@ -1297,6 +1305,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev)
if (ret)
return ret;
+ usleep_range(2000, 2050);
+
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
VEND1_DEVICE_CONTROL, ret,
!(ret & DEVICE_CONTROL_RESET), 20000,
@@ -1591,6 +1601,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
return 0;
}
+/* Errata: ES_TJA1120 and ES_TJA1121, Rev. 1.0, 28 November 2024, Sections 3.1 and 3.2 */
+static void nxp_c45_tja1120_errata(struct phy_device *phydev)
+{
+ bool macsec_ability, sgmii_ability;
+ int silicon_version, sample_type;
+ int phy_abilities;
+ int ret = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
+ if (ret < 0)
+ return;
+
+ sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
+ if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
+ return;
+
+ silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
+ if ((!macsec_ability && silicon_version == 2) ||
+ (macsec_ability && silicon_version == 1)) {
+ /* TJA1120/TJA1121 PHY configuration errata workaround.
+ * Apply the PHY register write sequence before link up.
+ */
+ if (!macsec_ability) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
+ }
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
+
+ if (sgmii_ability) {
+ /* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
+ * Put SGMII PCS into power down mode and back up.
+ */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ }
+ }
+}
+
static int nxp_c45_config_init(struct phy_device *phydev)
{
int ret;
@@ -1607,6 +1674,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
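+ /* Match any TJA1120 revision: GENMASK(31, 4) masks off the low
+ * revision nibble of the PHY ID.
+ */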
+ if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
+ nxp_c45_tja1120_errata(phydev);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -2008,7 +2078,7 @@ static struct phy_driver nxp_c45_driver[] = {
module_phy_driver(nxp_c45_driver);
-static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
+static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
{ /*sentinel*/ },
diff --git a/drivers/net/phy/nxp-cbtx.c b/drivers/net/phy/nxp-cbtx.c
index 3d25491043a3..3286fcb4f47e 100644
--- a/drivers/net/phy/nxp-cbtx.c
+++ b/drivers/net/phy/nxp-cbtx.c
@@ -215,7 +215,7 @@ static struct phy_driver cbtx_driver[] = {
module_phy_driver(cbtx_driver);
-static struct mdio_device_id __maybe_unused cbtx_tbl[] = {
+static const struct mdio_device_id __maybe_unused cbtx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_CBTX_SJA1110) },
{ },
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2c263ae44b4f..ed7fa26bac8e 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -888,7 +888,7 @@ static struct phy_driver tja11xx_driver[] = {
module_phy_driver(tja11xx_driver);
-static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 944ae98ad110..0dac08e85304 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -1469,18 +1469,17 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
* @phydev: target phy_device struct
* @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
- * @is_enabled: variable to store EEE enabled/disabled configuration value
*
* Description: this function will read local and link partner PHY
* advertisements, compare them, and return the current EEE state.
*/
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled)
+ unsigned long *lp)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_enabled, eee_active;
+ bool eee_active;
int ret;
ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
@@ -1491,9 +1490,8 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
if (ret)
return ret;
- eee_enabled = !linkmode_empty(tmp_adv);
linkmode_and(common, tmp_adv, tmp_lp);
- if (eee_enabled && !linkmode_empty(common))
+ if (!linkmode_empty(tmp_adv) && !linkmode_empty(common))
eee_active = phy_check_valid(phydev->speed, phydev->duplex,
common);
else
@@ -1503,8 +1501,6 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- if (is_enabled)
- *is_enabled = eee_enabled;
return eee_active;
}
@@ -1521,15 +1517,13 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data)
{
- bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, data->advertised,
- data->lp_advertised, &is_enabled);
+ data->lp_advertised);
if (ret < 0)
return ret;
- data->eee_enabled = is_enabled;
data->eee_active = phydev->eee_active;
linkmode_copy(data->supported, phydev->supported_eee);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 0d20b534122b..d0c1718e2b16 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -616,6 +616,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics
+ * @phydev: Pointer to the PHY device
+ * @phy_stats: Pointer to ethtool_eth_phy_stats structure
+ * @phydev_stats: Pointer to ethtool_phy_stats structure
+ *
+ * Fetches PHY statistics using a kernel-defined interface for consistent
+ * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats,
+ * this function enforces a standardized format for better interoperability.
+ */
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ if (!phydev->drv || !phydev->drv->get_phy_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
+ * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY
+ * @phydev: Pointer to the PHY device
+ * @link_stats: Pointer to the structure to store extended link statistics
+ *
+ * Populates the ethtool_link_ext_stats structure with link down event counts
+ * and additional driver-specific link statistics, if available.
+ */
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ link_stats->link_down_events = READ_ONCE(phydev->link_down_events);
+
+ if (!phydev->drv || !phydev->drv->get_link_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_link_stats(phydev, link_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
* phy_ethtool_get_plca_cfg - Get PLCA RS configuration
* @phydev: the phy_device struct
* @plca_cfg: where to store the retrieved configuration
@@ -988,8 +1031,7 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev,
- NULL, NULL, NULL);
+ err = genphy_c45_eee_is_active(phydev, NULL, NULL);
phydev->eee_active = err > 0;
phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
phydev->eee_active;
@@ -1006,6 +1048,59 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
+ * phy_inband_caps - query which in-band signalling modes are supported
+ * @phydev: a pointer to a &struct phy_device
+ * @interface: the interface mode for the PHY
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PHY (e.g. because the PHY driver doesn't implement the method). Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * by the PHY for this interface mode.
+ */
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (phydev->drv && phydev->drv->inband_caps)
+ return phydev->drv->inband_caps(phydev, interface);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_inband_caps);
+
+/**
+ * phy_config_inband - configure the desired PHY in-band mode
+ * @phydev: the phy_device struct
+ * @modes: in-band modes to configure
+ *
+ * Description: disables, enables or enables-with-bypass in-band signalling
+ * between the PHY and host system.
+ *
+ * Returns: zero on success, or negative errno value.
+ */
+int phy_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int err;
+
+ if (!!(modes & LINK_INBAND_DISABLE) +
+ !!(modes & LINK_INBAND_ENABLE) +
+ !!(modes & LINK_INBAND_BYPASS) != 1)
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv)
+ err = -EIO;
+ else if (!phydev->drv->config_inband)
+ err = -EOPNOTSUPP;
+ else
+ err = phydev->drv->config_inband(phydev, modes);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_config_inband);
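
A usage sketch, not part of this patch (the helper name is illustrative): a caller can pair phy_inband_caps() with phy_config_inband(), which accepts exactly one LINK_INBAND_* mode per the check above.

static int example_enable_inband(struct phy_device *phydev,
				 phy_interface_t interface)
{
	unsigned int caps = phy_inband_caps(phydev, interface);

	/* Prefer full in-band signalling; fall back to bypass mode */
	if (caps & LINK_INBAND_ENABLE)
		return phy_config_inband(phydev, LINK_INBAND_ENABLE);
	if (caps & LINK_INBAND_BYPASS)
		return phy_config_inband(phydev, LINK_INBAND_BYPASS);

	return -EOPNOTSUPP;
}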
+
+/**
* _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -1347,6 +1442,23 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
+ * phy_update_stats - Update PHY device statistics if supported.
+ * @phydev: Pointer to the PHY device structure.
+ *
+ * If the PHY driver provides an update_stats callback, this function
+ * invokes it to update the PHY statistics. If not, it returns 0.
+ *
+ * Return: 0 on success, or a negative error code if the callback fails.
+ */
+static int phy_update_stats(struct phy_device *phydev)
+{
+ if (!phydev->drv->update_stats)
+ return 0;
+
+ return phydev->drv->update_stats(phydev);
+}
+
+/**
* phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
@@ -1415,6 +1527,9 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
func = &phy_check_link_status;
+
+ if (!err)
+ err = phy_update_stats(phydev);
break;
case PHY_CABLETEST:
err = phydev->drv->cable_test_get_status(phydev, &finished);
@@ -1589,6 +1704,47 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock
+ * @phydev: target phy_device struct
+ *
+ * Indicate whether the MAC can disable the transmit xMII clock while in LPI
+ * state. Returns 1 if the MAC may stop the transmit clock, 0 if the MAC must
+ * not stop the transmit clock, or negative error.
+ */
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev)
+{
+ int stat1;
+
+ stat1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (stat1 < 0)
+ return stat1;
+
+ return !!(stat1 & MDIO_PCS_STAT1_CLKSTOP_CAP);
+}
+EXPORT_SYMBOL_GPL(phy_eee_tx_clock_stop_capable);
+
+/**
+ * phy_eee_rx_clock_stop() - configure PHY receive clock in LPI
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: flag to indicate whether the clock can be stopped
+ *
+ * Configure whether the PHY can disable its receive clock during LPI mode.
+ * See IEEE 802.3 sections 22.2.2.2, 35.2.2.10, and 45.2.3.1.4.
+ *
+ * Returns: 0 or negative error.
+ */
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable)
+{
+ /* Configure whether the PHY stops its receive xMII
+ * clock while it is signaling LPI.
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN,
+ clk_stop_enable ? MDIO_PCS_CTRL1_CLKSTOP_EN : 0);
+}
+EXPORT_SYMBOL_GPL(phy_eee_rx_clock_stop);
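
A sketch of a MAC driver using both clock-stop helpers (the function name and the out-parameter are hypothetical); phylink_bringup_phy() later in this patch does essentially the same:

static int example_eee_clock_setup(struct phy_device *phydev,
				   bool *mac_tx_clk_stop)
{
	int ret;

	/* May the MAC gate its transmit xMII clock while in LPI? */
	ret = phy_eee_tx_clock_stop_capable(phydev);
	if (ret < 0)
		return ret;
	*mac_tx_clk_stop = ret > 0;

	/* Let the PHY stop its receive xMII clock while signalling LPI */
	ret = phy_eee_rx_clock_stop(phydev, true);
	return ret == -EOPNOTSUPP ? 0 : ret;
}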
+
+/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
* @clk_stop_enable: PHY may stop the clock during LPI
@@ -1605,18 +1761,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
if (ret < 0)
return ret;
if (!ret)
return -EPROTONOSUPPORT;
if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ ret = phy_eee_rx_clock_stop(phydev, true);
return ret < 0 ? ret : 0;
}
@@ -1649,8 +1801,8 @@ EXPORT_SYMBOL(phy_get_eee_err);
* @phydev: target phy_device struct
* @data: ethtool_keee data
*
- * Description: reports the Supported/Advertisement/LP Advertisement
- * capabilities, etc.
+ * Description: get the current EEE settings, filling in all members of
+ * @data.
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b26bb33cd1d4..46713d27412b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -32,6 +32,7 @@
#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -59,15 +60,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
-
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
-
const int phy_basic_ports_array[3] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
@@ -75,12 +70,7 @@ const int phy_basic_ports_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);
-const int phy_fibre_port_array[1] = {
- ETHTOOL_LINK_MODE_FIBRE_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_fibre_port_array);
-
-const int phy_all_ports_features_array[7] = {
+static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
@@ -89,7 +79,6 @@ const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
@@ -123,20 +112,6 @@ const int phy_10gbit_features_array[1] = {
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
-static const int phy_10gbit_fec_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
-};
-
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
-
-static const int phy_10gbit_full_features_array[] = {
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-
static const int phy_eee_cap1_features_array[] = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -198,20 +173,7 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_gbit_fibre_features);
- linkmode_set_bit_array(phy_fibre_port_array,
- ARRAY_SIZE(phy_fibre_port_array),
- phy_gbit_fibre_features);
-
- /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane*/
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- phy_gbit_all_ports_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phy_gbit_fibre_features);
/* 10/100 half/full + 1000 half/full + 10G full*/
linkmode_set_bit_array(phy_all_ports_features_array,
@@ -227,17 +189,6 @@ static void features_init(void)
ARRAY_SIZE(phy_10gbit_features_array),
phy_10gbit_features);
- /* 10/100/1000/10G full */
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_10gbit_full_features);
- linkmode_set_bit_array(phy_10gbit_full_features_array,
- ARRAY_SIZE(phy_10gbit_full_features_array),
- phy_10gbit_full_features);
- /* 10G FEC only */
- linkmode_set_bit_array(phy_10gbit_fec_features_array,
- ARRAY_SIZE(phy_10gbit_fec_features_array),
- phy_10gbit_fec_features);
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
@@ -1998,6 +1949,15 @@ void phy_detach(struct phy_device *phydev)
phy_suspend(phydev);
if (dev) {
+ struct hwtstamp_provider *hwprov;
+
+ hwprov = rtnl_dereference(dev->hwprov);
+ /* Disable timestamp if it is the one selected */
+ if (hwprov && hwprov->phydev == phydev) {
+ rcu_assign_pointer(dev->hwprov, NULL);
+ kfree_rcu(hwprov, rcu_head);
+ }
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
@@ -2994,6 +2954,23 @@ void phy_support_eee(struct phy_device *phydev)
EXPORT_SYMBOL(phy_support_eee);
/**
+ * phy_disable_eee - Disable EEE for the PHY
+ * @phydev: Target phy_device struct
+ *
+ * This function is used by MAC drivers for MACs which don't support EEE.
+ * It disables EEE at the PHY layer.
+ */
+void phy_disable_eee(struct phy_device *phydev)
+{
+ linkmode_zero(phydev->advertising_eee);
+ phydev->eee_cfg.tx_lpi_enabled = false;
+ phydev->eee_cfg.eee_enabled = false;
+ /* don't let userspace re-enable EEE advertisement */
+ linkmode_fill(phydev->eee_broken_modes);
+}
+EXPORT_SYMBOL_GPL(phy_disable_eee);
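
As a usage sketch (the link handler name is hypothetical), a MAC without LPI support would disable EEE right after connecting its PHY; because eee_broken_modes is filled, a later ethtool set-eee request cannot re-enable the advertisement:

	err = phy_connect_direct(ndev, phydev, example_link_handler,
				 PHY_INTERFACE_MODE_RGMII);
	if (err)
		return err;

	phy_disable_eee(phydev);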
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -3773,6 +3750,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
static const struct phylib_stubs __phylib_stubs = {
.hwtstamp_get = __phy_hwtstamp_get,
.hwtstamp_set = __phy_hwtstamp_set,
+ .get_phy_stats = __phy_ethtool_get_phy_stats,
+ .get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
};
static void phylib_register_stubs(void)
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
index 4a5d73002a1a..0e9e987f37dd 100644
--- a/drivers/net/phy/phy_link_topology.c
+++ b/drivers/net/phy/phy_link_topology.c
@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
xa_limit_32b, &topo->next_phy_index,
GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err;
return 0;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 30a654e98352..b00a315de060 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -56,9 +56,11 @@ struct phylink {
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
u8 cfg_link_an_mode; /* MLO_AN_xxx */
- u8 cur_link_an_mode;
+ u8 req_link_an_mode; /* Requested MLO_AN_xxx mode */
+ u8 act_link_an_mode; /* Active MLO_AN_xxx mode */
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_lpi);
/* The link configuration settings */
struct phylink_link_state link_config;
@@ -74,17 +76,26 @@ struct phylink {
struct mutex state_mutex;
struct phylink_link_state phy_state;
+ unsigned int phy_ib_mode;
struct work_struct resolve;
unsigned int pcs_neg_mode;
unsigned int pcs_state;
bool link_failed;
+ bool mac_supports_eee_ops;
+ bool mac_supports_eee;
+ bool phy_enable_tx_lpi;
+ bool mac_enable_tx_lpi;
+ bool mac_tx_clk_stop;
+ u32 mac_tx_lpi_timer;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
+
+ struct eee_config eee_cfg;
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -174,6 +185,24 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static const char *phylink_pcs_mode_str(unsigned int mode)
+{
+ if (!mode)
+ return "none";
+
+ if (mode & PHYLINK_PCS_NEG_OUTBAND)
+ return "outband";
+
+ if (mode & PHYLINK_PCS_NEG_INBAND) {
+ if (mode & PHYLINK_PCS_NEG_ENABLED)
+ return "inband,an-enabled";
+ else
+ return "inband,an-disabled";
+ }
+
+ return "unknown";
+}
+
static unsigned int phylink_interface_signal_rate(phy_interface_t interface)
{
switch (interface) {
@@ -671,6 +700,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return -EINVAL;
}
+ /* Ensure that this PCS supports the interface which the MAC
+ * returned it for. It is an error for the MAC to return a PCS
+ * that does not support the interface mode.
+ */
+ if (!phy_interface_empty(pcs->supported_interfaces) &&
+ !test_bit(state->interface, pcs->supported_interfaces)) {
+ phylink_err(pl, "MAC returned PCS which does not support %s\n",
+ phy_modes(state->interface));
+ return -EINVAL;
+ }
+
/* Validate the link parameters with the PCS */
if (pcs->ops->pcs_validate) {
ret = pcs->ops->pcs_validate(pcs, supported, state);
@@ -971,6 +1011,15 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state)
}
}
+static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (pcs && pcs->ops->pcs_inband_caps)
+ return pcs->ops->pcs_inband_caps(pcs, interface);
+
+ return 0;
+}
+
static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
@@ -1024,6 +1073,24 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+/* Query inband for a specific interface mode, asking the MAC for the
+ * PCS which will be used to handle the interface mode.
+ */
+static unsigned int phylink_inband_caps(struct phylink *pl,
+ phy_interface_t interface)
+{
+ struct phylink_pcs *pcs;
+
+ if (!pl->mac_ops->mac_select_pcs)
+ return 0;
+
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, interface);
+ if (!pcs)
+ return 0;
+
+ return phylink_pcs_inband_caps(pcs, interface);
+}
+
static void phylink_pcs_poll_stop(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_INBAND)
@@ -1065,13 +1132,13 @@ static void phylink_mac_config(struct phylink *pl,
phylink_dbg(pl,
"%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
- __func__, phylink_an_mode_str(pl->cur_link_an_mode),
+ __func__, phylink_an_mode_str(pl->act_link_an_mode),
phy_modes(st.interface),
phy_rate_matching_to_str(st.rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
st.pause);
- pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
+ pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st);
}
static void phylink_pcs_an_restart(struct phylink *pl)
@@ -1079,13 +1146,14 @@ static void phylink_pcs_an_restart(struct phylink *pl)
if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
pl->link_config.advertising) &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
- phylink_autoneg_inband(pl->cur_link_an_mode))
+ phylink_autoneg_inband(pl->act_link_an_mode))
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
/**
* phylink_pcs_neg_mode() - helper to determine PCS inband mode
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pcs: a pointer to &struct phylink_pcs
* @interface: interface mode to be used
* @advertising: advertisement ethtool link mode mask
*
@@ -1102,11 +1170,21 @@ static void phylink_pcs_an_restart(struct phylink *pl)
* Note: this is for cases where the PCS itself is involved in negotiation
* (e.g. Clause 37, SGMII and similar), not Clause 73.
*/
-static unsigned int phylink_pcs_neg_mode(unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising)
+static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- unsigned int neg_mode;
+ unsigned int pcs_ib_caps = 0;
+ unsigned int phy_ib_caps = 0;
+ unsigned int neg_mode, mode;
+ enum {
+ INBAND_CISCO_SGMII,
+ INBAND_BASEX,
+ } type;
+
+ mode = pl->req_link_an_mode;
+
+ pl->phy_ib_mode = 0;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1119,10 +1197,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* inband communication. Note: there exist PHYs that run
* with SGMII but do not send the inband data.
*/
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ type = INBAND_CISCO_SGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
@@ -1133,21 +1208,143 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* as well, but drivers may not support this, so may
* need to override this.
*/
- if (!phylink_autoneg_inband(mode))
+ type = INBAND_BASEX;
+ break;
+
+ default:
+ pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
+ pl->act_link_an_mode = mode;
+ return;
+ }
+
+ if (pcs)
+ pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
+
+ if (pl->phydev)
+ phy_ib_caps = phy_inband_caps(pl->phydev, interface);
+
+ phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n",
+ phy_modes(interface), pcs_ib_caps, phy_ib_caps);
+
+ if (!phylink_autoneg_inband(mode)) {
+ bool pcs_ib_only = false;
+ bool phy_ib_only = false;
+
+ if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) {
+ /* PCS supports reporting in-band capabilities, and
+ * supports more than disable mode.
+ */
+ if (pcs_ib_caps & LINK_INBAND_DISABLE)
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else if (pcs_ib_caps & LINK_INBAND_ENABLE)
+ pcs_ib_only = true;
+ }
+
+ if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) {
+ /* PHY supports in-band capabilities, and supports
+ * more than disable mode.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+ else if (phy_ib_caps & LINK_INBAND_ENABLE)
+ phy_ib_only = true;
+ }
+
+ /* If either the PCS or PHY requires inband to be enabled,
+ * this is an invalid configuration. Provide a diagnostic
+ * message for this case, but don't try to force the issue.
+ */
+ if (pcs_ib_only || phy_ib_only)
+ phylink_warn(pl,
+ "firmware wants %s mode, but %s%s%s requires inband\n",
+ phylink_an_mode_str(mode),
+ pcs_ib_only ? "PCS" : "",
+ pcs_ib_only && phy_ib_only ? " and " : "",
+ phy_ib_only ? "PHY" : "");
+
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ } else if (type == INBAND_CISCO_SGMII || pl->phydev) {
+ /* For SGMII modes which are designed to be used with PHYs, or
+ * Base-X with a PHY, we try to use in-band mode wherever
+ * possible. However, there are some PHYs e.g. BCM84881 which
+ * do not support in-band.
+ */
+ const unsigned int inband_ok = LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+ const unsigned int outband_ok = LINK_INBAND_DISABLE |
+ LINK_INBAND_BYPASS;
+ /* PCS PHY
+ * D E D E
+ * 0 0 0 0 no information inband enabled
+ * 1 0 0 0 pcs doesn't support outband
+ * 0 1 0 0 pcs required inband enabled
+ * 1 1 0 0 pcs optional inband enabled
+ * 0 0 1 0 phy doesn't support outband
+ * 1 0 1 0 pcs+phy doesn't support outband
+ * 0 1 1 0 pcs required, phy doesn't support, invalid
+ * 1 1 1 0 pcs optional, phy doesn't support, outband
+ * 0 0 0 1 phy required inband enabled
+ * 1 0 0 1 pcs doesn't support, phy required, invalid
+ * 0 1 0 1 pcs+phy required inband enabled
+ * 1 1 0 1 pcs optional, phy required inband enabled
+ * 0 0 1 1 phy optional inband enabled
+ * 1 0 1 1 pcs doesn't support, phy optional, outband
+ * 0 1 1 1 pcs required, phy optional inband enabled
+ * 1 1 1 1 pcs+phy optional inband enabled
+ */
+ if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & inband_ok)) {
+ /* In-band supported or unknown at both ends. Enable
+ * in-band mode with or without bypass at the PHY.
+ */
+ if (phy_ib_caps & LINK_INBAND_ENABLE)
+ pl->phy_ib_mode = LINK_INBAND_ENABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ } else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & outband_ok)) {
+ /* In-band is not supported at one (or both) ends, but
+ * in-band bypass at the other end is possible.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ if (pl->phydev)
+ mode = MLO_AN_PHY;
+ } else {
+ /* invalid */
+ phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band\n",
+ phy_modes(interface));
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ }
+ } else {
+ /* For Base-X without a PHY */
+ if (pcs_ib_caps == LINK_INBAND_DISABLE)
+ /* If the PCS doesn't support inband, then inband must
+ * be disabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
+ else if (pcs_ib_caps == LINK_INBAND_ENABLE)
+ /* If the PCS requires inband, then inband must always
+ * be enabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
advertising))
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
- break;
-
- default:
- neg_mode = PHYLINK_PCS_NEG_NONE;
- break;
}
- return neg_mode;
+ pl->pcs_neg_mode = neg_mode;
+ pl->act_link_an_mode = mode;
}
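
A worked example of the fallback above, with assumed capability values: a BCM84881-style PHY reports only LINK_INBAND_DISABLE while the PCS reports LINK_INBAND_DISABLE | LINK_INBAND_ENABLE. The in-band test fails because the PHY offers neither LINK_INBAND_ENABLE nor LINK_INBAND_BYPASS, the out-of-band test passes because both ends allow disable, so neg_mode becomes PHYLINK_PCS_NEG_OUTBAND, phy_ib_mode becomes LINK_INBAND_DISABLE, and since a PHY is attached the active AN mode falls back to MLO_AN_PHY.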
static void phylink_major_config(struct phylink *pl, bool restart,
@@ -1159,11 +1356,9 @@ static void phylink_major_config(struct phylink *pl, bool restart,
unsigned int neg_mode;
int err;
- phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
-
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- state->interface,
- state->advertising);
+ phylink_dbg(pl, "major config, requested %s/%s\n",
+ phylink_an_mode_str(pl->req_link_an_mode),
+ phy_modes(state->interface));
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
@@ -1177,10 +1372,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs_changed = pl->pcs != pcs;
}
+ phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
+
+ phylink_dbg(pl, "major config, active %s/%s/%s\n",
+ phylink_an_mode_str(pl->act_link_an_mode),
+ phylink_pcs_mode_str(pl->pcs_neg_mode),
+ phy_modes(state->interface));
+
phylink_pcs_poll_stop(pl);
if (pl->mac_ops->mac_prepare) {
- err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
@@ -1214,7 +1416,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1230,13 +1432,20 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_pcs_an_restart(pl);
if (pl->mac_ops->mac_finish) {
- err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0)
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+ if (pl->phydev && pl->phy_ib_mode) {
+ err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
+ if (err < 0)
+ phylink_err(pl, "phy_config_inband: %pe\n",
+ ERR_PTR(err));
+ }
+
if (pl->sfp_bus) {
rate_kbd = phylink_interface_signal_rate(state->interface);
if (rate_kbd)
@@ -1261,17 +1470,16 @@ static int phylink_change_inband_advert(struct phylink *pl)
return 0;
phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
pl->link_config.pause);
/* Recompute the PCS neg mode */
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising);
+ phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
+ pl->link_config.advertising);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1293,12 +1501,24 @@ static int phylink_change_inband_advert(struct phylink *pl)
static void phylink_mac_pcs_get_state(struct phylink *pl,
struct phylink_link_state *state)
{
+ struct phylink_pcs *pcs;
+ bool autoneg;
+
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->rate_matching = pl->link_config.rate_matching;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising)) {
+ state->an_complete = 0;
+ state->link = 1;
+
+ pcs = pl->pcs;
+ if (!pcs || pcs->neg_mode)
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ else
+ autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising);
+
+ if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -1307,11 +1527,9 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->duplex = pl->link_config.duplex;
state->pause = pl->link_config.pause;
}
- state->an_complete = 0;
- state->link = 1;
- if (pl->pcs)
- pl->pcs->ops->pcs_get_state(pl->pcs, state);
+ if (pcs)
+ pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
state->link = 0;
}
@@ -1336,7 +1554,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
- switch (pl->cur_link_an_mode) {
+ switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
break;
@@ -1375,6 +1593,39 @@ static const char *phylink_pause_to_str(int pause)
}
}
+static void phylink_deactivate_lpi(struct phylink *pl)
+{
+ if (pl->mac_enable_tx_lpi) {
+ pl->mac_enable_tx_lpi = false;
+
+ phylink_dbg(pl, "disabling LPI\n");
+
+ pl->mac_ops->mac_disable_tx_lpi(pl->config);
+ }
+}
+
+static void phylink_activate_lpi(struct phylink *pl)
+{
+ int err;
+
+ if (!test_bit(pl->cur_interface, pl->config->lpi_interfaces)) {
+ phylink_dbg(pl, "MAC does not support LPI with %s\n",
+ phy_modes(pl->cur_interface));
+ return;
+ }
+
+ phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n",
+ pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop);
+
+ err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer,
+ pl->mac_tx_clk_stop);
+ if (!err)
+ pl->mac_enable_tx_lpi = true;
+ else
+ phylink_err(pl, "%ps() failed: %pe\n",
+ pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err));
+}
+
static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
@@ -1410,17 +1661,20 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
pl->cur_interface, speed, duplex,
!!(link_state.pause & MLO_PAUSE_TX), rx_pause);
+ if (pl->mac_supports_eee && pl->phy_enable_tx_lpi)
+ phylink_activate_lpi(pl);
+
if (ndev)
netif_carrier_on(ndev);
@@ -1437,25 +1691,29 @@ static void phylink_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+
+ phylink_deactivate_lpi(pl);
+
+ pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
+static bool phylink_link_is_up(struct phylink *pl)
+{
+ return pl->netdev ? netif_carrier_ok(pl->netdev) : pl->old_link_state;
+}
+
static void phylink_resolve(struct work_struct *w)
{
struct phylink *pl = container_of(w, struct phylink, resolve);
struct phylink_link_state link_state;
- struct net_device *ndev = pl->netdev;
bool mac_config = false;
bool retrigger = false;
bool cur_link_state;
mutex_lock(&pl->state_mutex);
- if (pl->netdev)
- cur_link_state = netif_carrier_ok(ndev);
- else
- cur_link_state = pl->old_link_state;
+ cur_link_state = phylink_link_is_up(pl);
if (pl->phylink_disable_state) {
pl->link_failed = false;
@@ -1463,10 +1721,10 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->link_failed) {
link_state.link = false;
retrigger = true;
- } else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ } else if (pl->act_link_an_mode == MLO_AN_FIXED) {
phylink_get_fixed_state(pl, &link_state);
mac_config = link_state.link;
- } else if (pl->cur_link_an_mode == MLO_AN_PHY) {
+ } else if (pl->act_link_an_mode == MLO_AN_PHY) {
link_state = pl->phy_state;
mac_config = link_state.link;
} else {
@@ -1520,7 +1778,7 @@ static void phylink_resolve(struct work_struct *w)
}
}
- if (pl->cur_link_an_mode != MLO_AN_FIXED)
+ if (pl->act_link_an_mode != MLO_AN_FIXED)
phylink_apply_manual_flow(pl, &link_state);
if (mac_config) {
@@ -1644,7 +1902,7 @@ int phylink_set_fixed_link(struct phylink *pl,
pl->link_config.an_complete = 1;
pl->cfg_link_an_mode = MLO_AN_FIXED;
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
return 0;
}
@@ -1699,6 +1957,17 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->mac_supports_eee_ops = mac_ops->mac_disable_tx_lpi &&
+ mac_ops->mac_enable_tx_lpi;
+ pl->mac_supports_eee = pl->mac_supports_eee_ops &&
+ pl->config->lpi_capabilities &&
+ !phy_interface_empty(pl->config->lpi_interfaces);
+
+ /* Set the default EEE configuration */
+ pl->eee_cfg.eee_enabled = pl->config->eee_enabled_default;
+ pl->eee_cfg.tx_lpi_enabled = pl->eee_cfg.eee_enabled;
+ pl->eee_cfg.tx_lpi_timer = pl->config->lpi_timer_default;
+
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1732,7 +2001,7 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
@@ -1803,16 +2072,22 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
pl->phy_state.link = up;
if (!up)
pl->link_failed = true;
+
+ /* Get the LPI state from phylib */
+ pl->phy_enable_tx_lpi = phydev->enable_tx_lpi;
+ pl->mac_tx_lpi_timer = phydev->eee_cfg.tx_lpi_timer;
mutex_unlock(&pl->state_mutex);
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s/%slpi\n",
+ up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
phy_rate_matching_to_str(phydev->rate_matching),
- phylink_pause_to_str(pl->phy_state.pause));
+ phylink_pause_to_str(pl->phy_state.pause),
+ phydev->enable_tx_lpi ? "" : "no");
}
static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
@@ -1942,6 +2217,36 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
/* Restrict the phy advertisement according to the MAC support. */
linkmode_copy(phy->advertising, config.advertising);
+
+ /* If the MAC supports phylink managed EEE, restrict the EEE
+ * advertisement according to the MAC's LPI capabilities.
+ */
+ if (pl->mac_supports_eee) {
+ /* If EEE is enabled, then we need to call phy_support_eee()
+ * to ensure that the advertising mask is appropriately set.
+ * This also enables EEE at the PHY.
+ */
+ if (pl->eee_cfg.eee_enabled)
+ phy_support_eee(phy);
+
+ phy->eee_cfg.tx_lpi_enabled = pl->eee_cfg.tx_lpi_enabled;
+ phy->eee_cfg.tx_lpi_timer = pl->eee_cfg.tx_lpi_timer;
+
+ /* Convert the MAC's LPI capabilities to linkmodes */
+ linkmode_zero(pl->supported_lpi);
+ phylink_caps_to_linkmodes(pl->supported_lpi,
+ pl->config->lpi_capabilities);
+
+ /* Restrict the PHY's EEE support/advertisement to the modes
+ * that the MAC supports.
+ */
+ linkmode_and(phy->advertising_eee, phy->advertising_eee,
+ pl->supported_lpi);
+ } else if (pl->mac_supports_eee_ops) {
+ /* MAC supports phylink EEE, but wants EEE always disabled. */
+ phy_disable_eee(phy);
+ }
+
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
@@ -1957,7 +2262,20 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
if (pl->config->mac_managed_pm)
phy->mac_managed_pm = true;
- return 0;
+ /* Allow the MAC to stop its clock if the PHY has the capability */
+ pl->mac_tx_clk_stop = phy_eee_tx_clock_stop_capable(phy) > 0;
+
+ if (pl->mac_supports_eee_ops) {
+ /* Explicitly configure whether the PHY is allowed to stop its
+ * receive clock.
+ */
+ ret = phy_eee_rx_clock_stop(phy,
+ pl->config->eee_rx_clk_stop_enable);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+
+ return ret;
}
static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
@@ -2114,6 +2432,8 @@ void phylink_disconnect_phy(struct phylink *pl)
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = NULL;
+ pl->phy_enable_tx_lpi = false;
+ pl->mac_tx_clk_stop = false;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
flush_work(&pl->resolve);
@@ -2189,7 +2509,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -2474,7 +2794,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -2505,6 +2825,26 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl,
+ phy_interface_t interface,
+ unsigned long *adv)
+{
+ unsigned int inband = phylink_inband_caps(pl, interface);
+ unsigned int mask;
+
+ /* If the PCS doesn't implement inband support, be permissive. */
+ if (!inband)
+ return true;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv))
+ mask = LINK_INBAND_ENABLE;
+ else
+ mask = LINK_INBAND_DISABLE;
+
+ /* Check whether the PCS implements the required mode */
+ return !!(inband & mask);
+}
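
For example (capability values assumed): if pcs_inband_caps() reports only LINK_INBAND_ENABLE for 1000base-X, a ksettings request with the Autoneg bit cleared computes mask = LINK_INBAND_DISABLE, the AND is zero, and the caller below rejects the settings with -EINVAL; a PCS that reports nothing remains permissive.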
+
/**
* phylink_ethtool_ksettings_set() - set the link settings
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2566,7 +2906,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex)
return -EINVAL;
@@ -2582,7 +2922,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* is our default case) but do not allow the advertisement to
* be changed. If the advertisement matches, simply return.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (!linkmode_equal(config.advertising,
pl->link_config.advertising))
return -EINVAL;
@@ -2617,7 +2957,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config)) {
phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
@@ -2635,6 +2975,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* Validate the autonegotiation state. We don't have a PHY in this
+ * situation, so the PCS is the media-facing entity.
+ */
+ if (!phylink_validate_pcs_inband_autoneg(pl, config.interface,
+ config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2717,7 +3064,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
- if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ if (pl->req_link_an_mode == MLO_AN_FIXED)
return -EOPNOTSUPP;
if (!phylink_test(pl->supported, Pause) &&
@@ -2841,8 +3188,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
ASSERT_RTNL();
- if (pl->phydev)
+ if (pl->mac_supports_eee_ops && !pl->mac_supports_eee)
+ return ret;
+
+ if (pl->phydev) {
ret = phy_ethtool_get_eee(pl->phydev, eee);
+ /* Restrict supported linkmode mask */
+ if (ret == 0 && pl->mac_supports_eee_ops)
+ linkmode_and(eee->supported, eee->supported,
+ pl->supported_lpi);
+ }
return ret;
}
@@ -2855,12 +3210,29 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
*/
int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
+ bool mac_eee = pl->mac_supports_eee;
int ret = -EOPNOTSUPP;
ASSERT_RTNL();
- if (pl->phydev)
+ phylink_dbg(pl, "mac %s phylink EEE%s, adv %*pbl, LPI%s timer %uus\n",
+ mac_eee ? "supports" : "does not support",
+ eee->eee_enabled ? ", enabled" : "",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, eee->advertised,
+ eee->tx_lpi_enabled ? " enabled" : "", eee->tx_lpi_timer);
+
+ if (pl->mac_supports_eee_ops && !mac_eee)
+ return ret;
+
+ if (pl->phydev) {
+ /* Restrict advertisement mask */
+ if (pl->mac_supports_eee_ops)
+ linkmode_and(eee->advertised, eee->advertised,
+ pl->supported_lpi);
ret = phy_ethtool_set_eee(pl->phydev, eee);
+ if (ret == 0)
+ eee_to_eeecfg(&pl->eee_cfg, eee);
+ }
return ret;
}
@@ -2981,7 +3353,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -3006,7 +3378,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -3176,11 +3548,11 @@ static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
return interface;
}
-static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void phylink_sfp_set_config(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ bool changed)
{
- bool changed = false;
+ u8 mode = MLO_AN_INBAND;
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
phylink_an_mode_str(mode), phy_modes(state->interface),
@@ -3196,9 +3568,9 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
changed = true;
}
- if (pl->cur_link_an_mode != mode ||
+ if (pl->req_link_an_mode != mode ||
pl->link_config.interface != state->interface) {
- pl->cur_link_an_mode = mode;
+ pl->req_link_an_mode = mode;
pl->link_config.interface = state->interface;
changed = true;
@@ -3213,8 +3585,7 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
phylink_mac_initial_config(pl, false);
}
-static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
- struct phy_device *phy)
+static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
@@ -3258,7 +3629,7 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, mode, support, &config);
+ phylink_sfp_set_config(pl, support, &config, true);
return 0;
}
@@ -3314,6 +3685,12 @@ static int phylink_sfp_config_optical(struct phylink *pl)
phylink_dbg(pl, "optical SFP: chosen %s interface\n",
phy_modes(interface));
+ if (!phylink_validate_pcs_inband_autoneg(pl, interface,
+ config.advertising)) {
+ phylink_err(pl, "autoneg setting not compatible with PCS");
+ return -EINVAL;
+ }
+
config.interface = interface;
/* Ignore errors if we're expecting a PHY to attach later */
@@ -3327,7 +3704,7 @@ static int phylink_sfp_config_optical(struct phylink *pl)
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
+ phylink_sfp_set_config(pl, pl->sfp_support, &config, false);
return 0;
}
@@ -3398,19 +3775,16 @@ static void phylink_sfp_link_up(void *upstream)
phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
-/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
- * or 802.3z control word, so inband will not work.
- */
-static bool phylink_phy_no_inband(struct phy_device *phy)
-{
- return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1],
- 0xae025150, 0xfffffff0);
-}
-
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
- u8 mode;
+
+ if (!phy->drv) {
+ phylink_err(pl, "PHY %s (id 0x%.8lx) has no driver loaded\n",
+ phydev_name(phy), (unsigned long)phy->phy_id);
+ phylink_err(pl, "Drivers which handle known common cases: CONFIG_BCM84881_PHY, CONFIG_MARVELL_PHY\n");
+ return -EINVAL;
+ }
/*
* This is the new way of dealing with flow control for PHYs,
@@ -3421,17 +3795,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
*/
phy_support_asym_pause(phy);
- if (phylink_phy_no_inband(phy))
- mode = MLO_AN_PHY;
- else
- mode = MLO_AN_INBAND;
-
/* Set the PHY's host supported interfaces */
phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
pl->config->supported_interfaces);
/* Do the initial configuration */
- return phylink_sfp_config_phy(pl, mode, phy);
+ return phylink_sfp_config_phy(pl, phy);
}
static void phylink_sfp_disconnect_phy(void *upstream,
@@ -3626,6 +3995,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
/**
* phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @bmsr: The value of the %MII_BMSR register
* @lpa: The value of the %MII_LPA register
*
@@ -3638,32 +4008,45 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
* accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa)
+ unsigned int neg_mode, u16 bmsr, u16 lpa)
{
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- /* If there is no link or autonegotiation is disabled, the LP advertisement
- * data is not meaningful, so don't go any further.
- */
- if (!state->link || !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising))
+
+ /* If the link is down, the advertisement data is undefined. */
+ if (!state->link)
return;
switch (state->interface) {
case PHY_INTERFACE_MODE_1000BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_1000);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ } else {
+ state->speed = SPEED_1000;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_2500);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ } else {
+ state->speed = SPEED_2500;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_decode_sgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_sgmii_word(state, lpa);
break;
+
case PHY_INTERFACE_MODE_QUSGMII:
- phylink_decode_usgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_usgmii_word(state, lpa);
break;
default:
@@ -3676,6 +4059,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
@@ -3688,6 +4072,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
* structure.
*/
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int bmsr, lpa;
@@ -3699,7 +4084,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
return;
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
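Callers of this helper now forward the negotiation mode they receive from phylink. A sketch of a clause-22 PCS .pcs_get_state() implementation doing so, assuming the method gains a neg_mode argument in this series (mypcs is a hypothetical driver type):

static void mypcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			    struct phylink_link_state *state)
{
	struct mypcs *mp = container_of(pcs, struct mypcs, pcs);

	/* Decode BMSR/LPA; with inband disabled, the helper now fills
	 * in fixed speed/duplex instead of parsing the LP word.
	 */
	phylink_mii_c22_pcs_get_state(mp->mdio, neg_mode, state);
}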
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 105602581a03..26350b962890 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -1098,7 +1098,7 @@ static struct phy_driver at803x_driver[] = {
module_phy_driver(at803x_driver);
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+static const struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index bd8a51ec0ecd..2ad8c2586d64 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
if (!priv->dac_full_amplitude)
control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
- if (!priv->dac_full_amplitude)
+ if (!priv->dac_full_bias_current)
control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
if (!priv->dac_disable_bias_current_tweak)
control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
@@ -828,7 +828,7 @@ static struct phy_driver qca807x_drivers[] = {
};
module_phy_driver(qca807x_drivers);
-static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca807x_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
{ }
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
index 5048304ccc9e..71498c518f0f 100644
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -655,7 +655,7 @@ static struct phy_driver qca808x_driver[] = {
module_phy_driver(qca808x_driver);
-static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca808x_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 7a5039920b9f..bc70ed8efd86 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -259,7 +259,7 @@ static struct phy_driver qca83xx_driver[] = {
module_phy_driver(qca83xx_driver);
-static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 30d15f7c9b03..7b70ba6cab66 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -155,7 +155,7 @@ static struct phy_driver qs6612_driver[] = { {
module_phy_driver(qs6612_driver);
-static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
+static const struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig
new file mode 100644
index 000000000000..31935f147d87
--- /dev/null
+++ b/drivers/net/phy/realtek/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config REALTEK_PHY
+ tristate "Realtek PHYs"
+ help
+ Currently supports the RTL821x/RTL822x and Fast Ethernet PHYs
+
+config REALTEK_PHY_HWMON
+ def_bool REALTEK_PHY && HWMON
+ depends on !(REALTEK_PHY=y && HWMON=m)
+ help
+ Optional hwmon support for the temperature sensor
diff --git a/drivers/net/phy/realtek/Makefile b/drivers/net/phy/realtek/Makefile
new file mode 100644
index 000000000000..dd21cf87f2f1
--- /dev/null
+++ b/drivers/net/phy/realtek/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+realtek-y += realtek_main.o
+realtek-$(CONFIG_REALTEK_PHY_HWMON) += realtek_hwmon.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/realtek/realtek.h b/drivers/net/phy/realtek/realtek.h
new file mode 100644
index 000000000000..a39b44fa18a0
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef REALTEK_H
+#define REALTEK_H
+
+#include <linux/phy.h>
+
+int rtl822x_hwmon_init(struct phy_device *phydev);
+
+#endif /* REALTEK_H */
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
new file mode 100644
index 000000000000..1ecb410bb941
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON support for Realtek PHYs
+ *
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/phy.h>
+
+#include "realtek.h"
+
+#define RTL822X_VND2_TSALRM 0xa662
+#define RTL822X_VND2_TSRR 0xbd84
+#define RTL822X_VND2_TSSR 0xb54c
+
+static int rtl822x_hwmon_get_temp(int raw)
+{
+ if (raw >= 512)
+ raw -= 1024;
+
+ return 1000 * raw / 2;
+}
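The sensor reports a 10-bit two's-complement value in 0.5 degree steps. A standalone check of the conversion above, with illustrative raw values:

#include <assert.h>

static int temp_raw_to_mdegc(int raw)
{
	if (raw >= 512)		/* sign-extend the 10-bit value */
		raw -= 1024;
	return 1000 * raw / 2;	/* 0.5 degC per LSB, in m degC */
}

int main(void)
{
	assert(temp_raw_to_mdegc(92) == 46000);	  /* +46.0 degC */
	assert(temp_raw_to_mdegc(1016) == -4000); /*  -4.0 degC */
	return 0;
}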
+
+static int rtl822x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int raw;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSRR) & 0x3ff;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ case hwmon_temp_max:
+ /* Chip reduces speed to 1G if threshold is exceeded */
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSSR) >> 6;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops rtl822x_hwmon_ops = {
+ .visible = 0444,
+ .read = rtl822x_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const rtl822x_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
+ NULL
+};
+
+static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
+ .ops = &rtl822x_hwmon_ops,
+ .info = rtl822x_hwmon_info,
+};
+
+int rtl822x_hwmon_init(struct phy_device *phydev)
+{
+ struct device *hwdev, *dev = &phydev->mdio.dev;
+ const char *name;
+
+ /* Ensure over-temp alarm is reset. */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
+
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+ &rtl822x_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwdev);
+}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek/realtek_main.c
index f65d7f1f348e..572a933636b0 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -14,6 +14,8 @@
#include <linux/delay.h>
#include <linux/clk.h>
+#include "realtek.h"
+
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX BIT(13)
#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
@@ -736,7 +738,11 @@ static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1));
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
rtl821x_write_page(phydev, 0xa5c);
ret = __phy_read(phydev, 0x12);
rtl821x_write_page(phydev, 0);
@@ -760,7 +766,11 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
rtl821x_write_page(phydev, 0xa5d);
ret = __phy_write(phydev, 0x10, val);
rtl821x_write_page(phydev, 0);
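To illustrate the address split used by the two MDIO_MMD_VEND2 branches above, take RTL822X_VND2_TSRR (0xbd84) from realtek_hwmon.c:

/* page = regnum >> 4                  = 0xbd84 >> 4 = 0xbd8
 * reg  = 0x10 + ((regnum & 0xf) >> 1) = 0x10 + (0x4 >> 1) = 0x12
 *
 * i.e. each vendor page exposes eight 16-bit registers at paged
 * addresses 0x10..0x17, selected by the low nibble of regnum.
 */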
@@ -812,6 +822,15 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
+static int rtl822x_probe(struct phy_device *phydev)
+{
+ if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
+ phydev->phy_id != RTL_GENERIC_PHYID)
+ return rtl822x_hwmon_init(phydev);
+
+ return 0;
+}
+
static int rtl822xb_config_init(struct phy_device *phydev)
{
bool has_2500, has_sgmii;
@@ -952,15 +971,15 @@ static int rtl822x_read_status(struct phy_device *phydev)
{
int lpadv, ret;
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+
ret = rtlgen_read_status(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_DISABLE ||
- !phydev->autoneg_complete) {
- mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ !phydev->autoneg_complete)
return 0;
- }
lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
if (lpadv < 0)
@@ -1023,26 +1042,25 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
{
int ret, val;
- ret = genphy_c45_read_status(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->autoneg == AUTONEG_DISABLE ||
- !genphy_c45_aneg_done(phydev))
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
-
/* Vendor register as C45 has no standardized support for 1000BaseT */
- if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
RTL822X_VND2_GANLPAR);
if (val < 0)
return val;
-
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ } else {
+ val = 0;
}
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
- if (!phydev->link)
+ ret = genphy_c45_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!phydev->link) {
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
return 0;
+ }
/* Read actual speed from vendor register. */
val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR);
@@ -1456,6 +1474,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1468,6 +1487,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1478,6 +1498,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1490,6 +1511,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
.name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1500,6 +1522,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8251b_c45_match_phy_device,
.name = "RTL8251B 5Gbps PHY",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
@@ -1511,6 +1534,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtl_internal_nbaset_match_phy_device,
.name = "Realtek Internal NBASE-T PHY",
.flags = PHY_IS_INTERNAL,
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index bb13e75183ee..b338f385e15a 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -188,7 +188,7 @@ static struct phy_driver rockchip_phy_driver[] = {
module_phy_driver(rockchip_phy_driver);
-static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
{ INTERNAL_EPHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e1853599d9ba..31463b9e5697 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -838,7 +838,7 @@ MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused smsc_tbl[] = {
+static const struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
{ 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 7196e927c2cd..076a370be849 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -289,7 +289,7 @@ static int ks8995_reset(struct ks8995_switch *ks)
}
static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -301,7 +301,7 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -401,8 +401,8 @@ static const struct bin_attribute ks8995_registers_attr = {
.mode = 0600,
},
.size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
+ .read_new = ks8995_registers_read,
+ .write_new = ks8995_registers_write,
};
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 309e4c3496c4..d4835d4c50e0 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -124,7 +124,7 @@ static struct phy_driver ste10xp_pdriver[] = {
module_phy_driver(ste10xp_pdriver);
-static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
+static const struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
{ STE100P_PHY_ID, 0xffffffff },
{ }
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 8057ea8dbc21..752d4bf7bb99 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -87,7 +87,7 @@ static struct phy_driver teranetics_driver[] = {
module_phy_driver(teranetics_driver);
-static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+static const struct mdio_device_id __maybe_unused teranetics_tbl[] = {
{ PHY_ID_TN2020, 0xffffffff },
{ }
};
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 38834347a427..900cb756c366 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -90,7 +90,7 @@ static struct phy_driver upd60620_driver[1] = { {
module_phy_driver(upd60620_driver);
-static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+static const struct mdio_device_id __maybe_unused upd60620_tbl[] = {
{ UPD60620_PHY_ID, 0xfffffffe },
{ }
};
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2377179de017..b1b7bbba284e 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -674,7 +674,7 @@ static struct phy_driver vsc82xx_driver[] = {
module_phy_driver(vsc82xx_driver);
-static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
+static const struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4583e15ad03a..1420c4efa48e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -72,6 +72,17 @@
#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4
+/* The filter instructions generated by libpcap are constructed
+ * assuming a four-byte PPP header on each packet, where the last
+ * two bytes are the protocol field defined in the RFC and the
+ * first byte indicates the direction.
+ * The second byte is currently unused, but we still need to
+ * initialize it so that crafted BPF programs cannot read
+ * uninitialized data through it.
+ */
+#define PPP_FILTER_OUTBOUND_TAG 0x0100
+#define PPP_FILTER_INBOUND_TAG 0x0000
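A minimal sketch of how these tags are used below: two bytes are pushed in front of the PPP protocol field so that BPF programs compiled against libpcap's four-byte header layout work unchanged (ppp_filter_tag is a hypothetical helper, not part of this patch):

static void ppp_filter_tag(struct sk_buff *skb, bool outbound)
{
	*(__be16 *)skb_push(skb, 2) = htons(outbound ?
					    PPP_FILTER_OUTBOUND_TAG :
					    PPP_FILTER_INBOUND_TAG);
	/* skb->data now matches the assumed layout: direction byte,
	 * zeroed byte, then the two-byte PPP protocol field.
	 */
}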
+
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
@@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
- /* check if we should pass this packet */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 2) = 1;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_OUTBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
- /* check if the packet passes the pass and active filters */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
-
- *(u8 *)skb_push(skb, 2) = 0;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_INBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 0af7db80b2f8..7d60a714ca53 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -431,31 +431,6 @@ static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
return 0;
}
-static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
-{
- struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct pd692x0_msg msg, buf = {0};
- int ret;
-
- ret = pd692x0_fw_unavailable(priv);
- if (ret)
- return ret;
-
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
- msg.sub[2] = id;
- ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf.sub[1]) {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- return 1;
- } else {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- return 0;
- }
-}
-
struct pd692x0_pse_ext_state_mapping {
u32 status_code;
enum ethtool_c33_pse_ext_state pse_ext_state;
@@ -517,21 +492,38 @@ pd692x0_pse_ext_state_map[] = {
{ /* sentinel */ }
};
-static void
-pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
- u32 status_code)
+static int
+pd692x0_pi_get_ext_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info)
{
+ struct ethtool_c33_pse_ext_state_info *c33_ext_state_info;
const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ c33_ext_state_info = &ext_state_info->c33_ext_state_info;
ext_state_map = pd692x0_pse_ext_state_map;
while (ext_state_map->status_code) {
- if (ext_state_map->status_code == status_code) {
+ if (ext_state_map->status_code == buf.sub[0]) {
c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
- return;
+ return 0;
}
ext_state_map++;
}
+
+ return 0;
}
struct pd692x0_class_pw {
@@ -613,35 +605,36 @@ static int pd692x0_pi_set_pw_from_table(struct device *dev,
}
static int
-pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+pd692x0_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
const struct pd692x0_class_pw *pw_table;
int i;
pw_table = pd692x0_class_pw_table;
- st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
- sizeof(struct ethtool_c33_pse_pw_limit_range),
- GFP_KERNEL);
- if (!st->c33_pw_limit_ranges)
+ c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
return -ENOMEM;
for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
- st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
- st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ c33_pw_limit_ranges[i].max = pw_table->class_pw +
+ pw_table->max_added_class_pw;
}
- st->c33_pw_limit_nb_ranges = i;
- return 0;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+ return i;
}
-static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+pd692x0_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- u32 class;
int ret;
ret = pd692x0_fw_unavailable(priv);
@@ -654,39 +647,65 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- /* Compare Port Status (Communication Protocol Document par. 7.1) */
- if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
- else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
- else if (buf.sub[0] == 0x12)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
- else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
if (buf.sub[1])
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- priv->admin_state[id] = status->c33_admin_state;
+ priv->admin_state[id] = admin_state->c33_admin_state;
- pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
- status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+ return 0;
+}
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+static int
+pd692x0_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
msg.sub[2] = id;
- memset(&buf, 0, sizeof(buf));
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
- if (ret < 0)
+ /* Compare Port Status (Communication Protocol Document par. 7.1) */
+ if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ else if (buf.sub[0] == 0x12)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ else
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int
+pd692x0_pi_get_pw_class(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ u32 class;
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
return ret;
- status->c33_avail_pw_limit = ret;
- memset(&buf, 0, sizeof(buf));
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
msg.sub[2] = id;
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
@@ -695,13 +714,29 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
class = buf.data[3] >> 4;
if (class <= 8)
- status->c33_pw_class = class;
+ return class;
- ret = pd692x0_pi_get_pw_ranges(status);
+ return 0;
+}
+
+static int
+pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- return 0;
+ return (buf.data[0] << 4 | buf.data[1]) * 100;
}
static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
@@ -999,13 +1034,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
}
-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
- int id)
+static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
+ int id)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- int mW, uV, uA, ret;
- s64 tmp_64;
+ int ret;
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
msg.sub[2] = id;
@@ -1013,48 +1047,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
- if (ret < 0)
- return ret;
- mW = ret;
-
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
- tmp_64 = mW;
- tmp_64 *= 1000000000ull;
- /* uA = mW * 1000000000 / uV */
- uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return uA;
+ return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
}
-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
- int id, int max_uA)
+static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct device *dev = &priv->client->dev;
struct pd692x0_msg msg, buf = {0};
- int uV, ret, mW;
- s64 tmp_64;
+ int ret;
ret = pd692x0_fw_unavailable(priv);
if (ret)
return ret;
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
msg.sub[2] = id;
- tmp_64 = uV;
- tmp_64 *= max_uA;
- /* mW = uV * uA / 1000000000 */
- mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
- ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
if (ret)
return ret;
@@ -1063,13 +1073,17 @@ static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
- .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_get_admin_state = pd692x0_pi_get_admin_state,
+ .pi_get_pw_status = pd692x0_pi_get_pw_status,
+ .pi_get_ext_state = pd692x0_pi_get_ext_state,
+ .pi_get_pw_class = pd692x0_pi_get_pw_class,
+ .pi_get_actual_pw = pd692x0_pi_get_actual_pw,
.pi_enable = pd692x0_pi_enable,
.pi_disable = pd692x0_pi_disable,
- .pi_is_enabled = pd692x0_pi_is_enabled,
.pi_get_voltage = pd692x0_pi_get_voltage,
- .pi_get_current_limit = pd692x0_pi_get_current_limit,
- .pi_set_current_limit = pd692x0_pi_set_current_limit,
+ .pi_get_pw_limit = pd692x0_pi_get_pw_limit,
+ .pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 2906ce173f66..4602e26eb8c8 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -6,6 +6,7 @@
//
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/driver.h>
@@ -210,16 +211,25 @@ out:
static int pse_pi_is_enabled(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ struct pse_admin_state admin_state = {0};
const struct pse_controller_ops *ops;
int id, ret;
ops = pcdev->ops;
- if (!ops->pi_is_enabled)
+ if (!ops->pi_get_admin_state)
return -EOPNOTSUPP;
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_is_enabled(pcdev, id);
+ ret = ops->pi_get_admin_state(pcdev, id, &admin_state);
+ if (ret)
+ goto out;
+
+ if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
+ admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ ret = 1;
+
+out:
mutex_unlock(&pcdev->lock);
return ret;
@@ -291,32 +301,24 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
return ret;
}
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status);
-
static int pse_pi_get_current_limit(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- struct netlink_ext_ack extack = {};
- struct pse_control_status st = {};
- int id, uV, ret;
+ int id, uV, mW, ret;
s64 tmp_64;
ops = pcdev->ops;
id = rdev_get_id(rdev);
+ if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
mutex_lock(&pcdev->lock);
- if (ops->pi_get_current_limit) {
- ret = ops->pi_get_current_limit(pcdev, id);
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
goto out;
- }
+ mW = ret;
- /* If pi_get_current_limit() callback not populated get voltage
- * from pi_get_voltage() and power limit from ethtool_get_status()
- * to calculate current limit.
- */
ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
@@ -327,16 +329,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
uV = ret;
- ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
- if (ret)
- goto out;
-
- if (!st.c33_avail_pw_limit) {
- ret = -ENODATA;
- goto out;
- }
-
- tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 = mW;
tmp_64 *= 1000000000ull;
/* uA = mW * 1000000000 / uV */
ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
@@ -351,15 +344,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, mW, ret;
+ s64 tmp_64;
ops = pcdev->ops;
- if (!ops->pi_set_current_limit)
+ if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
return -EOPNOTSUPP;
+ if (max_uA > MAX_PI_CURRENT)
+ return -ERANGE;
+
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ ret = _pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ tmp_64 = ret;
+ tmp_64 *= max_uA;
+ /* mW = uA * uV / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = ops->pi_set_pw_limit(pcdev, id, mW);
+out:
mutex_unlock(&pcdev->lock);
return ret;
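Worked example of the current-to-power conversion above, with illustrative values of a 54 V supply and a 600 mA limit:

/*   tmp_64 = 54000000 (uV) * 600000 (uA) = 3.24e13
 *   mW     = 3.24e13 / 1000000000        = 32400  (32.4 W)
 *
 * The s64 intermediate is required: the product overflows 32 bits.
 */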
@@ -403,17 +414,16 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
- if (pcdev->ops->pi_set_current_limit) {
+ if (pcdev->ops->pi_set_pw_limit)
rinit_data->constraints.valid_ops_mask |=
REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
- }
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
rconfig.driver_data = pcdev;
rconfig.init_data = rinit_data;
+ rconfig.of_node = pcdev->pi[id].np;
rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
if (IS_ERR(rdev)) {
@@ -444,6 +454,13 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
if (!pcdev->nr_lines)
pcdev->nr_lines = 1;
+ if (!pcdev->ops->pi_get_admin_state ||
+ !pcdev->ops->pi_get_pw_status) {
+ dev_err(pcdev->dev,
+ "Mandatory status report callbacks are missing");
+ return -EINVAL;
+ }
+
ret = of_load_pse_pis(pcdev);
if (ret)
return ret;
@@ -736,23 +753,6 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
-{
- const struct pse_controller_ops *ops;
-
- ops = pcdev->ops;
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
- }
-
- return ops->ethtool_get_status(pcdev, id, extack, status);
-}
-
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
@@ -763,15 +763,81 @@ static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
- int err;
+ struct pse_admin_state admin_state = {0};
+ struct pse_pw_status pw_status = {0};
+ const struct pse_controller_ops *ops;
+ struct pse_controller_dev *pcdev;
+ int ret;
- mutex_lock(&psec->pcdev->lock);
- err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
- mutex_unlock(&psec->pcdev->lock);
+ pcdev = psec->pcdev;
+ ops = pcdev->ops;
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_get_admin_state(pcdev, psec->id, &admin_state);
+ if (ret)
+ goto out;
+ status->podl_admin_state = admin_state.podl_admin_state;
+ status->c33_admin_state = admin_state.c33_admin_state;
- return err;
+ ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status);
+ if (ret)
+ goto out;
+ status->podl_pw_status = pw_status.podl_pw_status;
+ status->c33_pw_status = pw_status.c33_pw_status;
+
+ if (ops->pi_get_ext_state) {
+ struct pse_ext_state_info ext_state_info = {0};
+
+ ret = ops->pi_get_ext_state(pcdev, psec->id,
+ &ext_state_info);
+ if (ret)
+ goto out;
+
+ memcpy(&status->c33_ext_state_info,
+ &ext_state_info.c33_ext_state_info,
+ sizeof(status->c33_ext_state_info));
+ }
+
+ if (ops->pi_get_pw_class) {
+ ret = ops->pi_get_pw_class(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_class = ret;
+ }
+
+ if (ops->pi_get_actual_pw) {
+ ret = ops->pi_get_actual_pw(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_actual_pw = ret;
+ }
+
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_avail_pw_limit = ret;
+ }
+
+ if (ops->pi_get_pw_limit_ranges) {
+ struct pse_pw_limit_ranges pw_limit_ranges = {0};
+
+ ret = ops->pi_get_pw_limit_ranges(pcdev, psec->id,
+ &pw_limit_ranges);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_limit_ranges =
+ pw_limit_ranges.c33_pw_limit_ranges;
+ status->c33_pw_limit_nb_ranges = ret;
+ }
+out:
+ mutex_unlock(&psec->pcdev->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
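A minimal sketch (hypothetical mypse driver) of the two callbacks that pse_controller_register() now treats as mandatory; all other pi_get_* ops are optional and are simply skipped by pse_ethtool_get_status() when absent:

static int mypse_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
				    struct pse_admin_state *admin_state)
{
	/* Report the cached or hardware-read enable state */
	admin_state->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
	return 0;
}

static int mypse_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
				  struct pse_pw_status *pw_status)
{
	pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
	return 0;
}

static const struct pse_controller_ops mypse_ops = {
	.pi_get_admin_state = mypse_pi_get_admin_state,
	.pi_get_pw_status = mypse_pi_get_pw_status,
};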
@@ -876,6 +942,9 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
int uV, uA, ret;
s64 tmp_64;
+ if (pw_limit > MAX_PI_PW)
+ return -ERANGE;
+
ret = regulator_get_voltage(psec->ps);
if (!ret) {
NL_SET_ERR_MSG(extack,
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index 64ab36974fe0..6ce6773fff31 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -52,17 +52,19 @@ pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
}
static int
-pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+pse_reg_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
- return regulator_is_enabled(priv->ps);
+ admin_state->podl_admin_state = priv->admin_state;
+
+ return 0;
}
static int
-pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+pse_reg_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
@@ -72,20 +74,19 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
return ret;
if (!ret)
- status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ pw_status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
else
- status->podl_pw_status =
+ pw_status->podl_pw_status =
ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
- status->podl_admin_state = priv->admin_state;
-
return 0;
}
static const struct pse_controller_ops pse_reg_ops = {
- .ethtool_get_status = pse_reg_ethtool_get_status,
+ .pi_get_admin_state = pse_reg_pi_get_admin_state,
+ .pi_get_pw_status = pse_reg_pi_get_pw_status,
.pi_enable = pse_reg_pi_enable,
- .pi_is_enabled = pse_reg_pi_is_enabled,
.pi_disable = pse_reg_pi_disable,
};
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 8797ca1a8a21..5e9dda2c0eac 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -25,20 +25,32 @@
#define TPS23881_REG_GEN_MASK 0x17
#define TPS23881_REG_NBITACC BIT(5)
#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_2PAIR_POL1 0x1e
#define TPS23881_REG_PORT_MAP 0x26
#define TPS23881_REG_PORT_POWER 0x29
-#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_4PAIR_POL1 0x2a
+#define TPS23881_REG_INPUT_V 0x2e
+#define TPS23881_REG_CHAN1_A 0x30
+#define TPS23881_REG_CHAN1_V 0x32
+#define TPS23881_REG_FOLDBACK 0x40
#define TPS23881_REG_TPON BIT(0)
#define TPS23881_REG_FWREV 0x41
#define TPS23881_REG_DEVID 0x43
#define TPS23881_REG_DEVID_MASK 0xF0
#define TPS23881_DEVICE_ID 0x02
+#define TPS23881_REG_CHAN1_CLASS 0x4c
#define TPS23881_REG_SRAM_CTRL 0x60
#define TPS23881_REG_SRAM_DATA 0x61
+#define TPS23881_UV_STEP 3662
+#define TPS23881_NA_STEP 70190
+#define TPS23881_MW_STEP 500
+#define TPS23881_MIN_PI_PW_LIMIT_MW 2000
+
struct tps23881_port_desc {
u8 chan[2];
bool is_4p;
+ int pw_pol;
};
struct tps23881_priv {
@@ -53,36 +65,123 @@ static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
return container_of(pcdev, struct tps23881_priv, pcdev);
}
+/*
+ * Helper to extract a value from a u16 register value, which is made of two
+ * u8 registers. The function calculates the bit offset based on the channel
+ * and extracts the relevant bits using a provided field mask.
+ *
+ * @param reg_val: The u16 register value (composed of two u8 registers).
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply to extract the required bits.
+ * @return: The extracted value for the specific channel.
+ */
+static u16 tps23881_calc_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask)
+{
+ if (chan >= 4)
+ reg_val >>= 8;
+
+ return (reg_val >> field_offset) & field_mask;
+}
+
+/*
+ * Helper to combine individual channel values into a u16 register value.
+ * The function sets the value for a specific channel in the appropriate
+ * position.
+ *
+ * @param reg_val: The current u16 register value.
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply for the field (e.g., 0x0F).
+ * @param field_val: The value to set for the specific channel (masked by
+ * field_mask).
+ * @return: The updated u16 register value with the channel value set.
+ */
+static u16 tps23881_set_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask, u16 field_val)
+{
+ field_val &= field_mask;
+
+ if (chan < 4) {
+ reg_val &= ~(field_mask << field_offset);
+ reg_val |= (field_val << field_offset);
+ } else {
+ reg_val &= ~(field_mask << (field_offset + 8));
+ reg_val |= (field_val << (field_offset + 8));
+ }
+
+ return reg_val;
+}
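Example of the packing these helpers implement: for channel 5 the per-channel field lives in the high byte of the 16-bit value, so a 4-bit field at offset 4 (as used by tps23881_pi_get_pw_class() below) decodes as:

/* tps23881_calc_val(0x3200, 5, 4, 0x0f):
 *   chan >= 4  -> reg_val >>= 8 : 0x32
 *   (0x32 >> 4) & 0x0f          : 0x3
 */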
+
+static int
+tps23881_pi_set_pw_pol_limit(struct tps23881_priv *priv, int id, u8 pw_pol,
+ bool is_4p)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ if (!is_4p) {
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ } else {
+ /* One chan is enough to configure the 4p PI power limit */
+ if ((chan % 4) < 2)
+ reg = TPS23881_REG_4PAIR_POL1;
+ else
+ reg = TPS23881_REG_4PAIR_POL1 + 1;
+ }
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, 0xff, pw_pol);
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int tps23881_pi_enable_manual_pol(struct tps23881_priv *priv, int id)
+{
+ struct i2c_client *client = priv->client;
+ int ret;
+ u8 chan;
+ u16 val;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FOLDBACK);
+ if (ret < 0)
+ return ret;
+
+ /* No need to test if the chan is PoE4 as setting either bit for a
+ * 4P configured port disables the automatic configuration on both
+ * channels.
+ */
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ return i2c_smbus_write_byte_data(client, TPS23881_REG_FOLDBACK, val);
+}
+
static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
u8 chan;
u16 val;
- int ret;
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan);
- else
- val = BIT(chan + 4);
+ val = tps23881_set_val(0, chan, 0, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan);
- else
- val |= BIT(chan + 4);
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
}
static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
@@ -97,32 +196,67 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan + 4);
- else
- val = BIT(chan + 8);
+ val = tps23881_set_val(0, chan, 4, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan + 4);
- else
- val |= BIT(chan + 8);
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
}
ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
if (ret)
return ret;
- return 0;
+ /* The PWOFF command resets many registers, which need to be
+ * configured again. According to the datasheet, "It may take upwards
+ * of 5ms after PWOFFn command for all register values to be updated"
+ */
+ mdelay(5);
+
+ /* Enable detection and classification */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4), BIT(chan % 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
+ if (ret)
+ return ret;
+
+ /* No power policy */
+ if (priv->port[id].pw_pol < 0)
+ return 0;
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ return tps23881_pi_set_pw_pol_limit(priv, id, priv->port[id].pw_pol,
+ priv->port[id].is_4p);
}
-static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+static int
+tps23881_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
bool enabled;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -130,32 +264,35 @@ static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4)
- enabled = ret & BIT(chan);
- else
- enabled = ret & BIT(chan + 4);
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- enabled &= !!(ret & BIT(chan));
- else
- enabled &= !!(ret & BIT(chan + 4));
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled &= !!(val);
}
/* Return enabled status only if both channels are in this state */
- return enabled;
+ if (enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
}
-static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+tps23881_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
- bool enabled, delivering;
+ bool delivering;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -163,40 +300,197 @@ static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4) {
- enabled = ret & BIT(chan);
- delivering = ret & BIT(chan + 4);
- } else {
- enabled = ret & BIT(chan + 4);
- delivering = ret & BIT(chan + 8);
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4) {
- enabled &= !!(ret & BIT(chan));
- delivering &= !!(ret & BIT(chan + 4));
- } else {
- enabled &= !!(ret & BIT(chan + 4));
- delivering &= !!(ret & BIT(chan + 8));
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering &= !!(val);
}
/* Return delivering status only if both channels are in this state */
if (delivering)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
- /* Return enabled status only if both channel are on this state */
- if (enabled)
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
return 0;
}
+static int tps23881_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret;
+ u64 uV;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_INPUT_V);
+ if (ret < 0)
+ return ret;
+
+ uV = ret & 0x3fff;
+ uV *= TPS23881_UV_STEP;
+
+ return (int)uV;
+}
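The TPS23881_UV_STEP factor is the ADC step in microvolts (about 3.662 mV per LSB over the 14-bit range). For example, a raw reading of 0x3989 decodes as:

/* uV = (0x3989 & 0x3fff) * 3662 = 14729 * 3662 = 53937598
 *    ~= 53.9 V, a typical PoE input voltage.
 */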
+
+static int
+tps23881_pi_get_chan_current(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int reg, ret;
+ u64 tmp_64;
+
+ /* Registers 0x30 to 0x3d */
+ reg = TPS23881_REG_CHAN1_A + (chan % 4) * 4 + (chan >= 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret & 0x3fff;
+ tmp_64 *= TPS23881_NA_STEP;
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int tps23881_pi_get_pw_class(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_CHAN1_CLASS + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ return tps23881_calc_val(ret, chan, 4, 0x0f);
+}
+
+static int
+tps23881_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, uV, uA;
+ u64 tmp_64;
+ u8 chan;
+
+ ret = tps23881_pi_get_voltage(&priv->pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+ }
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
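
A worked example of the fixed-point math above, with hypothetical values:

/* 54 V input: uV = 54,000,000
 * 125 mA summed port current: uA = 125,000
 * tmp_64 = 54,000,000 * 125,000 = 6,750,000,000,000
 * DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000) = 6750 mW
 */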
+
+static int
+tps23881_pi_get_pw_limit_chan(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 0, 0xff);
+ return val * TPS23881_MW_STEP;
+}
+
+static int tps23881_pi_get_pw_limit(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, mW;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+
+ mW = ret;
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+ mW += ret;
+ }
+
+ return mW;
+}
+
+static int tps23881_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ u8 pw_pol;
+ int ret;
+
+ if (max_mW < TPS23881_MIN_PI_PW_LIMIT_MW || MAX_PI_PW < max_mW) {
+ dev_err(&priv->client->dev,
+ "power limit %d out of ranges [%d,%d]",
+ max_mW, TPS23881_MIN_PI_PW_LIMIT_MW, MAX_PI_PW);
+ return -ERANGE;
+ }
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ pw_pol = DIV_ROUND_CLOSEST_ULL(max_mW, TPS23881_MW_STEP);
+
+ /* Save the power policy so it can be restored after a disable call */
+ priv->port[id].pw_pol = pw_pol;
+ return tps23881_pi_set_pw_pol_limit(priv, id, pw_pol,
+ priv->port[id].is_4p);
+}
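
A quick check of the rounding, assuming TPS23881_MW_STEP is 500 mW (the define is not visible in this hunk, so this step size is an assumption):

/* max_mW = 30000 -> pw_pol = DIV_ROUND_CLOSEST_ULL(30000, 500) = 60 */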
+
+static int
+tps23881_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
+{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+
+ c33_pw_limit_ranges = kzalloc(sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ c33_pw_limit_ranges->min = TPS23881_MIN_PI_PW_LIMIT_MW;
+ c33_pw_limit_ranges->max = MAX_PI_PW;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+
+ /* Return the number of ranges */
+ return 1;
+}
+
/* Parse the managers subnode into an array of device nodes */
static int
tps23881_get_of_channels(struct tps23881_priv *priv,
@@ -480,7 +774,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
struct i2c_client *client = priv->client;
u8 pi_id, lgcl_chan, hw_chan;
u16 val = 0;
- int i, ret;
+ int i;
for (i = 0; i < port_cnt; i++) {
pi_id = port_matrix[i].pi_id;
@@ -491,6 +785,9 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
if (port_matrix[i].exist)
priv->port[pi_id].chan[0] = lgcl_chan;
+ /* Initialize power policy internal value */
+ priv->port[pi_id].pw_pol = -1;
+
/* Set hardware port matrix for all ports */
val |= hw_chan << (lgcl_chan * 2);
@@ -511,11 +808,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
}
/* Write hardware ports matrix */
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
}
static int
@@ -564,11 +857,7 @@ tps23881_set_ports_conf(struct tps23881_priv *priv,
val |= BIT(port_matrix[i].lgcl_chan[1]) |
BIT(port_matrix[i].lgcl_chan[1] + 4);
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
}
static int
@@ -594,11 +883,7 @@ tps23881_set_ports_matrix(struct tps23881_priv *priv,
if (ret)
return ret;
- ret = tps23881_set_ports_conf(priv, port_matrix);
- if (ret)
- return ret;
-
- return 0;
+ return tps23881_set_ports_conf(priv, port_matrix);
}
static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
@@ -626,8 +911,14 @@ static const struct pse_controller_ops tps23881_ops = {
.setup_pi_matrix = tps23881_setup_pi_matrix,
.pi_enable = tps23881_pi_enable,
.pi_disable = tps23881_pi_disable,
- .pi_is_enabled = tps23881_pi_is_enabled,
- .ethtool_get_status = tps23881_ethtool_get_status,
+ .pi_get_admin_state = tps23881_pi_get_admin_state,
+ .pi_get_pw_status = tps23881_pi_get_pw_status,
+ .pi_get_pw_class = tps23881_pi_get_pw_class,
+ .pi_get_actual_pw = tps23881_pi_get_actual_pw,
+ .pi_get_voltage = tps23881_pi_get_voltage,
+ .pi_get_pw_limit = tps23881_pi_get_pw_limit,
+ .pi_set_pw_limit = tps23881_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges,
};
static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5aa41d5f7765..5ca6ecf0ce5f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1329,9 +1329,9 @@ int tap_queue_resize(struct tap_dev *tap)
list_for_each_entry(q, &tap->queue_list, next)
rings[i++] = &q->ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
kfree(rings);
return ret;
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index c7690adec8db..f4019815f473 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -1175,6 +1175,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (netdev_has_upper_dev(port_dev, dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
+ netdev_err(dev, "Device %s is already a lower device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -2632,7 +2639,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
ctx.data.u32_val = nla_get_u32(attr_data);
break;
case TEAM_OPTION_TYPE_STRING:
- if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+ if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
+ !memchr(nla_data(attr_data), '\0',
+ nla_len(attr_data))) {
err = -EINVAL;
goto team_put;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e816aaba8e5f..acf96f262488 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -580,7 +580,7 @@ static inline bool tun_not_capable(struct tun_struct *tun)
struct net *net = dev_net(tun->dev);
return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
!ns_capable(net->user_ns, CAP_NET_ADMIN);
}
@@ -3697,9 +3697,9 @@ static int tun_queue_resize(struct tun_struct *tun)
list_for_each_entry(tfile, &tun->disabled, next)
rings[i++] = &tfile->tx_ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- tun_ptr_free);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
kfree(rings);
return ret;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d5c47a2a62dc..34e82f1e37d9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -833,8 +833,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->dev = dev;
- hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ hrtimer_setup(&ctx->tx_timer, &cdc_ncm_tx_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 46afb95ffabe..a19789b57190 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -61,7 +61,18 @@
#define IPHETH_USBINTF_PROTO 1
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
-#define IPHETH_NCM_HEADER_SIZE (12 + 96) /* NCMH + NCM0 */
+/* On iOS devices, NCM headers in RX have a fixed size regardless of DPE count:
+ * - NTH16 (NCMH): 12 bytes, as per CDC NCM 1.0 spec
+ * - NDP16 (NCM0): 96 bytes, of which
+ * - NDP16 fixed header: 8 bytes
+ * - maximum of 22 DPEs (21 datagrams + trailer), 4 bytes each
+ */
+#define IPHETH_NDP16_MAX_DPE 22
+#define IPHETH_NDP16_HEADER_SIZE (sizeof(struct usb_cdc_ncm_ndp16) + \
+ IPHETH_NDP16_MAX_DPE * \
+ sizeof(struct usb_cdc_ncm_dpe16))
+#define IPHETH_NCM_HEADER_SIZE (sizeof(struct usb_cdc_ncm_nth16) + \
+ IPHETH_NDP16_HEADER_SIZE)
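
Sanity check of the arithmetic behind these defines:

/* NTH16: 12 bytes; NDP16: 8 + 22 * 4 = 96 bytes;
 * IPHETH_NCM_HEADER_SIZE = 12 + 96 = 108 bytes,
 * matching the fixed layout described in the comment above.
 */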
#define IPHETH_TX_BUF_SIZE ETH_FRAME_LEN
#define IPHETH_RX_BUF_SIZE_LEGACY (IPHETH_IP_ALIGN + ETH_FRAME_LEN)
#define IPHETH_RX_BUF_SIZE_NCM 65536
@@ -207,15 +218,23 @@ static int ipheth_rcvbulk_callback_legacy(struct urb *urb)
return ipheth_consume_skb(buf, len, dev);
}
+/* In "NCM mode", the iOS device encapsulates RX (phone->computer) traffic
+ * in NCM Transfer Blocks (similarly to CDC NCM). However, unlike reverse
+ * tethering (handled by the `cdc_ncm` driver), regular tethering is not
+ * compliant with the CDC NCM spec, as the device is missing the necessary
+ * descriptors, and TX (computer->phone) traffic is not encapsulated
+ * at all. Thus `ipheth` implements a very limited subset of the spec with
+ * the sole purpose of parsing RX URBs.
+ */
static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
{
struct usb_cdc_ncm_nth16 *ncmh;
struct usb_cdc_ncm_ndp16 *ncm0;
struct usb_cdc_ncm_dpe16 *dpe;
struct ipheth_device *dev;
+ u16 dg_idx, dg_len;
int retval = -EINVAL;
char *buf;
- int len;
dev = urb->context;
@@ -226,40 +245,42 @@ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
ncmh = urb->transfer_buffer;
if (ncmh->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN) ||
- le16_to_cpu(ncmh->wNdpIndex) >= urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ /* On iOS, NDP16 directly follows NTH16 */
+ ncmh->wNdpIndex != cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)))
+ goto rx_error;
- ncm0 = urb->transfer_buffer + le16_to_cpu(ncmh->wNdpIndex);
- if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN) ||
- le16_to_cpu(ncmh->wHeaderLength) + le16_to_cpu(ncm0->wLength) >=
- urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ ncm0 = urb->transfer_buffer + sizeof(struct usb_cdc_ncm_nth16);
+ if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN))
+ goto rx_error;
dpe = ncm0->dpe16;
- while (le16_to_cpu(dpe->wDatagramIndex) != 0 &&
- le16_to_cpu(dpe->wDatagramLength) != 0) {
- if (le16_to_cpu(dpe->wDatagramIndex) >= urb->actual_length ||
- le16_to_cpu(dpe->wDatagramIndex) +
- le16_to_cpu(dpe->wDatagramLength) > urb->actual_length) {
+ for (int dpe_i = 0; dpe_i < IPHETH_NDP16_MAX_DPE; ++dpe_i, ++dpe) {
+ dg_idx = le16_to_cpu(dpe->wDatagramIndex);
+ dg_len = le16_to_cpu(dpe->wDatagramLength);
+
+ /* A null DPE must be present after the last datagram pointer entry
+ * (section 3.3.1, USB CDC NCM spec v1.0)
+ */
+ if (dg_idx == 0 && dg_len == 0)
+ return 0;
+
+ if (dg_idx < IPHETH_NCM_HEADER_SIZE ||
+ dg_idx >= urb->actual_length ||
+ dg_len > urb->actual_length - dg_idx) {
dev->net->stats.rx_length_errors++;
return retval;
}
- buf = urb->transfer_buffer + le16_to_cpu(dpe->wDatagramIndex);
- len = le16_to_cpu(dpe->wDatagramLength);
+ buf = urb->transfer_buffer + dg_idx;
- retval = ipheth_consume_skb(buf, len, dev);
+ retval = ipheth_consume_skb(buf, dg_len, dev);
if (retval != 0)
return retval;
-
- dpe++;
}
- return 0;
+rx_error:
+ dev->net->stats.rx_errors++;
+ return retval;
}
static void ipheth_rcvbulk_callback(struct urb *urb)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 531b1b6a37d1..137adf6d5b08 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -439,7 +439,7 @@ struct lan78xx_net {
struct usb_anchor deferred;
struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
- struct mutex phy_mutex; /* for phy access */
+ struct mutex mdiobus_mutex; /* for MDIO bus access */
unsigned int pipe_in, pipe_out, pipe_intr;
unsigned int bulk_in_delay;
@@ -472,10 +472,6 @@ struct lan78xx_net {
struct irq_domain_data domain_data;
};
-/* define external phy id */
-#define PHY_LAN8835 (0x0007C130)
-#define PHY_KSZ9031RNX (0x00221620)
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
@@ -625,13 +621,13 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
*data = *buf;
} else if (net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to read register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to read register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
@@ -656,13 +652,13 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
if (unlikely(ret < 0) &&
net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to write register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to write register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
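
The underlying USB control transfer returns the number of bytes transferred on success; normalizing that to 0 here lets every caller use the uniform pattern below instead of comparing against the expected length:

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		return ret;	/* ret is 0 on success, never a byte count */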
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
@@ -678,11 +674,7 @@ static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
buf &= ~mask;
buf |= (mask & data);
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan78xx_write_reg(dev, reg, buf);
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
@@ -812,8 +804,156 @@ static void lan78xx_update_stats(struct lan78xx_net *dev)
usb_autopm_put_interface(dev->intf);
}
-/* Loop until the read is completed with timeout called with phy_mutex held */
-static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
+{
+ return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
+}
+
+static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
+ u32 hw_disabled)
+{
+ unsigned long timeout;
+ bool stopped = true;
+ int ret;
+ u32 buf;
+
+ /* Stop the h/w block (if not already stopped) */
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_enabled) {
+ buf &= ~hw_enabled;
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ stopped = false;
+ timeout = jiffies + HW_DISABLE_TIMEOUT;
+ do {
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_disabled)
+ stopped = true;
+ else
+ msleep(HW_DISABLE_DELAY_MS);
+ } while (!stopped && !time_after(jiffies, timeout));
+ }
+
+ return stopped ? 0 : -ETIMEDOUT;
+}
+
+static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
+{
+ return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
+}
+
+static int lan78xx_start_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start tx path");
+
+ /* Start the MAC transmitter */
+
+ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the Tx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop tx path");
+
+ /* Stop the Tx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the MAC transmitter */
+
+ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Tx path is stopped before calling
+ * lan78xx_flush_tx_fifo().
+ */
+static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
+}
+
+static int lan78xx_start_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start rx path");
+
+ /* Start the Rx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the MAC receiver */
+
+ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop rx path");
+
+ /* Stop the MAC receiver */
+
+ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the Rx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Rx path is stopped before calling
+ * lan78xx_flush_rx_fifo().
+ */
+static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
+}
+
+/* Poll with a timeout until the read completes; called with mdiobus_mutex held */
+static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
u32 val;
@@ -821,14 +961,14 @@ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MII_ACC, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & MII_ACC_MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
- return -EIO;
+ return -ETIMEDOUT;
}
static inline u32 mii_access(int id, int index, int read)
@@ -854,8 +994,8 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_) ||
(val & E2P_CMD_EPC_TIMEOUT_))
@@ -865,7 +1005,7 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
netdev_warn(dev->net, "EEPROM read operation timeout");
- return -EIO;
+ return -ETIMEDOUT;
}
return 0;
@@ -879,8 +1019,8 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_))
return 0;
@@ -889,75 +1029,81 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
} while (!time_after(jiffies, start_time + HZ));
netdev_warn(dev->net, "EEPROM is busy");
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u32 val;
- u32 saved;
+ u32 val, saved;
int i, ret;
- int retval;
/* Depending on the chip, some EEPROM pins are muxed with the LED function.
* Disable and restore the LED function to access the EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- return retval;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, E2P_DATA, &val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
data[i] = val & 0xFF;
offset++;
}
- retval = 0;
-exit:
+read_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u8 sig;
int ret;
+ u8 sig;
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if ((ret == 0) && (sig == EEPROM_INDICATOR))
- ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
- else
- ret = -EINVAL;
+ if (ret < 0)
+ return ret;
- return ret;
+ if (sig != EEPROM_INDICATOR)
+ return -ENODATA;
+
+ return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -966,113 +1112,144 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 val;
u32 saved;
int i, ret;
- int retval;
/* Depending on the chip, some EEPROM pins are muxed with the LED function.
* Disable and restore the LED function to access the EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- goto exit;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ /* Not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
/* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
/* Fill data register */
val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
/* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Not a USB-specific error; try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
offset++;
}
- retval = 0;
-exit:
+write_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- int i;
- u32 buf;
unsigned long timeout;
+ int ret, i;
+ u32 buf;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
- lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
- lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ if (ret < 0)
+ return ret;
data[i] = (u8)(buf & 0xFF);
}
@@ -1086,45 +1263,72 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
int i;
u32 buf;
unsigned long timeout;
+ int ret;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
/* set to BYTE program mode */
- lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
- lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
}
@@ -1161,7 +1365,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
if (dp_sel & DP_SEL_DPRDY_)
return 0;
@@ -1171,44 +1375,51 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
netdev_warn(dev->net, "%s timed out", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
u32 addr, u32 length, u32 *buf)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 dp_sel;
int i, ret;
- if (usb_autopm_get_interface(dev->intf) < 0)
- return 0;
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
mutex_lock(&pdata->dataport_mutex);
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
-
- ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+ goto dataport_write;
- dp_sel &= ~DP_SEL_RSEL_MASK_;
- dp_sel |= ram_select;
- ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+ ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
+ if (ret < 0)
+ goto dataport_write;
for (i = 0; i < length; i++) {
ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
}
-done:
+dataport_write:
+ if (ret < 0)
+ netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
+
mutex_unlock(&pdata->dataport_mutex);
usb_autopm_put_interface(dev->intf);
@@ -1244,23 +1455,39 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
struct lan78xx_priv *pdata =
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
- int i;
+ int i, ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
- lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
- DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
+ DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ if (ret < 0)
+ goto multicast_write_done;
for (i = 1; i < NUM_OF_MAF; i++) {
- lan78xx_write_reg(dev, MAF_HI(i), 0);
- lan78xx_write_reg(dev, MAF_LO(i),
- pdata->pfilter_table[i][1]);
- lan78xx_write_reg(dev, MAF_HI(i),
- pdata->pfilter_table[i][0]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ if (ret < 0)
+ goto multicast_write_done;
}
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+multicast_write_done:
+ if (ret < 0)
+ netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
+ return;
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1369,24 +1596,24 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
u32 val;
int ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* Resetting the device while there is activity on the MDIO
* bus can result in the MAC interface locking up and not
* completing register access transactions.
*/
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto exit_unlock;
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
val |= MAC_CR_RST_;
ret = lan78xx_write_reg(dev, MAC_CR, val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
/* Wait for the reset to complete before allowing any further
* MAC register accesses otherwise the MAC may lock up.
@@ -1394,17 +1621,17 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
if (!(val & MAC_CR_RST_)) {
ret = 0;
- goto done;
+ goto exit_unlock;
}
} while (!time_after(jiffies, start_time + HZ));
ret = -ETIMEDOUT;
-done:
- mutex_unlock(&dev->phy_mutex);
+exit_unlock:
+ mutex_unlock(&dev->mdiobus_mutex);
return ret;
}
@@ -1630,6 +1857,7 @@ static void lan78xx_get_wol(struct net_device *netdev,
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (unlikely(ret < 0)) {
+ netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1661,10 +1889,13 @@ static int lan78xx_set_wol(struct net_device *netdev,
pdata->wol = wol->wolopts;
- device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ if (ret < 0)
+ goto exit_pm_put;
- phy_ethtool_set_wol(netdev->phydev, wol);
+ ret = phy_ethtool_set_wol(netdev->phydev, wol);
+exit_pm_put:
usb_autopm_put_interface(dev->intf);
return ret;
@@ -1869,30 +2100,35 @@ exit:
static int lan78xx_get_regs_len(struct net_device *netdev)
{
- if (!netdev->phydev)
- return (sizeof(lan78xx_regs));
- else
- return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
+ return sizeof(lan78xx_regs);
}
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *buf)
{
- u32 *data = buf;
- int i, j;
struct lan78xx_net *dev = netdev_priv(netdev);
+ unsigned int data_count = 0;
+ u32 *data = buf;
+ int i, ret;
/* Read Device/MAC registers */
- for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
- lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
+ ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net,
+ "failed to read register 0x%08x\n",
+ lan78xx_regs[i]);
+ goto clean_data;
+ }
- if (!netdev->phydev)
- return;
+ data_count++;
+ }
- /* Read PHY registers */
- for (j = 0; j < 32; i++, j++)
- data[i] = phy_read(netdev->phydev, j);
+ return;
+
+clean_data:
+ memset(data, 0, data_count * sizeof(u32));
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
@@ -1920,13 +2156,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
+ int ret;
- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ if (ret < 0)
+ return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1959,14 +2201,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
}
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ if (ret < 0)
+ return ret;
eth_hw_addr_set(dev->net, addr);
+
+ return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -1980,27 +2234,31 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
/* set the address, index & direction (read from PHY) */
addr = mii_access(phy_id, idx, MII_READ);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
ret = lan78xx_read_reg(dev, MII_DATA, &val);
+ if (ret < 0)
+ goto done;
ret = (int)(val & 0xFFFF);
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
return ret;
@@ -2017,28 +2275,32 @@ static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
val = (u32)regval;
ret = lan78xx_write_reg(dev, MII_DATA, val);
+ if (ret < 0)
+ goto done;
/* set the address, index & direction (write to PHY) */
addr = mii_access(phy_id, idx, MII_WRITE);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
@@ -2164,13 +2426,22 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
+ int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in non-atomic context.
*/
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ goto irq_bus_sync_unlock;
+
if (buf != data->irqenable)
- lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+
+irq_bus_sync_unlock:
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
+ ERR_PTR(ret));
mutex_unlock(&data->irq_lock);
}
@@ -2195,7 +2466,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
mutex_init(&dev->domain_data.irq_lock);
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
dev->domain_data.irqenable = buf;
dev->domain_data.irqchip = &lan78xx_irqchip;
@@ -2234,46 +2508,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static int lan8835_fixup(struct phy_device *phydev)
-{
- int buf;
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
- buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
- buf &= ~0x1800;
- buf |= 0x0800;
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
-
- /* RGMII MAC TXC Delay Enable */
- lan78xx_write_reg(dev, MAC_RGMII_ID,
- MAC_RGMII_ID_TXC_DELAY_EN_);
-
- /* RGMII TX DLL Tune Adjust */
- lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
-
- return 1;
-}
-
-static int ksz9031rnx_fixup(struct phy_device *phydev)
-{
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* Micrel9301RNX PHY configuration */
- /* RGMII Control Signal Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
- /* RGMII RX Data Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
- /* RGMII RX Clock Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
-
- return 1;
-}
-
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
u32 buf;
@@ -2307,22 +2541,11 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
netdev_err(dev->net, "no PHY driver found\n");
return NULL;
}
- dev->interface = PHY_INTERFACE_MODE_RGMII;
- /* external PHY fixup for KSZ9031RNX */
- ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
- ksz9031rnx_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
- return NULL;
- }
- /* external PHY fixup for LAN8835 */
- ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
- lan8835_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
- return NULL;
- }
- /* add more external PHY fixup here if needed */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ /* The PHY driver is responsible for configuring proper RGMII
+ * interface delays. Disable RGMII delays on the MAC side.
+ */
+ lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
phydev->is_internal = false;
}
@@ -2381,11 +2604,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (phy_is_pseudo_fixed_link(phydev)) {
fixed_phy_unregister(phydev);
phy_device_free(phydev);
- } else {
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
- 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835,
- 0xfffffff0);
}
}
return -EIO;
@@ -2437,27 +2655,36 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- u32 buf;
bool rxenabled;
+ u32 buf;
+ int ret;
- lan78xx_read_reg(dev, MAC_RX, &buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ if (ret < 0)
+ return ret;
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -2523,7 +2750,10 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
- if (!ret)
+ if (ret < 0)
+ netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
+ new_mtu, netdev->mtu, ERR_PTR(ret));
+ else
WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
@@ -2536,6 +2766,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
+ int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2552,14 +2783,20 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
/* Added to support MAC address changes */
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
- return 0;
+ return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
/* Enable or disable Rx checksum offload engine */
@@ -2592,9 +2829,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-
- return 0;
+ return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_deferred_vlan_write(struct work_struct *param)
@@ -2645,13 +2880,16 @@ static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void lan78xx_init_ltm(struct lan78xx_net *dev)
+static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
+ u32 regs[6] = { 0 };
int ret;
u32 buf;
- u32 regs[6] = { 0 };
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ goto init_ltm_failed;
+
if (buf & USB_CFG1_LTM_ENABLE_) {
u8 temp[2];
/* Get values from EEPROM first */
@@ -2662,7 +2900,7 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
if (temp[0] == 24) {
@@ -2671,17 +2909,40 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
}
}
- lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
- lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
- lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
- lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
- lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
- lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ return 0;
+
+init_ltm_failed:
+ netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
+ return ret;
}
static int lan78xx_urb_config_init(struct lan78xx_net *dev)
@@ -2722,156 +2983,6 @@ static int lan78xx_urb_config_init(struct lan78xx_net *dev)
return result;
}
-static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
-{
- return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
-}
-
-static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
- u32 hw_disabled)
-{
- unsigned long timeout;
- bool stopped = true;
- int ret;
- u32 buf;
-
- /* Stop the h/w block (if not already stopped) */
-
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_enabled) {
- buf &= ~hw_enabled;
-
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- stopped = false;
- timeout = jiffies + HW_DISABLE_TIMEOUT;
- do {
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_disabled)
- stopped = true;
- else
- msleep(HW_DISABLE_DELAY_MS);
- } while (!stopped && !time_after(jiffies, timeout));
- }
-
- ret = stopped ? 0 : -ETIME;
-
- return ret;
-}
-
-static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
-{
- return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
-}
-
-static int lan78xx_start_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start tx path");
-
- /* Start the MAC transmitter */
-
- ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
- if (ret < 0)
- return ret;
-
- /* Start the Tx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop tx path");
-
- /* Stop the Tx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- /* Stop the MAC transmitter */
-
- ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Tx path is stopped before calling
- * lan78xx_flush_tx_fifo().
- */
-static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
-}
-
-static int lan78xx_start_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start rx path");
-
- /* Start the Rx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- /* Start the MAC receiver*/
-
- ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop rx path");
-
- /* Stop the MAC receiver */
-
- ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
- if (ret < 0)
- return ret;
-
- /* Stop the Rx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Rx path is stopped before calling
- * lan78xx_flush_rx_fifo().
- */
-static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
-}
-
static int lan78xx_reset(struct lan78xx_net *dev)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -2905,7 +3016,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- lan78xx_init_mac_address(dev);
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
@@ -2927,7 +3040,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* Init LTM */
- lan78xx_init_ltm(dev);
+ ret = lan78xx_init_ltm(dev);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
@@ -4242,9 +4357,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
phydev = net->phydev;
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
-
phy_disconnect(net->phydev);
if (phy_is_pseudo_fixed_link(phydev)) {
@@ -4349,7 +4461,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
skb_queue_head_init(&dev->rxq_overflow);
- mutex_init(&dev->phy_mutex);
+ mutex_init(&dev->mdiobus_mutex);
mutex_init(&dev->dev_mutex);
ret = lan78xx_urb_config_init(dev);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 01a3b2417a54..ddff6f19ff98 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -71,6 +71,14 @@
#define MSR_SPEED (1<<3)
#define MSR_LINK (1<<2)
+/* USB endpoints */
+enum rtl8150_usb_ep {
+ RTL8150_USB_EP_CONTROL = 0,
+ RTL8150_USB_EP_BULK_IN = 1,
+ RTL8150_USB_EP_BULK_OUT = 2,
+ RTL8150_USB_EP_INT_IN = 3,
+};
+
/* Interrupt pipe data */
#define INT_TSR 0x00
#define INT_RSR 0x01
@@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
rtl8150_t *dev;
struct net_device *netdev;
+ static const u8 bulk_ep_addr[] = {
+ RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
+ RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
+ 0};
+ static const u8 int_ep_addr[] = {
+ RTL8150_USB_EP_INT_IN | USB_DIR_IN,
+ 0};
netdev = alloc_etherdev(sizeof(rtl8150_t));
if (!netdev)
@@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ /* Verify that all required endpoints are present */
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr)) {
+ dev_err(&intf->dev, "couldn't find required endpoints\n");
+ goto out;
+ }
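
usb_check_bulk_endpoints() and usb_check_int_endpoints() expect a zero-terminated array of endpoint addresses, hence the trailing 0 entries in bulk_ep_addr and int_ep_addr above; the probe now bails out before any URB is submitted to a missing endpoint.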
+
tasklet_setup(&dev->tl, rx_fixup);
spin_lock_init(&dev->rx_pool_lock);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 07ebb800edf1..01251868a9c2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -634,7 +634,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
@@ -646,7 +646,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7646ddd9bef7..9d7c37e968b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3826,7 +3826,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3840,16 +3840,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 1341374a4588..616ecc38d172 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
- tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+ tq = &adapter->tx_queue[cpu % tq_number];
return tq;
}
@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
u32 buf_size;
u32 dw2;
+ spin_lock_irq(&tq->tx_lock);
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
+ spin_unlock_irq(&tq->tx_lock);
return -ENOSPC;
}
@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
+ spin_unlock_irq(&tq->tx_lock);
return -EFAULT;
+ }
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
+ spin_unlock_irq(&tq->tx_lock);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
if (tq->stopped)
return -ENETDOWN;
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
}
}
tq->stats.xdp_xmit += i;
+ __netif_tx_unlock(nq);
return i;
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 67d25f4f94ef..ca81b212a246 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -122,16 +122,6 @@ struct net_vrf {
int ifindex;
};
-static void vrf_rx_stats(struct net_device *dev, int len)
-{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_packets);
- u64_stats_add(&dstats->rx_bytes, len);
- u64_stats_update_end(&dstats->syncp);
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -369,7 +359,7 @@ static bool qdisc_tx_is_default(const struct net_device *dev)
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
struct dst_entry *dst)
{
- int len = skb->len;
+ unsigned int len = skb->len;
skb_orphan(skb);
@@ -382,15 +372,10 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
- vrf_rx_stats(dev, len);
- } else {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_drops);
- u64_stats_update_end(&dstats->syncp);
- }
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ dev_dstats_rx_add(dev, len);
+ else
+ dev_dstats_rx_dropped(dev);
return NETDEV_TX_OK;
}
@@ -578,20 +563,14 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- int len = skb->len;
- netdev_tx_t ret = is_ip_tx_frame(skb, dev);
-
- u64_stats_update_begin(&dstats->syncp);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ unsigned int len = skb->len;
+ netdev_tx_t ret;
- u64_stats_inc(&dstats->tx_packets);
- u64_stats_add(&dstats->tx_bytes, len);
- } else {
- u64_stats_inc(&dstats->tx_drops);
- }
- u64_stats_update_end(&dstats->syncp);
+ ret = is_ip_tx_frame(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_dstats_tx_add(dev, len);
+ else
+ dev_dstats_tx_dropped(dev);
return ret;
}
@@ -1364,7 +1343,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (!is_ndisc) {
struct net_device *orig_dev = skb->dev;
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1420,7 +1399,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
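
[Editor's note: not part of the patch] The vrf.c hunks above swap the
open-coded per-CPU dstats updates for the dev_dstats_*() helpers. As a rough
sketch of what such a helper does (it mirrors the vrf_rx_stats() body deleted
above; the authoritative definitions live in include/linux/netdevice.h and
require the device to use NETDEV_PCPU_STAT_DSTATS, as the vxlan_setup() hunk
further down also switches to):

    static inline void dev_dstats_rx_add_sketch(struct net_device *dev,
                                                unsigned int len)
    {
            struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

            u64_stats_update_begin(&dstats->syncp);
            u64_stats_inc(&dstats->rx_packets);
            u64_stats_add(&dstats->rx_bytes, len);
            u64_stats_update_end(&dstats->syncp);
    }
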
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 9ea63059d52d..92516189e792 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -622,9 +622,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol)
{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+ const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr;
/* Need to have Next Protocol set for interfaces in GPE mode. */
if (!gpe->np_applied)
@@ -1352,6 +1352,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
int err = 0;
@@ -1364,7 +1365,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
@@ -1381,7 +1382,7 @@ skip_nh:
}
list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip;
err = vxlan_fdb_info(skb, vxlan, f,
@@ -1554,18 +1555,17 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
- struct sk_buff *skb,
- u32 vxflags)
+static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags)
{
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
enum skb_drop_reason reason;
size_t start, offset;
- if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
- goto out;
+ if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ return SKB_NOT_DROPPED_YET;
- start = vxlan_rco_start(unparsed->vx_vni);
- offset = start + vxlan_rco_offset(unparsed->vx_vni);
+ start = vxlan_rco_start(vh->vx_vni);
+ offset = start + vxlan_rco_offset(vh->vx_vni);
reason = pskb_may_pull_reason(skb, offset + sizeof(u16));
if (reason)
@@ -1573,22 +1573,20 @@ static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
!!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
-out:
- unparsed->vx_flags &= ~VXLAN_HF_RCO;
- unparsed->vx_vni &= VXLAN_VNI_MASK;
-
return SKB_NOT_DROPPED_YET;
}
-static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
- struct sk_buff *skb, u32 vxflags,
+static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags,
struct vxlan_metadata *md)
{
- struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
+ const struct vxlanhdr_gbp *gbp;
struct metadata_dst *tun_dst;
- if (!(unparsed->vx_flags & VXLAN_HF_GBP))
- goto out;
+ gbp = (const struct vxlanhdr_gbp *)vh;
+
+ if (!(vh->vx_flags & VXLAN_HF_GBP))
+ return;
md->gbp = ntohs(gbp->policy_id);
@@ -1607,8 +1605,6 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
-out:
- unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan,
@@ -1672,9 +1668,9 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
+ const struct vxlanhdr *vh;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
- struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 protocol = htons(ETH_P_TEB);
@@ -1689,24 +1685,21 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (reason)
goto drop;
- unparsed = *vxlan_hdr(skb);
+ vh = vxlan_hdr(skb);
/* VNI flag always required to be set */
- if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ if (!(vh->vx_flags & VXLAN_HF_VNI)) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxlan_hdr(skb)->vx_flags),
- ntohl(vxlan_hdr(skb)->vx_vni));
+ ntohl(vh->vx_flags), ntohl(vh->vx_vni));
reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
/* Return non vxlan pkt */
goto drop;
}
- unparsed.vx_flags &= ~VXLAN_HF_VNI;
- unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ vni = vxlan_vni(vh->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan) {
@@ -1714,13 +1707,27 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
- if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
+ if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags ||
+ vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) {
+ /* If the header uses bits besides those enabled by the
+ * netdevice configuration, treat this as a malformed packet.
+ * This behavior diverges from the VXLAN RFC (RFC 7348), which
+ * stipulates that bits in reserved fields are to be ignored.
+ * The approach here maintains compatibility with previous stack
+ * code, and is also more robust, providing a little more
+ * security when adding extensions to VXLAN.
+ */
+ reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
goto drop;
- unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
@@ -1730,8 +1737,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (vs->flags & VXLAN_F_REMCSUM_RX) {
- reason = vxlan_remcsum(&unparsed, skb, vs->flags);
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) {
+ reason = vxlan_remcsum(skb, vxlan->cfg.flags);
if (unlikely(reason))
goto drop;
}
@@ -1756,25 +1763,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_GBP)
- vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ if (vxlan->cfg.flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md);
/* Note that GBP and GPE can never be active together. This is
* ensured in vxlan_dev_configure.
*/
- if (unparsed.vx_flags || unparsed.vx_vni) {
- /* If there are any unprocessed flags remaining treat
- * this as a malformed packet. This behavior diverges from
- * VXLAN RFC (RFC7348) which stipulates that bits in reserved
- * in reserved fields are to be ignored. The approach here
- * maintains compatibility with previous stack code, and also
- * is more robust and provides a little more security in
- * adding extensions to VXLAN.
- */
- reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
- goto drop;
- }
-
if (!raw_proto) {
reason = vxlan_set_mac(vxlan, vs, skb, vni);
if (reason)
@@ -1818,14 +1812,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- dev_core_stats_rx_dropped_inc(vxlan->dev);
+ dev_dstats_rx_dropped(vxlan->dev);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
- dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ dev_dstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
@@ -1880,7 +1874,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
@@ -1938,7 +1932,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2097,7 +2091,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2271,8 +2265,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
{
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ unsigned int len = skb->len;
struct net_device *dev;
- int len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
@@ -2299,16 +2293,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
- dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
+ dev_dstats_tx_add(src_vxlan->dev, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
- dev_sw_netstats_rx_add(dst_vxlan->dev, len);
+ dev_dstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2621,7 +2615,7 @@ out_unlock:
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, reason);
return;
@@ -2666,7 +2660,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2704,7 +2698,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2801,10 +2795,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
}
@@ -2827,7 +2821,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
return NETDEV_TX_OK;
@@ -2904,8 +2898,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
- if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
- vxlan_vnigroup_init(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnigroup_init(vxlan);
+ if (err)
+ return err;
+ }
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
@@ -3371,7 +3368,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -3435,6 +3432,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
+ [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -4070,6 +4068,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink, struct netlink_ext_ack *extack)
{
+ struct vxlanhdr used_bits = {
+ .vx_flags = VXLAN_HF_VNI,
+ .vx_vni = VXLAN_VNI_MASK,
+ };
struct vxlan_dev *vxlan = netdev_priv(dev);
int err = 0;
@@ -4296,6 +4298,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_HF_RCO;
+ used_bits.vx_vni |= ~VXLAN_VNI_MASK;
}
if (data[IFLA_VXLAN_GBP]) {
@@ -4303,6 +4307,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
VXLAN_F_GBP, changelink, false, extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_GBP_USED_BITS;
}
if (data[IFLA_VXLAN_GPE]) {
@@ -4311,6 +4316,46 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+
+ used_bits.vx_flags |= VXLAN_GPE_USED_BITS;
+ }
+
+ if (data[IFLA_VXLAN_RESERVED_BITS]) {
+ struct vxlanhdr reserved_bits;
+
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Cannot change reserved_bits");
+ return -EOPNOTSUPP;
+ }
+
+ nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS],
+ sizeof(reserved_bits));
+ if (used_bits.vx_flags & reserved_bits.vx_flags ||
+ used_bits.vx_vni & reserved_bits.vx_vni) {
+ __be64 ub_be64, rb_be64;
+
+ memcpy(&ub_be64, &used_bits, sizeof(ub_be64));
+ memcpy(&rb_be64, &reserved_bits, sizeof(rb_be64));
+
+ NL_SET_ERR_MSG_ATTR_FMT(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Used bits %#018llx cannot overlap reserved bits %#018llx",
+ be64_to_cpu(ub_be64),
+ be64_to_cpu(rb_be64));
+ return -EINVAL;
+ }
+
+ conf->reserved_bits = reserved_bits;
+ } else {
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ conf->reserved_bits = (struct vxlanhdr) {
+ .vx_flags = ~used_bits.vx_flags,
+ .vx_vni = ~used_bits.vx_vni,
+ };
}
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
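
[Editor's note: not part of the patch] The reserved-bits handling above is
plain mask arithmetic: used_bits starts as the VNI flag plus the VNI field,
each enabled extension (RCO, GBP, GPE) ORs in the bits it consumes, and,
absent an explicit IFLA_VXLAN_RESERVED_BITS attribute, the complement of
used_bits becomes reserved. A minimal user-space sketch of the idea (the
constants are illustrative stand-ins, not the kernel's big-endian
definitions):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t HF_VNI = 1u << 27;             /* illustrative */
            uint32_t GBP_BITS = 1u << 31;           /* illustrative */

            uint32_t used = HF_VNI | GBP_BITS;      /* VNI + enabled extensions */
            uint32_t reserved = ~used;              /* default reserved bits */

            uint32_t rx_flags = HF_VNI | (1u << 3); /* stray reserved bit set */

            if (rx_flags & reserved)
                    puts("drop: SKB_DROP_REASON_VXLAN_INVALID_HDR");
            return 0;
    }
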
@@ -4497,6 +4542,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(0) + /* IFLA_VXLAN_GPE */
nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
+ /* IFLA_VXLAN_RESERVED_BITS */
+ nla_total_size(sizeof(struct vxlanhdr)) +
0;
}
@@ -4599,6 +4646,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS,
+ sizeof(vxlan->cfg.reserved_bits),
+ &vxlan->cfg.reserved_bits))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index 8735891ee128..816ab1aa0526 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
vxlan_xmit_one(skb, vxlan->dev, src_vni,
rcu_dereference(fremote->rd), false);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index d2023e7131bd..6e6e9f05509a 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -411,6 +411,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
struct tunnel_msg *tmsg;
struct net_device *dev;
+ if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
+ return -EINVAL;
+ }
+
tmsg = nlmsg_data(cb->nlh);
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
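
[Editor's note: not part of the patch] The vnifilter hunk above adds the
usual guard for netlink dump requests: check that the message is at least
large enough to carry its fixed family header before dereferencing it.
nlmsg_msg_size(payload) is NLMSG_HDRLEN + payload. A skeletal dump handler
using the same idiom (sketch_dump is hypothetical):

    static int sketch_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            struct tunnel_msg *tmsg;

            /* reject truncated requests before touching the payload */
            if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(*tmsg)))
                    return -EINVAL;

            tmsg = nlmsg_data(cb->nlh); /* now safe to dereference */
            return 0;
    }
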
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index f547c22e26ac..58f5143359df 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -732,8 +732,8 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
/**
* framer_provider_simple_of_xlate() - returns the framer instance from framer provider
- * @dev: the framer provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the framer provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by framer provider for the common case where #framer-cells is
* 0. For other cases where #framer-cells is greater than '0', the framer provider
@@ -743,21 +743,14 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct class_dev_iter iter;
- struct framer *framer;
-
- class_dev_iter_init(&iter, &framer_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- framer = dev_to_framer(dev);
- if (args->np != framer->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return framer;
- }
+ target_dev = class_find_device_by_of_node(&framer_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return dev_to_framer(target_dev);
}
EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
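
[Editor's note: not part of the patch] One subtlety in the framer rewrite
above: class_find_device_by_of_node() returns its match with a device
reference held, which the old open-coded loop never took, hence the
immediate put_device() before returning, keeping the refcount behavior
unchanged. Conceptually the helper does something like this sketch (see
drivers/base/class.c for the real implementation):

    static struct device *find_by_of_node_sketch(const struct class *class,
                                                 struct device_node *np)
    {
            struct class_dev_iter iter;
            struct device *dev;

            class_dev_iter_init(&iter, class, NULL, NULL);
            while ((dev = class_dev_iter_next(&iter))) {
                    if (dev->of_node == np) {
                            get_device(dev); /* reference for the caller */
                            break;
                    }
            }
            class_dev_iter_exit(&iter);
            return dev;
    }
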
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index be67382c00f6..c576bbba52bf 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -123,6 +123,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -207,6 +208,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca6390 hw2.0",
@@ -296,6 +298,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "qcn9074 hw1.0",
@@ -379,6 +382,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.0",
@@ -468,6 +472,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.1",
@@ -555,6 +560,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6750 hw1.0",
@@ -637,6 +643,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
+ .pdev_suspend = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -719,6 +726,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca2066 hw2.1",
@@ -809,6 +817,94 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
+ {
+ .name = "qca6698aq hw2.1",
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ .fw = {
+ .dir = "QCA6698AQ/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
};
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
@@ -1669,11 +1765,47 @@ err_pdev_debug:
return ret;
}
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ unsigned long time_left;
+ int ret;
+ int i;
+
+ if (!ab->hw_params.pdev_suspend)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab, "could not suspend target :%d\n", ret);
+ /* pointless to try other pdevs */
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+ if (!time_left) {
+ ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+ /* pointless to try other pdevs */
+ return;
+ }
+ }
+}
+
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
+ ath11k_core_pdev_suspend_target(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);
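
[Editor's note: not part of the patch] ath11k_core_pdev_suspend_target()
above is a standard completion handshake: re-arm the completion, issue the
WMI suspend command, then bound the wait so a silent firmware cannot hang
teardown. The generic shape of the pattern (names here are generic, not the
driver's):

    reinit_completion(&done);       /* forget any stale complete() */

    ret = send_request();           /* response handler calls complete(&done) */
    if (ret)
            return ret;

    if (!wait_for_completion_timeout(&done, 3 * HZ))
            return -ETIMEDOUT;      /* the event never came */
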
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 09c37e19a168..a9dc7fe7765a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -148,6 +148,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
+ ATH11K_HW_QCA6698AQ_HW21,
};
enum ath11k_firmware_mode {
@@ -340,7 +341,6 @@ struct ath11k_chan_power_info {
* @ap_power_type: type of power (SP/LPI/VLP)
* @num_pwr_levels: number of power levels
* @reg_max: Array of maximum TX power (dBm) per PSD value
- * @ap_constraint_power: AP constraint power (dBm)
* @tpe: TPE values processed from TPE IE
* @chan_power_info: power info to send to firmware
*/
@@ -350,7 +350,6 @@ struct ath11k_reg_tpc_power_info {
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
u8 reg_max[ATH11K_NUM_PWR_LEVELS];
- u8 ap_constraint_power;
s8 tpe[ATH11K_NUM_PWR_LEVELS];
struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
@@ -370,7 +369,6 @@ struct ath11k_vif {
struct ath11k *ar;
struct ieee80211_vif *vif;
- u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
union {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 57281a135dd7..bf192529e3fe 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
+ timeout = jiffies + secs_to_jiffies(3);
ath11k_debugfs_fw_stats_reset(ar);
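
[Editor's note: not part of the patch] secs_to_jiffies() is the tree-wide
replacement for msecs_to_jiffies() when the interval is a whole number of
seconds; both expressions below evaluate to 3 * HZ, the new spelling merely
skips the detour through milliseconds:

    /* before */ timeout = jiffies + msecs_to_jiffies(3 * 1000);
    /* after  */ timeout = jiffies + secs_to_jiffies(3);
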
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 65d2bc0687c8..f777314db8b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -165,7 +165,6 @@ struct ath11k_mon_data {
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
- struct sk_buff_head rx_status_q;
};
struct ath11k_pdev_dp {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 40088e62572e..029ecf51c9ef 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW1_BM &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@@ -4690,11 +4691,12 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
}
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
- void *ring_entry, struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu, u32 *npackets,
- u32 *ppdu_id)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -5705,8 +5707,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- skb_queue_head_init(&pmon->rx_status_q);
-
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
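
[Editor's note: not part of the patch] Three hunks in this series (dp_rx.c
here, plus qmi.c and wow.c below) carry the same "clang stack usage explodes
if this is inlined" rationale. noinline_for_stack is a self-documenting
alias for noinline (include/linux/compiler_types.h): it keeps a helper with
large locals out of its caller's stack frame so the caller stays under the
frame-size warning limit. Illustrative use:

    /* functionally identical to noinline; the name documents intent */
    static noinline_for_stack u32 big_frame_helper(void)
    {
            u8 scratch[512]; /* large locals stay in this frame only */

            memset(scratch, 0, sizeof(scratch));
            return sizeof(scratch);
    }
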
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index dc8bbe073017..601542410c75 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -700,7 +700,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
@@ -725,7 +725,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@@ -741,7 +741,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 8f7dd43dc1bd..753bd93f0212 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
- if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
+ ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 300322535766..52d9f4c13b13 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -227,6 +227,7 @@ struct ath11k_hw_params {
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
+ bool pdev_suspend;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e6acbff06749..1556392f7ad4 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1697,8 +1697,6 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
return;
}
- arvif->tx_seq_no = 0x1000;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@@ -2230,7 +2228,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
- * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
+ * VHT mcs rate 10 and 11 is not supported in 11ac standard.
* so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
*/
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@@ -6952,7 +6950,7 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- /* TODO: recal traffic pause state based on the available vdevs */
+ /* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
@@ -9356,6 +9354,7 @@ static int ath11k_fw_stats_request(struct ath11k *ar,
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ unsigned int link_id,
int *dbm)
{
struct ath11k *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6974a551883f..6e45f464a429 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -398,6 +398,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
+ case ATH11K_HW_QCA6698AQ_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..b93f04973ad7 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -846,6 +846,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
+ case 0x001e60e1:
+ ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+ break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index debe7c5919ef..3fe77310c71f 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -126,6 +126,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
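
[Editor's note: not part of the patch] A quick consistency check on the
QCA6698AQ MSI entry above: the per-user ranges must tile total_vectors
exactly. Here MHI takes vectors 0-2, CE 3-12, WAKE 13, DP 14-31, and
3 + 10 + 1 + 18 = 32 = total_vectors. Sketched as a hypothetical validation
loop over the same structure:

    unsigned int i, next = 0;

    for (i = 0; i < config->total_users; i++) {
            if (config->users[i].base_vector != next)
                    return -EINVAL;         /* gap or overlap */
            next += config->users[i].num_vectors;
    }
    if (next != config->total_vectors)
            return -EINVAL;                 /* range not fully tiled */
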
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 7a22483b35cd..5759fc521316 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1704,7 +1704,9 @@ static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
},
};
-static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@@ -2570,7 +2572,9 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
m3_mem->size = 0;
}
-static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 827085a926b2..b6f08755129f 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -148,8 +148,10 @@ static int ath11k_wow_cleanup(struct ath11k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
-static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
- const struct cfg80211_pkt_pattern *old)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index c57322221e1d..0606116d6b9c 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
@@ -22,6 +23,11 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+/* protected by ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -79,11 +85,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
ar = ab->pdevs[i].ar;
if (!ar)
continue;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
/* PM framework skips suspend_late/resume_early callbacks
@@ -593,14 +605,17 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_core_stopped(ab);
+
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_acpi_stop(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_rx_pdev_reo_cleanup(ab);
+ ath12k_dp_free(ab);
/* De-Init of components as needed */
}
@@ -702,7 +717,7 @@ err_qmi_deinit:
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
- ath12k_dp_free(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
@@ -712,30 +727,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
- ret = ath12k_mac_register(ab);
- if (ret) {
- ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- return ret;
- }
-
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ return ret;
}
return 0;
-
-err_mac_unregister:
- ath12k_mac_unregister(ab);
-
- return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
- ath12k_mac_unregister(ab);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
@@ -744,6 +746,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
{
int ret;
+ lockdep_assert_held(&ab->core_lock);
+
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -793,19 +797,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
- ret = ath12k_mac_allocate(ab);
- if (ret) {
- ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
- ret);
- goto err_hif_stop;
- }
-
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
- goto err_mac_destroy;
+ goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@@ -844,12 +841,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_started(ab);
+
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
- ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@@ -857,6 +856,169 @@ err_wmi_detach:
return ret;
}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_core_pdev_destroy(ab);
+
+ mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+ ath12k_mac_unregister(ag);
+
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+ ath12k_core_device_cleanup(ab);
+ }
+
+ ath12k_mac_destroy(ag);
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+ int ret;
+
+ ret = ath12k_wmi_mlo_ready(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+ ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ret = __ath12k_mac_mlo_ready(ar);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+ int ret, i;
+
+ if (!ag->mlo_capable || ag->num_devices == 1)
+ return 0;
+
+ ret = ath12k_mac_mlo_setup(ag);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ag->num_devices; i++)
+ ath12k_dp_partner_cc_init(ag->ab[i]);
+
+ ret = ath12k_mac_mlo_ready(ag);
+ if (ret)
+ goto err_mlo_teardown;
+
+ return 0;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+ return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int ret, i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+ goto core_pdev_create;
+
+ ret = ath12k_mac_allocate(ag);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = ath12k_core_mlo_setup(ag);
+ if (WARN_ON(ret))
+ goto err_mac_destroy;
+
+ ret = ath12k_mac_register(ag);
+ if (WARN_ON(ret))
+ goto err_mlo_teardown;
+
+ set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core %d\n", ret);
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_stop(ag);
+ return ret;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+ ath12k_mac_destroy(ag);
+
+ return ret;
+}
+
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@@ -874,9 +1036,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
+ bool found = false;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+ if (!partner_ab)
+ continue;
+
+ if (found)
+ ath12k_qmi_trigger_host_cap(partner_ab);
+
+ found = (partner_ab == ab);
+ }
+}
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -896,41 +1086,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
+ mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
+
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
- ret = ath12k_core_pdev_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create pdev core: %d\n", ret);
- goto err_core_stop;
- }
- ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
- ret = ath12k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath12k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_pdev_destroy;
+ if (ath12k_core_hw_group_start_ready(ag)) {
+ ret = ath12k_core_hw_group_start(ag);
+ if (ret) {
+ ath12k_warn(ab, "unable to start hw group\n");
+ goto err_core_stop;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+ } else {
+ ath12k_core_trigger_partner(ab);
}
- mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
return 0;
-err_core_pdev_destroy:
- ath12k_core_pdev_destroy(ab);
err_core_stop:
- ath12k_core_stop(ab);
- ath12k_mac_destroy(ab);
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+ goto exit;
+
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
+exit:
+ mutex_unlock(&ag->mutex);
return ret;
}
@@ -972,6 +1173,7 @@ err_hal_srng_deinit:
static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
struct ieee80211_hw *hw;
@@ -982,8 +1184,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -1023,6 +1225,7 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
int i, j;
@@ -1034,8 +1237,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1069,12 +1272,13 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
struct ath12k *ar;
int i, j;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1117,6 +1321,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
int ret, i;
@@ -1127,8 +1332,16 @@ static void ath12k_core_restart(struct work_struct *work)
}
if (ab->is_reset) {
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ab->ag, i);
ieee80211_restart_hw(ah->hw);
}
}
@@ -1142,7 +1355,7 @@ static void ath12k_core_reset(struct work_struct *work)
int reset_count, fail_cont_count;
long time_left;
- if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@@ -1241,38 +1454,430 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
-int ath12k_core_init(struct ath12k_base *ab)
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
- int ret;
+ lockdep_assert_held(&ag->mutex);
- ret = ath12k_core_soc_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int count = 0;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ count++;
+
+ ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ if (!ag)
+ return NULL;
+
+ ag->id = count;
+ list_add(&ag->list, &ath12k_hw_group_list);
+ mutex_init(&ag->mutex);
+ ag->mlo_capable = false;
+
+ return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ list_del(&ag->list);
+ kfree(ag);
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int i;
+
+ if (!ab->dev->of_node)
+ return NULL;
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ for (i = 0; i < ag->num_devices; i++)
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ return ag;
+
+ return NULL;
+}
+
+static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
+ struct device_node *tx_endpoint, *next_rx_endpoint;
+ int device_count = 0;
+
+ next_wsi_dev = wsi_dev;
+
+ if (!next_wsi_dev)
+ return -ENODEV;
+
+ do {
+ ag->wsi_node[device_count] = next_wsi_dev;
+
+ tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+ if (!tx_endpoint) {
+ of_node_put(next_wsi_dev);
+ return -ENODEV;
+ }
+
+ next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+ if (!next_rx_endpoint) {
+ of_node_put(next_wsi_dev);
+ of_node_put(tx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(tx_endpoint);
+ of_node_put(next_wsi_dev);
+
+ next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
+ if (!next_wsi_dev) {
+ of_node_put(next_rx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(next_rx_endpoint);
+
+ device_count++;
+ if (device_count > ATH12K_MAX_SOCS) {
+ ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+ device_count, ATH12K_MAX_SOCS);
+ of_node_put(next_wsi_dev);
+ return -EINVAL;
+ }
+ } while (wsi_dev != next_wsi_dev);
+
+ of_node_put(next_wsi_dev);
+ ag->num_devices = device_count;
+
+ return 0;
+}
+
+static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ int i, wsi_controller_index = -1, node_index = -1;
+ bool control;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
+ if (control)
+ wsi_controller_index = i;
+
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ node_index = i;
+ }
+
+ if (wsi_controller_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
+ return -EINVAL;
+ }
+
+ if (node_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
+ return -EINVAL;
+ }
+
+ ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
+ ag->num_devices;
+
+ return 0;
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
+{
+ struct ath12k_wsi_info *wsi = &ab->wsi_info;
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ /* The grouping of multiple devices is done based on the device tree.
+ * Platforms that do not have any valid group information have each
+ * device placed in its own invalid group.
+ *
+ * We use group id ATH12K_INVALID_GROUP_ID for a single-device group
+ * that has no DT entry or a wrong DT entry; there can be many groups
+ * with the same group id, i.e. ATH12K_INVALID_GROUP_ID. So the
+ * default group id of ATH12K_INVALID_GROUP_ID combined with the
+ * number of devices in the ath12k_hw_group determines whether the
+ * group is a multi-device or a single-device group.
+ */
+
+ ag = ath12k_core_hw_group_find_by_dt(ab);
+ if (!ag) {
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ if (ath12k_core_get_wsi_info(ag, ab) ||
+ ath12k_core_get_wsi_index(ag, ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get wsi info from dt, grouping single device");
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
+ wsi->index = 0;
+ }
+
+ goto exit;
+ } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+ ag->id);
+ goto invalid_group;
+ } else {
+ if (ath12k_core_get_wsi_index(ag, ab))
+ goto invalid_group;
+ goto exit;
+ }
+
+invalid_group:
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ wsi->index = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+exit:
+ if (ag->num_probed >= ag->num_devices) {
+ ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
+ goto invalid_group;
+ }
+
+ ab->device_id = ag->num_probed++;
+ ag->ab[ab->device_id] = ab;
+ ab->ag = ag;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
+ ag->id, ag->num_devices, wsi->index);
+
+ return ag;
+}
+
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ u8 device_id = ab->device_id;
+ int num_probed;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (WARN_ON(device_id >= ag->num_devices)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ if (WARN_ON(ag->ab[device_id] != ab)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ ag->ab[device_id] = NULL;
+ ab->ag = NULL;
+ ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+ if (ag->num_probed)
+ ag->num_probed--;
+
+ num_probed = ag->num_probed;
+
+ mutex_unlock(&ag->mutex);
+
+ if (!num_probed)
+ ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (WARN_ON(!ag))
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_core_soc_destroy(ab);
+ }
+}
+
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
+
+ ath12k_core_hw_group_stop(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ /* If more than one device is grouped, inter-device MLO
+ * functionality can work independent of whether each device
+ * internally supports single_chip_mlo or not.
+ * Only when there is a single device does it depend on whether
+ * the device can support intra-chip MLO or not.
+ */
+ if (ag->num_devices > 1) {
+ ag->mlo_capable = true;
+ } else {
+ ab = ag->ab[0];
+ ag->mlo_capable = ab->single_chip_mlo_supp;
+
+ /* WCN chipsets do not advertise this in firmware features,
+ * hence skip checking.
+ */
+ if (ab->hw_params->def_num_link)
+ return;
}
+ if (!ag->mlo_capable)
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ /* if even one device's firmware reports MLO as unsupported,
+ * mark MLO unsupported for the whole group
+ */
+ if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ ag->mlo_capable = false;
+ return;
+ }
+ }
+}
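
Condensed into a single predicate, the rule implemented above looks roughly like the following sketch (a hypothetical helper that omits the WCN def_num_link early return):

static bool group_mlo_capable(struct ath12k_hw_group *ag)
{
	int i;

	/* a single-device group inherits the chip's intra-chip MLO support */
	if (ag->num_devices == 1 && !ag->ab[0]->single_chip_mlo_supp)
		return false;

	/* any device lacking the firmware feature downgrades the group */
	for (i = 0; i < ag->num_devices; i++) {
		struct ath12k_base *ab = ag->ab[i];

		if (ab && !test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features))
			return false;
	}

	return true;
}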
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
ret = ath12k_core_panic_notifier_register(ab);
if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ return -ENODEV;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
+ return ret;
}
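
Note the locking order in the init path: the global ath12k_hw_group_mutex guards only group lookup and allocation and is released before the per-group mutex is taken, so the two locks never nest. A sketch of the ordering, using the names above:

mutex_lock(&ath12k_hw_group_mutex);	/* find or allocate the group */
ag = ath12k_core_hw_group_assign(ab);
mutex_unlock(&ath12k_hw_group_mutex);

mutex_lock(&ag->mutex);			/* per-group state from here on */
if (ath12k_core_hw_group_create_ready(ag))
	ret = ath12k_core_hw_group_create(ag);
mutex_unlock(&ag->mutex);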
void ath12k_core_deinit(struct ath12k_base *ab)
{
ath12k_core_panic_notifier_unregister(ab);
-
- mutex_lock(&ab->core_lock);
-
- ath12k_core_pdev_destroy(ab);
- ath12k_core_stop(ab);
-
- mutex_unlock(&ab->core_lock);
-
- ath12k_hif_power_down(ab, false);
- ath12k_mac_destroy(ab);
- ath12k_core_soc_destroy(ab);
- ath12k_fw_unmap(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1322,7 +1927,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3bf31ee5b9fa..ee595794a7ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -63,6 +63,14 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_SOCS 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS 256
+#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -115,6 +123,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
+ u8 link_id;
};
struct ath12k_skb_rxcb {
@@ -127,7 +136,7 @@ struct ath12k_skb_rxcb {
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
- u8 mac_id;
+ u8 hw_link_id;
u8 unmapped;
u8 is_frag;
u8 tid;
@@ -208,8 +217,13 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_hw_group_flags {
+ ATH12K_GROUP_FLAG_REGISTERED,
+ ATH12K_GROUP_FLAG_UNREGISTER,
+};
+
enum ath12k_dev_flags {
- ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
ATH12K_FLAG_RAW_MODE,
ATH12K_FLAG_HW_CRYPTO_DISABLED,
@@ -220,6 +234,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
+ ATH12K_FLAG_QMI_FW_READY_COMPLETE,
};
struct ath12k_tx_conf {
@@ -314,10 +329,11 @@ struct ath12k_vif {
bool ps;
struct ath12k_link_vif deflink;
- struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
u16 links_map;
+ u8 last_scan_link;
/* Must be last - ends in a flexible-array member.
*
@@ -365,10 +381,6 @@ struct ath12k_rx_peer_stats {
u64 non_ampdu_msdu_count;
u64 stbc_count;
u64 beamformed_count;
- u64 mcs_count[HAL_RX_MAX_MCS + 1];
- u64 nss_count[HAL_RX_MAX_NSS];
- u64 bw_count[HAL_RX_BW_MAX];
- u64 gi_count[HAL_RX_GI_MAX];
u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
u64 tid_count[IEEE80211_NUM_TIDS + 1];
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
@@ -469,6 +481,9 @@ struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
+ /* link address, similar to ieee80211_link_sta::addr */
+ u8 addr[ETH_ALEN];
+
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
@@ -485,14 +500,26 @@ struct ath12k_link_sta {
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+
+ /* For now the assoc link will be considered primary */
+ bool is_assoc_link;
+
+ /* for firmware use only */
+ u8 link_idx;
};
struct ath12k_sta {
+ struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
struct ath12k_link_sta deflink;
struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link sta created in FW */
u16 links_map;
+ u8 assoc_link_id;
+ u16 ml_peer_id;
+ u8 num_peer;
+
+ enum ieee80211_sta_state state;
};
#define ATH12K_MIN_5G_FREQ 4150
@@ -572,9 +599,10 @@ struct ath12k {
struct delayed_work timeout;
enum ath12k_scan_state state;
bool is_roc;
- int vdev_id;
int roc_freq;
bool roc_notify;
+ struct wiphy_work vdev_clean_wk;
+ struct ath12k_link_vif *arvif;
} scan;
struct {
@@ -657,7 +685,7 @@ struct ath12k {
struct work_struct regd_update_work;
- struct work_struct wmi_mgmt_tx_work;
+ struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath12k_wow wow;
@@ -680,14 +708,18 @@ struct ath12k {
bool monitor_started;
int monitor_vdev_id;
- u32 freq_low;
- u32 freq_high;
+ struct wiphy_radio_freq_range freq_range;
bool nlo_enabled;
+
+ struct completion mlo_setup_done;
+ u32 mlo_setup_status;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ struct device *dev;
+
/* Protect the write operation of the hardware state ath12k_hw::state
* between hardware start<=>reconfigure<=>stop transitions.
*/
@@ -698,6 +730,11 @@ struct ath12k_hw {
u8 num_radio;
+ DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+ /* protected by wiphy_lock() */
+ struct list_head ml_peers;
+
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -732,6 +769,8 @@ struct ath12k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath12k_band_cap band[NUM_NL80211_BANDS];
+ u32 eml_cap;
+ u32 mld_cap;
};
struct mlo_timestamp {
@@ -784,19 +823,51 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
-/**
- * enum ath12k_link_capable_flags - link capable flags
- *
- * Single/Multi link capability information
- *
- * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present within a device.
- * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present across the devices.
+struct ath12k_mlo_memory {
+ struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ int mlo_mem_size;
+ bool init_done;
+};
+
+struct ath12k_hw_link {
+ u8 device_id;
+ u8 pdev_idx;
+};
+
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected with struct ath12k_hw_group::mutex.
*/
-enum ath12k_link_capable_flags {
- ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
- ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+struct ath12k_hw_group {
+ struct list_head list;
+ u8 id;
+ u8 num_devices;
+ u8 num_probed;
+ u8 num_started;
+ unsigned long flags;
+ struct ath12k_base *ab[ATH12K_MAX_SOCS];
+
+ /* protects access to this struct */
+ struct mutex mutex;
+
+ /* Holds information about wiphy (hw) registration.
+ *
+ * In the Multi/Single Link Operation case, all pdevs are registered
+ * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+ * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
+ struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct ath12k_mlo_memory mlo_mem;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+ bool hw_link_id_init_done;
+};
+
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+ u32 index;
+ u32 hw_link_id_base;
};
/* Master structure to hold the hw data which may be used in core module */
@@ -862,15 +933,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
- /* Holds information of wiphy (hw) registration.
- *
- * In Multi/Single Link Operation case, all pdevs are registered as
- * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
- * registered as separate wiphys.
- */
- struct ath12k_hw *ah[MAX_RADIOS];
- u8 num_hw;
-
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -964,12 +1026,8 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* mlo_capable_flags denotes the single/multi link operation
- * capabilities of the Device.
- *
- * See enum ath12k_link_capable_flags
- */
- u8 mlo_capable_flags;
+ /* Denotes whether MLO is possible within the chip */
+ bool single_chip_mlo_supp;
struct completion restart_completed;
@@ -992,6 +1050,9 @@ struct ath12k_base {
struct notifier_block panic_nb;
+ struct ath12k_hw_group *ag;
+ struct ath12k_wsi_info wsi_info;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1022,6 +1083,7 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -1029,6 +1091,8 @@ u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
switch (state) {
@@ -1129,4 +1193,41 @@ static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
#define for_each_ar(ah, ar, index) \
for ((index) = 0; ((index) < (ah)->num_radio && \
((ar) = &(ah)->radio[(index)])); (index)++)
+
+static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx)
+{
+ return ag->ah[idx];
+}
+
+static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx,
+ struct ath12k_hw *ah)
+{
+ ag->ah[idx] = ah;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+ return ab->ag;
+}
+
+static inline void ath12k_core_started(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started++;
+}
+
+static inline void ath12k_core_stopped(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started--;
+}
+
+static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
+ u8 device_id)
+{
+ return ag->ab[device_id];
+}
+
#endif /* _CORE_H_ */
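
A typical consumer of the accessors above walks every populated device slot in a group; a hypothetical sketch, assuming the caller holds ag->mutex:

static void group_for_each_device(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	u8 i;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ath12k_ag_to_ab(ag, i);
		if (!ab)
			continue;	/* slot vacated by hw_group_unassign */

		/* per-device work goes here */
	}
}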
diff --git a/drivers/net/wireless/ath/ath12k/coredump.c b/drivers/net/wireless/ath/ath12k/coredump.c
index 72d675d15e64..ce1beeb54836 100644
--- a/drivers/net/wireless/ath/ath12k/coredump.c
+++ b/drivers/net/wireless/ath/ath12k/coredump.c
@@ -27,6 +27,9 @@ ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type(enum ath12k_qmi_target_m
case CALDB_MEM_REGION_TYPE:
dump_type = FW_CRASH_DUMP_NONE;
break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA;
+ break;
default:
dump_type = FW_CRASH_DUMP_TYPE_MAX;
break;
diff --git a/drivers/net/wireless/ath/ath12k/coredump.h b/drivers/net/wireless/ath/ath12k/coredump.h
index 5d6003b1c12d..13f46a605113 100644
--- a/drivers/net/wireless/ath/ath12k/coredump.h
+++ b/drivers/net/wireless/ath/ath12k/coredump.h
@@ -15,6 +15,7 @@ enum ath12k_fw_crash_dump_type {
FW_CRASH_DUMP_PAGEABLE_DATA,
FW_CRASH_DUMP_M3_DUMP,
FW_CRASH_DUMP_NONE,
+ FW_CRASH_DUMP_MLO_GLOBAL_DATA,
/* keep last */
FW_CRASH_DUMP_TYPE_MAX,
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index fe5a732ba9ec..ff6eaeafa092 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index f7005917362c..90e801136bc6 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -31,7 +31,10 @@ enum ath12k_debug_mask {
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
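
Both wrappers forward to __ath12k_warn() with the matching struct device, so existing ath12k_warn() call sites compile unchanged while hw-scoped code can warn against the group-level device. Hypothetical call sites:

ath12k_warn(ab, "failed to load board data: %d\n", ret);	/* ab->dev */
ath12k_hw_warn(ah, "hw reconfigure pending\n");			/* ah->dev */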
extern unsigned int ath12k_debug_mask;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index c9980c0193d1..41e4ef2ef3af 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -48,6 +48,28 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+ return "996x2";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@@ -88,6 +110,17 @@ static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
}
}
+static const char*
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+ else
+ return "unknown";
+}
+
static void
htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@@ -1562,7 +1595,8 @@ ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ac_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n");
+ ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
+ "\n\n");
stats_req->buf_len = len;
}
@@ -1590,7 +1624,7 @@ ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ax_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
le32_to_cpu(htt_stats_buf->ax_basic_trigger));
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
@@ -2276,9 +2310,9 @@ ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
htt_stats_buf->ul_mumimo_grp_best_grp_size,
ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
- len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1,
- htt_stats_buf->ul_mumimo_grp_best_usrs,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
+ htt_stats_buf->ul_mumimo_grp_best_usrs,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
len += print_array_to_buf(buf, len,
"ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
htt_stats_buf->ul_mumimo_grp_tputs,
@@ -2543,6 +2577,1050 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]));
+ len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]));
+ len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]));
+ len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]));
+ len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+
+ len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1,
+ htt_stats_buf->tx_su_ol_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1,
+ htt_stats_buf->tx_su_ibf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1,
+ htt_stats_buf->tx_su_txbf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" :
+ "half_tx_ol_bw",
+ htt_stats_buf->ol[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" :
+ "half_tx_ibf_bw",
+ htt_stats_buf->ibf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_txbf_bw" :
+ "half_tx_txbf_bw",
+ htt_stats_buf->txbf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,",
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status));
+ len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma));
+ len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
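
Each loop above appends " index:value," entries and then backs up one byte to drop the trailing comma before re-terminating the buffer. A minimal sketch of the idiom (assuming, as the TLV code does, that at least one element is printed):

len += scnprintf(buf + len, buf_len - len, "vals =");
for (i = 0; i < n; i++)
	len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, v[i]);
len--;			/* back up over the trailing ',' */
*(buf + len) = '\0';	/* next scnprintf() resumes from here */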
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed));
+
+ stats_req->buf_len = len;
+}
+
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+ int idx, char *str_buf)
+{
+ u64 page_timestamp;
+ u16 index = 0;
+
+ page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Index - %u ; Page Number - %u ; ",
+ idx, le32_to_cpu(pg_info->page_num));
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Num of pages - %u ; Timestamp - %lluus\n",
+ le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
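
The page timestamp arrives as two little-endian 32-bit halves that ath12k_le32hilo_to_u64() stitches back together; a sketch of what such a helper is assumed to do:

static inline u64 le32hilo_to_u64(__le32 hi, __le32 lo)
{
	u64 hi64 = le32_to_cpu(hi);
	u64 lo64 = le32_to_cpu(lo);

	return (hi64 << 32) | lo64;
}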
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 dword_lock, dword_unlock;
+ int i;
+ u8 *buf = stats_req->buf;
+ u8 pg_locked;
+ u8 pg_unlock;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+ if (tag_len < sizeof(*stat_buf))
+ return;
+
+ dword_lock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+ dword_unlock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+ pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+ pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+ len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+ len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+ dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+ dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1);
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+ le32_to_cpu(htt_stats_buf->false_radar_cnt),
+ le32_to_cpu(htt_stats_buf->fw_run_time));
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+ a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+ len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+ htt_stats_buf->rx_pkt_crc_pass_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+ len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+ htt_stats_buf->per_blk_err_cnt,
+ ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+ htt_stats_buf->rx_ota_err_cnt,
+ ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_mhz));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_phy_mode));
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->chan_flags));
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->prev_reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->xbar_val));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrf_mode));
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_homechan));
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cck_fir_settings));
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cca_thresh));
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ le32_to_cpu(htt_stats_buf->dyn_cca_status));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+ len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+ len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+ le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+ le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+ len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+ le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+ len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->home_max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+ le32_to_cpu(htt_stats_buf->psd_power));
+ len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+ le32_to_cpu(htt_stats_buf->eirp_power));
+ len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+ le32_to_cpu(htt_stats_buf->power_type_6ghz));
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+ htt_stats_buf->max_reg_allowed_power,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+ htt_stats_buf->max_reg_allowed_power_6ghz,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_cfreq",
+ htt_stats_buf->sub_band_cfreq,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_txpower",
+ htt_stats_buf->sub_band_txpower,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+ u64 drop_count;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+ htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+ drop_count);
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 ru_size_cnt = 0;
+ u32 rc_mode, ru_type;
+ u8 *buf = stats_req->buf, i;
+ const char *mode_prefix;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ rc_mode = le32_to_cpu(stats_buf->rc_mode);
+ ru_type = le32_to_cpu(stats_buf->ru_type);
+
+ switch (rc_mode) {
+ case ATH12K_HTT_STATS_RC_MODE_DLSU:
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+ mode_prefix = "su";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+ mode_prefix = "mu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+ mode_prefix = "ofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+ mode_prefix = "ulmu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+ mode_prefix = "ulofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ default:
+ return;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+ ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+ len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len,
+ "non_data_ppdus_%s = ", mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len,
+ "ppdus_ack_failed_%s = ", mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+ }
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+ len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
+ le32_to_cpu(stats_buf->last_probed_bw));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+ le32_to_cpu(stats_buf->last_probed_nss));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+ le32_to_cpu(stats_buf->last_probed_mcs));
+ len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+ stats_buf->probe_cnt,
+ ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+ }
+
+ stats_req->buf_len = len;
+}
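
The TLV above exports raw tried/failed pairs per BW, NSS, MCS and RU bucket and leaves the PER computation to the reader; a hypothetical consumer would derive it as failed over tried:

/* percent PER from one tried/failed counter pair */
static u32 per_percent(u32 tried, u32 failed)
{
	return tried ? failed * 100 / tried : 0;
}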
+
+static void
+ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+ u32 ast_info;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+ mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+ ast_info = le32_to_cpu(htt_stats_buf->info);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n",
+ le32_to_cpu(htt_stats_buf->ast_index));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1));
+
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->sw_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mcast = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO));
+ len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mec = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO));
+ len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO));
+
+ stats_req->buf_len = len;
+}
+
+static const char*
+ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction)
+{
+ switch (direction) {
+ case ATH12K_HTT_STATS_DIRECTION_TX:
+ return "tx";
+ case ATH12K_HTT_STATS_DIRECTION_RX:
+ return "rx";
+ default:
+ return "unknown";
+ }
+}
+
+static const char*
+ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type)
+{
+ switch (ppdu_type) {
+ case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU:
+ return "su";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+ return "dl_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+ return "ul_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+ return "dl_mu_ofdma";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+ return "ul_mu_ofdma";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type)
+{
+ switch (pream_type) {
+ case ATH12K_HTT_STATS_PREAM_OFDM:
+ return "ofdm";
+ case ATH12K_HTT_STATS_PREAM_CCK:
+ return "cck";
+ case ATH12K_HTT_STATS_PREAM_HT:
+ return "ht";
+ case ATH12K_HTT_STATS_PREAM_VHT:
+ return "ac";
+ case ATH12K_HTT_STATS_PREAM_HE:
+ return "ax";
+ case ATH12K_HTT_STATS_PREAM_EHT:
+ return "be";
+ default:
+ return "unknown";
+ }
+}
+
+static void
+ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ const char *direction;
+ const char *ppdu_type;
+ const char *preamble;
+ u32 mac_id__word;
+ u32 subband_limit;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ mac_id__word = le32_to_cpu(stats_buf->mac_id__word);
+ subband_limit = min(le32_to_cpu(stats_buf->subband_cnt),
+ ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT);
+
+ direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction));
+ ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type));
+ preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_last_used_pattern_mask = 0x%08x\n",
+ direction, preamble, ppdu_type,
+ le32_to_cpu(stats_buf->last_used_pattern_mask));
+
+ for (i = 0; i < subband_limit; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n",
+ direction, preamble, ppdu_type, i + 1,
+ le32_to_cpu(stats_buf->num_subbands_used_cnt[i]));
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -2561,7 +3639,6 @@ ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
htt_stats_buf->reset_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
-
time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
htt_stats_buf->disengage_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
@@ -2680,7 +3757,7 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
len += scnprintf(buf + len, buf_len - len, "\n");
len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
htt_stats_buf->be_ofdma_tx_nss,
- ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
"\n");
len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
htt_stats_buf->be_ofdma_tx_bw,
@@ -2696,6 +3773,45 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_trigger_within_bss = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -2869,6 +3985,55 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_DLPAGER_STATS_TAG:
+ ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_STATS_TAG:
+ ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+ ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_TPC_STATS_TAG:
+ ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+ ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PER_RATE_STATS_TAG:
+ ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_AST_ENTRY_TAG:
+ ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_PUNCTURE_STATS_TAG:
+ ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_DMAC_RESET_STATS_TAG:
ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
break;
@@ -2878,6 +4043,10 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index ac86cab234ec..4b59976fbc35 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -135,9 +135,18 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
+ ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
+ ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
+ ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -192,16 +201,33 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
+ HTT_STATS_DLPAGER_STATS_TAG = 120,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+ HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125,
+ HTT_STATS_PER_RATE_STATS_TAG = 128,
HTT_STATS_MU_PPDU_DIST_TAG = 129,
HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
+ HTT_STATS_AST_ENTRY_TAG = 132,
HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137,
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
+ HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147,
+ HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148,
+ HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG = 150,
HTT_STATS_DMAC_RESET_STATS_TAG = 155,
+ HTT_STATS_PHY_TPC_STATS_TAG = 157,
+ HTT_STATS_PDEV_PUNCTURE_STATS_TAG = 158,
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172,
+ HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
HTT_STATS_MAX_TAG,
};
@@ -1054,6 +1080,275 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
+#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_NUM_BW_CNTRS 5
+#define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES 2
+
+struct ath12k_htt_pdev_txrate_txbf_stats_tlv {
+ __le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 txbf_flag_set_mu_mode;
+ __le32 txbf_flag_set_final_status;
+ __le32 txbf_flag_not_set_verified_txbf_mode;
+ __le32 txbf_flag_not_set_disable_p2p_access;
+ __le32 txbf_flag_not_set_max_nss_in_he160;
+ __le32 txbf_flag_not_set_disable_uldlofdma;
+ __le32 txbf_flag_not_set_mcs_threshold_val;
+ __le32 txbf_flag_not_set_final_status;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+ __le32 ax_ofdma_ndpa_queued;
+ __le32 ax_ofdma_ndpa_tried;
+ __le32 ax_ofdma_ndpa_flush;
+ __le32 ax_ofdma_ndpa_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv {
+ __le32 num_elems_ax_ndpa_arr;
+ __le32 arr_elem_size_ax_ndpa;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t {
+ __le32 ax_ofdma_ndp_queued;
+ __le32 ax_ofdma_ndp_tried;
+ __le32 ax_ofdma_ndp_flush;
+ __le32 ax_ofdma_ndp_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv {
+ __le32 num_elems_ax_ndp_arr;
+ __le32 arr_elem_size_ax_ndp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t {
+ __le32 ax_ofdma_brp_queued;
+ __le32 ax_ofdma_brp_tried;
+ __le32 ax_ofdma_brp_flushed;
+ __le32 ax_ofdma_brp_err;
+ __le32 ax_ofdma_num_cbf_rcvd;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv {
+ __le32 num_elems_ax_brp_arr;
+ __le32 arr_elem_size_ax_brp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t {
+ __le32 num_ppdu_steer;
+ __le32 num_ppdu_ol;
+ __le32 num_usr_prefetch;
+ __le32 num_usr_sound;
+ __le32 num_usr_force_sound;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv {
+ __le32 num_elems_ax_steer_arr;
+ __le32 arr_elem_size_ax_steer;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+ __le32 ax_ofdma_rbo_steer_mpdus_tried;
+ __le32 ax_ofdma_rbo_steer_mpdus_failed;
+ __le32 ax_ofdma_sifs_steer_mpdus_tried;
+ __le32 ax_ofdma_sifs_steer_mpdus_failed;
+} __packed;
+
+enum ath12k_htt_stats_page_lock_state {
+ ATH12K_HTT_STATS_PAGE_LOCKED = 0,
+ ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
+ ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX 10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+ __le32 page_num;
+ __le32 num_pgs;
+ __le32 ts_lsb;
+ __le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
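
As a rough sketch (ours, not part of the patch) of how the packed info words above decode: info0 carries the async/sync lock-page counts in its low two bytes, while info1 and info2 each use the same 16/16 split of total locked vs. total free pages. u32_get_bits() with the GENMASK definitions above does the extraction:

    static void dlpager_decode_sketch(const struct ath12k_htt_dl_pager_stats_tlv *tlv)
    {
            u32 info0 = le32_to_cpu(tlv->info0);
            u32 info1 = le32_to_cpu(tlv->info1);
            u32 info2 = le32_to_cpu(tlv->info2);

            pr_info("async_locked_pages = %u sync_locked_pages = %u\n",
                    u32_get_bits(info0, ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0),
                    u32_get_bits(info0, ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
            pr_info("info1: locked = %u free = %u\n",
                    u32_get_bits(info1, ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1),
                    u32_get_bits(info1, ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
            pr_info("info2: locked = %u free = %u\n",
                    u32_get_bits(info2, ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2),
                    u32_get_bits(info2, ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2));
    }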
+
+#define ATH12K_HTT_STATS_MAX_CHAINS 8
+#define ATH12K_HTT_MAX_RX_PKT_CNT 8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
+
+struct ath12k_htt_phy_stats_tlv {
+ a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 false_radar_cnt;
+ __le32 radar_cs_cnt;
+ a_sle32 ani_level;
+ __le32 fw_run_time;
+ a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+ __le32 rx_ofdma_timing_err_cnt;
+ __le32 rx_cck_fail_cnt;
+ __le32 mactx_abort_cnt;
+ __le32 macrx_abort_cnt;
+ __le32 phytx_abort_cnt;
+ __le32 phyrx_abort_cnt;
+ __le32 phyrx_defer_abort_cnt;
+ __le32 rx_gain_adj_lstf_event_cnt;
+ __le32 rx_gain_adj_non_legacy_cnt;
+ __le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+ __le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ __le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+ __le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+ __le32 pdev_id;
+ __le32 chan_mhz;
+ __le32 chan_band_center_freq1;
+ __le32 chan_band_center_freq2;
+ __le32 chan_phy_mode;
+ __le32 chan_flags;
+ __le32 chan_num;
+ __le32 reset_cause;
+ __le32 prev_reset_cause;
+ __le32 phy_warm_reset_src;
+ __le32 rx_gain_tbl_mode;
+ __le32 xbar_val;
+ __le32 force_calibration;
+ __le32 phyrf_mode;
+ __le32 phy_homechan;
+ __le32 phy_tx_ch_mask;
+ __le32 phy_rx_ch_mask;
+ __le32 phybb_ini_mask;
+ __le32 phyrf_ini_mask;
+ __le32 phy_dfs_en_mask;
+ __le32 phy_sscan_en_mask;
+ __le32 phy_synth_sel_mask;
+ __le32 phy_adfs_freq;
+ __le32 cck_fir_settings;
+ __le32 phy_dyn_pri_chan;
+ __le32 cca_thresh;
+ __le32 dyn_cca_status;
+ __le32 rxdesense_thresh_hw;
+ __le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+ __le32 pdev_id;
+ __le32 cf_active_low_fail_cnt;
+ __le32 cf_active_low_pass_cnt;
+ __le32 phy_off_through_vreg_cnt;
+ __le32 force_calibration_cnt;
+ __le32 rf_mode_switch_phy_off_cnt;
+ __le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_power_scale;
+ __le32 tx_power_scale_db;
+ __le32 min_negative_tx_power;
+ __le32 reg_ctl_domain;
+ __le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 twice_max_rd_power;
+ __le32 max_tx_power;
+ __le32 home_max_tx_power;
+ __le32 psd_power;
+ __le32 eirp_power;
+ __le32 power_type_6ghz;
+ __le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+ __le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+ __le32 inv_peers_msdu_drop_count_hi;
+ __le32 inv_peers_msdu_drop_count_lo;
+} __packed;
+
+#define ATH12K_HTT_AST_PDEV_ID_INFO GENMASK(1, 0)
+#define ATH12K_HTT_AST_VDEV_ID_INFO GENMASK(9, 2)
+#define ATH12K_HTT_AST_NEXT_HOP_INFO BIT(10)
+#define ATH12K_HTT_AST_MCAST_INFO BIT(11)
+#define ATH12K_HTT_AST_MONITOR_DIRECT_INFO BIT(12)
+#define ATH12K_HTT_AST_MESH_STA_INFO BIT(13)
+#define ATH12K_HTT_AST_MEC_INFO BIT(14)
+#define ATH12K_HTT_AST_INTRA_BSS_INFO BIT(15)
+
+struct ath12k_htt_ast_entry_tlv {
+ __le32 sw_peer_id;
+ __le32 ast_index;
+ struct htt_mac_addr mac_addr;
+ __le32 info;
+} __packed;
+
+enum ath12k_htt_stats_direction {
+ ATH12K_HTT_STATS_DIRECTION_TX,
+ ATH12K_HTT_STATS_DIRECTION_RX
+};
+
+enum ath12k_htt_stats_ppdu_type {
+ ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA
+};
+
+enum ath12k_htt_stats_param_type {
+ ATH12K_HTT_STATS_PREAM_OFDM,
+ ATH12K_HTT_STATS_PREAM_CCK,
+ ATH12K_HTT_STATS_PREAM_HT,
+ ATH12K_HTT_STATS_PREAM_VHT,
+ ATH12K_HTT_STATS_PREAM_HE,
+ ATH12K_HTT_STATS_PREAM_EHT,
+ ATH12K_HTT_STATS_PREAM_RSVD1,
+ ATH12K_HTT_STATS_PREAM_COUNT,
+};
+
+#define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT 32
+
+struct ath12k_htt_pdev_puncture_stats_tlv {
+ __le32 mac_id__word;
+ __le32 direction;
+ __le32 preamble;
+ __le32 ppdu_type;
+ __le32 subband_cnt;
+ __le32 last_used_pattern_mask;
+ __le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT];
+} __packed;
+
struct ath12k_htt_dmac_reset_stats_tlv {
__le32 reset_count;
__le32 reset_time_lo_ms;
@@ -1085,6 +1380,10 @@ struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
} __packed;
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
+
enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
@@ -1105,7 +1404,65 @@ enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
};
-#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+enum ATH12K_HTT_RC_MODE {
+ ATH12K_HTT_RC_MODE_SU_OL,
+ ATH12K_HTT_RC_MODE_SU_BF,
+ ATH12K_HTT_RC_MODE_MU1_INTF,
+ ATH12K_HTT_RC_MODE_MU2_INTF,
+ ATH12K_HTT_RC_MODE_MU3_INTF,
+ ATH12K_HTT_RC_MODE_MU4_INTF,
+ ATH12K_HTT_RC_MODE_MU5_INTF,
+ ATH12K_HTT_RC_MODE_MU6_INTF,
+ ATH12K_HTT_RC_MODE_MU7_INTF,
+ ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
+};
+
+enum ath12k_htt_stats_rc_mode {
+ ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
+ ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+ ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
+ ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+ ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+ ATH12K_HTT_STATS_RU_TYPE_INVALID,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+ __le32 ppdus_tried;
+ __le32 ppdus_ack_failed;
+ __le32 mpdus_tried;
+ __le32 mpdus_failed;
+} __packed;
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+ __le32 rc_mode;
+ __le32 last_probed_mcs;
+ __le32 last_probed_nss;
+ __le32 last_probed_bw;
+ struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+ struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_bw320;
+ __le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+ __le32 ru_type;
+ struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
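
The trailing ru[] array is sized for the 11be RU-size enum, but how many entries are actually populated depends on ru_type. A sketch (ours) of the bound selection the per-rate printer performs before walking ru[], as seen in the loop near the top of this hunk:

    /* Pick the number of valid ru[] entries from the TLV's ru_type. */
    static u32 tx_per_rate_ru_cnt(const struct ath12k_htt_tx_per_rate_stats_tlv *tlv)
    {
            switch (le32_to_cpu(tlv->ru_type)) {
            case ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY:
                    /* single-RU counters use the 11ax enumeration */
                    return ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
            case ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU:
                    /* single + multi RU counters use the 11be enumeration */
                    return ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
            default:
                    return 0;       /* ATH12K_HTT_STATS_RU_TYPE_INVALID */
            }
    }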
+
#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
@@ -1115,11 +1472,23 @@ struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
__le32 mac_id__word;
__le32 be_ofdma_tx_ldpc;
__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
- __le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
} __packed;
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+ __le32 mac_id__word;
+ __le32 basic_trigger_across_bss;
+ __le32 basic_trigger_within_bss;
+ __le32 bsr_trigger_across_bss;
+ __le32 bsr_trigger_within_bss;
+ __le32 mu_rts_across_bss;
+ __le32 mu_rts_within_bss;
+ __le32 ul_mumimo_trigger_across_bss;
+ __le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c99e9ceb1a6e..9e5a4e75f2f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -41,6 +41,11 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
return;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
peer->dp_setup_done = false;
@@ -977,27 +982,23 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
+ if (!ab->mon_reap_timer.function)
+ return;
+
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
- struct ath12k *ar;
- struct ath12k_pdev_dp *dp;
- int i;
+ struct ath12k_pdev_dp *dp = &ar->dp;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- dp->mac_id = i;
- atomic_set(&dp->num_tx_pending, 0);
- init_waitqueue_head(&dp->tx_empty_waitq);
-
- /* TODO: Add any RXDMA setup required per pdev */
- }
+ dp->mac_id = ar->pdev_idx;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ /* TODO: Add any RXDMA setup required per pdev */
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
@@ -1260,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (!dp->reoq_lut.vaddr)
- return;
-
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ if (dp->reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ }
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ if (dp->ml_reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+ dp->ml_reoq_lut.vaddr = NULL;
+ }
}
void ath12k_dp_free(struct ath12k_base *ab)
@@ -1276,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i;
+ if (!dp->ab)
+ return;
+
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@@ -1293,6 +1305,7 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
+ dp->ab = NULL;
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1432,6 +1445,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+ rx_descs[j].device_id = ab->device_id;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
@@ -1508,6 +1522,19 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
return 0;
}
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ if (ag->ab[i] == ab)
+ continue;
+
+ ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+ }
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1594,8 +1621,23 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->ml_reoq_lut.paddr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dp->ml_reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for ML reoq table\n");
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ return -ENOMEM;
+ }
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+ dp->ml_reoq_lut.paddr >> 8);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 2e05fc19410e..7ac3143de016 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -287,7 +287,8 @@ struct ath12k_rx_desc_info {
u32 cookie;
u32 magic;
u8 in_use : 1,
- reserved : 7;
+ device_id : 3,
+ reserved : 4;
};
struct ath12k_tx_desc_info {
@@ -368,6 +369,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
+ struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@@ -694,9 +696,9 @@ enum htt_stats_internal_ppdu_frametype {
*
* The message would appear as follows:
*
- * |31 26|25|24|23 16|15 8|7 0|
- * |-----------------+----------------+----------------+---------------|
- * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |31 29|28|27|26|25|24|23 16|15 8|7 0|
+ * |-------+--+--+--+--+--+-----------+----------------+---------------|
+ * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type |
* |-------------------------------------------------------------------|
* | rsvd2 | ring_buffer_size |
* |-------------------------------------------------------------------|
@@ -723,7 +725,13 @@ enum htt_stats_internal_ppdu_frametype {
* More details can be got from enum htt_srng_ring_id
* b'24 - status_swap: 1 is to swap status TLV
* b'25 - pkt_swap: 1 is to swap packet TLV
- * b'26:31 - rsvd1: reserved for future use
+ * b'26 - rx_offset_valid (OV): flag to indicate rx offsets
+ * configuration fields are valid
+ * b'27 - drop_thresh_valid (DT): flag to indicate if the
+ * rx_drop_threshold field is valid
+ * b'28 - rx_mon_global_en: Enable/Disable global register
+ * configuration in Rx monitor module.
+ * b'29:31 - rsvd1: reserved for future use
* dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring,
* in byte units.
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING
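
A sketch (ours, with masks written out from the bit positions documented above rather than the driver's named defines) of composing dword0 with the two new validity flags set:

    static u32 rx_ring_sel_cfg_dword0(u8 msg_type, u8 pdev_id, u8 ring_id)
    {
            u32 word = 0;

            word |= u32_encode_bits(msg_type, GENMASK(7, 0));       /* msg_type */
            word |= u32_encode_bits(pdev_id, GENMASK(15, 8));       /* pdev_id */
            word |= u32_encode_bits(ring_id, GENMASK(23, 16));      /* ring_id */
            word |= u32_encode_bits(1, BIT(24));    /* SS: swap status TLV */
            word |= u32_encode_bits(1, BIT(25));    /* PS: swap packet TLV */
            word |= u32_encode_bits(1, BIT(26));    /* OV: rx offsets valid */
            word |= u32_encode_bits(1, BIT(27));    /* DT: drop threshold valid */
            /* b'28 (rx_mon_global_en) and b'29:31 (rsvd1) left clear */

            return word;
    }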
@@ -1790,6 +1798,18 @@ enum vdev_stats_offload_timer_duration {
ATH12K_STATS_TIMER_DUR_2SEC = 3,
};
+#define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ __le32 mac_addr_l32;
+ __le32 mac_addr_h16;
+} __packed;
+
static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
{
memcpy(addr, &addr_l32, 4);
@@ -1804,8 +1824,9 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type);
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 494984133a91..5a21961cfd46 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -10,11 +10,10 @@
#include "dp_tx.h"
#include "peer.h"
-static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
- struct hal_rx_user_status *rx_user_status)
+static void
+ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
+ struct hal_rx_user_status *rx_user_status)
{
- struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
-
rx_user_status->ul_ofdma_user_v0_word0 =
__le32_to_cpu(ppdu_end_user->usr_resp_ref);
rx_user_status->ul_ofdma_user_v0_word1 =
@@ -35,7 +34,7 @@ ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *st
}
static void
-ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
@@ -73,11 +72,9 @@ ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
-static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_vht_sig_a_info *vht_sig =
- (struct hal_rx_vht_sig_a_info *)tlv_data;
u32 nsts, group_id, info0, info1;
u8 gi_setting;
@@ -119,11 +116,9 @@ static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
}
-static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_ht_sig_info *ht_sig =
- (struct hal_rx_ht_sig_info *)tlv_data;
u32 info0 = __le32_to_cpu(ht_sig->info0);
u32 info1 = __le32_to_cpu(ht_sig->info1);
@@ -136,11 +131,9 @@ static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_b_info *lsigb =
- (struct hal_rx_lsig_b_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsigb->info0);
u8 rate;
@@ -170,11 +163,9 @@ static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_a_info *lsiga =
- (struct hal_rx_lsig_a_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsiga->info0);
u8 rate;
@@ -212,14 +203,13 @@ static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
- (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
u32 info0, value;
- info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ info0 = __le32_to_cpu(ofdma->info0);
ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
@@ -250,11 +240,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
-static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
- (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
u32 info0, value;
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
@@ -277,11 +266,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
}
-static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
- (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
u16 ru_tones;
@@ -292,11 +280,10 @@ static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
- (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
u32 info0, info1, value;
u16 he_gi = 0, he_ltf = 0;
@@ -427,11 +414,9 @@ static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_su_info *he_sig_a =
- (struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 info0, info1, value;
u32 dcm;
u8 he_dcm = 0, he_stbc = 0;
@@ -580,15 +565,15 @@ static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u32 tlv_tag, u8 *tlv_data, u32 userid)
+ u32 tlv_tag, const void *tlv_data,
+ u32 userid)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
u32 info[7];
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
- struct hal_rx_ppdu_start *ppdu_start =
- (struct hal_rx_ppdu_start *)tlv_data;
+ const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
ppdu_start->ppdu_start_ts_31_0);
@@ -615,8 +600,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS: {
- struct hal_rx_ppdu_end_user_stats *eu_stats =
- (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
+ u32 tid_bitmap;
info[0] = __le32_to_cpu(eu_stats->info0);
info[1] = __le32_to_cpu(eu_stats->info1);
@@ -629,10 +614,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
ppdu_info->fc_valid =
u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
- ppdu_info->tid =
- ffs(u32_get_bits(info[6],
- HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
- - 1);
+ tid_bitmap = u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
+ ppdu_info->tid = ffs(tid_bitmap) - 1;
ppdu_info->tcp_msdu_count =
u32_get_bits(info[4],
HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
@@ -673,8 +657,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
&ppdu_info->userstats[userid];
ppdu_info->num_users += 1;
- ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
- ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
+ ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
rxuser_stats);
}
ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
@@ -682,8 +666,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
- struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
- (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
+
ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
@@ -729,8 +713,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_PHYRX_RSSI_LEGACY: {
- struct hal_rx_phyrx_rssi_legacy_info *rssi =
- (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+ const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
info[1] = __le32_to_cpu(rssi->info1);
@@ -748,8 +731,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
- struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
- (struct hal_rx_ppdu_end_duration *)tlv_data;
+ const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
ppdu_info->rx_duration =
@@ -760,8 +742,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
- struct hal_rx_mpdu_start *mpdu_start =
- (struct hal_rx_mpdu_start *)tlv_data;
+ const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
@@ -790,8 +771,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_MON_BUF_ADDR: {
struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
+ const struct dp_mon_packet_info *packet_info = tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
DP_RXDMA_BUF_COOKIE_BUF_ID);
struct sk_buff *msdu;
@@ -823,8 +803,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MSDU_END: {
- struct rx_msdu_end_qcn9274 *msdu_end =
- (struct rx_msdu_end_qcn9274 *)tlv_data;
+ const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data;
bool is_first_msdu_in_mpdu;
u16 msdu_end_info;
@@ -1093,8 +1072,14 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
- if (peer && peer->sta)
+ if (peer && peer->sta) {
pubsta = peer->sta;
+ if (pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+ }
+
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
@@ -1199,19 +1184,19 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
struct sk_buff *skb)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- struct hal_tlv_hdr *tlv;
+ struct hal_tlv_64_hdr *tlv;
enum hal_rx_mon_status hal_status;
- u32 tlv_userid = 0;
+ u32 tlv_userid;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
do {
- tlv = (struct hal_tlv_hdr *)ptr;
- tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
- tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
- tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+ tlv = (struct hal_tlv_64_hdr *)ptr;
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1226,7 +1211,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
- ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
@@ -1603,7 +1588,7 @@ ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
static enum dp_mon_tx_tlv_status
ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u16 tlv_tag, u8 *tlv_data, u32 userid)
+ u16 tlv_tag, const void *tlv_data, u32 userid)
{
struct dp_mon_tx_ppdu_info *tx_ppdu_info;
enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
@@ -1613,8 +1598,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
switch (tlv_tag) {
case HAL_TX_FES_SETUP: {
- struct hal_tx_fes_setup *tx_fes_setup =
- (struct hal_tx_fes_setup *)tlv_data;
+ const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
info[0] = __le32_to_cpu(tx_fes_setup->info0);
tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
@@ -1625,8 +1609,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_END: {
- struct hal_tx_fes_status_end *tx_fes_status_end =
- (struct hal_tx_fes_status_end *)tlv_data;
+ const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
u32 tst_15_0, tst_31_16;
info[0] = __le32_to_cpu(tx_fes_status_end->info0);
@@ -1643,8 +1626,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_RX_RESPONSE_REQUIRED_INFO: {
- struct hal_rx_resp_req_info *rx_resp_req_info =
- (struct hal_rx_resp_req_info *)tlv_data;
+ const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1689,8 +1671,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_PCU_PPDU_SETUP_INIT: {
- struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
- (struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+ const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1736,8 +1717,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_QUEUE_EXTENSION: {
- struct hal_tx_queue_exten *tx_q_exten =
- (struct hal_tx_queue_exten *)tlv_data;
+ const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
info[0] = __le32_to_cpu(tx_q_exten->info0);
@@ -1749,8 +1729,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_START: {
- struct hal_tx_fes_status_start *tx_fes_start =
- (struct hal_tx_fes_status_start *)tlv_data;
+ const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
info[0] = __le32_to_cpu(tx_fes_start->info0);
@@ -1761,8 +1740,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_PROT: {
- struct hal_tx_fes_status_prot *tx_fes_status =
- (struct hal_tx_fes_status_prot *)tlv_data;
+ const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
u32 start_timestamp;
u32 end_timestamp;
@@ -1789,8 +1767,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
case HAL_TX_FES_STATUS_START_PPDU:
case HAL_TX_FES_STATUS_START_PROT: {
- struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
- (struct hal_tx_fes_status_start_prot *)tlv_data;
+ const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
u64 ppdu_ts;
info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
@@ -1805,8 +1782,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_USER_PPDU: {
- struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
- (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+ const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
@@ -1849,8 +1825,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_RX_FRAME_BITMAP_ACK: {
- struct hal_rx_frame_bitmap_ack *fbm_ack =
- (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1868,8 +1843,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_MACTX_PHY_DESC: {
- struct hal_tx_phy_desc *tx_phy_desc =
- (struct hal_tx_phy_desc *)tlv_data;
+ const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
info[0] = __le32_to_cpu(tx_phy_desc->info0);
info[1] = __le32_to_cpu(tx_phy_desc->info1);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 9ae579e50557..dad35bfd83f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -740,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
@@ -761,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -802,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
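
Both qref helpers above share the same slot arithmetic: each LUT is a flat array of struct ath12k_reo_queue_ref with (IEEE80211_NUM_TIDS + 1) consecutive entries per peer, and ML peer ids, flagged with ATH12K_PEER_ML_ID_VALID, index the new ML LUT instead. A sketch (the helper name is ours) of the shared lookup:

    static struct ath12k_reo_queue_ref *
    reoq_lut_slot_sketch(struct ath12k_dp *dp, u16 peer_id, u8 tid)
    {
            struct ath12k_reo_queue_ref *base;

            if (peer_id & ATH12K_PEER_ML_ID_VALID) {
                    peer_id &= ~ATH12K_PEER_ML_ID_VALID;
                    base = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr;
            } else {
                    base = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr;
            }

            return base + peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
    }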
@@ -940,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -1021,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
@@ -1038,15 +1065,25 @@ err_mem_free:
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
int ret;
- ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->ahsta->pn_type);
if (ret)
@@ -1056,19 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
bool active;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1650,7 +1697,11 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ /* It is possible that the ar is not yet active (started).
+ * The above function will only look for the active pdev
+ * and hence a %NULL return is possible. Silently discard
+ * this message.
+ */
goto exit;
}
@@ -2427,6 +2478,11 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
pubsta = peer ? peer->sta : NULL;
+ if (pubsta && pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
@@ -2548,11 +2604,14 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct sk_buff_head *msdu_list,
int ring_id)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ieee80211_rx_status rx_status = {0};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
- u8 mac_id, pdev_id;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
@@ -2562,15 +2621,18 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
- if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
+ hw_link_id = rxcb->hw_link_id;
+ partner_ab = ath12k_ag_to_ab(ag,
+ hw_links[hw_link_id].device_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+ if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -2615,23 +2677,29 @@ static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
- LIST_HEAD(rx_desc_used_list);
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
- int num_buffs_reaped = 0;
+ struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
+ u8 hw_link_id, device_id;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
- int mac_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@@ -2648,18 +2716,29 @@ try_again:
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
- mac_id = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ hw_link_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ if (desc_info->skb) {
+ dev_kfree_skb_any(desc_info->skb);
+ desc_info->skb = NULL;
+ }
+
+ continue;
+ }
+
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
cookie);
continue;
}
@@ -2671,14 +2750,14 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- list_add_tail(&desc_info->list, &rx_desc_used_list);
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
@@ -2698,7 +2777,7 @@ try_again:
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
mpdu_info->peer_meta_data);
rxcb->tid = le32_get_bits(mpdu_info->info0,
@@ -2735,8 +2814,17 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
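
Note on this hunk: with hardware groups, buffers reaped from one REO ring may belong to different partner SoCs, so the function now keeps one used-descriptor list and one counter per device and replenishes each device's own refill ring. A minimal userspace sketch of that bookkeeping, assuming made-up names (MAX_SOCS, struct dev_ctx) rather than the driver's real types:

#include <stdio.h>

#define MAX_SOCS 4

struct dev_ctx {
	int refill_ring_level;	/* stand-in for rx_refill_buf_ring */
};

int main(void)
{
	struct dev_ctx socs[MAX_SOCS] = {};
	int num_buffs_reaped[MAX_SOCS] = {};	/* one counter per device, as in the patch */
	int reaped_from[] = { 0, 0, 2, 0, 2, 1 };	/* device_id of each reaped buffer */
	int device_id;
	size_t i;

	for (i = 0; i < sizeof(reaped_from) / sizeof(reaped_from[0]); i++)
		num_buffs_reaped[reaped_from[i]]++;

	/* replenish each partner device's own ring, skipping idle devices */
	for (device_id = 0; device_id < MAX_SOCS; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;
		socs[device_id].refill_ring_level += num_buffs_reaped[device_id];
		printf("device %d: replenished %d buffers\n",
		       device_id, num_buffs_reaped[device_id]);
	}
	return 0;
}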
@@ -2781,6 +2869,12 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
return -ENOENT;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return 0;
+ }
+
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@@ -3390,7 +3484,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
goto exit;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
@@ -3420,7 +3514,10 @@ exit:
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3428,11 +3525,11 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
- LIST_HEAD(rx_desc_used_list);
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
- struct ath12k_dp *dp;
- int mac_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
@@ -3442,9 +3539,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
- dp = &ab->dp;
- reo_except = &dp->reo_except_ring;
- link_desc_banks = dp->link_desc_banks;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ reo_except = &ab->dp.reo_except_ring;
srng = &ab->hal.srng_list[reo_except->ring_id];
@@ -3464,16 +3562,27 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
ret);
continue;
}
+
+ hw_link_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+
+ link_desc_banks = partner_ab->dp.link_desc_banks;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != dp->idle_link_rbm &&
+ if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+ rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3483,26 +3592,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
/* Process only rx fragments with one msdu per link desc below, and drop
* msdu's indicated due to error reasons.
+ * Dynamic fragmentation is not supported in multi-link clients, so drop
+ * the partner device buffers.
*/
- if (!is_frag || num_msdus > 1) {
+ if (!is_frag || num_msdus > 1 ||
+ partner_ab->device_id != ab->device_id) {
drop = true;
+
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
- mac_id = le32_get_bits(reo_desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
-
if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
- &rx_desc_used_list,
+ &rx_desc_used_list[device_id],
drop,
- msdu_cookies[i]))
+ msdu_cookies[i])) {
+ num_buffs_reaped[device_id]++;
tot_n_bufs_reaped++;
+ }
}
if (tot_n_bufs_reaped >= quota) {
@@ -3518,10 +3627,17 @@ exit:
spin_unlock_bh(&srng->lock);
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- tot_n_bufs_reaped);
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
return tot_n_bufs_reaped;
}
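
The error path above follows the same translation as the data path: the SRC_LINK_ID field now yields a hw_link_id, which the group's hw_links table maps to a (device_id, pdev_idx) pair, and the partner base may be absent. A sketch of that lookup, with an illustrative table layout and a present[] array standing in for ath12k_ag_to_ab():

#include <stdio.h>

#define GROUP_MAX_RADIO 6
#define MAX_SOCS 4

struct hw_link {
	unsigned char device_id;
	unsigned char pdev_idx;
};

int main(void)
{
	/* one entry per link in the hardware group */
	struct hw_link hw_links[GROUP_MAX_RADIO] = {
		{ .device_id = 0, .pdev_idx = 0 },
		{ .device_id = 1, .pdev_idx = 0 },
		{ .device_id = 1, .pdev_idx = 1 },
	};
	int present[MAX_SOCS] = { 1, 1, 0, 0 };	/* stand-in for ath12k_ag_to_ab() */
	unsigned char hw_link_id = 2;	/* as extracted from the REO descriptor */
	unsigned char device_id = hw_links[hw_link_id].device_id;

	if (!present[device_id]) {
		fprintf(stderr, "partner device %u absent, drop buffer\n", device_id);
		return 1;
	}
	printf("hw link %u -> device %u pdev %u\n",
	       hw_link_id, device_id, hw_links[hw_link_id].pdev_idx);
	return 0;
}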
@@ -3738,7 +3854,8 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
- LIST_HEAD(rx_desc_used_list);
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
@@ -3748,17 +3865,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- u8 mac_id;
- int num_buffs_reaped = 0;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
int ret, pdev_id;
struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
- rx_ring = &dp->rx_refill_buf_ring;
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3794,14 +3916,27 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- list_add_tail(&desc_info->list, &rx_desc_used_list);
+ device_id = desc_info->device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set
+ * in the previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
+ total_num_buffs_reaped++;
if (!err_info.continuation)
budget--;
@@ -3825,9 +3960,9 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- msdu_data);
- if (mac_id >= MAX_RADIOS) {
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
+ msdu_data);
+ if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
dev_kfree_skb_any(msdu);
/* In any case continuation bit is set
@@ -3842,7 +3977,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
skb_queue_walk(&scatter_msdu_list, msdu) {
rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
}
skb_queue_splice_tail_init(&scatter_msdu_list,
@@ -3850,7 +3985,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rxcb = ATH12K_SKB_RXCB(msdu);
- rxcb->mac_id = mac_id;
+ rxcb->hw_link_id = hw_link_id;
__skb_queue_tail(&msdu_list, msdu);
}
@@ -3863,26 +3998,46 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
- num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
+ hw_link_id = rxcb->hw_link_id;
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+ hw_link_id, device_id);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -3890,7 +4045,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rcu_read_unlock();
done:
- return num_buffs_reaped;
+ return total_num_buffs_reaped;
}
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
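
One subtlety in the WBM hunks above: a record that fails validation mid-MSDU chain must drop the continuation fragments already gathered, which is what the new ath12k_dp_clean_up_skb_list() calls do. A userspace analogy of that accumulate-or-flush logic (plain structs instead of skb queues):

#include <stdio.h>

struct rec {
	int continuation;	/* more fragments follow */
	int bad;		/* e.g. absent partner device or bad link id */
};

int main(void)
{
	struct rec recs[] = {
		{ 1, 0 }, { 1, 0 }, { 1, 0 }, { 0, 1 },	/* chain ends in error */
		{ 1, 0 }, { 0, 0 },			/* clean two-fragment chain */
	};
	int pending = 0;	/* fragments held in the scatter list */
	size_t i;

	for (i = 0; i < sizeof(recs) / sizeof(recs[0]); i++) {
		if (recs[i].bad) {
			/* mirrors the clean-up call: drop the partial chain too */
			printf("error: dropping record and %d pending fragment(s)\n",
			       pending);
			pending = 0;
			continue;
		}
		if (recs[i].continuation) {
			pending++;	/* queue onto the scatter list */
			continue;
		}
		printf("delivering msdu assembled from %d fragment(s)\n",
		       pending + 1);
		pending = 0;
	}
	return 0;
}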
@@ -3912,7 +4067,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+ tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index bfd4f814553e..1ce82088c954 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -85,9 +85,11 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 3ff041f15fa0..273c003eff3b 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -23,6 +23,9 @@ enum ath12k_fw_features {
*/
ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+ /* The firmware supports MLO capability */
+ ATH12K_FW_FEATURE_MLO,
+
/* keep last */
ATH12K_FW_FEATURE_COUNT,
};
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index fd98fac16dd5..cd59ff8e6c7b 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -181,7 +181,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_TX_MONITOR_BUF] = {
- .start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 8a78bb9a10bc..94e2e8735958 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -485,8 +485,8 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
- HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_PMAC1_ID_END,
};
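
The hal.h hunk works because enumerator order is effectively ABI here: swapping the two TXMON entries shifts their implicit values, presumably realigning HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0 with the ring numbering the hardware expects, and the hal.c hunk then uses that ring-ID enumerator in the template. A toy demonstration that reordering changes the values:

#include <stdio.h>

enum before { B_DIR_BUF = 100, B_SW2TXMON, B_TXMON2SW };	/* 101, 102 */
enum after  { A_DIR_BUF = 100, A_TXMON2SW, A_SW2TXMON };	/* 101, 102 */

int main(void)
{
	printf("SW2TXMON before: %d, after: %d\n", B_SW2TXMON, A_SW2TXMON);
	return 0;
}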
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 739f73370015..7b0403d245e5 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -522,7 +522,7 @@ enum hal_tlv_tag {
HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */,
HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */,
HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */,
- HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */,
+ HAL_PHYRX_GENERIC_EHT_SIG = 471 /* 0x1d7 */,
HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */,
HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */,
HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */,
@@ -579,9 +579,11 @@ struct hal_tlv_hdr {
#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+#define HAL_TLV_64_USR_ID GENMASK(31, 26)
+#define HAL_TLV_64_ALIGN 8
struct hal_tlv_64_hdr {
- u64 tl;
+ __le64 tl;
u8 value[];
} __packed;
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index f7c1aaa3b5d4..ac17d6223fa7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_get_queue_stats *desc;
- tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
@@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
- tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
@@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_update_rx_queue *desc;
- tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
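
Since hal_tlv_64_hdr.tl is now __le64, the REO command builders above must pack their fields with le64_encode_bits() rather than u32_encode_bits(). A sketch of what that encoding does, with a helper imitating FIELD_PREP semantics and the mask values taken from the HAL_TLV_64_HDR_* definitions; the tag value 0x26 is only a placeholder, not the real HAL_REO_GET_QUEUE_STATS value:

#include <stdio.h>
#include <stdint.h>

#define TLV_TAG_MASK 0x00000000000003feULL	/* bits 9:1, like HAL_TLV_64_HDR_TAG */
#define TLV_LEN_MASK 0x00000000003ffc00ULL	/* bits 21:10, like HAL_TLV_64_HDR_LEN */

static uint64_t encode_bits(uint64_t val, uint64_t mask)
{
	/* shift val to the mask's lowest set bit, as FIELD_PREP does */
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	uint64_t tl = encode_bits(0x26, TLV_TAG_MASK) |	/* placeholder tag */
		      encode_bits(64, TLV_LEN_MASK);	/* descriptor length */
	unsigned char wire[8];
	int i;

	for (i = 0; i < 8; i++)	/* explicit little-endian byte order */
		wire[i] = tl >> (8 * i);

	printf("tl = 0x%016llx, first byte on wire = 0x%02x\n",
	       (unsigned long long)tl, wire[0]);
	return 0;
}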
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 2de7b0eba9f2..b08aa2e79f41 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -19,7 +19,7 @@ struct hal_rx_wbm_rel_info {
bool hw_cc_done;
};
-#define HAL_INVALID_PEERID 0xffff
+#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
@@ -245,6 +245,8 @@ struct hal_rx_ppdu_start {
__le32 rsvd[2];
} __packed;
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID GENMASK(13, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID GENMASK(15, 14)
#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0)
@@ -299,6 +301,7 @@ struct hal_rx_ppdu_end_user_stats_ext {
__le32 info4;
__le32 info5;
__le32 info6;
+ __le32 rsvd;
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
@@ -395,11 +398,9 @@ struct hal_rx_he_sig_a_su_info {
#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15)
@@ -425,7 +426,7 @@ struct hal_rx_he_sig_b2_mu_info {
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
-#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
@@ -453,7 +454,8 @@ struct hal_rx_phyrx_rssi_legacy_info {
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(29, 16)
+#define HAL_RX_MPDU_START_INFO1_DEVICE_ID GENMASK(31, 30)
#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_start {
__le32 rsvd0[9];
@@ -468,7 +470,7 @@ struct hal_rx_mpdu_start {
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
__le32 info0;
- __le32 rsvd1[4];
+ __le32 rsvd1[18];
} __packed;
struct hal_rx_rxpcu_classification_overview {
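
The narrowed peer-id masks above reflect the new layout: 14 bits of peer id plus a 2-bit device id now share what used to be a 16-bit peer id field, which is why HAL_INVALID_PEERID drops from 0xffff to 0x3fff. A sketch of extracting both fields, with masks mirroring the GENMASK() definitions and an illustrative FIELD_GET-style helper:

#include <stdio.h>
#include <stdint.h>

#define PEER_ID_MASK   0x3fffu		/* GENMASK(13, 0) */
#define DEVICE_ID_MASK 0xc000u		/* GENMASK(15, 14) */
#define INVALID_PEERID 0x3fffu		/* was 0xffff before the split */

static unsigned int get_bits(uint32_t v, uint32_t mask)
{
	return (v & mask) / (mask & -mask);	/* like FIELD_GET */
}

int main(void)
{
	uint32_t info = 0x8123;	/* device 2, peer 0x0123 */
	unsigned int peer_id = get_bits(info, PEER_ID_MASK);
	unsigned int device_id = get_bits(info, DEVICE_ID_MASK);

	if (peer_id == INVALID_PEERID)
		return 1;
	printf("peer %#x on device %u\n", peer_id, device_id);
	return 0;
}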
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index d493ec812055..2d062b5904a8 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
+#include <net/cfg80211.h>
#include <linux/etherdevice.h>
#include "mac.h"
@@ -501,6 +502,41 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
+static struct ieee80211_bss_conf *
+ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ vif->link_conf[arvif->link_id]);
+
+ return link_conf;
+}
+
+static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta)
+{
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+
+ lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy);
+
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy,
+ sta->link[arsta->link_id]);
+
+ return link_sta;
+}
+
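
Both helpers follow the same pattern: assert the wiphy lock, bounds-check the link id, then resolve the RCU-published per-link pointer with wiphy_dereference(), returning NULL so callers can bail out (typically with -ENOLINK). A userspace sketch of that guard, where the array stands in for vif->link_conf[] / sta->link[] and the flag stands in for holding the wiphy lock:

#include <assert.h>
#include <stdio.h>

#define MAX_NUM_LINKS 15

struct link_conf { int link_id; };

static struct link_conf *get_link_conf(struct link_conf *links[],
				       unsigned int link_id, int wiphy_locked)
{
	assert(wiphy_locked);		/* lockdep_assert_wiphy() analogue */
	if (link_id >= MAX_NUM_LINKS)
		return NULL;		/* caller turns this into -ENOLINK */
	return links[link_id];		/* wiphy_dereference() analogue */
}

int main(void)
{
	static struct link_conf conf1 = { .link_id = 1 };
	struct link_conf *links[MAX_NUM_LINKS] = { [1] = &conf1 };
	struct link_conf *lc = get_link_conf(links, 1, 1);

	printf("link 1 conf %s\n", lc ? "found" : "missing");
	return 0;
}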
static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -648,6 +684,18 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (ahvif->vif->valid_links & BIT(arvif->link_id))
+ return true;
+
+ return false;
+}
+
static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
struct ieee80211_channel *channel)
{
@@ -661,8 +709,8 @@ static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
return ar;
for_each_ar(ah, ar, i) {
- if (channel->center_freq >= ar->freq_low &&
- channel->center_freq <= ar->freq_high)
+ if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+ channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
return ar;
}
return NULL;
@@ -678,11 +726,14 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
}
static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u8 link_id)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(hw->wiphy);
/* If there is one pdev within ah, then we return
* ar directly.
@@ -690,12 +741,27 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
if (ah->num_radio == 1)
return ah->radio;
- if (arvif->is_created)
+ if (!(ahvif->links_map & BIT(link_id)))
+ return NULL;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (arvif && arvif->is_created)
return arvif->ar;
return NULL;
}
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct ath12k_mac_get_any_chanctx_conf_arg *arg = data;
+ struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf);
+
+ if (ctx_ar == arg->ar)
+ arg->chanctx_conf = conf;
+}
+
static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
{
struct ath12k_link_vif *arvif;
@@ -1239,7 +1305,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ath12k *ar = arvif->ar;
@@ -1269,8 +1335,8 @@ static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
@@ -1486,7 +1552,7 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf;
struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
struct ieee80211_ema_beacons *beacons;
struct ath12k_link_vif *tx_arvif;
@@ -1495,10 +1561,19 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
int ret = 0;
u8 i;
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
- tx_ahvif->vif, 0);
+ tx_ahvif->vif,
+ tx_arvif->link_id);
if (!beacons || !beacons->cnt) {
ath12k_warn(arvif->ar->ab,
"failed to get ema beacon templates from mac80211\n");
@@ -1540,6 +1615,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
@@ -1552,18 +1628,25 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
if (vif->mbssid_tx_vif) {
tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
if (tx_arvif != arvif && arvif->is_up)
return 0;
- if (vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
return ath12k_mac_setup_bcn_tmpl_ema(arvif);
}
bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
- &offs, 0);
+ &offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1573,7 +1656,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
} else {
ath12k_mac_set_arvif_ies(arvif, bcn,
- ahvif->vif->bss_conf.bssid_index,
+ link_conf->bssid_index,
&nontx_profile_found);
if (!nontx_profile_found)
ath12k_warn(ab,
@@ -1644,7 +1727,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
ahvif->aid = 0;
- ether_addr_copy(arvif->bssid, info->bssid);
+ ether_addr_copy(arvif->bssid, info->addr);
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
@@ -1749,6 +1832,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_bss_conf *bss_conf;
u32 aid;
lockdep_assert_wiphy(hw->wiphy);
@@ -1758,14 +1842,22 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
else
aid = sta->aid;
- ether_addr_copy(arg->peer_mac, sta->addr);
+ ether_addr_copy(arg->peer_mac, arsta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
- arg->peer_caps = vif->bss_conf.assoc_capability;
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ arg->peer_caps = bss_conf->assoc_capability;
}
static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
@@ -1775,7 +1867,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct ieee80211_bss_conf *info;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -1784,6 +1876,13 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
@@ -1839,6 +1938,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
@@ -1853,9 +1953,16 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
band = def.chan->band;
sband = hw->wiphy->bands[band];
- ratemask = sta->deflink.supp_rates[band];
+ ratemask = link_sta->supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1902,7 +2009,8 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -1915,6 +2023,14 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ ht_cap = &link_sta->ht_cap;
if (!ht_cap->ht_supported)
return;
@@ -1938,7 +2054,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1988,7 +2104,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2064,7 +2180,8 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2078,6 +2195,14 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ vht_cap = &link_sta->vht_cap;
if (!vht_cap->vht_supported)
return;
@@ -2110,10 +2235,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -2127,7 +2252,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2150,7 +2275,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
arg->tx_max_mcs_nss = 0xFF;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ arsta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* TODO: rxnss_override */
}
@@ -2162,7 +2287,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int i;
u8 ampdu_factor, max_nss;
u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
@@ -2171,6 +2298,21 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
bool support_160;
u16 v;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
if (!he_cap->has_he)
return;
@@ -2208,13 +2350,13 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info));
- arg->peer_he_ops = vif->bss_conf.he_oper.params;
+ arg->peer_he_ops = link_conf->he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
@@ -2235,10 +2377,10 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->deflink.ht_cap.ht_supported)
+ else if (link_sta->ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -2279,7 +2421,7 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -2319,7 +2461,8 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor, mpdu_density;
@@ -2329,22 +2472,31 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
band = def.chan->band;
- if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
return;
+ }
+
+ he_cap = &link_sta->he_cap;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
+ return;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
arg->bw_40 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
arg->bw_320 = true;
- arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+ arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -2388,10 +2540,23 @@ static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ struct ath12k_link_vif *arvif = arsta->arvif;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k *ar = arvif->ar;
int smps;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ ht_cap = &link_sta->ht_cap;
+
if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
@@ -2446,7 +2611,7 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
- sta->addr, arg->qos_flag);
+ arsta->addr, arg->qos_flag);
}
static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
@@ -2486,26 +2651,26 @@ static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
arg.value = uapsd;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
arg.value = max_sp;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
/* TODO: revisit during testing */
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
@@ -2517,17 +2682,17 @@ err:
return ret;
}
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta)
{
- return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->deflink.vht_cap.cap &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (link_sta->vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2539,74 +2704,74 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
}
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
- if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
return MODE_11BE_EHT320;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11BE_EHT80_80;
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
- sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
+ link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
return MODE_11BE_EHT160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11BE_EHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11BE_EHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11BE_EHT20;
return MODE_UNKNOWN;
@@ -2617,6 +2782,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -2635,33 +2801,40 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->deflink.eht_cap.has_eht) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->eht_cap.has_eht) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
- } else if (sta->deflink.he_cap.has_he) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ } else if (link_sta->he_cap.has_he) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->deflink.vht_cap.vht_supported &&
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->deflink.ht_cap.ht_supported &&
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -2670,16 +2843,16 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check EHT first */
- if (sta->deflink.eht_cap.has_eht) {
- phymode = ath12k_mac_get_phymode_eht(ar, sta);
- } else if (sta->deflink.he_cap.has_he) {
- phymode = ath12k_mac_get_phymode_he(ar, sta);
- } else if (sta->deflink.vht_cap.vht_supported &&
+ if (link_sta->eht_cap.has_eht) {
+ phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
+ } else if (link_sta->he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, link_sta);
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- phymode = ath12k_mac_get_phymode_vht(ar, sta);
- } else if (sta->deflink.ht_cap.ht_supported &&
+ phymode = ath12k_mac_get_phymode_vht(ar, link_sta);
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2692,7 +2865,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
- sta->addr, ath12k_mac_phymode_str(phymode));
+ arsta->addr, ath12k_mac_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
@@ -2767,15 +2940,25 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
const struct ieee80211_eht_mcs_nss_supp_bw *bw;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ eht_cap = &link_sta->eht_cap;
+ he_cap = &link_sta->he_cap;
+ if (!he_cap->has_he || !eht_cap->has_eht)
return;
arg->eht_flag = true;
@@ -2794,7 +2977,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
rx_mcs = arg->peer_eht_rx_mcs_set;
tx_mcs = arg->peer_eht_tx_mcs_set;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_320:
bw = &eht_cap->eht_mcs_nss_supp.bw._320;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
@@ -2846,6 +3029,67 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
arg->punct_bitmap = ~arvif->punct_bitmap;
}
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct peer_assoc_mlo_params *ml = &arg->ml;
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ath12k_link_sta *arsta_p;
+ struct ath12k_link_vif *arvif;
+ unsigned long links;
+ u8 link_id;
+ int i;
+
+ if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+ return;
+
+ ml->enabled = true;
+ ml->assoc_link = arsta->is_assoc_link;
+
+ /* For now, consider the assoc link to be the primary umac */
+ ml->primary_umac = arsta->is_assoc_link;
+ ml->peer_id_valid = true;
+ ml->logical_link_idx_valid = true;
+
+ ether_addr_copy(ml->mld_addr, sta->addr);
+ ml->logical_link_idx = arsta->link_idx;
+ ml->ml_peer_id = ahsta->ml_peer_id;
+ ml->ieee_link_id = arsta->link_id;
+ ml->num_partner_links = 0;
+ links = ahsta->links_map;
+
+ rcu_read_lock();
+
+ i = 0;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+ break;
+
+ arsta_p = rcu_dereference(ahsta->link[link_id]);
+ arvif = rcu_dereference(ahsta->ahvif->link[link_id]);
+
+ if (arsta_p == arsta)
+ continue;
+
+ if (!arvif->is_started)
+ continue;
+
+ ml->partner_info[i].vdev_id = arvif->vdev_id;
+ ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+ ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+ ml->partner_info[i].primary_umac = arsta_p->is_assoc_link;
+ ml->partner_info[i].logical_link_idx_valid = true;
+ ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+ ml->num_partner_links++;
+
+ i++;
+ }
+
+ rcu_read_unlock();
+}
+
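Worth noting in ath12k_peer_assoc_h_mlo(): the partner-link walk iterates the set bits of ahsta->links_map, skips the link the assoc is being run for, and is bounded by the WMI partner-link limit. A sketch of that iteration with plain bit tests in place of for_each_set_bit(); the limits are illustrative:

#include <stdio.h>

#define MAX_NUM_LINKS 15
#define WMI_MLO_MAX_LINKS 4

int main(void)
{
	unsigned long links_map = 0x0b;	/* links 0, 1 and 3 are set up */
	unsigned int assoc_link_id = 1;	/* the link this peer assoc is for */
	unsigned int link_id, num_partner_links = 0;

	for (link_id = 0; link_id < MAX_NUM_LINKS; link_id++) {
		if (!(links_map & (1UL << link_id)))	/* for_each_set_bit() */
			continue;
		if (link_id == assoc_link_id)	/* arsta_p == arsta analogue */
			continue;
		if (num_partner_links >= WMI_MLO_MAX_LINKS)
			break;
		printf("partner link %u\n", link_id);
		num_partner_links++;
	}
	printf("num_partner_links = %u\n", num_partner_links);
	return 0;
}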
static void ath12k_peer_assoc_prepare(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
@@ -2870,6 +3114,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_smps(arsta, arg);
+ ath12k_peer_assoc_h_mlo(arsta, arg);
/* TODO: amsdu_disable req? */
}
@@ -2900,7 +3145,8 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_up_params params = {};
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id = bss_conf->link_id;
struct ath12k_link_sta *arsta;
struct ieee80211_sta *ap_sta;
struct ath12k_sta *ahsta;
@@ -2910,32 +3156,48 @@ static void ath12k_bss_assoc(struct ath12k *ar,
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
- arvif->vdev_id, arvif->bssid, ahvif->aid);
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %i link id %u assoc bssid %pM aid %d\n",
+ arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
rcu_read_lock();
- ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ /* During ML connection, cfg.ap_addr has the MLD address. For
+ * non-ML connection, it has the BSSID.
+ */
+ ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!ap_sta) {
ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
- bss_conf->bssid, arvif->vdev_id);
+ vif->cfg.ap_addr, arvif->vdev_id);
rcu_read_unlock();
return;
}
ahsta = ath12k_sta_to_ahsta(ap_sta);
- arsta = &ahsta->deflink;
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
if (WARN_ON(!arsta)) {
rcu_read_unlock();
return;
}
- ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false);
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (WARN_ON(!link_sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false);
rcu_read_unlock();
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
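
The peer_arg move from the stack to a heap allocation tagged __free(kfree) means it is released automatically on every return path. The kernel macro builds on the compiler's cleanup attribute; a userspace sketch of the same pattern with malloc/free (works with GCC and Clang):

#include <stdio.h>
#include <stdlib.h>

static void free_ptr(void *p)
{
	void **pp = p;

	free(*pp);	/* runs automatically when the variable leaves scope */
	printf("freed\n");
}

struct peer_arg { int vdev_id; };

static int do_assoc(int fail_early)
{
	struct peer_arg *arg __attribute__((cleanup(free_ptr))) =
		calloc(1, sizeof(*arg));

	if (!arg)
		return -1;
	if (fail_early)
		return -2;	/* no explicit free needed on this path either */
	arg->vdev_id = 7;
	return 0;
}

int main(void)
{
	printf("rc=%d\n", do_assoc(1));
	printf("rc=%d\n", do_assoc(0));
	return 0;
}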
@@ -2949,8 +3211,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap,
- &ap_sta->deflink.he_6ghz_capa);
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3058,6 +3319,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
@@ -3066,8 +3328,15 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -3151,6 +3420,7 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
+ struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
u8 link_id;
@@ -3171,10 +3441,15 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
ar = arvif->ar;
- if (vif->cfg.assoc)
- ath12k_bss_assoc(ar, arvif, &vif->bss_conf);
- else
+ if (vif->cfg.assoc) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info)
+ continue;
+
+ ath12k_bss_assoc(ar, arvif, info);
+ } else {
ath12k_bss_disassoc(ar, arvif);
+ }
}
}
}
@@ -3185,6 +3460,7 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
@@ -3202,8 +3478,15 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
/* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
@@ -3314,8 +3597,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
ath12k_control_beaconing(arvif, info);
- if (arvif->is_up && vif->bss_conf.he_support &&
- vif->bss_conf.he_oper.params) {
+ if (arvif->is_up && info->he_support &&
+ info->he_oper.params) {
/* TODO: Extend to support 1024 BA Bitmap size */
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
@@ -3326,7 +3609,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
- param_value = vif->bss_conf.he_oper.params;
+ param_value = info->he_oper.params;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -3418,12 +3701,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
band = def.chan->band;
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = info->mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(info->basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
@@ -3537,6 +3820,9 @@ static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache)
static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
{
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]);
kfree(ahvif->cache[link_id]);
ahvif->cache[link_id] = NULL;
@@ -3597,9 +3883,9 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
arvif = &ahvif->deflink;
} else {
/* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory
+ * use the preallocated deflink memory, except for scan arvifs
*/
- if (!ahvif->links_map) {
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
} else {
arvif = (struct ath12k_link_vif *)
@@ -3730,22 +4016,9 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
- if (!ar->scan.is_roc) {
- struct cfg80211_scan_info info = {
- .aborted = ((ar->scan.state ==
- ATH12K_SCAN_ABORTING) ||
- (ar->scan.state ==
- ATH12K_SCAN_STARTING)),
- };
-
- ieee80211_scan_completed(hw, &info);
- }
-
- ar->scan.state = ATH12K_SCAN_IDLE;
- ar->scan_channel = NULL;
- ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
complete(&ar->scan.completed);
+ wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
break;
}
}
@@ -3786,15 +4059,15 @@ static int ath12k_scan_stop(struct ath12k *ar)
}
out:
- /* Scan state should be updated upon scan completion but in case
- * firmware fails to deliver the event (for whatever reason) it is
- * desired to clean up scan state anyway. Firmware may have just
- * dropped the scan completion event delivery due to transport pipe
- * being overflown with data and/or it can recover on its own before
- * next scan request is submitted.
+	/* Scan state should be updated in the scan completion worker, but
+	 * in case firmware fails to deliver the event (for whatever reason)
+	 * it is desired to clean up scan state anyway. Firmware may have
+	 * just dropped the scan completion event delivery due to the
+	 * transport pipe being overflowed with data and/or it can recover
+	 * on its own before the next scan request is submitted.
*/
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state != ATH12K_SCAN_IDLE)
+ if (ret)
__ath12k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
@@ -3845,6 +4118,53 @@ static void ath12k_scan_timeout_work(struct work_struct *work)
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
+static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.vdev_clean_wk);
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(wiphy);
+
+ arvif = ar->scan.arvif;
+
+	/* The scan vdev may have already been deleted. This can occur when
+	 * a new scan request is made on the same vif with a different
+	 * frequency, causing the scan arvif to move from one radio to
+	 * another, or when the scan was aborted and the arvif was already
+	 * deleted via remove interface. Alternatively, if the scan vdev is
+	 * now being used as an actual vdev (is_started), do not delete it.
+	 */
+ if (!arvif || arvif->is_started)
+ goto work_complete;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)",
+ arvif->link_id);
+
+ ath12k_mac_remove_link_interface(ah->hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+
+work_complete:
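+	/* whether or not the scan vdev was removed, reset the scan state
+	 * and notify mac80211 under data_lock
+	 */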
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH12K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH12K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
static int ath12k_start_scan(struct ath12k *ar,
struct ath12k_wmi_scan_req_arg *arg)
{
@@ -3899,10 +4219,10 @@ ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
return link_id;
}
- /* input ar is not assigned to any of the links, use link id
- * 0 for scan vdev creation.
+ /* input ar is not assigned to any of the links of ML VIF, use scan
+ * link (15) for scan vdev creation.
*/
- return 0;
+ return ATH12K_DEFAULT_SCAN_LINK;
}
static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
@@ -3933,11 +4253,14 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
 /* check if any of the links of ML VIF is already started on
 * radio (ar) corresponding to given scan frequency and use it,
-	 * if not use deflink(link 0) for scan purpose.
+	 * if not, use scan link (link 15) for scan purpose.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan",
+ arvif->link_id);
+
/* If the vif is already assigned to a specific vdev of an ar,
* check whether its already started, vdev which is started
* are not allowed to switch to a new radio.
@@ -3961,6 +4284,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
create = false;
}
}
+
if (create) {
/* Previous arvif would've been cleared in radio switch block
* above, assign arvif again for create.
@@ -3981,7 +4305,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.completed);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = false;
- ar->scan.vdev_id = arvif->vdev_id;
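+	/* track the scan arvif itself (not just a vdev id) so the cleanup
+	 * worker can later tear down a scan-only vdev
+	 */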
+ ar->scan.arvif = arvif;
ret = 0;
break;
case ATH12K_SCAN_STARTING:
@@ -4043,6 +4367,15 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
+
+	/* As per the cfg80211/mac80211 scan design, only one scan is
+	 * allowed at a time. Hence last_scan_link is used to track the
+	 * link id on which the scan is being done on this vif.
+	 */
+ ahvif->last_scan_link = arvif->link_id;
+
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time +
@@ -4062,14 +4395,14 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u16 link_id = ahvif->last_scan_link;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
-
- if (!arvif->is_created)
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || arvif->is_started)
return;
ar = arvif->ar;
@@ -4203,6 +4536,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4219,12 +4553,19 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
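+	/* resolve the peer address per link; for ML stations the link
+	 * address differs from the MLD address
+	 */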
if (sta)
- peer_addr = sta->addr;
+ peer_addr = arsta->addr;
else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = vif->bss_conf.bssid;
+ peer_addr = link_conf->bssid;
else
- peer_addr = vif->addr;
+ peer_addr = link_conf->addr;
key->hw_key_idx = key->keyidx;
@@ -4316,7 +4657,23 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k_key_conf *key_conf = NULL, *tmp;
+ struct ath12k_key_conf *key_conf, *tmp;
+
+ list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+ if (key_conf->key != key)
+ continue;
+
+ /* If SET key entry is already present in cache, nothing to do,
+ * just return
+ */
+ if (cmd == SET_KEY)
+ return 0;
+
+	/* DEL key for an old SET key which the driver hasn't flushed yet.
+	 */
+ list_del(&key_conf->list);
+ kfree(key_conf);
+ }
if (cmd == SET_KEY) {
key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL);
@@ -4330,17 +4687,7 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
list_add_tail(&key_conf->list,
&cache->key_conf.list);
}
- if (list_empty(&cache->key_conf.list))
- return 0;
- list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
- if (key_conf->key == key) {
- /* DEL key for an old SET key which driver hasn't flushed yet.
- */
- list_del(&key_conf->list);
- kfree(key_conf);
- break;
- }
- }
+
return 0;
}
@@ -4372,6 +4719,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
+
/* For an ML STA Pairwise key is same for all associated link Stations,
* hence do set key for all link STAs which are active.
*/
@@ -4394,41 +4742,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ret)
break;
}
- } else {
- arsta = &ahsta->deflink;
- arvif = arsta->arvif;
- if (WARN_ON(!arvif)) {
- ret = -EINVAL;
- goto out;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
- }
- } else {
- if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- link_id = key->link_id;
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- } else {
- link_id = 0;
- arvif = &ahvif->deflink;
+ return 0;
}
- if (!arvif || !arvif->is_created) {
- cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
- if (!cache)
- return -ENOSPC;
+ arsta = &ahsta->deflink;
+ arvif = arsta->arvif;
+ if (WARN_ON(!arvif))
+ return -EINVAL;
- ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+ if (ret)
+ return ret;
+ return 0;
+ }
+
+ if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_id = key->link_id;
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ } else {
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ }
+
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return -ENOSPC;
+
+ ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ if (ret)
return ret;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ return 0;
}
-out:
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
static int
@@ -4451,7 +4805,6 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
- struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
u8 vht_rate, nss;
u32 rate_code;
@@ -4470,67 +4823,76 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
if (!nss) {
ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
- sta->addr);
+ arsta->addr);
return -EINVAL;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
- sta->addr);
+ arsta->addr);
rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath12k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
- sta->addr, rate_code, ret);
+ arsta->addr, rate_code, ret);
return ret;
}
-static int ath12k_station_assoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta,
- bool reassoc)
+static int ath12k_mac_station_assoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ bool reassoc)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_link_sta *link_sta;
int ret;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_vht_rates;
+ u8 link_id = arvif->link_id;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return -EPERM;
+ if (WARN_ON(!rcu_access_pointer(sta->link[link_id])))
+ return -EINVAL;
+
band = def.chan->band;
mask = &arvif->bitrate_mask;
- ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc);
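+	/* the peer assoc argument is large, so keep it off the stack;
+	 * __free(kfree) releases it on every return path
+	 */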
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return -ENOMEM;
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc);
- if (peer_arg.peer_nss < 1) {
+ if (peer_arg->peer_nss < 1) {
ath12k_warn(ar->ab,
- "invalid peer NSS %d\n", peer_arg.peer_nss);
+ "invalid peer NSS %d\n", peer_arg->peer_nss);
return -EINVAL;
}
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
@@ -4541,7 +4903,13 @@ static int ath12k_station_assoc(struct ath12k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+ return -EINVAL;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
if (ret)
@@ -4554,9 +4922,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
if (reassoc)
return 0;
- ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap,
- &sta->deflink.he_6ghz_capa);
+ ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr,
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4574,7 +4941,7 @@ static int ath12k_station_assoc(struct ath12k *ar,
ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
if (ret) {
ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
}
@@ -4582,33 +4949,25 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
}
-static int ath12k_station_disassoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta)
+static int ath12k_mac_station_disassoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!sta->wme) {
arvif->num_legacy_stations--;
- ret = ath12k_recalc_rtscts_prot(arvif);
- if (ret)
- return ret;
+ return ath12k_recalc_rtscts_prot(arvif);
}
- ret = ath12k_clear_peer_keys(arvif, sta->addr);
- if (ret) {
- ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
return 0;
}
static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
+ struct ieee80211_link_sta *link_sta;
struct ath12k *ar;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4619,7 +4978,6 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
u32 changed, bw, nss, smps, bw_prev;
int err, num_vht_rates;
const struct cfg80211_bitrate_mask *mask;
- struct ath12k_wmi_peer_assoc_arg peer_arg;
enum wmi_phy_mode peer_phymode;
struct ath12k_link_sta *arsta;
struct ieee80211_vif *vif;
@@ -4655,9 +5013,14 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
ath12k_mac_max_vht_nss(vht_mcs_mask)));
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
+
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg);
- peer_phymode = peer_arg.peer_phymode;
+ ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg);
+ peer_phymode = peer_arg->peer_phymode;
if (bw > bw_prev) {
/* Phymode shows maximum supported channel width, if we
@@ -4666,65 +5029,65 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* WMI_PEER_CHWIDTH
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
} else {
/* When we downgrade bandwidth this will conflict with phymode
* and cause to trigger firmware crash. In this case we send
* WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
- sta->addr, nss);
+ arsta->addr, nss);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
- sta->addr, nss, err);
+ arsta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
- sta->addr, smps);
+ arsta->addr, smps);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
- sta->addr, smps, err);
+ arsta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
@@ -4743,7 +5106,14 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
} else {
@@ -4752,20 +5122,49 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* other rates using peer_assoc command.
*/
ath12k_peer_assoc_prepare(ar, arvif, arsta,
- &peer_arg, true);
+ peer_arg, true);
- err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, err);
+ arsta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
}
}
}
+static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (WARN_ON(!arsta))
+ return;
+
+ ahsta->links_map &= ~BIT(link_id);
+ rcu_assign_pointer(ahsta->link[link_id], NULL);
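+	/* wait for in-flight RCU readers of this link sta to finish before
+	 * it is freed or reused below
+	 */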
+ synchronize_rcu();
+
+ if (arsta == &ahsta->deflink) {
+ arsta->link_id = ATH12K_INVALID_LINK_ID;
+ arsta->ahsta = NULL;
+ arsta->arvif = NULL;
+ return;
+ }
+
+ kfree(arsta);
+}
+
static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
{
@@ -4799,6 +5198,144 @@ static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
ar->num_stations--;
}
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_peer *peer;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ath12k_mac_dec_num_stations(arvif, arsta);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+}
+
+static int ath12k_mac_station_unauthorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Driver must clear the keys during the state change from
+ * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
+ * returning from here, mac80211 is going to delete the keys
+ * in __sta_info_destroy_part2(). This will ensure that the driver does
+ * not retain stale key references after mac80211 deletes the keys.
+ */
+ ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_authorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ arsta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
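+	/* for ML stations, per-link peer delete and post-remove cleanup
+	 * are handled in ath12k_mac_ml_station_remove()
+	 */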
+ if (sta->mlo)
+ return ret;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ if (sta->valid_links)
+ ath12k_mac_free_unassign_link_sta(ahvif->ah,
+ arsta->ahsta, arsta->link_id);
+
+ return ret;
+}
+
static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
@@ -4806,7 +5343,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4824,34 +5361,35 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
+ peer_param.peer_addr = arsta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
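+	/* flag the peer as a link of an ML station so firmware can set up
+	 * MLO state for it
+	 */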
+ peer_param.ml_enabled = sta->mlo;
ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
goto free_peer;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
if (ieee80211_vif_is_mesh(vif)) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
goto free_peer;
}
}
- ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
goto free_peer;
}
@@ -4868,7 +5406,9 @@ static int ath12k_mac_station_add(struct ath12k *ar,
return 0;
free_peer:
- ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
dec_num_station:
ath12k_mac_dec_num_stations(arvif, arsta);
exit:
@@ -4906,101 +5446,131 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
return bw;
}
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_link_sta *arsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
{
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k *ar;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return -EINVAL;
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif)
+ return -EINVAL;
+
+ memset(arsta, 0, sizeof(*arsta));
+
+ link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ ether_addr_copy(arsta->addr, link_sta->addr);
+
+ /* logical index of the link sta in order of creation */
+ arsta->link_idx = ahsta->num_peer++;
+
+ arsta->link_id = link_id;
+ ahsta->links_map |= BIT(arsta->link_id);
+ arsta->arvif = arvif;
+ arsta->ahsta = ahsta;
+ ahsta->ahvif = ahvif;
+
+ wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
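+	/* publish the link sta only after it is fully initialized */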
+ rcu_assign_pointer(ahsta->link[link_id], arsta);
+
+ return 0;
+}
+
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+ struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
- struct ath12k_peer *peer;
+ unsigned long links;
+ struct ath12k *ar;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
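+	/* delete all link peers first; the ML peer entry itself is
+	 * released at the end via ath12k_peer_ml_delete()
+	 */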
+ ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
+
+ /* validate link station removal and clear arsta links */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ }
+
+ ath12k_peer_ml_delete(ah, sta);
+}
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k *ar = arvif->ar;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
- arsta = &ahsta->deflink;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ arsta->link_id, arsta->addr, old_state, new_state);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return -EINVAL;
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
+ * from driver
+ */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath12k_mac_station_remove(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ goto exit;
+ }
}
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- rcu_assign_pointer(ahsta->link[0], arsta);
- /* TODO use appropriate link id once MLO support is added */
- arsta->link_id = ATH12K_DEFAULT_LINK_ID;
- ahsta->links_map = BIT(arsta->link_id);
- arsta->ahsta = ahsta;
- arsta->arvif = arvif;
- wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
- synchronize_rcu();
-
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- wiphy_work_cancel(hw->wiphy, &arsta->update_wk);
+ arsta->addr, arvif->vdev_id);
- if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ath12k_bss_disassoc(ar, arvif);
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
- }
- ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- ath12k_mac_dec_num_stations(arvif, arsta);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
- ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-
- if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- rcu_assign_pointer(ahsta->link[arsta->link_id], NULL);
- synchronize_rcu();
- ahsta->links_map &= ~(BIT(arsta->link_id));
- arsta->link_id = ATH12K_INVALID_LINK_ID;
- arsta->ahsta = NULL;
- }
+ /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
+ * peer associated to AP/Mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_assoc(ar, arvif, arsta, false);
+ ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
+ arsta->addr);
spin_lock_bh(&ar->data_lock);
@@ -5008,45 +5578,154 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
arsta->bw_prev = sta->deflink.bandwidth;
spin_unlock_bh(&ar->data_lock);
+
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
+ * authorized
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ ret = ath12k_mac_station_authorize(ar, arvif, arsta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ arsta->addr);
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
+	/* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: the station may
+	 * be undergoing removal; deauthorize it.
+	 */
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
+ ath12k_mac_station_unauthorize(ar, arvif, arsta);
- spin_unlock_bh(&ar->ab->base_lock);
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to
+ * AP/mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_disassoc(ar, arvif, arsta);
+ ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
+ arsta->addr);
+ }
+
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ u8 link_id = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (ieee80211_vif_is_mld(vif) && sta->valid_links) {
+ WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1);
+ link_id = ffs(sta->valid_links) - 1;
}
+	/* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE:
+	 * New station add received. If this is an ML station then
+	 * ahsta->links_map will be zero and sta->valid_links will have
+	 * a single bit set. Assign the default link to the first link sta.
+	 */
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(ahsta, 0, sizeof(*ahsta));
+
+ arsta = &ahsta->deflink;
+
+ /* ML sta */
+ if (sta->mlo && !ahsta->links_map &&
+ (hweight16(sta->valid_links) == 1)) {
+ ret = ath12k_peer_ml_create(ah, sta);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ sta->addr);
+ goto exit;
+ }
+ }
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
+ link_id);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
+ link_id, sta->addr);
+ goto exit;
+ }
+
+		/* arsta above gets memset in assign link sta, hence set
+		 * these fields only after that call
+		 */
+ if (sta->mlo) {
+ arsta->is_assoc_link = true;
+ ahsta->assoc_link_id = link_id;
+ }
+ }
+
+ /* In the ML station scenario, activate all partner links once the
+ * client is transitioning to the associated state.
+ *
+ * FIXME: Ideally, this activation should occur when the client
+ * transitions to the authorized state. However, there are some
+ * issues with handling this in the firmware. Until the firmware
+ * can manage it properly, activate the links when the client is
+ * about to move to the associated state.
+ */
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+ old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
+ ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+
+ /* Handle all the other state transitions in generic way */
+ valid_links = ahsta->links_map;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+ /* some assumptions went wrong! */
+ if (WARN_ON(!arvif || !arsta))
+ continue;
+
+		/* vdev might have been deleted */
+ if (WARN_ON(!arvif->ar))
+ continue;
+
+ ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+ old_state, new_state);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
+ link_id, arsta->addr, old_state, new_state);
+ goto exit;
+ }
+ }
+
+	/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
+	 * Remove the station from the driver (handle the ML sta here since
+	 * it needs special handling; a normal sta is handled in the generic
+	 * handler above).
+	 */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ret = 0;
+
+exit:
+ /* update the state if everything went well */
+ if (!ret)
+ ahsta->state = new_state;
+
return ret;
}
@@ -5054,16 +5733,22 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k *ar;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ u8 link_id;
int ret;
s16 txpwr;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
+ /* TODO: use link id from mac80211 once that's implemented */
+ link_id = 0;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
@@ -5080,9 +5765,9 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
goto out;
}
- ar = ath12k_ah_to_ar(ah, 0);
+ ar = arvif->ar;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
@@ -5094,62 +5779,69 @@ out:
return ret;
}
-static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct ieee80211_sta *sta = link_sta->sta;
struct ath12k *ar;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_link_sta *arsta;
struct ath12k_link_vif *arvif;
struct ath12k_peer *peer;
u32 bw, smps;
- /* TODO: use proper link id once link sta specific rc update support is
- * available in mac80211.
- */
- u8 link_id = ATH12K_DEFAULT_LINK_ID;
-
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return;
- }
rcu_read_lock();
- arvif = rcu_dereference(ahvif->link[link_id]);
+ arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
if (!arvif) {
- ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
- link_id, sta->addr);
+ ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
+ link_sta->link_id, sta->addr);
rcu_read_unlock();
return;
}
- arsta = rcu_dereference(ahsta->link[link_id]);
+
+ ar = arvif->ar;
+
+ arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
if (!arsta) {
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n",
- link_id, sta->addr);
+ link_sta->link_id, sta->addr);
return;
}
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_read_unlock();
+ return;
+ }
+
+ link_sta = rcu_dereference(sta->link[arsta->link_id]);
+ if (!link_sta) {
+ rcu_read_unlock();
+ ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
- sta->deflink.smps_mode);
+ arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+ link_sta->smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -5160,12 +5852,12 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->deflink.rx_nss;
+ arsta->nss = link_sta->rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -5177,8 +5869,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
- ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->deflink.smps_mode, sta->addr);
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n",
+ link_sta->smps_mode, arsta->addr, link_sta->link_id);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -5195,6 +5887,110 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
rcu_read_unlock();
}
+static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (arsta)
+ return NULL;
+
+ arsta = kmalloc(sizeof(*arsta), GFP_KERNEL);
+ if (!arsta)
+ return NULL;
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id);
+ if (ret) {
+ kfree(arsta);
+ return NULL;
+ }
+
+ return arsta;
+}
+
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ struct ath12k *ar;
+ u8 link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (!sta->valid_links)
+ return -EINVAL;
+
+	/* Firmware does not support removal of individual link stations.
+	 * All link stations are removed during ML STA delete in
+	 * sta_state(), hence link sta removal is not handled here.
+	 */
+ if (new_links < old_links)
+ return 0;
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr);
+ return -EINVAL;
+ }
+
+ /* this op is expected only after initial sta insertion with default link */
+ if (WARN_ON(ahsta->links_map == 0))
+ return -EINVAL;
+
+ valid_links = new_links;
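+	/* create link stations only for links that are not already
+	 * present in links_map
+	 */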
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (ahsta->links_map & BIT(link_id))
+ continue;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+ if (!arvif || !arsta) {
+ ath12k_hw_warn(ah, "Failed to alloc/assign link sta");
+ continue;
+ }
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_mac_station_add(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ /* TODO: Handle recovery case */
+
+ return true;
+}
+
static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
u16 ac, bool enable)
{
@@ -6054,6 +6850,8 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6115,6 +6913,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
int buf_id;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ATH12K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -6169,15 +6969,18 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_skb_cb *skb_cb;
struct ath12k_vif *ahvif;
struct ath12k_link_vif *arvif;
struct sk_buff *skb;
int ret;
+ lockdep_assert_wiphy(wiphy);
+
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH12K_SKB_CB(skb);
if (!skb_cb->vif) {
@@ -6187,7 +6990,15 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
- arvif = &ahvif->deflink;
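+		/* validate the link id recorded at enqueue time; the link may
+		 * have been removed while the frame sat in the queue
+		 */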
+ if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+ ath12k_warn(ar->ab,
+ "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n",
+ skb_cb->link_id, ahvif->links_map);
+ ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
@@ -6197,8 +7008,9 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
} else {
ath12k_warn(ar->ab,
- "dropping mgmt frame for vdev %d, is_started %d\n",
+ "dropping mgmt frame for vdev %d link %u is_started %d\n",
arvif->vdev_id,
+ skb_cb->link_id,
arvif->is_started);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
@@ -6232,7 +7044,7 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+ wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -6258,6 +7070,105 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
}
+/* Note: called under rcu_read_lock() */
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath12k_sta *ahsta;
+
+ /* Use the link id passed or the default vif link */
+ if (!sta) {
+ if (link != IEEE80211_LINK_UNSPECIFIED)
+ return link;
+
+ return ahvif->deflink.link_id;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+
+	/* The translation below ensures we pass proper A2 & A3 for non-ML
+	 * clients. It also assumes that, for now, only an MLO AP is
+	 * supported in this path.
+	 */
+ if (!sta->mlo) {
+ link = ahsta->deflink.link_id;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return link;
+
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ if (bss_conf) {
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+ }
+
+	/* enqueue ethernet encap & data frames on the primary link; FW does
+	 * link selection and address translation.
+	 */
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control))
+ return ahsta->assoc_link_id;
+
+ /* 802.11 frame cases */
+ if (link == IEEE80211_LINK_UNSPECIFIED)
+ link = ahsta->deflink.link_id;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return link;
+
+ /* Perform address conversion for ML STA Tx */
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ link_sta = rcu_dereference(sta->link[link]);
+
+ if (bss_conf && link_sta) {
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+ return link;
+ }
+
+ if (bss_conf) {
+		/* In certain cases, an ML sta associated with a subset of the
+		 * links on which the ML AP is active may now send a frame
+		 * (e.g. a probe request) on a different link which is active
+		 * in our MLD but was not added during the previous
+		 * association. We can still honor the Tx to that ML STA via
+		 * the requested link.
+		 * Control reaches here in such a case only when that link
+		 * address is the same as the MLD address, or in the worst
+		 * case the client wrongly used the MLD address as TA, which
+		 * would have helped identify the ML sta object and pass it
+		 * here.
+		 * If the link address of that STA differs from the MLD
+		 * address, the sta object would be NULL and control would
+		 * return at the !sta check at the start of the function.
+		 * Also, no translation of hdr->addr1 from MLD to link address
+		 * is needed since the RA is already the MLD address (ideally
+		 * the same as that link address).
+		 */
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+}
+
+/* Note: called under rcu_read_lock() */
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -6267,13 +7178,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_sta *sta = control->sta;
u32 info_flags = info->flags;
+ struct ath12k *ar;
bool is_prb_rsp;
+ u8 link_id;
int ret;
+ link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -6282,6 +7196,27 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ /* handle only for MLO case, use deflink for non MLO case */
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else {
+ link_id = 0;
+ }
+
+ arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!arvif || !arvif->ar) {
+		ath12k_hw_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+ link_id);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ar = arvif->ar;
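+	/* record the link so the deferred wmi mgmt tx worker can pick the
+	 * right arvif for this frame
+	 */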
+ skb_cb->link_id = link_id;
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
@@ -6309,10 +7244,12 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
void ath12k_mac_drain_tx(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
ath12k_mgmt_over_wmi_tx_purge(ar);
}
@@ -6429,6 +7366,8 @@ static void ath12k_drain_tx(struct ath12k_hw *ah)
struct ath12k *ar;
int i;
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
for_each_ar(ah, ar, i)
ath12k_mac_drain_tx(ar);
}
@@ -6554,9 +7493,10 @@ static void ath12k_mac_stop(struct ath12k *ar)
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
@@ -6622,6 +7562,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_link_vif *tx_arvif;
struct ath12k_vif *tx_ahvif;
@@ -6629,10 +7570,17 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
if (!tx_vif)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
tx_arvif = &tx_ahvif->deflink;
- if (ahvif->vif->bss_conf.nontransmitted) {
+ if (link_conf->nontransmitted) {
if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
return -EINVAL;
@@ -6644,7 +7592,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -EINVAL;
}
- if (ahvif->vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
*flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
return 0;
@@ -6658,6 +7606,8 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
struct ath12k_vif *ahvif = arvif->ahvif;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
arg->if_id = arvif->vdev_id;
arg->type = ahvif->vdev_type;
arg->subtype = ahvif->vdev_subtype;
@@ -6689,6 +7639,17 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+
+ if (ath12k_mac_is_ml_arvif(arvif)) {
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+ ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d",
+ ahvif->vif->valid_links);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
+ }
+
return 0;
}
@@ -6839,16 +7800,25 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
struct ieee80211_bss_conf *link_conf;
u32 param_id, param_value;
u16 nss;
int i;
int ret, vdev_id;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]);
+	/* If no link is active and a scan vdev is requested,
+	 * use a default link conf for the scan address.
+	 */
+ if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
+ link_id = ffs(vif->valid_links) - 1;
+ else
+ link_id = arvif->link_id;
+
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
vif->addr, arvif->link_id);
@@ -6999,7 +7969,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
}
- arvif->txpower = vif->bss_conf.txpower;
+ arvif->txpower = link_conf->txpower;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -7034,8 +8004,7 @@ err_peer_del:
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
arvif->bssid);
if (ret)
- /* KVALO: why not goto err? */
- return ret;
+ goto err_vdev_del;
ar->num_peers--;
}
@@ -7129,7 +8098,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_link_vif *scan_arvif;
struct ath12k_hw *ah = hw->priv;
struct ath12k *ar;
struct ath12k_base *ab;
@@ -7148,6 +8119,19 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
if (!ar)
return NULL;
+	/* clean up the scan vdev if scanning on that ar is done
+	 * and we now want to create a vdev for actual usage.
+	 */
+ if (ieee80211_vif_is_mld(vif)) {
+ scan_arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ATH12K_DEFAULT_SCAN_LINK]);
+ if (scan_arvif && scan_arvif->ar == ar) {
+ ar->scan.arvif = NULL;
+ ath12k_mac_remove_link_interface(hw, scan_arvif);
+ ath12k_mac_unassign_link_vif(scan_arvif);
+ }
+ }
+
if (arvif->ar) {
/* This is not expected really */
if (WARN_ON(!arvif->is_created)) {
@@ -7173,9 +8157,6 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
ab = ar->ab;
- if (arvif->is_created)
- goto flush;
-
/* Assign arvif again here since previous radio switch block
* would've unassigned and cleared it.
*/
@@ -7186,6 +8167,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
goto unlock;
}
+ if (arvif->is_created)
+ goto flush;
+
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
TARGET_NUM_VDEVS);
@@ -7243,14 +8227,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
- /* For non-ml vifs, vif->addr is the actual vdev address but for
- * ML vif link(link BSSID) address is the vdev address and it can be a
- * different one from vif->addr (i.e ML address).
- * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
+ /* Defer vdev creation until assign_chanctx or hw_scan is initiated, as the
* driver will not know at this point whether this interface is an ML vif.
*/
- ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL);
-
return 0;
}
@@ -7348,11 +8327,12 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
+ struct ath12k *ar;
u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) {
/* if we cached some config but never received assign chanctx,
* free the allocated cache.
*/
@@ -7361,6 +8341,31 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
if (!arvif || !arvif->is_created)
continue;
+ ar = arvif->ar;
+
+ /* A scan abort is already in progress since cancel_hw_scan() is
+ * expected to have run before this point. Since the link is going
+ * to be removed now anyway, just cancel the worker and report the
+ * scan as aborted to user space.
+ */
+ if (ar->scan.arvif == arvif) {
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
ath12k_mac_remove_link_interface(hw, arvif);
ath12k_mac_unassign_link_vif(arvif);
}
@@ -7453,20 +8458,26 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
- struct ath12k *ar = arvif->ar;
+ struct ath12k *ar;
int ret = -EINVAL;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+ if (!ar)
+ return -EINVAL;
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
- ret = ath12k_dp_rx_ampdu_start(ar, params);
+ ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -7480,6 +8491,10 @@ static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
break;
}
+ if (ret)
+ ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n",
+ params->action, vif->addr, link_id, ret);
+
return ret;
}
@@ -7487,27 +8502,24 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta = params->sta;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ unsigned long links_map = ahsta->links_map;
int ret = -EINVAL;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar)
- return -EINVAL;
-
- ar = ath12k_ah_to_ar(ah, 0);
- arvif = &ahvif->deflink;
+ if (WARN_ON(!links_map))
+ return ret;
- ret = ath12k_mac_ampdu_action(arvif, params);
- if (ret)
- ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
- ar->pdev_idx, params->action, ret);
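+ /* Apply the AMPDU action on every link the station uses; abort on the first failure */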
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
@@ -7627,6 +8639,58 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
return down_mode;
}
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+ struct wmi_ml_arg *ml_arg)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct wmi_ml_partner_info *partner_info;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *arvif_p;
+ unsigned long links;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (!ath12k_mac_is_ml_arvif(arvif))
+ return;
+
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+ return;
+
+ ml_arg->enabled = true;
+
+ /* The driver always adds a new link via VDEV START; FW takes
+ * care of internally adding this link to the existing
+ * link vdevs which are advertised as partners below.
+ */
+ ml_arg->link_add = true;
+ partner_info = ml_arg->partner_info;
+
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+
+ if (WARN_ON(!arvif_p))
+ continue;
+
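+ /* skip the link vdev being started; only already-created links are advertised as partners */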
+ if (arvif == arvif_p)
+ continue;
+
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ ahvif->vif->link_conf[arvif_p->link_id]);
+
+ if (!link_conf)
+ continue;
+
+ partner_info->vdev_id = arvif_p->vdev_id;
+ partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+ ether_addr_copy(partner_info->addr, link_conf->addr);
+ ml_arg->num_partner_links++;
+ partner_info++;
+ }
+}
+
static int
ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
@@ -7636,11 +8700,20 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ath12k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_vif *ahvif = arvif->ahvif;
- int he_support = ahvif->vif->bss_conf.he_support;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int dfs_cac_time;
int ret;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
reinit_completion(&ar->vdev_setup_done);
@@ -7658,9 +8731,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
chandef->chan->band,
ahvif->vif->type);
arg.min_power = 0;
- arg.max_power = chandef->chan->max_power * 2;
- arg.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+ arg.max_power = chandef->chan->max_power;
+ arg.max_reg_power = chandef->chan->max_reg_power;
+ arg.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@@ -7693,7 +8766,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80 MHz also needs radar detection */
- if (he_support) {
+ if (link_conf->he_support) {
ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
@@ -7705,6 +8778,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+ if (!restart)
+ ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
arg.vdev_id, arg.freq,
@@ -7728,20 +8804,20 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
- /* Enable CAC Flag in the driver by checking the channel DFS cac time,
- * i.e dfs_cac_ms value which will be valid only for radar channels
- * and state as NL80211_DFS_USABLE which indicates CAC needs to be
- * done before channel usage. This flags is used to drop rx packets.
+ /* Enable the CAC running flag in the driver by checking that all
+ * sub-channels' DFS state is NL80211_DFS_USABLE, which indicates CAC
+ * needs to be done before channel usage. This flag is used to drop
+ * rx packets during CAC.
*/
/* TODO: Set the flag for other interface types as required */
- if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP &&
- chandef->chan->dfs_cac_ms &&
- chandef->chan->dfs_state == NL80211_DFS_USABLE) {
- set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+ cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) {
+ set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
+ dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "CAC Started in chan_freq %d for vdev %d\n",
- arg.freq, arg.vdev_id);
+ "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+ dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id);
}
ret = ath12k_mac_set_txbf_conf(arvif);
@@ -7778,19 +8854,32 @@ ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx)
+ continue;
- arg->n_vifs++;
+ arg->n_vifs++;
+ }
}
static void
@@ -7799,27 +8888,41 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *ctx;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
- if (ctx != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[arvif->link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
- if (WARN_ON(arg->next_vif == arg->n_vifs))
- return;
+ ctx = rcu_access_pointer(link_conf->chanctx_conf);
+ if (ctx != arg->ctx)
+ continue;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
- arg->vifs[arg->next_vif].vif = vif;
- arg->vifs[arg->next_vif].old_ctx = ctx;
- arg->vifs[arg->next_vif].new_ctx = ctx;
- arg->next_vif++;
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->vifs[arg->next_vif].link_conf = link_conf;
+ arg->next_vif++;
+ }
}
static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
@@ -7879,10 +8982,12 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
struct ath12k_link_vif *arvif;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
+ u8 link_id;
int ret;
int i;
bool monitor_vif = false;
@@ -7892,7 +8997,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
for (i = 0; i < n_vifs; i++) {
vif = vifs[i].vif;
ahvif = ath12k_vif_to_ahvif(vif);
- arvif = &ahvif->deflink;
+ link_conf = vifs[i].link_conf;
+ link_id = link_conf->link_id;
+ arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahvif->link[link_id]);
if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
@@ -7945,13 +9053,13 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
if (vif->mbssid_tx_vif) {
- struct ath12k_vif *ahvif =
+ struct ath12k_vif *tx_ahvif =
ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
+ struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
- params.tx_bssid = arvif->bssid;
- params.nontx_profile_idx = vif->bss_conf.bssid_index;
- params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ params.tx_bssid = tx_arvif->bssid;
+ params.nontx_profile_idx = link_conf->bssid_index;
+ params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
}
ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
@@ -8099,11 +9207,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
return -ENOMEM;
}
- if (!arvif->is_started) {
- ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
- if (!ar)
- return -EINVAL;
- } else {
+ ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
+ if (!ar) {
ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
vif->addr, link_id);
return -EINVAL;
@@ -8348,6 +9453,8 @@ static int ath12k_mac_flush(struct ath12k *ar)
int ath12k_mac_wait_tx_complete(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ath12k_mac_drain_tx(ar);
return ath12k_mac_flush(ar);
}
@@ -8356,7 +9463,11 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
+ unsigned long links;
struct ath12k *ar;
+ u8 link_id;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8371,12 +9482,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
return;
}
- ar = ath12k_get_ar_by_vif(hw, vif);
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
- if (!ar)
- return;
+ ahvif = ath12k_vif_to_ahvif(vif);
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!(arvif && arvif->ar))
+ continue;
- ath12k_mac_flush(ar);
+ ath12k_mac_flush(arvif->ar);
+ }
}
static int
@@ -8575,10 +9692,11 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
- if (arsta->arvif != arvif)
+ arsta = rcu_dereference(ahsta->link[arvif->link_id]);
+ if (!arsta || arsta->arvif != arvif)
return;
spin_lock_bh(&ar->data_lock);
@@ -8593,21 +9711,26 @@ static void ath12k_mac_disable_peer_fixed_rate(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
int ret;
- if (arsta->arvif != arvif)
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[arvif->link_id]);
+
+ if (!arsta || arsta->arvif != arvif)
return;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath12k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
}
static int
@@ -8950,6 +10073,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}
@@ -8962,7 +10086,6 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k_wmi_scan_req_arg arg;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
u32 scan_time_msec;
@@ -8973,10 +10096,8 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
- if (!ar) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ar)
+ return -EINVAL;
/* check if any of the links of the ML VIF is already started on
* the radio (ar) corresponding to the given scan frequency and use it,
@@ -8995,15 +10116,11 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
* always on the same band for the vif
*/
if (arvif->is_created) {
- if (WARN_ON(!arvif->ar)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
- if (ar != arvif->ar && arvif->is_started) {
- ret = -EBUSY;
- goto exit;
- }
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
if (ar != arvif->ar) {
ath12k_mac_remove_link_interface(hw, arvif);
@@ -9020,7 +10137,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
ret);
- goto exit;
+ return ret;
}
}
@@ -9033,7 +10150,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.on_channel);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = true;
- ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.arvif = arvif;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
@@ -9048,37 +10165,41 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
if (ret)
- goto exit;
+ return ret;
scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
- memset(&arg, 0, sizeof(arg));
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.num_chan = 1;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
- if (!arg.chan_list) {
- ret = -ENOMEM;
- goto exit;
- }
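+ /* __free(kfree) releases these allocations automatically on scope exit,
+ * so the error paths below no longer need explicit kfree() calls. The WMI
+ * layer is expected to copy chan_list before ath12k_start_scan() returns.
+ */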
+ struct ath12k_wmi_scan_req_arg *arg __free(kfree) =
+ kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
- arg.chan_list[0] = chan->center_freq;
- arg.dwell_time_active = scan_time_msec;
- arg.dwell_time_passive = scan_time_msec;
- arg.max_scan_time = scan_time_msec;
- arg.scan_f_passive = 1;
- arg.burst_duration = duration;
-
- ret = ath12k_start_scan(ar, &arg);
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->num_chan = 1;
+
+ u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list),
+ GFP_KERNEL);
+ if (!chan_list)
+ return -ENOMEM;
+
+ arg->chan_list = chan_list;
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
+ arg->chan_list[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_f_passive = 1;
+ arg->burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
- goto free_chan_list;
+ return ret;
}
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
@@ -9087,20 +10208,13 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
- ret = -ETIMEDOUT;
- goto free_chan_list;
+ return -ETIMEDOUT;
}
ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
msecs_to_jiffies(duration));
- ret = 0;
-
-free_chan_list:
- kfree(arg.chan_list);
-
-exit:
- return ret;
+ return 0;
}
static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
@@ -9159,7 +10273,7 @@ static const struct ieee80211_ops ath12k_ops = {
.set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
- .link_sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
.conf_tx = ath12k_mac_op_conf_tx,
.set_antenna = ath12k_mac_op_set_antenna,
.get_antenna = ath12k_mac_op_get_antenna,
@@ -9178,7 +10292,8 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
-
+ .change_sta_links = ath12k_mac_op_change_sta_links,
+ .can_activate_links = ath12k_mac_op_can_activate_links,
#ifdef CONFIG_PM
.suspend = ath12k_wow_op_suspend,
.resume = ath12k_wow_op_resume,
@@ -9201,8 +10316,8 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
- ar->freq_low = freq_low;
- ar->freq_high = freq_high;
+ ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+ ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -9334,14 +10449,20 @@ static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
{
struct ath12k *ar;
int i;
- u16 interface_modes, mode;
- bool is_enable = true;
+ u16 interface_modes, mode = 0;
+ bool is_enable = false;
+
+ if (type == NL80211_IFTYPE_MESH_POINT) {
+ if (IS_ENABLED(CONFIG_MAC80211_MESH))
+ mode = BIT(type);
+ } else {
+ mode = BIT(type);
+ }
- mode = BIT(type);
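+ /* the mode is considered enabled if any radio in the hw group supports it */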
for_each_ar(ah, ar, i) {
interface_modes = ar->ab->hw_params->interface_modes;
- if (!(interface_modes & mode)) {
- is_enable = false;
+ if (interface_modes & mode) {
+ is_enable = true;
break;
}
}
@@ -9349,23 +10470,20 @@ static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
return is_enable;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+static int
+ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
+ struct ieee80211_iface_combination *comb)
{
- struct wiphy *wiphy = ah->hw->wiphy;
- struct ieee80211_iface_combination *combinations;
+ u16 interface_modes = ar->ab->hw_params->interface_modes;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
bool ap, mesh, p2p;
- ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
- p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ ap = interface_modes & BIT(NL80211_IFTYPE_AP);
+ p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
-
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
- if (!combinations)
- return -ENOMEM;
+ (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT));
if ((ap || mesh) && !p2p) {
n_limits = 2;
@@ -9382,10 +10500,8 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
}
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
- if (!limits) {
- kfree(combinations);
+ if (!limits)
return -ENOMEM;
- }
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
@@ -9401,26 +10517,181 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
if (p2p) {
limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO);
limits[2].max = 1;
limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
}
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = max_interfaces;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ comb[0].limits = limits;
+ comb[0].n_limits = n_limits;
+ comb[0].max_interfaces = max_interfaces;
+ comb[0].num_different_channels = 1;
+ comb[0].beacon_int_infra_match = true;
+ comb[0].beacon_int_min_gcd = 100;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ return 0;
+}
+
+static int
+ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah,
+ struct wiphy_radio *radio,
+ u8 n_radio,
+ struct ieee80211_iface_combination *comb)
+{
+ const struct ieee80211_iface_combination *iter_comb;
+ struct ieee80211_iface_limit *limits;
+ int i, j, n_limits;
+ bool ap, mesh, p2p;
+
+ if (!n_radio)
+ return 0;
+
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
+
+ if ((ap || mesh) && !p2p)
+ n_limits = 2;
+ else if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 1;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits)
+ return -ENOMEM;
+
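+ /* Merge the per-radio combinations: interface types are OR'd and the
+ * per-type limits summed so the global combination covers all radios.
+ */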
+ for (i = 0; i < n_radio; i++) {
+ iter_comb = radio[i].iface_combinations;
+ for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) {
+ limits[j].types |= iter_comb->limits[j].types;
+ limits[j].max += iter_comb->limits[j].max;
+ }
+
+ comb->max_interfaces += iter_comb->max_interfaces;
+ comb->num_different_channels += iter_comb->num_different_channels;
+ comb->radar_detect_widths |= iter_comb->radar_detect_widths;
+ }
+
+ comb->limits = limits;
+ comb->n_limits = n_limits;
+ comb->beacon_int_infra_match = true;
+ comb->beacon_int_min_gcd = 100;
+
+ return 0;
+}
+
+static
+void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb)
+{
+ kfree(iface_comb[0].limits);
+ kfree(iface_comb);
+}
+
+static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
+ const struct wiphy_radio *radio;
+ int i;
+
+ if (wiphy->n_radio > 0) {
+ radio = wiphy->radio;
+ for (i = 0; i < wiphy->n_radio; i++)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(wiphy->radio);
+ }
+
+ ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations);
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct ieee80211_iface_combination *combinations, *comb;
+ struct wiphy *wiphy = ah->hw->wiphy;
+ struct wiphy_radio *radio;
+ struct ath12k *ar;
+ int i, ret;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (ah->num_radio == 1) {
+ ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0],
+ combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d",
+ ret);
+ goto err_free_combinations;
+ }
+
+ goto out;
+ }
+
+ /* there are multiple radios */
+
+ radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL);
+ if (!radio) {
+ ret = -ENOMEM;
+ goto err_free_combinations;
+ }
+
+ for_each_ar(ah, ar, i) {
+ comb = kzalloc(sizeof(*comb), GFP_KERNEL);
+ if (!comb) {
+ ret = -ENOMEM;
+ goto err_free_radios;
+ }
+
+ ret = ath12k_mac_setup_radio_iface_comb(ar, comb);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d",
+ i, ret);
+ kfree(comb);
+ goto err_free_radios;
+ }
+
+ radio[i].freq_range = &ar->freq_range;
+ radio[i].n_freq_range = 1;
+
+ radio[i].iface_combinations = comb;
+ radio[i].n_iface_combinations = 1;
+ }
+
+ ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup global interface combinations: %d",
+ ret);
+ goto err_free_all_radios;
+ }
+ wiphy->radio = radio;
+ wiphy->n_radio = ah->num_radio;
+
+out:
wiphy->iface_combinations = combinations;
wiphy->n_iface_combinations = 1;
return 0;
+
+err_free_all_radios:
+ i = ah->num_radio;
+
+err_free_radios:
+ while (i--)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(radio);
+
+err_free_combinations:
+ kfree(combinations);
+
+ return ret;
}
static const u8 ath12k_if_types_ext_capa[] = {
@@ -9444,7 +10715,7 @@ static const u8 ath12k_if_types_ext_capa_ap[] = {
[10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
-static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
{
.extended_capabilities = ath12k_if_types_ext_capa,
.extended_capabilities_mask = ath12k_if_types_ext_capa,
@@ -9461,6 +10732,8 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
.extended_capabilities_len =
sizeof(ath12k_if_types_ext_capa_ap),
+ .eml_capabilities = 0,
+ .mld_capa_and_ops = 0,
},
};
@@ -9477,7 +10750,6 @@ static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
- struct wiphy *wiphy = hw->wiphy;
struct ath12k *ar;
int i;
@@ -9491,8 +10763,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
for_each_ar(ah, ar, i)
ath12k_mac_cleanup_unregister(ar);
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+ ath12k_mac_cleanup_iface_combinations(ah);
SET_IEEE80211_DEV(hw, NULL);
}
@@ -9567,7 +10838,10 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
if (ret)
goto err_cleanup_unregister;
- ht_cap &= ht_cap_info;
+ /* 6 GHz does not support HT Cap, hence do not consider it */
+ if (!ar->supports_6ghz)
+ ht_cap &= ht_cap_info;
+
wiphy->max_ap_assoc_sta += ar->max_num_stations;
/* Advertise the max antenna support of all radios, driver can handle
@@ -9631,7 +10905,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -9647,7 +10921,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* handle it when the ht capability different for each band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
- (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
+ (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -9669,6 +10943,15 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
*/
wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ /* Copy over MLO related capabilities received from
+ * WMI_SERVICE_READY_EXT2_EVENT if single_chip_mlo_supp is set.
+ */
+ if (ab->ag->mlo_capable) {
+ ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
+ ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ }
+
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
@@ -9722,13 +11005,13 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ret = ath12k_wow_init(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
- goto err_free_if_combs;
+ goto err_cleanup_if_combs;
}
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
- goto err_free_if_combs;
+ goto err_cleanup_if_combs;
}
if (is_monitor_disable)
@@ -9758,9 +11041,8 @@ err_unregister_hw:
ieee80211_unregister_hw(hw);
-err_free_if_combs:
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+err_cleanup_if_combs:
+ ath12k_mac_cleanup_iface_combinations(ah);
err_complete_cleanup_unregister:
i = ah->num_radio;
@@ -9794,6 +11076,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+ ar->scan.arvif = NULL;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
@@ -9808,40 +11091,179 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
+ init_completion(&ar->mlo_setup_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
}
-int ath12k_mac_register(struct ath12k_base *ab)
+static int __ath12k_mac_mlo_setup(struct ath12k *ar)
{
- struct ath12k_hw *ah;
- int i;
+ u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {};
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct wmi_mlo_setup_arg mlo = {};
+ struct ath12k_pdev *pdev;
+ unsigned long time_left;
+ int i, j, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ reinit_completion(&ar->mlo_setup_done);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ partner_link_id[num_link] = pdev->hw_link_id;
+ num_link++;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n",
+ i, j, pdev->hw_link_id, num_link);
+ }
+ }
+
+ mlo.group_id = cpu_to_le32(ag->id);
+ mlo.partner_link_id = partner_link_id;
+ mlo.num_partner_links = num_link;
+ ar->mlo_setup_status = 0;
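+ /* cleared here; the MLO setup-complete event handler is expected to record the firmware status */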
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link);
+
+ ret = ath12k_wmi_mlo_setup(ar, &mlo);
+ if (ret) {
+ ath12k_err(ab, "failed to send setup MLO WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->mlo_setup_done,
+ WMI_MLO_CMD_TIMEOUT_HZ);
+
+ if (!time_left || ar->mlo_setup_status)
+ return ar->mlo_setup_status ? : -ETIMEDOUT;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
+
+static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
int ret;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = 320000;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ ret = ath12k_wmi_mlo_teardown(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_setup(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret);
+ goto err_setup;
+ }
+ }
+ }
+
+ return 0;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+err_setup:
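+ /* tear down, in reverse order, every radio that was set up so far */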
+ for (i = i - 1; i >= 0; i--) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for (j = j - 1; j >= 0; j--) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ __ath12k_mac_mlo_teardown(ar);
+ }
+ }
+
+ return ret;
+}
+
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret, i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_teardown(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret);
+ break;
+ }
+ }
+ }
+}
+
+int ath12k_mac_register(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab = ag->ab[0];
+ struct ath12k_hw *ah;
+ int i;
+ int ret;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
ret = ath12k_mac_hw_register(ah);
if (ret)
goto err;
}
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -9851,13 +11273,16 @@ err:
return ret;
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
+ struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- for (i = ab->num_hw - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ for (i = ag->num_hw - 1; i >= 0; i--) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -9870,12 +11295,13 @@ static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
ieee80211_free_hw(ah->hw);
}
-static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
struct ath12k_pdev_map *pdev_map,
u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
int i;
@@ -9891,6 +11317,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->num_radio = num_pdev_map;
mutex_init(&ah->hw_mutex);
+ INIT_LIST_HEAD(&ah->ml_peers);
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
@@ -9905,54 +11332,116 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
+ ag->hw_links[ar->hw_link_id].device_id = ab->device_id;
+ ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx;
+
ath12k_mac_setup(ar);
+ ath12k_dp_pdev_pre_alloc(ar);
}
return ah;
}
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
{
struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_base *ab = ag->ab[0];
+ int i, j;
+ struct ath12k_hw *ah;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- if (!pdev->ar)
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
continue;
- pdev->ar = NULL;
+ for (j = 0; j < ab->num_radios; j++) {
+ pdev = &ab->pdevs[j];
+ if (!pdev->ar)
+ continue;
+ pdev->ar = NULL;
+ }
}
- for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
{
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
+{
+ struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO];
+ int mac_id, device_id, total_radio, num_hw;
+ struct ath12k_base *ab;
struct ath12k_hw *ah;
- struct ath12k_pdev_map pdev_map[MAX_RADIOS];
int ret, i, j;
u8 radio_per_hw;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ total_radio = 0;
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_mac_set_device_defaults(ab);
+ total_radio += ab->num_radios;
+ }
- ab->num_hw = ab->num_radios;
- radio_per_hw = 1;
+ if (!total_radio)
+ return -EINVAL;
- for (i = 0; i < ab->num_hw; i++) {
+ if (WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO))
+ return -ENOSPC;
+
+ /* All pdevs of a hardware group which participates in multi-link
+ * operation are combined and registered as a single wiphy; otherwise
+ * each pdev is registered separately.
+ */
+ if (ag->mlo_capable)
+ radio_per_hw = total_radio;
+ else
+ radio_per_hw = 1;
+
+ num_hw = total_radio / radio_per_hw;
+
+ ag->num_hw = 0;
+ device_id = 0;
+ mac_id = 0;
+ for (i = 0; i < num_hw; i++) {
for (j = 0; j < radio_per_hw; j++) {
+ if (device_id >= ag->num_devices || !ag->ab[device_id]) {
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ ab = ag->ab[device_id];
pdev_map[j].ab = ab;
- pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ pdev_map[j].pdev_idx = mac_id;
+ mac_id++;
+
+ /* If mac_id falls beyond the current device's MACs,
+ * move on to the next device.
+ */
+ if (mac_id >= ab->num_radios) {
+ mac_id = 0;
+ device_id++;
+ }
}
- ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
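+ /* the first mapped device backs this hw; it is used for logging and as ah->dev below */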
+ ab = pdev_map->ab;
+
+ ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
if (!ah) {
ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
i);
@@ -9960,20 +11449,22 @@ int ath12k_mac_allocate(struct ath12k_base *ab)
goto err;
}
- ab->ah[i] = ah;
- }
+ ah->dev = ab->dev;
- ath12k_dp_pdev_pre_alloc(ab);
+ ag->ah[i] = ah;
+ ag->num_hw++;
+ }
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- if (!ab->ah[i])
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
return ret;
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index d382337ba649..3594729b6397 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -14,6 +14,7 @@
struct ath12k;
struct ath12k_base;
struct ath12k_hw;
+struct ath12k_hw_group;
struct ath12k_pdev_map;
struct ath12k_generic_iter {
@@ -44,6 +45,12 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_LINK_ID 0
#define ATH12K_INVALID_LINK_ID 255
+/* Default link placed after the IEEE 802.11 defined max link id limit,
+ * for internal driver use.
+ */
+#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@@ -52,12 +59,17 @@ enum ath12k_supported_bw {
ATH12K_BW_320 = 4,
};
+struct ath12k_mac_get_any_chanctx_conf_arg {
+ struct ath12k *ar;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+};
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -89,5 +101,12 @@ int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag);
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag);
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag);
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index cf907550e6a4..06cff3849ab8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1123,6 +1123,9 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
+ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
+
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
@@ -1147,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab)
void ath12k_pci_stop(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
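+ /* nothing to stop if the pipes were never initialized, e.g. on an early probe failure */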
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
@@ -1717,6 +1725,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
+ ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1725,6 +1734,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
+ ath12k_fw_unmap(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 7a62665b8af9..792cca8a3fb1 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -8,6 +8,22 @@
#include "peer.h"
#include "debug.h"
+static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+{
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+ if (!ether_addr_equal(ml_peer->addr, addr))
+ continue;
+
+ return ml_peer;
+ }
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
@@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
return NULL;
}
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+ int ml_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ml_peer_id == peer->ml_id)
+ return peer;
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
@@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
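+ /* ids tagged with ATH12K_PEER_ML_ID_VALID refer to an MLO peer rather than a per-link firmware peer */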
+ if (peer_id & ATH12K_PEER_ML_ID_VALID)
+ return ath12k_peer_find_by_ml_id(ab, peer_id);
+
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
@@ -231,8 +264,9 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
return 0;
}
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
+ struct ath12k_base *ab = ar->ab;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -241,12 +275,25 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@@ -266,7 +313,11 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_link_sta *arsta;
+ u8 link_id = arvif->link_id;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
+ u16 ml_peer_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -332,6 +383,29 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+
+ peer->link_id = arsta->link_id;
+
+ /* Fill ML info into created peer */
+ if (sta->mlo) {
+ ml_peer_id = ahsta->ml_peer_id;
+ peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ ether_addr_copy(peer->ml_addr, sta->addr);
+
+ /* the assoc link is considered primary for now */
+ peer->primary_link = arsta->is_assoc_link;
+ peer->mlo = true;
+ } else {
+ peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
+ peer->primary_link = true;
+ peer->mlo = false;
+ }
+ }
+
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@@ -341,3 +415,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
return 0;
}
+
+static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+{
+ u16 ml_peer_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
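+ /* linear scan for a free id; despite the name, a set bit in free_ml_peer_id_map marks an id as in use */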
+ for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+ if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+ continue;
+
+ set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+ break;
+ }
+
+ if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+ ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ return ml_peer_id;
+}
+
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (ml_peer) {
+ ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
+ ml_peer->id, sta->addr);
+ return -EEXIST;
+ }
+
+ ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+ if (!ml_peer)
+ return -ENOMEM;
+
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
+ sta->addr);
+ kfree(ml_peer);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(ml_peer->addr, sta->addr);
+ ml_peer->id = ahsta->ml_peer_id;
+ list_add(&ml_peer->list, &ah->ml_peers);
+
+ return 0;
+}
+
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (!ml_peer) {
+ ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
+ return -EINVAL;
+ }
+
+ list_del(&ml_peer->list);
+ kfree(ml_peer);
+
+ return 0;
+}
+
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long links;
+ struct ath12k *ar;
+ int ret, err_ret = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ /* FW expects all link peers to be deleted at once, before the host
+ * waits for the peer unmap or delete responses.
+ */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ arvif->vdev_id, arsta->addr, ret);
+ err_ret = ret;
+ continue;
+ }
+ }
+
+ /* Ensure all link peers are deleted and unmapped */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ err_ret = ret;
+ continue;
+ }
+ ar->num_peers--;
+ }
+
+ return err_ret;
+}
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index b955f0cdf598..5870ee11a8c7 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -19,6 +19,8 @@ struct ppdu_user_delayba {
u32 resp_rate_flags;
};
+#define ATH12K_PEER_ML_ID_VALID BIT(13)
+
struct ath12k_peer {
struct list_head list;
struct ieee80211_sta *sta;
@@ -44,9 +46,28 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
-
+ bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;
+
+ u16 ml_id;
+
+ /* any other ML info common to all partners can be added
+ * here and would be the same for all partner peers.
+ */
+ u8 ml_addr[ETH_ALEN];
+
+ /* To ensure dp work that must happen only once runs on a single (primary) link */
+ bool primary_link;
+
+ /* for reference to ath12k_link_sta */
+ u8 link_id;
+};
+
+struct ath12k_ml_peer {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u16 id;
};
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -66,5 +87,8 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index b93ce9f87f61..5c3563383fab 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2016,26 +2016,57 @@ static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
-static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
- struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab, *partner_ab;
+ int i, j, hw_id_base;
+
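+ /* hw link ids are global across the group: each device's base equals
+ * the radio count of all devices ahead of it in the WSI order.
+ */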
+ for (i = 0; i < ag->num_devices; i++) {
+ hw_id_base = 0;
+ ab = ag->ab[i];
+
+ for (j = 0; j < ag->num_devices; j++) {
+ partner_ab = ag->ab[j];
+
+ if (partner_ab->wsi_info.index >= ab->wsi_info.index)
+ continue;
+
+ hw_id_base += partner_ab->qmi.num_radios;
+ }
+
+ ab->wsi_info.hw_link_id_base = hw_id_base;
+ }
+
+ ag->hw_link_id_init_done = true;
+}
+
+static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
struct wlfw_host_mlo_chip_info_s_v01 *info;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
u8 hw_link_id = 0;
- int i;
+ int i, j, ret;
- if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+ if (!ag->mlo_capable) {
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "intra device MLO is disabled hence skip QMI MLO cap");
- return;
+ "MLO is disabled hence skip QMI MLO cap");
+ return 0;
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
- ab->mlo_capable_flags = 0;
+ ab->single_chip_mlo_supp = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
- return;
+ return 0;
+ }
+
+ if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
+ return -EINVAL;
}
req->mlo_capable_valid = 1;
@@ -2043,30 +2074,88 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_chip_id_valid = 1;
req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
- req->mlo_group_id = 0;
+ req->mlo_group_id = ag->id;
req->max_mlo_peer_valid = 1;
/* Max peer number generally won't change for the same device
* but needs to be synced with the host driver.
*/
req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
- req->mlo_num_chips = 1;
+ req->mlo_num_chips = ag->num_devices;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
+ req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips);
- info = &req->mlo_chip_info[0];
- info->chip_id = ab->device_id;
- info->num_local_links = ab->qmi.num_radios;
+ mutex_lock(&ag->mutex);
- for (i = 0; i < info->num_local_links; i++) {
- info->hw_link_id[i] = hw_link_id;
- info->valid_mlo_link_id[i] = 1;
+ if (!ag->hw_link_id_init_done)
+ ath12k_host_cap_hw_link_id_init(ag);
- hw_link_id++;
+ for (i = 0; i < ag->num_devices; i++) {
+ info = &req->mlo_chip_info[i];
+ partner_ab = ag->ab[i];
+
+ if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
+ ret = -EINVAL;
+ goto device_cleanup;
+ }
+
+ info->chip_id = partner_ab->device_id;
+ info->num_local_links = partner_ab->qmi.num_radios;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
+ info->chip_id, info->num_local_links);
+
+ for (j = 0; j < info->num_local_links; j++) {
+ info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j;
+ info->valid_mlo_link_id[j] = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
+ info->hw_link_id[j]);
+
+ hw_link_id++;
+ }
}
+ if (hw_link_id <= 0)
+ ag->mlo_capable = false;
+
req->mlo_chip_info_valid = 1;
+
+ mutex_unlock(&ag->mutex);
+
+ return 0;
+
+device_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ info = &req->mlo_chip_info[i];
+
+ memset(info, 0, sizeof(*info));
+ }
+
+ req->mlo_num_chips = 0;
+ req->mlo_num_chips_valid = 0;
+
+ req->max_mlo_peer = 0;
+ req->max_mlo_peer_valid = 0;
+ req->mlo_group_id = 0;
+ req->mlo_group_id_valid = 0;
+ req->mlo_chip_id = 0;
+ req->mlo_chip_id_valid = 0;
+ req->mlo_capable = 0;
+ req->mlo_capable_valid = 0;
+
+ ag->mlo_capable = false;
+
+ mutex_unlock(&ag->mutex);
+
+ return ret;
}
-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2111,7 +2200,9 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
}
- ath12k_host_cap_parse_mlo(ab, &req);
+ ret = ath12k_host_cap_parse_mlo(ab, &req);
+ if (ret < 0)
+ goto out;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
@@ -2174,12 +2265,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
- if (resp.single_chip_mlo_support_valid) {
- if (resp.single_chip_mlo_support)
- ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- else
- ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- }
+ if (resp.single_chip_mlo_support_valid &&
+ resp.single_chip_mlo_support)
+ ab->single_chip_mlo_supp = true;
if (!resp.num_phy_valid) {
ret = -ENODATA;
@@ -2275,7 +2363,9 @@ resp_out:
return ret;
}
-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2350,30 +2440,125 @@ out:
return ret;
}
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk,
+ int idx)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct target_mem_chunk *mlo_chunk;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done || ag->num_started)
+ return;
+
+ if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+ ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
+ return;
+ }
+
+ mlo_chunk = &ag->mlo_mem.chunk[idx];
+ if (mlo_chunk->v.addr) {
+ dma_free_coherent(ab->dev,
+ mlo_chunk->size,
+ mlo_chunk->v.addr,
+ mlo_chunk->paddr);
+ mlo_chunk->v.addr = NULL;
+ }
+
+ mlo_chunk->paddr = 0;
+ mlo_chunk->size = 0;
+ chunk->v.addr = NULL;
+ chunk->paddr = 0;
+ chunk->size = 0;
+}
+
static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
- dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].prev_size,
- ab->qmi.target_mem[i].v.addr,
- ab->qmi.target_mem[i].paddr);
- ab->qmi.target_mem[i].v.addr = NULL;
+ if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+ ath12k_qmi_free_mlo_mem_chunk(ab,
+ &ab->qmi.target_mem[i],
+ mlo_idx++);
+ } else {
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
+ }
+
+ if (!ag->num_started && ag->mlo_mem.init_done) {
+ ag->mlo_mem.init_done = false;
+ ag->mlo_mem.mlo_mem_size = 0;
}
}
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk)
+{
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
+ chunk->v.addr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->v.addr) {
+ if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+ ab->qmi.target_mem_delayed = true;
+ ath12k_warn(ab,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return -EAGAIN;
+ }
+ ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+ chunk->type, chunk->size);
+ return -ENOMEM;
+ }
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
+ return 0;
+}
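
The reuse rule factored out above boils down to a single predicate; a sketch of it in isolation (names are illustrative, not driver API):

#include <stdbool.h>
#include <stddef.h>

/* A chunk allocated before a firmware restart may be handed back to the
 * firmware only if its type and size are both unchanged; otherwise it is
 * freed and reallocated.
 */
static bool chunk_reusable(void *addr, unsigned int prev_type,
			   unsigned int type, unsigned int prev_size,
			   unsigned int size)
{
	return addr != NULL && prev_type == type && prev_size == size;
}
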
+
static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
- struct target_mem_chunk *chunk;
+ struct target_mem_chunk *chunk, *mlo_chunk;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx, ret;
+ int mlo_size = 0;
+
+ mutex_lock(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done) {
+ memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+ ag->mlo_mem.init_done = true;
+ }
ab->qmi.target_mem_delayed = false;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
/* Allocate memory for the region and the functionality supported
@@ -2385,42 +2570,41 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
- /* Firmware reloads in recovery/resume.
- * In such cases, no need to allocate memory for FW again.
- */
- if (chunk->v.addr) {
- if (chunk->prev_type == chunk->type &&
- chunk->prev_size == chunk->size)
- goto this_chunk_done;
-
- /* cannot reuse the existing chunk */
- dma_free_coherent(ab->dev, chunk->prev_size,
- chunk->v.addr, chunk->paddr);
- chunk->v.addr = NULL;
+ ret = ath12k_qmi_alloc_chunk(ab, chunk);
+ if (ret)
+ goto err;
+ break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ mlo_size += chunk->size;
+ if (ag->mlo_mem.mlo_mem_size &&
+ mlo_size > ag->mlo_mem.mlo_mem_size) {
+ ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
+ mlo_size, ag->mlo_mem.mlo_mem_size);
+ ret = -EINVAL;
+ goto err;
}
- chunk->v.addr = dma_alloc_coherent(ab->dev,
- chunk->size,
- &chunk->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!chunk->v.addr) {
- if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
- ab->qmi.target_mem_delayed = true;
- ath12k_warn(ab,
- "qmi dma allocation failed (%d B type %u), will try later with small size\n",
- chunk->size,
- chunk->type);
- ath12k_qmi_free_target_mem_chunk(ab);
- return 0;
+ mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+ if (mlo_chunk->paddr) {
+ if (chunk->size != mlo_chunk->size) {
+ ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
+ mlo_idx, chunk->size, mlo_chunk->size);
+ ret = -EINVAL;
+ goto err;
}
- ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
- chunk->type, chunk->size);
- return -ENOMEM;
+ } else {
+ mlo_chunk->size = chunk->size;
+ mlo_chunk->type = chunk->type;
+ ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+ if (ret)
+ goto err;
+ memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
}
- chunk->prev_type = chunk->type;
- chunk->prev_size = chunk->size;
-this_chunk_done:
+ chunk->paddr = mlo_chunk->paddr;
+ chunk->v.addr = mlo_chunk->v.addr;
+ mlo_idx++;
+
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2430,10 +2614,39 @@ this_chunk_done:
break;
}
}
+
+ if (!ag->mlo_mem.mlo_mem_size) {
+ ag->mlo_mem.mlo_mem_size = mlo_size;
+ } else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+ ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
+ ag->mlo_mem.mlo_mem_size, mlo_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_qmi_free_target_mem_chunk(ab);
+
+ mutex_unlock(&ag->mutex);
+
+ /* The firmware will attempt to request memory in smaller chunks
+ * on the next try. However, the current caller should be notified
+ * that this instance of request parsing was successful.
+	 * Therefore, return 0.
+ */
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ return ret;
}
-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req = {};
struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2619,8 +2832,10 @@ out:
return ret;
}
-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
- enum ath12k_qmi_bdf_type type)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2791,7 +3006,9 @@ out:
return ret;
}
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -3023,6 +3240,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
+ clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3079,9 +3298,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
return 0;
}
-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
{
- struct ath12k_base *ab = qmi->ab;
+ struct ath12k_qmi *qmi = &ab->qmi;
+
+ spin_lock(&qmi->event_lock);
+
+ if (ath12k_qmi_get_event_block(qmi))
+ ath12k_qmi_set_event_block(qmi, false);
+
+ spin_unlock(&qmi->event_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+ ab->device_id);
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+
+ if (!(ab && ab->qmi.num_radios != U8_MAX))
+ return false;
+ }
+
+ return true;
+}
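
For readability, the check above is equivalent (by De Morgan) to a form that spells out the two failure cases; a hypothetical rewrite:

		if (!ab || ab->qmi.num_radios == U8_MAX)
			return false;

That is, every slot in the group must be populated and must already have reported its radio count through the PHY capability exchange (num_radios is treated as unset while it is U8_MAX).
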
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ spin_lock(&ab->qmi.event_lock);
+
+ if (ath12k_qmi_get_event_block(&ab->qmi)) {
+ spin_unlock(&ab->qmi.event_lock);
+ return ab;
+ }
+
+ spin_unlock(&ab->qmi.event_lock);
+ }
+
+ return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab, *block_ab;
+ struct ath12k_hw_group *ag = ab->ag;
int ret;
ath12k_qmi_phy_cap_send(ab);
@@ -3092,16 +3371,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
return ret;
}
- ret = ath12k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
- return ret;
+ spin_lock(&qmi->event_lock);
+
+ ath12k_qmi_set_event_block(qmi, true);
+
+ spin_unlock(&qmi->event_lock);
+
+ mutex_lock(&ag->mutex);
+
+ if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+ ath12k_core_hw_group_set_mlo_capable(ag);
+
+ block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+ if (block_ab)
+ ath12k_qmi_trigger_host_cap(block_ab);
}
+ mutex_unlock(&ag->mutex);
+
return ret;
}
-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3115,7 +3408,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
return ret;
}
-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3280,6 +3575,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
.del_server = ath12k_qmi_ops_del_server,
};
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+ ab->device_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3306,7 +3616,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
@@ -3320,20 +3629,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
if (ab->is_reset)
ath12k_hal_dump_srng_stats(ab);
+
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
- clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
- ath12k_core_qmi_firmware_ready(ab);
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (!ret)
+ set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ &ab->dev_flags);
break;
+ case ATH12K_QMI_EVENT_HOST_CAP:
+ ret = ath12k_qmi_event_host_cap(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
@@ -3386,11 +3703,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
+ if (!ab->qmi.ab)
+ return;
+
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.ab = NULL;
}
void ath12k_qmi_free_resource(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 0dfcbd8cb59b..45d7c3fcafdd 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -68,6 +68,7 @@ enum ath12k_qmi_event_type {
ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
ATH12K_QMI_EVENT_POWER_UP,
ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_HOST_CAP,
ATH12K_QMI_EVENT_MAX,
};
@@ -142,6 +143,10 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+
+ /* protected with struct ath12k_qmi::event_lock */
+ bool block_event;
+
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
@@ -167,6 +172,7 @@ enum ath12k_qmi_target_mem {
BDF_MEM_REGION_TYPE = 0x2,
M3_DUMP_REGION_TYPE = 0x3,
CALDB_MEM_REGION_TYPE = 0x4,
+ MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
PAGEABLE_MEM_REGION_TYPE = 0x9,
};
@@ -594,11 +600,26 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ return qmi->block_event;
+}
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index dced2aa9ba1a..abb510d235a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -821,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+ struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@@ -830,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
- (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+ (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -879,6 +882,21 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
txrx_streams->supported_rx_streams =
cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
+ ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ if (is_ml_vdev) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+ sizeof(*ml_params));
+ ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+ }
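
With the optional TLV appended, the ML vdev create buffer is the fixed command, the txrx-streams array TLV, then the MLO-params TLV; schematically (widths not to scale):

/*
 * | wmi_vdev_create_cmd | TLV hdr | txrx_streams x2 | TLV hdr | ml_params |
 *                                                    \_____ only when is_ml_vdev
 */
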
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
args->if_id, args->type, args->subtype,
@@ -1020,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
+ struct wmi_vdev_start_mlo_params *ml_params;
+ struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
- int ret, len;
+ int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+ if (!restart && arg->ml.enabled) {
+ ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ len += ml_arg_size;
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1085,6 +1111,61 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
ptr += sizeof(*tlv);
+ if (ml_arg_size) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->flags = le32_encode_bits(arg->ml.enabled,
+ ATH12K_WMI_FLAG_MLO_ENABLED) |
+ le32_encode_bits(arg->ml.assoc_link,
+ ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+ le32_encode_bits(arg->ml.mcast_link,
+ ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+ le32_encode_bits(arg->ml.link_add,
+ ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+ arg->vdev_id, ml_params->flags);
+
+ ptr += sizeof(*ml_params);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ ptr += TLV_HDR_SIZE;
+
+ partner_info = ptr;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ partner_info->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ sizeof(*partner_info));
+ partner_info->vdev_id =
+ cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ ether_addr_copy(partner_info->vdev_addr.addr,
+ arg->ml.partner_info[i].addr);
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
+ partner_info->vdev_id, partner_info->hw_link_id,
+ partner_info->vdev_addr.addr);
+
+ partner_info++;
+ }
+
+ ptr = partner_info;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
@@ -1149,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ struct wmi_peer_create_mlo_params *ml_param;
+ void *ptr;
+ struct wmi_tlv *tlv;
- skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1163,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ptr = skb->data + sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_param));
+ ptr += TLV_HDR_SIZE;
+ ml_param = ptr;
+ ml_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+ sizeof(*ml_param));
+ if (arg->ml_enabled)
+ ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ ptr += sizeof(*ml_param);
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "WMI peer create vdev_id %d peer_addr %pM\n",
- arg->vdev_id, arg->peer_addr);
+ "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+ arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@@ -2001,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+ struct wmi_peer_assoc_mlo_params *ml_params;
+ struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
+ __le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@@ -2018,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
- TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
- TLV_HDR_SIZE + TLV_HDR_SIZE;
+ TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+ if (arg->ml.enabled)
+ len += TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+ else
+ len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -2143,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
- /* MLO header tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (!len)
+ goto skip_ml_params;
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+ len);
+ ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.assoc_link)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.primary_umac)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.logical_link_idx_valid)
+ ml_params->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+ if (arg->ml.peer_id_valid)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+
+ ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+ ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+ ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+ ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+ ptr += sizeof(*ml_params);
+
+skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@@ -2165,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
- /* ML partner links tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+ /* fill ML Partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (len == 0)
+ goto send;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+ partner_info = ptr;
+ partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+ sizeof(*partner_info));
+ partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.partner_info[i].assoc_link)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.partner_info[i].primary_umac)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.partner_info[i].logical_link_idx_valid) {
+ v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+ partner_info->flags |= v;
+ }
+
+ partner_info->logical_link_idx =
+ cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+ ptr += sizeof(*partner_info);
+ }
+
+send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@@ -4495,6 +4662,9 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
+ pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
+ pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
+
return 0;
}
@@ -4681,6 +4851,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
+ u32 num_reg_rules)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
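
A standalone check of the same filter is easy to model; the boundary constant here is a stand-in for the driver's ATH12K_MIN_6G_FREQ, and the frequencies are made up:

#include <stdio.h>

#define MIN_6G_FREQ 5925 /* stand-in for ATH12K_MIN_6G_FREQ */

int main(void)
{
	/* start frequencies (MHz) as they might arrive in a 5 GHz rule list */
	unsigned int start_freq[] = { 5180, 5500, 5955, 6115 };
	int invalid = 0;

	for (int i = 0; i < 4; i++)
		if (start_freq[i] >= MIN_6G_FREQ)
			invalid++;

	printf("%d invalid 5 GHz rules\n", invalid); /* prints 2 */
	return 0;
}
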
+
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@@ -4691,6 +4877,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
@@ -4784,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
- * list for country US.
- * Having same 6G reg rule in 5G and 6G rules list causes
- * intersect check to be true, and same rules will be shown
- * multiple times in iw cmd. So added hack below to avoid
- * parsing 6G rule from 5G reg rule list, and this can be
- * removed later, after FW updates to remove 6G reg rule
- * from 5G rules list.
- */
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
- }
-
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -4900,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
+ ext_wmi_reg_rule += num_2g_reg_rules;
+
+	/* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
+	 * for a few countries, along with a separate 6 GHz rule.
+	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
+	 * lists causes the intersect check to be true, and the same rules
+	 * will be shown multiple times in the iw output.
+	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+	 */
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
+ num_5g_reg_rules);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
+ }
+
if (num_5g_reg_rules) {
- ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@@ -4913,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
- ext_wmi_reg_rule += num_5g_reg_rules;
+	/* We have adjusted the number of 5 GHz reg rules above, but the
+	 * pointer into ext_wmi_reg_rule still needs to advance past those
+	 * invalid rules as well.
+	 *
+	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
+	 */
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
@@ -5192,6 +5391,9 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6042,7 +6244,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
goto exit;
}
- if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
WMI_RX_STATUS_ERR_CRC))) {
@@ -6171,7 +6373,8 @@ static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == state &&
- ar->scan.vdev_id == vdev_id) {
+ ar->scan.arvif &&
+ ar->scan.arvif->vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
@@ -6687,6 +6890,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
+ struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
@@ -6705,7 +6909,20 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
ahvif = arvif->ahvif;
- if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
+ arvif->link_id);
+ continue;
+ }
+
+ conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+ if (!conf) {
+ ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ continue;
+ }
+
+ if (arvif->is_up && conf->csa_active)
ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
@@ -6750,6 +6967,7 @@ static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
const void **tb;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
const struct ath12k_wmi_pdev_radar_event *ev;
struct ath12k *ar;
int ret;
@@ -6785,13 +7003,22 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
+ ath12k_mac_get_any_chanctx_conf_iter, &arg);
+ if (!arg.chanctx_conf) {
+ ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
+ goto exit;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
exit:
rcu_read_unlock();
@@ -7146,6 +7373,79 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_setup_complete_event *ev;
+ struct ath12k *ar = NULL;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+ goto skip_lookup;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+ ar = pdev->ar;
+ break;
+ }
+ }
+
+skip_lookup:
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+ ev->pdev_id, ev->status);
+ goto out;
+ }
+
+ ar->mlo_setup_status = le32_to_cpu(ev->status);
+ complete(&ar->mlo_setup_done);
+
+out:
+ kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_teardown_complete_event *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch teardown complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7250,13 +7550,6 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_P2P_NOA_EVENTID:
ath12k_wmi_p2p_noa_event(ab, skb);
break;
- /* add Unsupported events here */
- case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
- case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "ignoring unsupported event 0x%x\n", id);
- break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
@@ -7272,6 +7565,25 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath12k_wmi_gtk_offload_status_event(ab, skb);
break;
+ case WMI_MLO_SETUP_COMPLETE_EVENTID:
+ ath12k_wmi_event_mlo_setup_complete(ab, skb);
+ break;
+ case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+ ath12k_wmi_event_teardown_complete(ab, skb);
+ break;
+ /* add Unsupported events (rare) here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ /* add Unsupported events (frequent) here */
+ case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
+ case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+		/* debug output might flood, hence silently ignore (no-op) */
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -8088,3 +8400,104 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
+{
+ struct wmi_mlo_setup_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ u32 *partner_links, num_links;
+ int i, ret, buf_len, arg_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+
+ num_links = mlo_params->num_partner_links;
+ arg_len = num_links * sizeof(u32);
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+ sizeof(*cmd));
+ cmd->mld_group_id = mlo_params->group_id;
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+ ptr += TLV_HDR_SIZE;
+
+ partner_links = ptr;
+ for (i = 0; i < num_links; i++)
+ partner_links[i] = mlo_params->partner_link_id[i];
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
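
The setup command buffer is a fixed header followed by a UINT32-array TLV carrying the partner link IDs; schematically:

/*
 * | wmi_mlo_setup_cmd | TLV hdr (ARRAY_UINT32, arg_len) | link_id[0..n-1] |
 * |<-- sizeof(*cmd) ->|<-------- TLV_HDR_SIZE --------->|<--- arg_len --->|
 */
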
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+ struct wmi_mlo_ready_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+ struct wmi_mlo_teardown_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 6f55dbdf629d..45fe699ce8a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -285,6 +285,7 @@ enum wmi_cmd_group {
WMI_GRP_TWT = 0x3e,
WMI_GRP_MOTION_DET = 0x3f,
WMI_GRP_SPATIAL_REUSE = 0x40,
+ WMI_GRP_MLO = 0x48,
};
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
@@ -665,6 +666,10 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+ WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO),
+ WMI_MLO_SETUP_CMDID,
+ WMI_MLO_READY_CMDID,
+ WMI_MLO_TEARDOWN_CMDID,
};
enum wmi_tlv_event_id {
@@ -706,6 +711,8 @@ enum wmi_tlv_event_id {
WMI_PDEV_RAP_INFO_EVENTID,
WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID =
+ WMI_SERVICE_READY_EXT2_EVENTID + 4,
WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_STOPPED_EVENTID,
WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -747,6 +754,7 @@ enum wmi_tlv_event_id {
WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3,
WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
WMI_TX_ADDBA_COMPLETE_EVENTID,
WMI_BA_RSP_SSN_EVENTID,
@@ -845,6 +853,8 @@ enum wmi_tlv_event_id {
WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -874,6 +884,9 @@ enum wmi_tlv_event_id {
WMI_TWT_DEL_DIALOG_EVENTID,
WMI_TWT_PAUSE_DIALOG_EVENTID,
WMI_TWT_RESUME_DIALOG_EVENTID,
+ WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO),
+ WMI_MLO_SETUP_COMPLETE_EVENTID,
+ WMI_MLO_TEARDOWN_COMPLETE_EVENTID,
};
enum wmi_tlv_pdev_param {
@@ -1929,6 +1942,19 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+ WMI_TAG_MLO_TX_SEND_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+ WMI_TAG_MLO_SETUP_CMD = 0x3C9,
+ WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+ WMI_TAG_MLO_READY_CMD,
+ WMI_TAG_MLO_TEARDOWN_CMD,
+ WMI_TAG_MLO_TEARDOWN_COMPLETE,
+ WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+ WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+ WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+ WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
@@ -2690,6 +2716,8 @@ struct ath12k_wmi_caps_ext_params {
__le32 eht_cap_info_internal;
__le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
__le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+ __le32 eml_capability;
+ __le32 mld_capability;
} __packed;
/* 2 word representation of MAC addr */
@@ -2740,6 +2768,7 @@ struct ath12k_wmi_vdev_create_arg {
u8 if_stats_id;
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
+ u8 mld_addr[ETH_ALEN];
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2766,6 +2795,33 @@ struct ath12k_wmi_vdev_txrx_streams_params {
__le32 supported_rx_streams;
} __packed;
+struct wmi_vdev_create_mlo_params {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ struct ath12k_wmi_mac_addr_params vdev_addr;
+} __packed;
+
struct wmi_vdev_delete_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2909,6 +2965,27 @@ enum wmi_phy_mode {
MODE_MAX = 33,
};
+#define ATH12K_WMI_MLO_MAX_LINKS 4
+
+struct wmi_ml_partner_info {
+ u32 vdev_id;
+ u32 hw_link_id;
+ u8 addr[ETH_ALEN];
+ bool assoc_link;
+ bool primary_umac;
+ bool logical_link_idx_valid;
+ u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+ bool enabled;
+ bool assoc_link;
+ bool mcast_link;
+ bool link_add;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_vdev_start_req_arg {
u32 vdev_id;
u32 freq;
@@ -2946,12 +3023,19 @@ struct wmi_vdev_start_req_arg {
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u32 punct_bitmap;
+ struct wmi_ml_arg ml;
};
struct ath12k_wmi_peer_create_arg {
const u8 *peer_addr;
u32 peer_type;
u32 vdev_id;
+ bool ml_enabled;
+};
+
+struct wmi_peer_create_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
};
struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -3618,6 +3702,24 @@ struct wmi_vdev_install_key_arg {
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+ (ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+ bool enabled;
+ bool assoc_link;
+ bool primary_umac;
+ bool peer_id_valid;
+ bool logical_link_idx_valid;
+ bool bridge_peer;
+ u8 mld_addr[ETH_ALEN];
+ u32 logical_link_idx;
+ u32 ml_peer_id;
+ u32 ieee_link_id;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_rate_set_arg {
u32 num_rates;
u8 rates[WMI_MAX_SUPPORTED_RATES];
@@ -3692,8 +3794,36 @@ struct ath12k_wmi_peer_assoc_arg {
u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
u32 punct_bitmap;
+ bool is_assoc;
+ struct peer_assoc_mlo_params ml;
};
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ __le32 flags;
+ __le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+ struct ath12k_wmi_mac_addr_params mld_addr;
+ __le32 logical_link_idx;
+ __le32 ml_peer_id;
+ __le32 ieee_link_id;
+ __le32 emlsr_trans_timeout_us;
+ __le32 emlsr_trans_delay_us;
+ __le32 emlsr_padding_delay_us;
+} __packed;
+
struct wmi_peer_assoc_complete_cmd {
__le32 tlv_header;
struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -3943,7 +4073,6 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
#define MAX_6G_REG_RULES 5
-#define REG_US_5G_NUM_REG_RULES 4
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
@@ -4815,6 +4944,7 @@ struct wmi_probe_tmpl_cmd {
#define MAX_RADIOS 2
+#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4911,6 +5041,43 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+struct wmi_mlo_setup_cmd {
+ __le32 tlv_header;
+ __le32 mld_group_id;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_mlo_setup_arg {
+ __le32 group_id;
+ u8 num_partner_links;
+ u8 *partner_link_id;
+};
+
+struct wmi_mlo_ready_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+enum wmi_mlo_tear_down_reason_code_type {
+ WMI_MLO_TEARDOWN_SSR_REASON,
+};
+
+struct wmi_mlo_teardown_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 reason_code;
+} __packed;
+
+struct wmi_mlo_setup_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_mlo_teardown_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -5636,5 +5803,8 @@ int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
struct ath12k_link_vif *arvif);
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
const struct wmi_sta_keepalive_arg *arg);
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
+int ath12k_wmi_mlo_ready(struct ath12k *ar);
+int ath12k_wmi_mlo_teardown(struct ath12k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 61b2e3f15f0e..72ce321f2a77 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1441,6 +1441,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index acc84e6711b0..e5e274bc9e68 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -193,7 +193,7 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *conf)
{
- /* set alt to the conf with maximun ratio */
+ /* set alt to the conf with maximum ratio */
if (antcomb->first_ratio && antcomb->second_ratio) {
if (antcomb->rssi_second > antcomb->rssi_third) {
/* first alt*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d08ea0b28530..b26224480041 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -395,7 +395,7 @@ static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
ah->config.hw_hang_checks |= HW_MAC_HANG;
}
-/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index e9bd13eeee92..6595eca74997 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1170,7 +1170,7 @@ exit:
return false;
}
-/* Sets up the AR9003 hardware familiy callbacks */
+/* Sets up the AR9003 hardware family callbacks */
void ar9003_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 2b9c07961cd7..3f0543e55d9b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -637,7 +637,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
* same time. Since BT's calibration doesn't happen
* that often, we'll let BT completes calibration then
* we continue to wait for cal_grant from BT.
- * Orginal: Wait BT_CAL_GRANT.
+ * Original: Wait BT_CAL_GRANT.
* New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
* BT_CAL_DONE -> Wait BT_CAL_GRANT.
*/
@@ -747,7 +747,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* BT is sleeping. Check if BT wakes up during
* WLAN calibration. If BT wakes up during
* WLAN calibration, need to go through all
- * message exchanges again and recal.
+ * message exchanges again and recalibrate.
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
(AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ad72a30b67c3..e13873fb8e2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -246,7 +246,7 @@
/*
- * MRC Feild Definitions
+ * MRC Field Definitions
*/
#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
#define AR_PHY_SGI_DSC_MAN_S 4
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 29ca65a732a6..a728cc0387df 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -338,7 +338,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
- struct timespec64 tsf_ts;
+ ktime_t tsf_ts;
u64 tsf_val;
u32 last_beacon;
@@ -592,8 +592,8 @@ void ath_txq_schedule_all(struct ath_softc *sc);
int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble);
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+ int half_gi, bool shortPreamble);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1011,13 +1011,15 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
- struct timespec64 last_event_time;
+ ktime_t last_event_time;
#endif
unsigned long driver_data;
u8 gtt_cnt;
u32 intrstatus;
+ u32 rx_active_check_time;
+ u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
bool ps_idle;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b399a7926ef5..4a27e3753c03 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -293,7 +293,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
/* Modify TSF as required and update the HW. */
avp->chanctx->tsf_val += tsfadjust;
if (sc->cur_chan == avp->chanctx) {
- offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL);
+ offset = ath9k_hw_get_tsf_offset(avp->chanctx->tsf_ts, 0);
ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset);
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 4b331c85509c..b4ab85bd7895 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -16,29 +16,25 @@
#include "hw.h"
#include "hw-ops.h"
+#include <linux/sort.h>
#include <linux/export.h>
/* Common calibration code */
+static int rcmp_i16(const void *x, const void *y)
+{
+ /* Sort in reverse order. */
+ return *(int16_t *)y - *(int16_t *)x;
+}
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
- int16_t nfval;
- int16_t sort[ATH9K_NF_CAL_HIST_MAX];
- int i, j;
-
- for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
- sort[i] = nfCalBuffer[i];
+ int16_t nfcal[ATH9K_NF_CAL_HIST_MAX];
- for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
- for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
- if (sort[j] > sort[j - 1])
- swap(sort[j], sort[j - 1]);
- }
- }
- nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
+ memcpy(nfcal, nfCalBuffer, sizeof(nfcal));
+ sort(nfcal, ATH9K_NF_CAL_HIST_MAX, sizeof(int16_t), rcmp_i16, NULL);
- return nfval;
+ return nfcal[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
}
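
The change above swaps a hand-rolled bubble sort for the kernel's sort() and then picks the middle element of the descending history as the median. A userspace analogue with libc qsort, assuming a history depth matching ATH9K_NF_CAL_HIST_MAX (5 in the driver headers):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NF_HIST_MAX 5 /* mirrors ATH9K_NF_CAL_HIST_MAX */

static int rcmp_i16(const void *x, const void *y)
{
	return *(const int16_t *)y - *(const int16_t *)x; /* descending */
}

int main(void)
{
	int16_t hist[NF_HIST_MAX] = { -98, -101, -95, -99, -97 };

	qsort(hist, NF_HIST_MAX, sizeof(hist[0]), rcmp_i16);
	/* middle element of the sorted history is the median */
	printf("median NF: %d dBm\n", hist[(NF_HIST_MAX - 1) >> 1]);
	return 0;
}
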
static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 571062f2e82a..bae24e3d3168 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -17,7 +17,7 @@
#include "ath9k.h"
/* Set/change channels. If the channel is really being changed, it's done
- * by reseting the chip. To accomplish this we must first cleanup any pending
+ * by resetting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
static int ath_set_channel(struct ath_softc *sc)
@@ -232,16 +232,11 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
- u64 ms;
- struct timespec64 ts, *old;
+ ktime_t ts = ktime_get_raw();
+ s64 ms = ktime_ms_delta(ts, sc->last_event_time);
- ktime_get_raw_ts64(&ts);
- old = &sc->last_event_time;
- ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
- ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
sc->last_event_time = ts;
-
- return (u32)ms;
+ return ms;
}
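
The same pattern in userspace, for reference: a raw monotonic timestamp captured once and a millisecond delta computed from it (a sketch; the kernel helpers ktime_get_raw() and ktime_ms_delta() do the equivalent):

#include <stdio.h>
#include <time.h>

static long long raw_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long t0 = raw_ns();

	/* ... event being timed ... */

	printf("delta: %lld ms\n", (raw_ns() - t0) / 1000000);
	return 0;
}
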
void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
@@ -334,8 +329,8 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
- struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
+ ktime_t ts;
s32 offset;
beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
@@ -346,12 +341,12 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
- ktime_get_raw_ts64(&ts);
+ ts = ktime_get_raw();
cur_tsf = (u32) cur->tsf_val +
- ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
+ ath9k_hw_get_tsf_offset(cur->tsf_ts, ts);
prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
- prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
+ prev_tsf -= ath9k_hw_get_tsf_offset(prev->tsf_ts, ts);
/* Adjust the TSF time of the AP chanctx to keep its beacons
* at half beacon interval offset relative to the STA chanctx.
@@ -691,7 +686,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
*/
tsf_time = sc->sched.switch_start_time;
tsf_time -= (u32) sc->cur_chan->tsf_val +
- ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
tsf_time += ath9k_hw_gettsf32(ah);
sc->sched.beacon_adjust = false;
@@ -1230,10 +1225,10 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
- struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
+ ktime_t ts;
spin_lock_bh(&sc->chan_lock);
if (!sc->next_chan) {
@@ -1260,7 +1255,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
- ktime_get_raw_ts64(&ts);
+ ts = ktime_get_raw();
measure_time = true;
}
@@ -1277,7 +1272,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
- ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
+ sc->cur_chan->tsf_ts = ktime_get_raw();
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
@@ -1303,7 +1298,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
ath_set_channel(sc);
if (measure_time)
sc->sched.channel_switch_time =
- ath9k_hw_get_tsf_offset(&ts, NULL);
+ ath9k_hw_get_tsf_offset(ts, 0);
/*
* A reset will ensure that all queues are woken up,
* so there is no need to awaken them again.
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 4b27445a5fb8..628eeec4b82f 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -734,7 +734,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
ATH9K_RX_FILTER_PHYRADAR |
ATH9K_RX_FILTER_PHYERR);
- /* TODO: usually this should not be neccesary, but for some reason
+ /* TODO: usually this should not be necessary, but for some reason
* (or in some mode?) the trigger must be called after the
* configuration, otherwise the register will have its values reset
* (on my ar9220 to value 0x01002310)
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index eff894958a73..74a0134075cf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -750,6 +750,7 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_CALIBRATION] = "Calibration error",
[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
+ [RESET_TYPE_RX_INACTIVE] = "Rx path inactive",
};
int i;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 389459c04d14..cb3e75969875 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@ enum ath_reset_type {
RESET_TYPE_CALIBRATION,
RESET_TX_DMA_ERROR,
RESET_RX_DMA_ERROR,
+ RESET_TYPE_RX_INACTIVE,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 3689e12db9f7..2fb73a5e1d51 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -79,7 +79,7 @@ static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
const int DFS_UPPER_BIN_OFFSET = 64;
/* if detected radar on both channels, select the significant one */
if (is_ctl && is_ext) {
- /* first check wether channels have 'strong' bins */
+ /* first check whether channels have 'strong' bins */
is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 7265766cddbd..fe9abe8cd268 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1198,7 +1198,7 @@ static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
filename = FIRMWARE_AR9271;
/* expected fw locations:
- * - htc_9271.fw (stable version 1.3, depricated)
+ * - htc_9271.fw (stable version 1.3, deprecated)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s", filename);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e2bef099adb3..f9a774bd0e13 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1847,20 +1847,11 @@ fail:
return -EINVAL;
}
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur)
{
- struct timespec64 ts;
- s64 usec;
-
- if (!cur) {
- ktime_get_raw_ts64(&ts);
- cur = &ts;
- }
-
- usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000;
- usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000;
-
- return (u32) usec;
+ if (cur == 0)
+ cur = ktime_get_raw();
+ return ktime_us_delta(cur, last);
}
EXPORT_SYMBOL(ath9k_hw_get_tsf_offset);
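
The rewritten ath9k_hw_get_tsf_offset() drops the NULL-pointer convention ("no current time supplied, sample it yourself") in favour of a ktime_t of 0 as the sentinel, which is safe because a raw monotonic timestamp of exactly zero never occurs once the clock is running. A userspace sketch of the same sentinel pattern (helper names invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Raw monotonic time in microseconds; never 0 after boot, so 0 can
 * serve as a "sample the clock now" sentinel. */
static int64_t raw_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Microseconds elapsed from 'last' to 'cur'; cur == 0 means "now",
 * mirroring the ath9k_hw_get_tsf_offset(last, 0) call sites. */
static uint32_t tsf_offset(int64_t last, int64_t cur)
{
	if (cur == 0)
		cur = raw_us();
	return (uint32_t)(cur - last);
}

int main(void)
{
	int64_t start = raw_us();

	for (volatile int i = 0; i < 1000000; i++)
		;	/* burn a little time */
	printf("elapsed: %u us\n", tsf_offset(start, 0));
	return 0;
}
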
@@ -1871,7 +1862,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
- struct timespec64 tsf_ts;
+ ktime_t tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@@ -1917,7 +1908,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
- ktime_get_raw_ts64(&tsf_ts);
+ tsf_ts = ktime_get_raw();
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1951,7 +1942,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
ath9k_hw_settsf64(ah, tsf + tsf_offset);
if (AR_SREV_9280_20_OR_LATER(ah))
@@ -1975,7 +1966,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* value after the initvals have been applied.
*/
if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
- tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
ath9k_hw_settsf64(ah, tsf + tsf_offset);
}
@@ -2149,7 +2140,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
/* When chip goes into network sleep, it could be waken
* up by MCI_INT interrupt caused by BT's HW messages
- * (LNA_xxx, CONT_xxx) which chould be in a very fast
+ * (LNA_xxx, CONT_xxx) which could be in a very fast
* rate (~100us). This will cause chip to leave and
* re-enter network sleep mode frequently, which in
* consequence will have WLAN MCI HW to generate lots of
@@ -2544,7 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
- * For AR9271 we will temporarilly uses the rx chainmax as read from
+ * For AR9271 we will temporarily use the rx chainmax as read from
* the EEPROM.
*/
if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 450ab19b1d4e..eaa07d6dbde0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -282,7 +282,7 @@ enum ath9k_hw_caps {
* an exact user defined pattern or de-authentication/disassoc pattern.
* @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
* bytes of the pattern for user defined pattern, de-authentication and
- * disassociation patterns for all types of possible frames recieved
+ * disassociation patterns for all types of possible frames received
* of those types.
*/
@@ -1066,7 +1066,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index d1e5767aab3c..d078a59d7d3c 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -50,7 +50,36 @@ reset:
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return false;
+}
+
+#define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC)
+
+static bool ath_hw_rx_inactive_check(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 interval, count;
+
+ interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time);
+ count = sc->rx_active_count;
+
+ if (interval < RX_INACTIVE_CHECK_INTERVAL)
+ return true; /* too soon to check */
+ sc->rx_active_count = 0;
+ sc->rx_active_check_time = jiffies;
+
+ /* Need at least one interrupt per second, and we should only react if
+ * we are within a factor two of the expected interval
+ */
+ if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
+ count >= interval / MSEC_PER_SEC)
+ return true;
+
+ ath_dbg(common, RESET,
+ "RX inactivity detected. Schedule chip reset\n");
+ ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE);
+
+ return false;
}
void ath_hw_check_work(struct work_struct *work)
@@ -58,8 +87,8 @@ void ath_hw_check_work(struct work_struct *work)
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_check_work.work);
- if (!ath_hw_check(sc) ||
- !ath_tx_complete_check(sc))
+ if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) ||
+ !ath_hw_rx_inactive_check(sc))
return;
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
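
The new ath_hw_rx_inactive_check() is a rate-limited watchdog: a counter bumped on every RX interrupt, sampled at most once per RX_INACTIVE_CHECK_INTERVAL, with the reset taken only when the sampling window is sane (not more than twice the expected interval) and the count fell below one event per second. A compressed userspace model of that decision (constants mirror the patch; the millisecond clock and state variables stand in for jiffies and the ath_softc fields):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MSEC_PER_SEC			1000
#define RX_INACTIVE_CHECK_INTERVAL	(4 * MSEC_PER_SEC)

/* Returns true when the RX path looks alive (or it is too early to
 * tell), false when a chip reset should be scheduled. */
static bool rx_inactive_check(uint64_t now_ms, uint64_t *check_time_ms,
			      uint32_t *rx_count)
{
	uint64_t interval = now_ms - *check_time_ms;
	uint32_t count = *rx_count;

	if (interval < RX_INACTIVE_CHECK_INTERVAL)
		return true;		/* too soon to check */

	*rx_count = 0;
	*check_time_ms = now_ms;

	/* Accept the window only if it is not wildly late, and demand
	 * at least one RX interrupt per elapsed second. */
	if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
	    count >= interval / MSEC_PER_SEC)
		return true;

	return false;			/* schedule a reset */
}

int main(void)
{
	uint64_t check_time = 0;
	uint32_t rx_count = 2;		/* only 2 IRQs in 4 seconds */

	printf("alive: %d\n",
	       rx_inactive_check(4000, &check_time, &rx_count)); /* 0 */
	return 0;
}
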
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f03d792732da..16203e7ecf29 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -251,7 +251,7 @@ struct ath_desc {
* when the descriptor is specifically marked to generate
* an interrupt with this flag. Descriptors should be
* marked periodically to insure timely replenishing of the
- * supply needed for sending frames. Defering interrupts
+ * supply needed for sending frames. Deferring interrupts
* reduces system load and potentially allows more concurrent
* work to be done but if done to aggressively can cause
* senders to backup. When the hardware queue is left too
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b92c89dad8de..a70c94564814 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -249,8 +249,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
if (sc->cur_chan->tsf_val) {
u32 offset;
- offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
- NULL);
+ offset = ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
}
@@ -453,6 +452,7 @@ void ath9k_tasklet(struct tasklet_struct *t)
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
+ sc->rx_active_count++;
}
if (status & ATH9K_INT_TX) {
@@ -1001,7 +1001,7 @@ static bool ath9k_uses_beacons(int type)
static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
struct ieee80211_vif *vif)
{
- /* Use the first (configured) interface, but prefering AP interfaces. */
+ /* Use the first (configured) interface, but preferring AP interfaces. */
if (!iter_data->primary_beacon_vif) {
iter_data->primary_beacon_vif = vif;
} else {
@@ -1955,7 +1955,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
tsf = ath9k_hw_gettsf64(sc->sc_ah);
} else {
tsf = sc->cur_chan->tsf_val +
- ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
}
tsf += le64_to_cpu(avp->tsf_adjust);
ath9k_ps_restore(sc);
@@ -1974,7 +1974,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
- ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+ avp->chanctx->tsf_ts = ktime_get_raw();
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@@ -1990,7 +1990,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+ avp->chanctx->tsf_ts = ktime_get_raw();
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
@@ -2767,7 +2767,7 @@ void ath9k_fill_chanctx_ops(void)
#endif
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 0c0624a3b40d..34c74ed99b7b 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1042,8 +1042,8 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
if (!!(rxs->encoding == RX_ENC_HT)) {
/* MCS rates */
- airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
- is_40, is_sgi, is_sp);
+ airtime += ath_pkt_duration(rxs->rate_idx, len,
+ is_40, is_sgi, is_sp);
} else {
phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 8d0b1730a9d5..ed4152cd44f0 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -60,7 +60,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
/*
- * Create Dissassociate / Deauthenticate packet filter
+ * Create Disassociate / Deauthenticate packet filter
*
* 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
* +--------------+----------+---------+--------+--------+----
@@ -70,7 +70,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
* The above is the management frame format for disassociate/
* deauthenticate pattern, from this we need to match the first byte
* of 'Frame Control' and DA, SA, and BSSID fields
- * (skipping 2nd byte of FC and Duration feild.
+ * (skipping 2nd byte of FC and Duration field.
*
* Disassociate pattern
* --------------------
@@ -225,7 +225,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath9k_stop_btcoex(sc);
/*
- * Enable wake up on recieving disassoc/deauth
+ * Enable wake up on receiving disassoc/deauth
* frame by default.
*/
ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35aa47a9db90..db07ce6dbc08 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -67,8 +67,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok);
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf);
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
@@ -208,10 +207,10 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
ARRAY_SIZE(bf->rates));
}
-static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+static void ath_txq_skb_done(struct ath_softc *sc, struct sk_buff *skb)
{
struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_txq *txq;
int q = fi->txq;
if (q < 0)
@@ -224,7 +223,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
}
static struct ath_atx_tid *
-ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+ath_get_skb_tid(struct ath_node *an, struct sk_buff *skb)
{
u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
return ATH_AN_2_TID(an, tidno);
@@ -294,13 +293,13 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
fi = get_frame_info(skb);
bf = fi->bf;
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
if (fi->baw_tracked) {
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
sendbar = true;
}
@@ -315,8 +314,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
}
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf)
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
{
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
u16 seqno = bf->bf_state.seqno;
@@ -338,8 +336,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
}
-static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf)
+static void ath_tx_addto_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
{
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
u16 seqno = bf->bf_state.seqno;
@@ -452,9 +449,8 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
return tbf;
}
-static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, int txok,
- int *nframes, int *nbad)
+static void ath_tx_count_frames(struct ath_buf *bf, struct ath_tx_status *ts,
+ int txok, int *nframes, int *nbad)
{
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -557,7 +553,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/*
* AR5416 can become deaf/mute when BA
* issue happens. Chip needs to be reset.
- * But AP code may have sychronization issues
+ * But AP code may have synchronization issues
* when perform internal reset in this routine.
* Only enable reset in STA mode for now.
*/
@@ -568,7 +564,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
__skb_queue_head_init(&bf_pending);
- ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
+ ath_tx_count_frames(bf, ts, txok, &nframes, &nbad);
while (bf) {
u16 seqno = bf->bf_state.seqno;
@@ -621,7 +617,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -651,7 +647,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* run out of tx buf.
*/
if (!tbf) {
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, NULL, ts,
@@ -752,7 +748,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+ tid = ath_get_skb_tid(an, bf->bf_mpdu);
ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
@@ -962,7 +958,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.stale = false;
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
@@ -1012,13 +1008,13 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- ath_tx_update_baw(sc, tid, bf);
+ ath_tx_update_baw(tid, bf);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
if (bf_isampdu(bf))
- ath_tx_addto_baw(sc, tid, bf);
+ ath_tx_addto_baw(tid, bf);
break;
}
@@ -1114,8 +1110,8 @@ finish:
* width - 0 for 20 MHz, 1 for 40 MHz
* half_gi - to use 4us v/s 3.6 us for symbol time
*/
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble)
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+ int half_gi, bool shortPreamble)
{
u32 nbits, nsymbits, duration, nsymbols;
int streams;
@@ -1327,7 +1323,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rates[i].Rate = rix | 0x80;
info->rates[i].ChSel = ath_txchainmask_reduction(sc,
ah->txchainmask, info->rates[i].Rate);
- info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
+ info->rates[i].PktDuration = ath_pkt_duration(rix, len,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
@@ -2122,7 +2118,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type = 0;
if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
bf->bf_state.bf_type = BUF_AMPDU;
- ath_tx_addto_baw(sc, tid, bf);
+ ath_tx_addto_baw(tid, bf);
}
bf->bf_next = NULL;
@@ -2368,7 +2364,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
if (txctl->sta) {
an = (struct ath_node *) sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, skb);
+ tid = ath_get_skb_tid(an, skb);
}
ath_txq_lock(sc, txq);
@@ -2379,7 +2375,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
@@ -2514,7 +2510,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- ath_txq_skb_done(sc, txq, skb);
+ ath_txq_skb_done(sc, skb);
tx_info->status.status_driver_data[0] = sta;
__skb_queue_tail(&txq->complete_q, skb);
}
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index bb40889d7c72..2d734567000a 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -54,7 +54,6 @@ struct carl9170_debugfs_fops {
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
- const struct file_operations fops;
enum carl9170_device_state req_dev_state;
};
@@ -62,7 +61,7 @@ struct carl9170_debugfs_fops {
static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct carl9170_debugfs_fops *dfops;
+ const struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL, *res_buf = NULL;
ssize_t ret = 0;
@@ -75,8 +74,7 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
if (!ar)
return -ENODEV;
- dfops = container_of(debugfs_real_fops(file),
- struct carl9170_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read)
return -ENOSYS;
@@ -113,7 +111,7 @@ out_free:
static ssize_t carl9170_debugfs_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
{
- struct carl9170_debugfs_fops *dfops;
+ const struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL;
int err = 0;
@@ -128,8 +126,7 @@ static ssize_t carl9170_debugfs_write(struct file *file,
if (!ar)
return -ENODEV;
- dfops = container_of(debugfs_real_fops(file),
- struct carl9170_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write)
return -ENOSYS;
@@ -165,6 +162,11 @@ out_free:
return err;
}
+static struct debugfs_short_fops debugfs_fops = {
+ .read = carl9170_debugfs_read,
+ .write = carl9170_debugfs_write,
+};
+
#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
_attr, _dstate) \
static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
@@ -173,12 +175,6 @@ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
.write = _write, \
.attr = _attr, \
.req_dev_state = _dstate, \
- .fops = { \
- .open = simple_open, \
- .read = carl9170_debugfs_read, \
- .write = carl9170_debugfs_write, \
- .owner = THIS_MODULE \
- }, \
}
#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
@@ -816,9 +812,9 @@ void carl9170_debugfs_register(struct ar9170 *ar)
ar->hw->wiphy->debugfsdir);
#define DEBUGFS_ADD(name) \
- debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \
- ar->debug_dir, ar, \
- &carl_debugfs_##name ## _ops.fops)
+ debugfs_create_file_aux(#name, carl_debugfs_##name ##_ops.attr, \
+ ar->debug_dir, ar, &carl_debugfs_##name ## _ops, \
+ &debugfs_fops)
DEBUGFS_ADD(usb_tx_anch_urbs);
DEBUGFS_ADD(usb_rx_pool_urbs);
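
This carl9170 hunk, like the b43/b43legacy hunks further below, retires the old trick of embedding a full file_operations in every per-file descriptor just so container_of(debugfs_real_fops(file), ...) could find the descriptor again; with debugfs_create_file_aux() the descriptor travels as an auxiliary pointer and one shared short-fops table serves all files. A userspace sketch contrasting the two lookup styles (plain C, no debugfs; struct and field names invented):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fops {			/* stand-in for file_operations */
	int dummy;
};

struct dbg_file {
	const char *name;
	struct fops fops;	/* old style: one embedded copy per file */
};

static struct dbg_file old_style = { .name = "old" };

struct file {			/* stand-in for struct file */
	struct fops *f_op;	/* old style lookup key */
	void *aux;		/* new style: direct back-pointer */
};

int main(void)
{
	/* Old style: recover the descriptor from the embedded fops. */
	struct file f1 = { .f_op = &old_style.fops };
	struct dbg_file *d1 = container_of(f1.f_op, struct dbg_file, fops);

	/* New style: the descriptor is simply stored alongside. */
	struct dbg_file new_style = { .name = "new" };
	struct file f2 = { .aux = &new_style };
	struct dbg_file *d2 = f2.aux;

	printf("%s %s\n", d1->name, d2->name);	/* old new */
	return 0;
}
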
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 8557d4826a46..94d08d6ae1a3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
}
n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
- wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
+ wcn->chan_survey = devm_kcalloc(wcn->dev,
+ n_channels,
+ sizeof(struct wcn36xx_chan_survey),
+ GFP_KERNEL);
if (!wcn->chan_survey) {
ret = -ENOMEM;
goto out_wq;
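
The wcn36xx fix above is a classic under-allocation: devm_kmalloc() was sized in array elements rather than bytes, so n_channels survey slots got n_channels bytes. A tiny userspace illustration of why the calloc(nmemb, size) form is the safer habit (structure layout invented):

#include <stdio.h>
#include <stdlib.h>

struct chan_survey {		/* stand-in for wcn36xx_chan_survey */
	long long rssi_sum;
	unsigned int samples;
};

int main(void)
{
	size_t n_channels = 50;

	/* Buggy shape: allocates n_channels BYTES, not n_channels
	 * structures -- writes past the third element corrupt the heap. */
	struct chan_survey *bad = malloc(n_channels);

	/* Fixed shape: element count and element size stated apart,
	 * with overflow checking and zeroing thrown in. */
	struct chan_survey *good = calloc(n_channels, sizeof(*good));

	printf("bad: %zu bytes, good: %zu bytes\n",
	       n_channels, n_channels * sizeof(*good));
	free(bad);
	free(good);
	return 0;
}
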
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index efa98444e3fb..5a49970afc8c 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -30,7 +30,6 @@ static struct dentry *rootdir;
struct b43_debugfs_fops {
ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
- struct file_operations fops;
/* Offset of struct b43_dfs_file in struct b43_dfsentry */
size_t file_struct_offset;
};
@@ -491,7 +490,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct b43_wldev *dev;
- struct b43_debugfs_fops *dfops;
+ const struct b43_debugfs_fops *dfops;
struct b43_dfs_file *dfile;
ssize_t ret;
char *buf;
@@ -511,8 +510,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -555,7 +553,7 @@ static ssize_t b43_debugfs_write(struct file *file,
size_t count, loff_t *ppos)
{
struct b43_wldev *dev;
- struct b43_debugfs_fops *dfops;
+ const struct b43_debugfs_fops *dfops;
char *buf;
int err = 0;
@@ -573,8 +571,7 @@ static ssize_t b43_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
@@ -602,16 +599,16 @@ out_unlock:
}
+static struct debugfs_short_fops debugfs_ops = {
+ .read = b43_debugfs_read,
+ .write = b43_debugfs_write,
+ .llseek = generic_file_llseek,
+};
+
#define B43_DEBUGFS_FOPS(name, _read, _write) \
static struct b43_debugfs_fops fops_##name = { \
.read = _read, \
.write = _write, \
- .fops = { \
- .open = simple_open, \
- .read = b43_debugfs_read, \
- .write = b43_debugfs_write, \
- .llseek = generic_file_llseek, \
- }, \
.file_struct_offset = offsetof(struct b43_dfsentry, \
file_##name), \
}
@@ -703,9 +700,9 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- debugfs_create_file(__stringify(name), \
+ debugfs_create_file_aux(__stringify(name), \
mode, e->subdir, dev, \
- &fops_##name.fops); \
+ &fops_##name, &debugfs_ops); \
} while (0)
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 6b0e8d117061..5d04bcc216e5 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -31,7 +31,6 @@ static struct dentry *rootdir;
struct b43legacy_debugfs_fops {
ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
- struct file_operations fops;
/* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
size_t file_struct_offset;
/* Take wl->irq_lock before calling read/write? */
@@ -188,7 +187,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct b43legacy_wldev *dev;
- struct b43legacy_debugfs_fops *dfops;
+ const struct b43legacy_debugfs_fops *dfops;
struct b43legacy_dfs_file *dfile;
ssize_t ret;
char *buf;
@@ -208,8 +207,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43legacy_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -257,7 +255,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
size_t count, loff_t *ppos)
{
struct b43legacy_wldev *dev;
- struct b43legacy_debugfs_fops *dfops;
+ const struct b43legacy_debugfs_fops *dfops;
char *buf;
int err = 0;
@@ -275,8 +273,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43legacy_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
@@ -308,17 +305,16 @@ out_unlock:
return err ? err : count;
}
+static struct debugfs_short_fops debugfs_ops = {
+ .read = b43legacy_debugfs_read,
+ .write = b43legacy_debugfs_write,
+ .llseek = generic_file_llseek
+};
#define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \
static struct b43legacy_debugfs_fops fops_##name = { \
.read = _read, \
.write = _write, \
- .fops = { \
- .open = simple_open, \
- .read = b43legacy_debugfs_read, \
- .write = b43legacy_debugfs_write, \
- .llseek = generic_file_llseek, \
- }, \
.file_struct_offset = offsetof(struct b43legacy_dfsentry, \
file_##name), \
.take_irqlock = _take_irqlock, \
@@ -386,9 +382,9 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- debugfs_create_file(__stringify(name), mode, \
+ debugfs_create_file_aux(__stringify(name), mode, \
e->subdir, dev, \
- &fops_##name.fops); \
+ &fops_##name, &debugfs_ops); \
} while (0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 42d991d9f8cb..6bc107476a2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -455,6 +455,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
+ if (!sgl) {
+ /* out of (pre-allocated) scatterlist entries */
+ ret = -ENOMEM;
+ goto exit;
+ }
sg_set_buf(sgl, pkt_data, sg_data_sz);
sg_cnt++;
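
The bcmsdh.c hunk adds the missing exhaustion check when walking a pre-allocated scatterlist: sg_next() eventually returns NULL, and the old code would have passed that straight to sg_set_buf(). The shape of the fix, reduced to a userspace loop over a fixed-size descriptor table (types and sizes invented):

#include <stdio.h>

struct seg {			/* stand-in for a scatterlist entry */
	void *buf;
	unsigned long len;
};

#define MAX_SEGS 4		/* pre-allocated table size */

/* Split 'total' bytes into at most MAX_SEGS chunks of 'chunk' bytes.
 * Returns the segment count, or -1 when the table runs out -- the
 * case the patch now handles with -ENOMEM. */
static int fill_segs(struct seg *tbl, char *data,
		     unsigned long total, unsigned long chunk)
{
	int n = 0;

	while (total) {
		unsigned long sz = total < chunk ? total : chunk;

		if (n == MAX_SEGS)
			return -1;	/* out of pre-allocated entries */
		tbl[n].buf = data;
		tbl[n].len = sz;
		data += sz;
		total -= sz;
		n++;
	}
	return n;
}

int main(void)
{
	struct seg tbl[MAX_SEGS];
	char data[64];

	printf("segs: %d\n", fill_segs(tbl, data, sizeof(data), 16)); /* 4 */
	printf("segs: %d\n", fill_segs(tbl, data, sizeof(data), 8));  /* -1 */
	return 0;
}
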
@@ -1167,6 +1172,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
+ bool cap_power_off;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1174,19 +1180,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
if (func->num != 1)
return 0;
+ cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- if (sdiodev->wowl_enabled) {
+ if (sdiodev->wowl_enabled || !cap_power_off) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1208,18 +1218,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
int ret = 0;
+ bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!sdiodev->wowl_enabled) {
+ if (!sdiodev->wowl_enabled && cap_power_off) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
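
The suspend/resume hunks tighten the brcmfmac policy to: keep the card powered whenever WoWLAN is armed or the host controller cannot cut card power (no MMC_CAP_POWER_OFF_CARD), and touch the OOB wake IRQ only when WoWLAN is actually the reason. The decision reduces to a small truth table, sketched here in plain C (function and parameter names invented):

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the brcmf_ops_sdio_suspend() decision after the patch:
 * power stays on if WoWLAN wants it or the host simply cannot power
 * the card off; the wake IRQ is armed only for the WoWLAN case. */
static void suspend_policy(bool wowl, bool can_power_off,
			   bool *keep_power, bool *arm_wake_irq)
{
	*keep_power = wowl || !can_power_off;
	*arm_wake_irq = wowl;
}

int main(void)
{
	const bool cases[][2] = {
		{ false, true }, { false, false },
		{ true,  true }, { true,  false },
	};
	bool keep, arm;

	for (unsigned int i = 0; i < 4; i++) {
		suspend_policy(cases[i][0], cases[i][1], &keep, &arm);
		printf("wowl=%d power_off=%d -> keep=%d wake_irq=%d\n",
		       cases[i][0], cases[i][1], keep, arm);
	}
	return 0;
}
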
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 297a7c738c01..4b70845e1a26 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -814,6 +814,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
* @name: name of the new interface.
* @params: contains mac address for AP or STA device.
* @type: interface type.
+ *
+ * Return: pointer to new vif on success, ERR_PTR(-errno) if not
*/
static
struct wireless_dev *brcmf_apsta_add_vif(struct wiphy *wiphy, const char *name,
@@ -900,6 +902,8 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
+ *
+ * Return: pointer to new vif on success, ERR_PTR(-errno) if not
*/
static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
const char *name)
@@ -2676,7 +2680,7 @@ done:
static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- s32 *dbm)
+ unsigned int link_id, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
@@ -4999,12 +5003,16 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
static const s32 pktflags[] = {
- BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
};
int i;
+ if (vif->wdev.iftype == NL80211_IFTYPE_AP)
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0);
+ else
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, NULL, 0);
+
for (i = 0; i < ARRAY_SIZE(pktflags); i++)
brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
@@ -7408,6 +7416,8 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* p2p, rsdb, and no mbss:
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
* channels = 2, 4 total
+ *
+ * Return: 0 on success, negative errno on failure
*/
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index da72fd2d541f..3d63010ae079 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -327,8 +327,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
- brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
- brcmf_ifname(ifp), head_delta);
+ brcmf_dbg(INFO, "%s: %s headroom\n", brcmf_ifname(ifp),
+ head_delta ? "insufficient" : "unmodifiable");
atomic_inc(&drvr->bus_if->stats.pktcowed);
ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
GFP_ATOMIC);
@@ -540,6 +540,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
struct ethhdr *eh;
u16 type;
+ if (!ifp) {
+ brcmu_pkt_buf_free_skb(txp);
+ return;
+ }
+
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 31e080e4da66..ab3d6cfcb02b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -6,6 +6,8 @@
#ifndef _fwil_h_
#define _fwil_h_
+#include "debug.h"
+
/*******************************************************************************
* Dongle command codes that are interpreted by firmware
******************************************************************************/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 0949e7975ff1..b70d20128f98 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
rfi->cur_idx = cur_idx;
}
} else {
- /* explicity window move updating the expected index */
+ /* explicitly window move updating the expected index */
exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index c1f18e2fe540..1681ad00f82e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -99,13 +99,13 @@ int brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root && err) {
- char *board_type;
+ char *board_type = NULL;
const char *tmp;
- of_property_read_string_index(root, "compatible", 0, &tmp);
-
/* get rid of '/' in the compatible string to be able to find the FW */
- board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!of_property_read_string_index(root, "compatible", 0, &tmp))
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+
if (!board_type) {
of_node_put(root);
return 0;
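
The of.c fix stops trusting of_property_read_string_index() blindly: on failure the out-parameter was never written, and devm_kstrdup() would have consumed stale stack contents. The same check-then-use shape in plain C (the getter here is a made-up stand-in for the OF helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Made-up stand-in for of_property_read_string_index(): fills *out
 * only on success and returns 0, else a negative errno with *out
 * untouched. */
static int get_compatible(int present, const char **out)
{
	if (!present)
		return -ENOENT;
	*out = "vendor,board-rev1";
	return 0;
}

int main(void)
{
	const char *tmp;		/* deliberately uninitialized */
	char *board_type = NULL;

	/* Fixed shape: consume tmp only when the getter reported
	 * success; otherwise board_type stays NULL and the caller
	 * bails out cleanly instead of strdup'ing garbage. */
	if (!get_compatible(0, &tmp))
		board_type = strdup(tmp);

	printf("board_type: %s\n", board_type ? board_type : "(none)");
	free(board_type);
	return 0;
}
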
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index e4395b1f8c11..d2caa80e9412 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index d69879e1bd87..d362c4337616 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
break;
}
+ if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
+ return;
+
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
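
The phy_n.c hunk guards a search loop that can legitimately fall off the end of its gain table: if nothing matched, k equals NPHY_IQCAL_NUMGAINS and the table reads that follow would index one past the array. A minimal sketch of the pattern (table contents invented):

#include <stdio.h>

#define NUMGAINS 4

static const int gain_tbl[NUMGAINS][2] = {
	{ 0x10, 1 }, { 0x20, 2 }, { 0x30, 3 }, { 0x40, 4 },
};

/* Returns the parameter for 'code', or -1 when the search loop walks
 * off the table -- the case the WARN_ON() in the patch now catches
 * before any out-of-bounds read. */
static int lookup_gain(int code)
{
	int k;

	for (k = 0; k < NUMGAINS; k++) {
		if (gain_tbl[k][0] == code)
			break;
	}
	if (k == NUMGAINS)		/* nothing matched */
		return -1;
	return gain_tbl[k][1];
}

int main(void)
{
	printf("%d %d\n", lookup_gain(0x30), lookup_gain(0x99)); /* 3 -1 */
	return 0;
}
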
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 74fc76c00ebc..4013443698a2 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1127,44 +1127,6 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
rxq->rb_stts = NULL;
}
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int
-il3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio / 10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
-
/*
* il3945_rx_handle - Main entry function for receiving responses from uCode
*
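
For reference, the ratio2dB[] table deleted above was an integer approximation of 20*log10(ratio) (voltage dB), capped at 60 dB for ratios of 1000:1 and up. The removed helper can be reproduced in a few lines of userspace C, offered here purely as a cross-check of what the table encoded:

#include <stdio.h>
#include <math.h>

/* 20*log10() of a linear voltage ratio, clamped like the removed
 * il3945_calc_db_from_ratio(): 0 below 1:1, 60 dB at 1000:1 and up. */
static int calc_db_from_ratio(int sig_ratio)
{
	if (sig_ratio < 1)
		return 0;
	if (sig_ratio >= 1000)
		return 60;
	return (int)(20.0 * log10((double)sig_ratio));
}

int main(void)
{
	/* Matches the deleted table: 2 -> 6, 10 -> 20, 100 -> 40. */
	printf("%d %d %d %d\n",
	       calc_db_from_ratio(2), calc_db_from_ratio(10),
	       calc_db_from_ratio(100), calc_db_from_ratio(5000));
	return 0;	/* build with -lm */
}
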
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
index ffbe11902628..fb1e33c89d0e 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.h
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -173,7 +173,6 @@ struct il3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-int il3945_calc_db_from_ratio(int sig_ratio);
void il3945_rx_replenish(void *data);
void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
unsigned int il3945_fill_beacon_frame(struct il_priv *il,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 958dd4f9bc69..af4f42534ea0 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3915,37 +3915,6 @@ il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
}
EXPORT_SYMBOL(il_set_rxon_ht);
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8
-il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
-{
- const struct il_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
-
- if (band == NL80211_BAND_5GHZ) {
- min = 14;
- max = il->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- channel = il->channel_info[i].channel;
- if (channel == le16_to_cpu(il->staging.channel))
- continue;
-
- ch_info = il_get_channel_info(il, band, channel);
- if (il_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(il_get_single_channel_number);
-
/*
* il_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 725c2a88ddb7..92285412ab10 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1705,7 +1705,6 @@ int il_full_rxon_required(struct il_priv *il);
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
bool il_is_ht40_tx_allowed(struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_cap);
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 64c123314245..19c4ce6f2465 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -5,13 +5,14 @@ iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-nvm-utils.o
+iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 975e8aed1526..dcba1a5d793b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -31,40 +31,21 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0"
-#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0"
#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0"
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0"
-#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0"
#define IWL_MA_B_HR_B_FW_PRE "iwlwifi-ma-b0-hr-b0"
#define IWL_MA_B_GF_A_FW_PRE "iwlwifi-ma-b0-gf-a0"
#define IWL_MA_B_GF4_A_FW_PRE "iwlwifi-ma-b0-gf4-a0"
-#define IWL_MA_B_MR_A_FW_PRE "iwlwifi-ma-b0-mr-a0"
#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \
IWL_SO_A_JF_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \
IWL_SO_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_SO_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_TY_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_A_MR_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(api) \
- IWL_MA_B_MR_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_ax210_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -267,13 +248,6 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
.trans.low_latency_xtal = true,
};
-const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
- .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
- .uhb_supported = false,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
const struct iwl_cfg iwl_cfg_ma = {
.fw_name_mac = "ma",
.uhb_supported = true,
@@ -289,19 +263,11 @@ const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_TY_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
-
-MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
-MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
+IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
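
The IWL_FW_AND_PNVM() conversions above collapse the per-chip MODULE_FIRMWARE boilerplate; underneath they rely on the same preprocessor trick as the deleted macros, pasting a prefix, a stringified API number, and a suffix into one string literal. The mechanics, demonstrable in plain C (the two-level macro mimics the kernel's __stringify; the firmware names are just example strings):

#include <stdio.h>

/* Two-level expansion so macro arguments are expanded before '#'
 * stringifies them -- the same trick as the kernel's __stringify(). */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define UCODE_API_MAX	96
#define FW_PRE		"iwlwifi-ma-b0-gf-a0"

/* "-96.ucode" and ".pnvm" variants built from one prefix, roughly
 * the pair of names a MODULE_FIRMWARE/IWL_FW_AND_PNVM combination
 * would register. */
#define FW_UCODE(pre, api)	pre "-" __stringify(api) ".ucode"
#define FW_PNVM(pre)		pre ".pnvm"

int main(void)
{
	printf("%s\n%s\n", FW_UCODE(FW_PRE, UCODE_API_MAX),
	       FW_PNVM(FW_PRE));
	return 0;
}
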
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 1c43f283ac4a..efa3e0e35f79 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 94
+#define IWL_BZ_UCODE_API_MAX 96
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 92
@@ -37,20 +37,6 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
- IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM_B_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM_C_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BZ_A_FM4_B_MODULE_FIRMWARE(api) \
- IWL_BZ_A_FM4_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \
- IWL_GL_B_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_GL_C_FM_C_MODULE_FIRMWARE(api) \
- IWL_GL_C_FM_C_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -181,14 +167,11 @@ const struct iwl_cfg iwl_cfg_gl = {
.num_rbds = IWL_NUM_RBDS_BZ_EHT,
};
-
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
-
-MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
+IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
new file mode 100644
index 000000000000..ab7c0f8d54f4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-prph.h"
+#include "fw/api/txq.h"
+
+/* Highest firmware API version supported */
+#define IWL_DR_UCODE_API_MAX 96
+
+/* Lowest firmware API version supported */
+#define IWL_DR_UCODE_API_MIN 96
+
+/* NVM versions */
+#define IWL_DR_NVM_VERSION 0x0a1d
+
+/* Memory offsets and lengths */
+#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */
+#define IWL_DR_DCCM2_OFFSET 0x880000
+#define IWL_DR_DCCM2_LEN 0x8000
+#define IWL_DR_SMEM_OFFSET 0x400000
+#define IWL_DR_SMEM_LEN 0xD0000
+
+#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
+#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0"
+#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0"
+
+#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
+ IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
+ IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
+ IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl_dr_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+#define IWL_DEVICE_DR_COMMON \
+ .ucode_api_max = IWL_DR_UCODE_API_MAX, \
+ .ucode_api_min = IWL_DR_UCODE_API_MIN, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = 10, \
+ .non_shared_ant = ANT_B, \
+ .dccm_offset = IWL_DR_DCCM_OFFSET, \
+ .dccm_len = IWL_DR_DCCM_LEN, \
+ .dccm2_offset = IWL_DR_DCCM2_OFFSET, \
+ .dccm2_len = IWL_DR_DCCM2_LEN, \
+ .smem_offset = IWL_DR_SMEM_OFFSET, \
+ .smem_len = IWL_DR_SMEM_LEN, \
+ .apmg_not_supported = true, \
+ .trans.mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = 0x30, \
+ .nvm_ver = IWL_DR_NVM_VERSION, \
+ .trans.rf_id = true, \
+ .trans.gen2 = true, \
+ .nvm_type = IWL_NVM_EXT, \
+ .dbgc_supported = true, \
+ .min_umac_error_event_table = 0xD0000, \
+ .d3_debug_data_base_addr = 0x401000, \
+ .d3_debug_data_length = 60 * 1024, \
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }, \
+ .trans.umac_prph_offset = 0x300000, \
+ .trans.device_family = IWL_DEVICE_FAMILY_DR, \
+ .trans.base_params = &iwl_dr_base_params, \
+ .min_txq_size = 128, \
+ .gp2_reg_addr = 0xd02c68, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = DBGC_DBGBUF_WRAP_AROUND, \
+ .mask = 0xffffffff, \
+ }, \
+ .cur_frag = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
+ }, \
+ }, \
+ .mon_dbgi_regs = { \
+ .write_ptr = { \
+ .addr = DBGI_SRAM_FIFO_POINTERS, \
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+ }, \
+ }
+
+#define IWL_DEVICE_DR \
+ IWL_DEVICE_DR_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_DR_EHT, \
+ .ht_params = &iwl_22000_ht_params
+
+/*
+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+#define IWL_NUM_RBDS_DR_EHT (512 * 16)
+
+const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_DR,
+ .base_params = &iwl_dr_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
+const char iwl_dr_name[] = "Intel(R) TBD Dr device";
+
+const struct iwl_cfg iwl_cfg_dr = {
+ .fw_name_mac = "dr",
+ IWL_DEVICE_DR,
+};
+
+const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_DR,
+ .base_params = &iwl_dr_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
+const char iwl_br_name[] = "Intel(R) TBD Br device";
+
+const struct iwl_cfg iwl_cfg_br = {
+ .fw_name_mac = "br",
+ IWL_DEVICE_DR,
+};
+
+MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
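As an aside, the MODULE_FIRMWARE()/__stringify() pattern above resolves to a plain file name at preprocessing time; a standalone sketch (hypothetical FW_NAME helper, plain C outside the kernel) of the expansion:

#include <stdio.h>

/* Same pattern as the driver macros: paste the prefix, a dash,
 * the stringified API level and the ".ucode" suffix together. */
#define FW_PRE "iwlwifi-dr-a0-pe-a0"
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
#define FW_NAME(api) FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
	/* Prints "iwlwifi-dr-a0-pe-a0-96.ucode" */
	printf("%s\n", FW_NAME(96));
	return 0;
}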
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index fc5e6e44c6aa..c9eeb3f6704e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 94
+#define IWL_SC_UCODE_API_MAX 96
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 92
@@ -38,28 +38,10 @@
#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
-#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
- IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
- IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -181,14 +163,14 @@ const struct iwl_cfg iwl_cfg_sc2f = {
IWL_DEVICE_SC,
};
-MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2F_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_FW_AND_PNVM(IWL_SC2F_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
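IWL_FW_AND_PNVM is not defined in this hunk; judging by how it replaces paired MODULE_FIRMWARE() declarations here, it plausibly declares both the .ucode image and the matching .pnvm file for a given prefix. A sketch of what such a helper could look like (assumed, not quoted from iwl-config.h):

/* Hypothetical sketch -- the real definition lives elsewhere in the driver. */
#define IWL_FW_AND_PNVM(pfx, api)					\
	MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode");		\
	MODULE_FIRMWARE(pfx ".pnvm")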
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 931aa3f5798d..cdc05f7e75a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -676,12 +676,12 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM,
IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
@@ -697,7 +697,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+ CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 74d163e56511..56d19a034c24 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1565,6 +1565,16 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void
+iwlagn_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ iwl_trans_finish_sw_reset(priv->trans);
+}
+
const struct ieee80211_ops iwlagn_hw_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1598,6 +1608,7 @@ const struct ieee80211_ops iwlagn_hw_ops = {
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
.event_callback = iwlagn_mac_event_callback,
.set_tim = iwlagn_mac_set_tim,
+ .reconfig_complete = iwlagn_mac_reconfig_complete,
};
/* This function both allocates and initializes hw and priv. */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 769b75c3fa5b..30789ba06d9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1241,7 +1241,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
STATISTICS_NOTIFICATION,
REPLY_TX,
};
- int i;
+ int i, err;
/************************
* 1. Allocating HW data
@@ -1249,6 +1249,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
hw = iwl_alloc_all();
if (!hw) {
pr_err("%s: Cannot allocate network device\n", trans->name);
+ err = -ENOMEM;
goto out;
}
@@ -1299,8 +1300,10 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
break;
}
- if (WARN_ON(!priv->lib))
+ if (WARN_ON(!priv->lib)) {
+ err = -ENODEV;
goto out_free_hw;
+ }
/*
* Populate the state variables that the transport layer needs
@@ -1377,12 +1380,14 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->trans->name, priv->trans->hw_rev);
- if (iwl_trans_start_hw(priv->trans))
+ err = iwl_trans_start_hw(priv->trans);
+ if (err)
goto out_free_hw;
/* Read the EEPROM */
- if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
- &priv->eeprom_blob_size)) {
+ err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
+ &priv->eeprom_blob_size);
+ if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_free_hw;
}
@@ -1393,13 +1398,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
priv->nvm_data = iwl_parse_eeprom_data(priv->trans, priv->cfg,
priv->eeprom_blob,
priv->eeprom_blob_size);
- if (!priv->nvm_data)
+ if (!priv->nvm_data) {
+ err = -ENOMEM;
goto out_free_eeprom_blob;
+ }
- if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
+ err = iwl_nvm_check_version(priv->nvm_data, priv->trans);
+ if (err)
goto out_free_eeprom;
- if (iwl_eeprom_init_hw_params(priv))
+ err = iwl_eeprom_init_hw_params(priv);
+ if (err)
goto out_free_eeprom;
/* extract MAC Address */
@@ -1446,7 +1455,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
atomic_set(&priv->queue_stop_count[i], 0);
}
- if (iwl_init_drv(priv))
+ err = iwl_init_drv(priv);
+ if (err)
goto out_free_eeprom;
/* At this point both hw and priv are initialized. */
@@ -1480,7 +1490,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
*
* 7. Setup and register with mac80211 and debugfs
**************************************************/
- if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
+ err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
+ if (err)
goto out_destroy_workqueue;
iwl_dbgfs_register(priv, dbgfs_dir);
@@ -1500,8 +1511,7 @@ out_free_eeprom:
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
- op_mode = NULL;
- return op_mode;
+ return ERR_PTR(err);
}
static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
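With op-mode start now returning ERR_PTR(err) rather than NULL, callers are expected to use the IS_ERR()/PTR_ERR() helpers; a minimal, hypothetical caller sketch (the in-tree caller goes through the start() op, not a direct call):

/* Hypothetical caller sketch: consume an ERR_PTR-style constructor. */
static int demo_start_dvm(struct iwl_trans *trans, const struct iwl_cfg *cfg,
			  const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct iwl_op_mode *op_mode =
		iwl_op_mode_dvm_start(trans, cfg, fw, dbgfs_dir);

	if (IS_ERR(op_mode))
		return PTR_ERR(op_mode);	/* -ENOMEM, -ENODEV, ... */

	return 0;
}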
@@ -1895,17 +1905,9 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned int reload_msec;
unsigned long reload_jiffies;
- if (iwl_have_debug_level(IWL_DL_FW))
- iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
-
/* uCode is no longer loaded. */
priv->ucode_loaded = false;
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- iwl_abort_notification_waits(&priv->notif_wait);
-
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
clear_bit(STATUS_READY, &priv->status);
@@ -1942,27 +1944,43 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
}
}
-static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync)
+static void iwl_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+
+ iwl_abort_notification_waits(&priv->notif_wait);
+
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv))
+ return;
+
IWL_ERR(priv, "Loaded firmware version: %s\n",
priv->fw->fw_version);
- iwl_dump_nic_error_log(priv);
- iwl_dump_nic_event_log(priv, false, NULL);
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) {
+ IWL_ERR(priv, "Command queue full!\n");
+ } else {
+ iwl_dump_nic_error_log(priv);
+ iwl_dump_nic_event_log(priv, false, NULL);
+ }
- iwlagn_fw_error(priv, false);
+ if (iwl_have_debug_level(IWL_DL_FW))
+ iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
}
-static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+static bool iwlagn_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- if (!iwl_check_for_ct_kill(priv)) {
- IWL_ERR(priv, "Restarting adapter queue is full\n");
- iwlagn_fw_error(priv, false);
- }
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv))
+ return false;
+
+ iwlagn_fw_error(priv, false);
+ return true;
}
#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
@@ -2117,7 +2135,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.hw_rf_kill = iwl_set_hw_rfkill_state,
.free_skb = iwl_free_skb,
.nic_error = iwl_nic_error,
- .cmd_queue_full = iwl_cmd_queue_full,
+ .sw_reset = iwlagn_sw_reset,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
};
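The old cmd_queue_full hook is folded into the more general pair above: nic_error() reports and collects, while sw_reset() decides whether a restart actually happens (returning false, e.g., while CT-kill is in effect). A hypothetical sketch of how a transport might drive the two callbacks under that assumed contract:

/* Hypothetical driver-side sketch of the assumed callback contract. */
static void demo_report_fw_error(const struct iwl_op_mode_ops *ops,
				 struct iwl_op_mode *op_mode,
				 enum iwl_fw_error_type type)
{
	ops->nic_error(op_mode, type);		/* log and dump first */

	if (!ops->sw_reset(op_mode, type))	/* opmode may veto restart */
		return;				/* e.g. the CT-kill case */

	/* ...transport-level reset would continue here... */
}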
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 0bc32291815e..efa7b673ebc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -79,9 +79,9 @@ static void *iwl_acpi_get_object(struct device *dev, acpi_string method)
 * method (DSM) interface. The returned acpi object must be freed by the
 * calling function.
*/
-static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args,
- const guid_t *guid)
+union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args,
+ const guid_t *guid)
{
union acpi_object *obj;
@@ -108,7 +108,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
size_t expected_size)
{
union acpi_object *obj;
- int ret = 0;
+ int ret;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
if (IS_ERR(obj)) {
@@ -123,8 +123,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
} else if (obj->type == ACPI_TYPE_BUFFER) {
__le64 le_value = 0;
- if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
- return -EINVAL;
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value))) {
+ ret = -EINVAL;
+ goto out;
+ }
/* if the buffer size doesn't match the expected size */
if (obj->buffer.length != expected_size)
@@ -145,8 +147,9 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
}
IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method evaluated: func=%d, ret=%d\n",
- func, ret);
+ "ACPI: DSM method evaluated: func=%d, value=%lld\n",
+ func, *value);
+ ret = 0;
out:
ACPI_FREE(obj);
return ret;
@@ -259,13 +262,14 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
- int ret, tbl_rev, i, block_list_size, enabled;
+ int ret, tbl_rev, block_list_size, enabled;
+ u32 tas_selection;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read wtas table revision 1 or revision 0*/
+ /* try to read wtas table */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
@@ -274,27 +278,23 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
goto out_free;
}
- if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
- ACPI_TYPE_INTEGER) {
- u32 tas_selection =
- (u32)wifi_pkg->package.elements[1].integer.value;
-
- enabled = iwl_parse_tas_selection(fwrt, tas_data,
- tas_selection);
-
- } else if (tbl_rev == 0 &&
- wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
- enabled = !!wifi_pkg->package.elements[1].integer.value;
- } else {
+ if (tbl_rev < 0 || tbl_rev > 2 ||
+ wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
- if (!enabled) {
- IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
- ret = 0;
- goto out_free;
- }
+ tas_selection = (u32)wifi_pkg->package.elements[1].integer.value;
+ enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+ tas_data->table_source = BIOS_SOURCE_ACPI;
+ tas_data->table_revision = tbl_rev;
+ tas_data->tas_selection = tas_selection;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n",
+ enabled ? "is" : "not");
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
@@ -305,13 +305,14 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
ret = -EINVAL;
goto out_free;
}
+
block_list_size = wifi_pkg->package.elements[2].integer.value;
- tas_data->block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = block_list_size;
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- for (i = 0; i < block_list_size; i++) {
- u32 country;
+ for (int i = 0; i < block_list_size; i++) {
+ u16 country;
if (wifi_pkg->package.elements[3 + i].type !=
ACPI_TYPE_INTEGER) {
@@ -322,11 +323,11 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- tas_data->block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = country;
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
- ret = 1;
+ ret = enabled;
out_free:
kfree(data);
return ret;
@@ -1023,3 +1024,37 @@ out_free:
kfree(data);
return ret;
}
+
+int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret = -ENOENT;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_DSBR_METHOD);
+ if (IS_ERR(data))
+ return ret;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_DSBR_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != ACPI_DSBR_WIFI_DATA_REV) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI DSBR revision:%d\n",
+ tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
+
+ *value = wifi_pkg->package.elements[1].integer.value;
+ IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from ACPI value: 0x%x\n",
+ *value);
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
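A short usage sketch for the new getter (hypothetical caller; the in-tree consumer is iwl_bios_setup_step() further down, in regulatory.h):

/* Hypothetical caller: treat any error as "no DSBR configured". */
static u32 demo_read_dsbr(struct iwl_fw_runtime *fwrt)
{
	u32 dsbr;

	if (iwl_acpi_get_dsbr(fwrt, &dsbr))
		dsbr = 0;

	return dsbr;
}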
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index bb88398a6987..e50b93472dd2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -28,6 +28,7 @@
#define ACPI_WPFC_METHOD "WPFC"
#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WBEM_METHOD "WBEM"
+#define ACPI_DSBR_METHOD "DSBR"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -76,6 +77,13 @@
#define ACPI_WBEM_WIFI_DATA_SIZE 2
/*
* One element for domain type,
+ * and one for DSBR response data
+ */
+#define ACPI_DSBR_WIFI_DATA_SIZE 2
+#define ACPI_DSBR_WIFI_DATA_REV 1
+
+/*
+ * One element for domain type,
* and one for the status
*/
#define ACPI_GLAI_WIFI_DATA_SIZE 2
@@ -101,6 +109,30 @@
#define ACPI_DSM_REV 0
+#define DSM_INTERNAL_FUNC_GET_PLAT_INFO 1
+/* TBD: VPRO is BIT(0) in the result, but what's the result? */
+
+#define DSM_INTERNAL_FUNC_PRODUCT_RESET 2
+
+/* DSM_INTERNAL_FUNC_PRODUCT_RESET - product reset (aka "PLDR") */
+enum iwl_dsm_internal_product_reset_cmds {
+ DSM_INTERNAL_PLDR_CMD_GET_MODE = 1,
+ DSM_INTERNAL_PLDR_CMD_SET_MODE = 2,
+ DSM_INTERNAL_PLDR_CMD_GET_STATUS = 3,
+};
+
+enum iwl_dsm_internal_product_reset_mode {
+ DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET = BIT(0),
+ DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR = BIT(1),
+ DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON = BIT(2),
+};
+
+struct iwl_dsm_internal_product_reset_cmd {
+ /* cmd is from enum iwl_dsm_internal_product_reset_cmds */
+ u16 cmd;
+ u16 value;
+} __packed;
+
#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1))
#define IWL_ACPI_WBEM_REVISION 0
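For orientation, a hypothetical sketch of how the packed product-reset command might be populated (the actual DSM invocation is not part of this hunk):

/* Hypothetical sketch: fill the packed "PLDR" set-mode request. */
static void demo_build_pldr_cmd(struct iwl_dsm_internal_product_reset_cmd *cmd)
{
	BUILD_BUG_ON(sizeof(*cmd) != 4);	/* two u16s, __packed */

	cmd->cmd = DSM_INTERNAL_PLDR_CMD_SET_MODE;
	cmd->value = DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET |
		     DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR;
}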
@@ -110,6 +142,10 @@ struct iwl_fw_runtime;
extern const guid_t iwl_guid;
+union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args,
+ const guid_t *guid);
+
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
@@ -153,10 +189,14 @@ int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value);
int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+
+int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+
#else /* CONFIG_ACPI */
-static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
- int func, union acpi_object *args)
+static inline union acpi_object *
+iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args, const guid_t *guid)
{
return ERR_PTR(-ENOENT);
}
@@ -221,6 +261,11 @@ static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
{
return -ENOENT;
}
+
+static inline int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 2f40e69db318..34a1f97653c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -447,7 +447,7 @@ enum iwl_legacy_cmds {
/**
* @BA_NOTIF:
- * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
+ * BlockAck notification, uses &struct iwl_compressed_ba_notif
* or &struct iwl_mvm_ba_notif depending on the HW
*/
BA_NOTIF = 0xc5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 2ab38eaeb290..570a3f722510 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -391,7 +391,7 @@ enum iwl_datapath_monitor_notif_type {
struct iwl_datapath_monitor_notif {
__le32 type;
- u8 mac_id;
+ u8 link_id;
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index bea0f4668cc8..aa88e91d117e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -477,6 +477,9 @@ struct iwl_mvm_tas_status_per_mac {
* @tas_status_mac: TAS status per lmac, uses
* &struct iwl_mvm_tas_status_per_mac
* @in_dual_radio: is TAS in dual radio? - TRUE/FALSE
+ * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
+ * This member is valid only when fw has
+ * %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
* @reserved: reserved
*/
struct iwl_mvm_tas_status_resp {
@@ -486,7 +489,8 @@ struct iwl_mvm_tas_status_resp {
__le16 block_list[16];
struct iwl_mvm_tas_status_per_mac tas_status_mac[2];
u8 in_dual_radio;
- u8 reserved[3];
+ u8 uhb_allowed_flags;
+ u8 reserved[2];
} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b23d5fc4bbe6..37bb7002c1c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -74,7 +74,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_NOTIF = 0xF8,
/**
- * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_session_prot_notif
*/
SESSION_PROTECTION_NOTIF = 0xFB,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index d424d0126367..5cdc09d465d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -464,21 +464,30 @@ struct iwl_tas_config_cmd_v3 {
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
+ * enum iwl_tas_uhb_allowed_flags - per country TAS UHB allowed flags.
+ * @TAS_UHB_ALLOWED_CANADA: TAS UHB is allowed in Canada. This flag is valid
+ * only when fw has %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
+ */
+enum iwl_tas_uhb_allowed_flags {
+ TAS_UHB_ALLOWED_CANADA = BIT(0),
+};
+
+/**
* struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
* @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
- * @reserved: reserved
-*/
+ * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
+ */
struct iwl_tas_config_cmd_v4 {
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
- u8 reserved;
+ u8 uhb_allowed_flags;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-struct iwl_tas_config_cmd {
+struct iwl_tas_config_cmd_v2_v4 {
struct iwl_tas_config_cmd_common common;
union {
struct iwl_tas_config_cmd_v3 v3;
@@ -487,6 +496,46 @@ struct iwl_tas_config_cmd {
};
/**
+ * enum bios_source - source of bios data
+ * @BIOS_SOURCE_NONE: BIOS source is not defined
+ * @BIOS_SOURCE_ACPI: BIOS source is ACPI
+ * @BIOS_SOURCE_UEFI: BIOS source is UEFI
+ */
+enum bios_source {
+ BIOS_SOURCE_NONE,
+ BIOS_SOURCE_ACPI,
+ BIOS_SOURCE_UEFI,
+};
+
+/**
+ * struct bios_value_u32 - BIOS configuration.
+ * @table_source: see &enum bios_source
+ * @table_revision: table revision.
+ * @reserved: reserved
+ * @value: value in bios.
+ */
+struct bios_value_u32 {
+ u8 table_source;
+ u8 table_revision;
+ u8 reserved[2];
+ __le32 value;
+} __packed; /* BIOS_TABLE_SOURCE_U32_S_VER_1 */
+
+/**
+ * struct iwl_tas_config_cmd - configures the TAS.
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @reserved: reserved
+ * @tas_config_info: see &struct bios_value_u32
+ */
+struct iwl_tas_config_cmd {
+ __le16 block_list_size;
+ __le16 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 reserved[2];
+ struct bios_value_u32 tas_config_info;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_5 */
+
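A hypothetical sketch of marshalling the parsed BIOS data into the v5 command, minding the little-endian wire fields (the real fill code lives in the opmode, outside this hunk):

/* Hypothetical sketch: host-order iwl_tas_data -> wire-order v5 command. */
static void demo_fill_tas_cmd(struct iwl_tas_config_cmd *cmd,
			      const struct iwl_tas_data *data)
{
	cmd->block_list_size = cpu_to_le16(data->block_list_size);
	for (int i = 0; i < data->block_list_size; i++)
		cmd->block_list_array[i] =
			cpu_to_le16(data->block_list_array[i]);

	cmd->tas_config_info.table_source = data->table_source;
	cmd->tas_config_info.table_revision = data->table_revision;
	cmd->tas_config_info.value = cpu_to_le32(data->tas_selection);
}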
+/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 6a7bbfd6b2b7..9b09b835560b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -31,7 +31,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif_v2 or
- * &struct iwl_stored_beacon_notif_v3
+ * &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
};
@@ -71,18 +71,18 @@ struct iwl_stored_beacon_notif_v2 {
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
/**
- * struct iwl_stored_beacon_notif_v3 - Stored beacon notification
+ * struct iwl_stored_beacon_notif - Stored beacon notification
*
* @common: fields common for all versions
* @sta_id: station for which the beacon was received
* @reserved: reserved for alignment
* @data: beacon data, length in @byte_count
*/
-struct iwl_stored_beacon_notif_v3 {
+struct iwl_stored_beacon_notif {
struct iwl_stored_beacon_notif_common common;
u8 sta_id;
u8 reserved[3];
u8 data[MAX_STORED_BEACON_SIZE];
-} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3, _VER_4 */
#endif /* __iwl_fw_api_offload_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index df0680eae30c..37ec26596ee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -266,7 +266,7 @@ struct iwl_reduce_tx_power_cmd {
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
enum iwl_dev_tx_power_cmd_mode {
- IWL_TX_POWER_MODE_SET_MAC = 0,
+ IWL_TX_POWER_MODE_SET_LINK = 0,
IWL_TX_POWER_MODE_SET_DEVICE = 1,
IWL_TX_POWER_MODE_SET_CHAINS = 2,
IWL_TX_POWER_MODE_SET_ACK = 3,
@@ -283,12 +283,14 @@ enum iwl_dev_tx_power_cmd_mode {
/**
* struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
- * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @link_id: id of the link ctx for which we are reducing TX power.
+ * For version 9 / 10, this is the link id. For earlier versions, it is
+ * the mac id.
* @pwr_restriction: TX power restriction in 1/8 dBms.
*/
struct iwl_dev_tx_power_common {
__le32 set_mode;
- __le32 mac_context_id;
+ __le32 link_id;
__le16 pwr_restriction;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 893438aadab0..cfa6532a3cdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -10,7 +10,7 @@
#include "fw/api/tx.h"
#include "fw/api/phy-ctxt.h"
-#define IWL_MVM_TDLS_STA_COUNT 4
+#define IWL_TDLS_STA_COUNT 4
/* Type of TDLS request */
enum iwl_tdls_channel_switch_type {
@@ -128,7 +128,7 @@ struct iwl_tdls_config_cmd {
u8 tdls_peer_count;
u8 tx_to_ap_tid;
__le16 tx_to_ap_ssn;
- struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+ struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
@@ -155,7 +155,7 @@ struct iwl_tdls_config_sta_info_res {
*/
struct iwl_tdls_config_res {
__le32 tx_to_ap_last_seq;
- struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+ struct iwl_tdls_config_sta_info_res sta_info[IWL_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#endif /* __iwl_fw_api_tdls_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f4b827b58bd3..18d030334a6a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -395,7 +395,7 @@ struct iwl_roc_notif {
} __packed; /* ROC_NOTIF_API_S_VER_1 */
/**
- * enum iwl_mvm_session_prot_conf_id - session protection's configurations
+ * enum iwl_session_prot_conf_id - session protection's configurations
* @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
* The firmware will allocate two events.
* Valid for BSS_STA and P2P_STA.
@@ -424,7 +424,7 @@ struct iwl_roc_notif {
* be taken into account.
* @SESSION_PROTECT_CONF_MAX_ID: not used
*/
-enum iwl_mvm_session_prot_conf_id {
+enum iwl_session_prot_conf_id {
SESSION_PROTECT_CONF_ASSOC,
SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
@@ -433,12 +433,12 @@ enum iwl_mvm_session_prot_conf_id {
}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
/**
- * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * struct iwl_session_prot_cmd - configure a session protection
* @id_and_color: the id and color of the link (or mac, for command version 1)
* for which this session protection is sent
* @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE,
* see &enum iwl_ctxt_action
- * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @conf_id: see &enum iwl_session_prot_conf_id
* @duration_tu: the duration of the whole protection in TUs.
* @repetition_count: not used
* @interval: not used
@@ -448,7 +448,7 @@ enum iwl_mvm_session_prot_conf_id {
* The firmware supports only one concurrent session protection per vif.
* Adding a new session protection will remove any currently running session.
*/
-struct iwl_mvm_session_prot_cmd {
+struct iwl_session_prot_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
__le32 id_and_color;
__le32 action;
@@ -462,17 +462,17 @@ struct iwl_mvm_session_prot_cmd {
*/
/**
- * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * struct iwl_session_prot_notif - session protection started / ended
* @mac_link_id: the mac id (or link id, for notif ver > 2) for which the
* session protection started / ended
* @status: 1 means success, 0 means failure
* @start: 1 means the session protection started, 0 means it ended
- * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @conf_id: see &enum iwl_session_prot_conf_id
*
* Note that any session protection will always get two notifications: start
 * and end, even if the firmware could not schedule it.
*/
-struct iwl_mvm_session_prot_notif {
+struct iwl_session_prot_notif {
__le32 mac_link_id;
__le32 status;
__le32 start;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f3bf2e087a40..0a39e4b6eb62 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -191,7 +191,7 @@ enum iwl_tx_offload_assist_flags_pos {
* cleared. Combination of RATE_MCS_*
* @sta_id: index of destination station in FW station table
* @sec_ctl: security control, TX_CMD_SEC_*
- * @initial_rate_index: index into the the rate table for initial TX attempt.
+ * @initial_rate_index: index into the rate table for initial TX attempt.
* Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
* @reserved2: reserved
* @key: security key
@@ -298,8 +298,7 @@ struct iwl_tx_cmd_gen3 {
__le32 rate_n_flags;
u8 reserved[8];
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_8,
- TX_CMD_API_S_VER_10 */
+} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */
/*
* TX response related data
@@ -482,8 +481,8 @@ struct agg_tx_status {
#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
TX_RES_RATE_TABLE_COLOR_POS)
-#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
-#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+#define IWL_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
/**
* struct iwl_tx_resp_v3 - notifies that fw is TXing a packet
@@ -601,7 +600,8 @@ struct iwl_tx_resp {
__le16 reserved2;
struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_6,
- TX_RSP_API_S_VER_7 */
+ TX_RSP_API_S_VER_7,
+ TX_RSP_API_S_VER_8 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -638,14 +638,14 @@ struct iwl_mvm_ba_notif {
} __packed;
/**
- * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * struct iwl_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
* @scd_queue: For debug only - the physical queue the TFD queue is bound to
* @tid: TID of the queue (0-7)
* @reserved: reserved for alignment
*/
-struct iwl_mvm_compressed_ba_tfd {
+struct iwl_compressed_ba_tfd {
__le16 q_num;
__le16 tfd_index;
u8 scd_queue;
@@ -654,12 +654,12 @@ struct iwl_mvm_compressed_ba_tfd {
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
- * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * struct iwl_compressed_ba_ratid - progress of a RA TID queue
* @q_num: RA TID queue number
* @tid: TID of the queue
* @ssn: BA window current SSN
*/
-struct iwl_mvm_compressed_ba_ratid {
+struct iwl_compressed_ba_ratid {
u8 q_num;
u8 tid;
__le16 ssn;
@@ -685,7 +685,7 @@ enum iwl_mvm_ba_resp_flags {
};
/**
- * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * struct iwl_compressed_ba_notif - notifies about reception of BA
* ( BA_NOTIF = 0xc5 )
* @flags: status flag, see the &iwl_mvm_ba_resp_flags
* @sta_id: Index of recipient (BA-sending) station in fw's station table
@@ -704,12 +704,12 @@ enum iwl_mvm_ba_resp_flags {
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
- * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * @tfd: array of TFD queue status updates. See &iwl_compressed_ba_tfd
* for details. Length in @tfd_cnt.
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
- * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
+ * &iwl_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
*/
-struct iwl_mvm_compressed_ba_notif {
+struct iwl_compressed_ba_notif {
__le32 flags;
u8 sta_id;
u8 reduced_txp;
@@ -726,8 +726,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 tfd_cnt;
__le16 ra_tid_cnt;
union {
- DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
- DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
+ DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_ratid, ra_tid);
+ DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd);
};
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
COMPRESSED_BA_RES_API_S_VER_5 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fb2ea38e89ac..6594216f873c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
}
/*
- * alloc_sgtable - allocates scallerlist table in the given size,
- * fills it with pages and returns it
+ * alloc_sgtable - allocates (chained) scatterlist in the given size,
+ * fills it with pages and returns it
* @size: the size (in bytes) of the table
-*/
-static struct scatterlist *alloc_sgtable(int size)
+ */
+static struct scatterlist *alloc_sgtable(ssize_t size)
{
- int alloc_size, nents, i;
- struct page *new_page;
- struct scatterlist *iter;
- struct scatterlist *table;
+ struct scatterlist *result = NULL, *prev;
+ int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE);
- table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
- if (!table)
- return NULL;
- sg_init_table(table, nents);
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = alloc_page(GFP_KERNEL);
- if (!new_page) {
- /* release all previous allocated pages in the table */
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = sg_page(iter);
- if (new_page)
- __free_page(new_page);
- }
- kfree(table);
+
+#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
+ /*
+ * We need an additional entry for table chaining,
+ * this ensures the loop can finish i.e. we can
+ * fit at least two entries per page (obviously,
+ * many more really fit.)
+ */
+ BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
+
+ while (nents > 0) {
+ struct scatterlist *new, *iter;
+ int n_fill, n_alloc;
+
+ if (nents <= N_ENTRIES_PER_PAGE) {
+ /* last needed table */
+ n_fill = nents;
+ n_alloc = nents;
+ nents = 0;
+ } else {
+ /* fill a page with entries */
+ n_alloc = N_ENTRIES_PER_PAGE;
+ /* reserve one for chaining */
+ n_fill = n_alloc - 1;
+ nents -= n_fill;
+ }
+
+ new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ if (result)
+ _devcd_free_sgtable(result);
return NULL;
}
- alloc_size = min_t(int, size, PAGE_SIZE);
- size -= PAGE_SIZE;
- sg_set_page(iter, new_page, alloc_size, 0);
+ sg_init_table(new, n_alloc);
+
+ if (!result)
+ result = new;
+ else
+ sg_chain(prev, n_prev, new);
+ prev = new;
+ n_prev = n_alloc;
+
+ for_each_sg(new, iter, n_fill, i) {
+ struct page *new_page = alloc_page(GFP_KERNEL);
+
+ if (!new_page) {
+ _devcd_free_sgtable(result);
+ return NULL;
+ }
+
+ sg_set_page(iter, new_page, PAGE_SIZE, 0);
+ }
}
- return table;
+
+ return result;
}
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
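The chaining arithmetic above is easiest to see with concrete numbers; a standalone sketch (assuming a 4 KiB page and a 32-byte struct scatterlist, both hypothetical sizes) that counts how many sg tables the loop would allocate:

#include <stdio.h>

int main(void)
{
	const long page_size = 4096, sg_size = 32;	/* assumed sizes */
	const long per_page = page_size / sg_size;	/* 128 entries/page */
	long nents = 1000, tables = 0;

	while (nents > 0) {
		if (nents <= per_page)
			nents = 0;		/* last table holds the rest */
		else
			nents -= per_page - 1;	/* one slot kept for chaining */
		tables++;
	}
	printf("%ld sg tables needed\n", tables);	/* 8 for this example */
	return 0;
}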
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index f4803b55adb9..87998374f459 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -287,7 +287,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
trans->dbg.umac_error_event_table = umac_error_event_table;
}
-static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
+static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_time_point tp_id;
@@ -303,7 +303,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
- _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
+ iwl_dbg_tlv_time_point_sync(fwrt, tp_id, NULL);
}
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 893b21fcaf87..f0c813d675f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -123,6 +123,24 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ if (count == 0)
+ return 0;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return count;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL);
+
+ iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL);
+
+ return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 16);
+
static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count)
{
@@ -282,6 +300,26 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
+static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ char *pos = buf;
+ char *endpos = buf + size;
+
+ pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
+ fwrt->fw->fw_version);
+ pos += scnprintf(pos, endpos - pos, "FW: %s\n",
+ fwrt->fw->human_readable);
+ pos += scnprintf(pos, endpos - pos, "Device: %s\n",
+ fwrt->trans->name);
+ pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
+ fwrt->dev->bus->name);
+
+ return pos - buf;
+}
+
+FWRT_DEBUGFS_READ_FILE_OPS(fw_ver, 1024);
+
struct iwl_dbgfs_fw_info_priv {
struct iwl_fw_runtime *fwrt;
};
@@ -403,5 +441,7 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_collect, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
+ FWRT_DEBUGFS_ADD_FILE(fw_ver, dbgfs_dir, 0400);
}
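Reading the new fw_ver node returns the four lines built by the scnprintf() calls above; roughly (placeholder values, for illustration only):

/* Illustrative fw_ver contents (all values are placeholders):
 *
 *   FW id: <fwrt->fw->fw_version>
 *   FW: <fwrt->fw->human_readable>
 *   Device: <fwrt->trans->name>
 *   Bus: <fwrt->dev->bus->name, e.g. "pci">
 */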
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index 8e0c85a1240d..c7b261c8ec96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -540,6 +540,9 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id)
} err_info = {};
int ret;
+ if (err_id)
+ *err_id = 0;
+
if (!base)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index e63b08b7d336..3af275133da0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -169,7 +169,7 @@ struct iwl_fw_error_dump_info {
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
* @fw_mon_base_ptr: base pointer of the data
* @fw_mon_cycle_cnt: number of wraparounds
- * @fw_mon_base_high_ptr: used in AX210 devices, the base adderss is 64 bit
+ * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit
 * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds
* MSB 32 bits
* @reserved: for future use
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index ae05227b6153..9860903ecd3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -104,6 +104,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_CURRENT_PC = 68,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
+ IWL_UCODE_TLV_FW_NUM_LINKS = IWL_UCODE_TLV_CONST_BASE + 1,
IWL_UCODE_TLV_FW_NUM_BEACONS = IWL_UCODE_TLV_CONST_BASE + 2,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
@@ -384,7 +385,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* to report the CSI information with (certain) RX frames
* @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
* initiator and responder
- * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA: supports (de)activating UNII-4
+ * for US/CA/WW from BIOS
* @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
* @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in
* reset flow
@@ -397,6 +399,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
* @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise
* passive channels
+ * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA: supports (de)activating 5G9
+ * for CA from BIOS.
+ * @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -474,7 +479,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93,
/* set 3 */
- IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA = (__force iwl_ucode_tlv_capa_t)96,
/*
* @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels
@@ -497,6 +502,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA = (__force iwl_ucode_tlv_capa_t)123,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT = (__force iwl_ucode_tlv_capa_t)124,
NUM_IWL_UCODE_TLV_CAPA
/*
 * This construction makes both sparse (which cannot increment the previous
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index b7deca05a953..c2f4fc83a22c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2019 - 2021 Intel Corporation
+ * Copyright(c) 2024 Intel Corporation
*/
#include <fw/api/commands.h>
#include "img.h"
@@ -75,6 +76,7 @@ static const struct {
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "NMI_INTERRUPT_PREG", 0x88 },
{ "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
{ "ADVANCED_SYSASSERT", 0 },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 96bda80632f3..f9de139561a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -51,6 +51,7 @@ struct iwl_ucode_capabilities {
u32 error_log_addr;
u32 error_log_size;
u32 num_stations;
+ u32 num_links;
u32 num_beacons;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 945bc4160cc9..a7b7cae874a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -249,7 +249,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
};
int blk_idx;
- /* loop for for all paging blocks + CSS block */
+ /* loop for all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
__le32 phy_addr;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 4d9a1f83ef8c..ea435ee94312 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -39,6 +39,7 @@ IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
+IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
@@ -100,6 +101,11 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
},
},
+ { .ident = "WIKO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WIKO"),
+ },
+ },
{}
};
@@ -424,25 +430,31 @@ bool iwl_is_tas_approved(void)
}
IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
-int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_data *tas_data,
- const u32 tas_selection)
+struct iwl_tas_selection_data
+iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
{
- u8 override_iec = u32_get_bits(tas_selection,
+ struct iwl_tas_selection_data tas_selection_out = {};
+ u8 override_iec = u32_get_bits(tas_selection_in,
IWL_WTAS_OVERRIDE_IEC_MSK);
- u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
- u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
- int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
-
- IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
- tas_selection);
+ u8 canada_tas_uhb = u32_get_bits(tas_selection_in,
+ IWL_WTAS_CANADA_UHB_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection_in,
+ IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection_in,
+ IWL_WTAS_USA_UHB_MSK);
+
+ if (tbl_rev > 0) {
+ tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_selection_out.override_tas_iec = override_iec;
+ tas_selection_out.enable_tas_iec = enabled_iec;
+ }
- tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
- tas_data->override_tas_iec = override_iec;
- tas_data->enable_tas_iec = enabled_iec;
+ if (tbl_rev > 1)
+ tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb;
- return enabled;
+ return tas_selection_out;
}
+IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);
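To make the bit juggling concrete, a worked decode using the masks declared in regulatory.h below (BIT(0) enable, BIT(1) override IEC, BIT(2) enable IEC, BIT(15) Canada UHB, BIT(16) USA UHB); the word 0x18005 is a hypothetical BIOS value:

static void demo_decode_tas_selection(void)
{
	/* 0x18005 sets bits 0, 2, 15 and 16 */
	struct iwl_tas_selection_data sel = iwl_parse_tas_selection(0x18005, 2);

	/* With tbl_rev == 2:
	 *   sel.override_tas_iec       == 0  (bit 1 clear)
	 *   sel.enable_tas_iec         == 1  (bit 2)
	 *   sel.usa_tas_uhb_allowed    == 1  (bit 16)
	 *   sel.canada_tas_uhb_allowed == 1  (bit 15, honoured only for rev > 1)
	 * The enable bit itself (BIT(0)) is checked by the callers instead.
	 */
	(void)sel;
}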
static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
@@ -552,10 +564,16 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret) {
- if (cmd_ver < 9)
- value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
- else
- value &= DSM_UNII4_ALLOW_BITMAP;
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ /* Since version 9, bits 4 and 5 are supported
+ * regardless of this capability.
+ */
+ if (cmd_ver < 9 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
+ value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
+ DSM_VALUE_UNII4_CANADA_EN_MSK);
cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
}
@@ -564,7 +582,13 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
- if (cmd_ver < 12)
+
+ /* Since version 12, bits 5 and 6 are supported
+ * regardless of this capability.
+ */
+ if (cmd_ver < 12 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
cmd->chan_state_active_bitmap = cpu_to_le32(value);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 81787501d4a4..b355d7bef14c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -40,11 +40,19 @@
#define IWL_PPAG_ETSI_CHINA_MASK 3
#define IWL_PPAG_REV3_MASK 0x7FF
-#define IWL_WTAS_ENABLED_MSK 0x1
-#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
-#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_ENABLED_MSK BIT(0)
+#define IWL_WTAS_OVERRIDE_IEC_MSK BIT(1)
+#define IWL_WTAS_ENABLE_IEC_MSK BIT(2)
+#define IWL_WTAS_CANADA_UHB_MSK BIT(15)
#define IWL_WTAS_USA_UHB_MSK BIT(16)
+struct iwl_tas_selection_data {
+ u8 override_tas_iec:1,
+ enable_tas_iec:1,
+ usa_tas_uhb_allowed:1,
+ canada_tas_uhb_allowed:1;
+};
+
#define BIOS_MCC_CHINA 0x434e
/*
@@ -97,11 +105,11 @@ struct iwl_ppag_chain {
};
struct iwl_tas_data {
- __le32 block_list_size;
- __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
- u8 override_tas_iec;
- u8 enable_tas_iec;
- u8 usa_tas_uhb_allowed;
+ u8 block_list_size;
+ u16 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 table_source;
+ u8 table_revision;
+ u32 tas_selection;
};
/* For DSM revision 0 and 4 */
@@ -144,12 +152,11 @@ enum iwl_dsm_unii4_bitmap {
DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5),
};
-#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \
- DSM_VALUE_UNII4_US_EN_MSK | \
- DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \
- DSM_VALUE_UNII4_ETSI_EN_MSK)
-#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \
- DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \
+#define DSM_UNII4_ALLOW_BITMAP (DSM_VALUE_UNII4_US_OVERRIDE_MSK |\
+ DSM_VALUE_UNII4_US_EN_MSK |\
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK |\
+ DSM_VALUE_UNII4_ETSI_EN_MSK |\
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |\
DSM_VALUE_UNII4_CANADA_EN_MSK)
enum iwl_dsm_values_rfi {
@@ -184,9 +191,8 @@ bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
bool iwl_is_tas_approved(void);
-int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_data *tas_data,
- const u32 tas_selection);
+struct iwl_tas_selection_data
+iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev);
int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
@@ -221,4 +227,27 @@ static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
}
bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
+
+#define IWL_DSBR_FW_MODIFIED_URM_MASK BIT(8)
+#define IWL_DSBR_PERMANENT_URM_MASK BIT(9)
+
+int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+
+static inline void iwl_bios_setup_step(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ u32 dsbr;
+
+ if (!trans->trans_cfg->integrated)
+ return;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ return;
+
+ if (iwl_bios_get_dsbr(fwrt, &dsbr))
+ dsbr = 0;
+
+ trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
+ trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
+}
#endif /* __fw_regulatory_h__ */
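And a worked decode of the two URM bits consumed by iwl_bios_setup_step() above (the DSBR word is hypothetical):

static void demo_decode_dsbr(void)
{
	u32 dsbr = 0x300;	/* bits 8 and 9 set */
	bool urm_fw_dependent =
		!!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);	/* true */
	bool urm_permanent =
		!!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);		/* true */

	(void)urm_fw_dependent;
	(void)urm_permanent;
}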
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 091fb6fd7c78..434eed4130b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -13,9 +13,12 @@
#include <linux/efi.h>
#include "fw/runtime.h"
-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
- 0xb2, 0xec, 0xf5, 0xa3, \
- 0x59, 0x4f, 0x4a, 0xea)
+#define IWL_EFI_WIFI_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
+ 0xb2, 0xec, 0xf5, 0xa3, \
+ 0x59, 0x4f, 0x4a, 0xea)
+#define IWL_EFI_WIFI_BT_GUID EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, \
+ 0x8d, 0x03, 0x77, 0x2e, \
+ 0xcc, 0x3d, 0xa5, 0x31)
struct iwl_uefi_pnvm_mem_desc {
__le32 addr;
@@ -61,7 +64,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
*len = 0;
- data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
+ data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_WIFI_GUID,
&package_size);
if (IS_ERR(data)) {
IWL_DEBUG_FW(trans,
@@ -76,18 +79,18 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
-static
-void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
- efi_char16_t *uefi_var_name,
- char *var_name,
- unsigned int expected_size,
- unsigned long *size)
+static void *
+iwl_uefi_get_verified_variable_guid(struct iwl_trans *trans,
+ efi_guid_t *guid,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
{
void *var;
unsigned long var_size;
- var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
- &var_size);
+ var = iwl_uefi_get_variable(uefi_var_name, guid, &var_size);
if (IS_ERR(var)) {
IWL_DEBUG_RADIO(trans,
@@ -112,6 +115,18 @@ void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
return var;
}
+static void *
+iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ return iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_GUID,
+ uefi_var_name, var_name,
+ expected_size, size);
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -311,8 +326,9 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
- "STEP", sizeof(*data), NULL);
+ data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
if (IS_ERR(data))
return;
@@ -554,27 +570,31 @@ int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
struct iwl_tas_data *tas_data)
{
struct uefi_cnv_var_wtas *uefi_tas;
- int ret = 0, enabled, i;
+ int ret, enabled;
uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
"WTAS", sizeof(*uefi_tas), NULL);
if (IS_ERR(uefi_tas))
return -EINVAL;
- if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ if (uefi_tas->revision < IWL_UEFI_MIN_WTAS_REVISION ||
+ uefi_tas->revision > IWL_UEFI_MAX_WTAS_REVISION) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
uefi_tas->revision);
goto out;
}
- enabled = iwl_parse_tas_selection(fwrt, tas_data,
- uefi_tas->tas_selection);
- if (!enabled) {
- IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
- ret = 0;
- goto out;
- }
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ uefi_tas->tas_selection);
+
+ enabled = uefi_tas->tas_selection & IWL_WTAS_ENABLED_MSK;
+ tas_data->table_source = BIOS_SOURCE_UEFI;
+ tas_data->table_revision = uefi_tas->revision;
+ tas_data->tas_selection = uefi_tas->tas_selection;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n",
+ enabled ? "is" : "not");
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
uefi_tas->revision);
@@ -584,15 +604,16 @@ int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
ret = -EINVAL;
goto out;
}
- tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+
+ tas_data->block_list_size = uefi_tas->black_list_size;
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
- for (i = 0; i < uefi_tas->black_list_size; i++) {
- tas_data->block_list_array[i] =
- cpu_to_le32(uefi_tas->black_list[i]);
+ for (u8 i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] = uefi_tas->black_list[i];
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
uefi_tas->black_list[i]);
}
+ ret = enabled;
out:
kfree(uefi_tas);
return ret;
@@ -758,3 +779,29 @@ int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
return puncturing;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing);
+
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ struct uefi_cnv_wlan_dsbr_data *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
+ &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_DSBR_NAME, "DSBR",
+ sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSBR_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSBR revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *value = data->config;
+ IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from UEFI value: 0x%x\n",
+ *value);
+out:
+ kfree(data);
+ return ret;
+}
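A hedged usage sketch of the new getter (fwrt assumed valid): both a missing variable and an unsupported revision collapse to -EINVAL, and the caller falls back to zero exactly as iwl_bios_setup_step() does:

u32 dsbr;

if (iwl_uefi_get_dsbr(fwrt, &dsbr))
	dsbr = 0;	/* variable absent or unsupported revision */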
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index e525d449e656..0c8943a8bd01 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -23,6 +23,7 @@
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
+#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR"
#define IWL_SGOM_MAP_SIZE 339
@@ -33,13 +34,15 @@
#define IWL_UEFI_WGDS_REVISION 3
#define IWL_UEFI_MIN_PPAG_REV 1
#define IWL_UEFI_MAX_PPAG_REV 3
-#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_MIN_WTAS_REVISION 1
+#define IWL_UEFI_MAX_WTAS_REVISION 2
#define IWL_UEFI_SPLC_REVISION 0
#define IWL_UEFI_WRDD_REVISION 0
#define IWL_UEFI_ECKV_REVISION 0
#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
#define IWL_UEFI_PUNCTURING_REVISION 0
+#define IWL_UEFI_DSBR_REVISION 1
struct pnvm_sku_package {
u8 rev;
@@ -213,6 +216,20 @@ struct uefi_cnv_var_puncturing_data {
u32 puncturing;
} __packed;
+/**
+ * struct uefi_cnv_wlan_dsbr_data - BIOS STEP configuration information
+ * @revision: the revision of the table
+ * @config: STEP configuration flags:
+ * bit 8, switch to URM depending on FW setting
+ * bit 9, switch to URM
+ *
+ * Platform information for STEP configuration/workarounds.
+ */
+struct uefi_cnv_wlan_dsbr_data {
+ u8 revision;
+ u32 config;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -244,6 +261,7 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwr
int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -346,5 +364,11 @@ int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
{
return 0;
}
+
+static inline
+int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
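Because struct uefi_cnv_wlan_dsbr_data is __packed, the verified-variable read above checks for exactly 5 bytes (u8 revision followed by an unaligned u32 config). A compile-time sketch of that assumption:

#include <linux/build_bug.h>

static_assert(sizeof(struct uefi_cnv_wlan_dsbr_data) == 5,
	      "UEFI DSBR variable is a revision byte plus a config dword");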
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 17721bb47e25..2b6a80142aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -38,6 +38,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_AX210,
IWL_DEVICE_FAMILY_BZ,
IWL_DEVICE_FAMILY_SC,
+ IWL_DEVICE_FAMILY_DR,
};
/*
@@ -102,6 +103,10 @@ enum iwl_nvm_type {
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+#define IWL_FW_AND_PNVM(pfx, api) \
+ MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \
+ MODULE_FIRMWARE(pfx ".pnvm")
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -424,6 +429,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SC2 0x49
#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_MAC_TYPE_BZ_W 0x4B
+#define IWL_CFG_MAC_TYPE_BR 0x4C
+#define IWL_CFG_MAC_TYPE_DR 0x4D
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -434,6 +441,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_TYPE_WH 0x113
+#define IWL_CFG_RF_TYPE_PE 0x114
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -506,6 +514,8 @@ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -551,6 +561,8 @@ extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
extern const char iwl_sc2_name[];
extern const char iwl_sc2f_name[];
+extern const char iwl_dr_name[];
+extern const char iwl_br_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -649,7 +661,6 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwl_cfg_ma;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
-extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz;
@@ -658,6 +669,8 @@ extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
extern const struct iwl_cfg iwl_cfg_sc2;
extern const struct iwl_cfg iwl_cfg_sc2f;
+extern const struct iwl_cfg iwl_cfg_dr;
+extern const struct iwl_cfg iwl_cfg_br;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
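The new IWL_FW_AND_PNVM() helper simply stamps out the paired MODULE_FIRMWARE() entries; an expansion sketch with a hypothetical firmware prefix and API level:

IWL_FW_AND_PNVM("iwlwifi-dr-b0-fm-b0", 97);
/* expands to:
 * MODULE_FIRMWARE("iwlwifi-dr-b0-fm-b0" "-" "97" ".ucode");
 * MODULE_FIRMWARE("iwlwifi-dr-b0-fm-b0" ".pnvm");
 */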
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 5b62933134cf..cd25a1b9f2ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -76,7 +76,17 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
};
-/*
+/**
+ * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
+ * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
+ * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
+ */
+enum iwl_prph_scratch_ext_flags {
+ IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
+ IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+};
+
+/**
* struct iwl_prph_scratch_version - version structure
* @mac_id: SKU and revision id
* @version: prph scratch information version id
@@ -90,17 +100,18 @@ struct iwl_prph_scratch_version {
__le16 reserved;
} __packed; /* PERIPH_SCRATCH_VERSION_S */
-/*
+/**
* struct iwl_prph_scratch_control - control structure
* @control_flags: context information flags see &enum iwl_prph_scratch_flags
- * @reserved: reserved
+ * @control_flags_ext: context information for extended flags,
+ * see &enum iwl_prph_scratch_ext_flags
*/
struct iwl_prph_scratch_control {
__le32 control_flags;
- __le32 reserved;
+ __le32 control_flags_ext;
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
-/*
+/**
* struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
* @pnvm_base_addr: PNVM start address
* @pnvm_size: the size of the PNVM image in bytes
@@ -120,7 +131,8 @@ struct iwl_prph_scratch_pnvm_cfg {
struct iwl_prph_scrath_mem_desc_addr_array {
__le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
-/*
+
+/**
* struct iwl_prph_scratch_hwm_cfg - hwm config
* @hwm_base_addr: hwm start address
* @hwm_size: hwm size in DWs
@@ -132,7 +144,7 @@ struct iwl_prph_scratch_hwm_cfg {
__le32 debug_token_config;
} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @reserved: reserved
@@ -142,10 +154,11 @@ struct iwl_prph_scratch_rbd_cfg {
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
* @base_addr: reduce power table address
* @size: the size of the entire power table image
+ * @reserved: (reserved)
*/
struct iwl_prph_scratch_uefi_cfg {
__le64 base_addr;
@@ -153,7 +166,7 @@ struct iwl_prph_scratch_uefi_cfg {
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
-/*
+/**
* struct iwl_prph_scratch_step_cfg - prph scratch step configuration
* @mbx_addr_0: [0:7] revision,
* [8:15] cnvi_to_cnvr length,
@@ -167,13 +180,14 @@ struct iwl_prph_scratch_step_cfg {
__le32 mbx_addr_1;
} __packed;
-/*
+/**
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @pnvm_cfg: ror configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
+ * @reduce_power_cfg: UEFI power reduction table
* @step_cfg: step configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
@@ -186,7 +200,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
-/*
+/**
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
* @dram: firmware images addresses in DRAM
@@ -202,7 +216,7 @@ struct iwl_prph_scratch {
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
-/*
+/**
* struct iwl_prph_info - peripheral information
* @boot_stage_mirror: reflects the value in the Boot Stage CSR register
* @ipc_status_mirror: reflects the value in the IPC Status CSR register
@@ -216,7 +230,7 @@ struct iwl_prph_info {
__le32 reserved;
} __packed; /* PERIPH_INFO_S */
-/*
+/**
* struct iwl_context_info_gen3 - device INIT configuration
* @version: version of the context information
* @size: size of context information in DWs
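As a sketch of how the new control_flags_ext word is expected to be filled: the DSBR-derived bits stored on the transport (see the iwl-trans.h hunk later in this patch) translate directly into the extended flags. Naming the mapped struct iwl_prph_scratch_ctrl_cfg "prph_sc_ctrl" is an assumption here; the real assignment lives in the PCIe context-info setup code:

u32 ext_flags = 0;

if (trans->dsbr_urm_fw_dependent)
	ext_flags |= IWL_PRPH_SCRATCH_EXT_URM_FW;
if (trans->dsbr_urm_permanent)
	ext_flags |= IWL_PRPH_SCRATCH_EXT_URM_PERM;

prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(ext_flags);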
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 1a1321db137c..dfd44fabf237 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2018-2020, 2022, 2024 Intel Corporation
*/
#ifndef __iwl_context_info_file_h__
#define __iwl_context_info_file_h__
-/* maximmum number of DRAM map entries supported by FW */
+/* maximum number of DRAM map entries supported by FW */
#define IWL_MAX_DRAM_ENTRY 64
#define CSR_CTXT_INFO_BA 0x40
@@ -53,11 +53,12 @@ enum iwl_context_info_flags {
IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
};
-/*
+/**
* struct iwl_context_info_version - version structure
* @mac_id: SKU and revision id
* @version: context information version id
* @size: the size of the context information in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_version {
__le16 mac_id;
@@ -66,16 +67,17 @@ struct iwl_context_info_version {
__le16 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_control - version structure
* @control_flags: context information flags see &enum iwl_context_info_flags
+ * @reserved: (reserved)
*/
struct iwl_context_info_control {
__le32 control_flags;
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_dram - images DRAM map
* each entry in the map represents a DRAM chunk of up to 32 KB
* @umac_img: UMAC image DRAM map
@@ -88,7 +90,7 @@ struct iwl_context_info_dram {
__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
} __packed;
-/*
+/**
* struct iwl_context_info_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @used_rbd_addr: default queue used RB CB base address
@@ -100,10 +102,11 @@ struct iwl_context_info_rbd_cfg {
__le64 status_wr_ptr;
} __packed;
-/*
+/**
* struct iwl_context_info_hcmd_cfg - command queue configuration
* @cmd_queue_addr: address of command queue
* @cmd_queue_size: number of entries
+ * @reserved: (reserved)
*/
struct iwl_context_info_hcmd_cfg {
__le64 cmd_queue_addr;
@@ -111,10 +114,11 @@ struct iwl_context_info_hcmd_cfg {
u8 reserved[7];
} __packed;
-/*
+/**
* struct iwl_context_info_dump_cfg - Core Dump configuration
* @core_dump_addr: core dump (debug DRAM address) start address
* @core_dump_size: size, in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_dump_cfg {
__le64 core_dump_addr;
@@ -122,10 +126,11 @@ struct iwl_context_info_dump_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_pnvm_cfg - platform NVM data configuration
* @platform_nvm_addr: Platform NVM data start address
* @platform_nvm_size: size in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_pnvm_cfg {
__le64 platform_nvm_addr;
@@ -133,11 +138,12 @@ struct iwl_context_info_pnvm_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info_early_dbg_cfg - early debug configuration for
* dumping DRAM addresses
* @early_debug_addr: early debug start address
* @early_debug_size: size in DWs
+ * @reserved: (reserved)
*/
struct iwl_context_info_early_dbg_cfg {
__le64 early_debug_addr;
@@ -145,16 +151,20 @@ struct iwl_context_info_early_dbg_cfg {
__le32 reserved;
} __packed;
-/*
+/**
* struct iwl_context_info - device INIT configuration
* @version: version information of context info and HW
* @control: control flags of FH configurations
+ * @reserved0: (reserved)
* @rbd_cfg: default RX queue configuration
* @hcmd_cfg: command queue configuration
+ * @reserved1: (reserved)
* @dump_cfg: core dump data
* @edbg_cfg: early debug configuration
* @pnvm_cfg: platform nvm configuration
+ * @reserved2: (reserved)
* @dram: firmware image addresses in DRAM
+ * @reserved3: (reserved)
*/
struct iwl_context_info {
struct iwl_context_info_version version;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 98563757ce2c..be9e464c9b7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -167,13 +167,15 @@
#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
-#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
-#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
-#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+#define CSR_HW_IF_CONFIG_REG_HAP_WAKE 0x00080000
+/* NOTE: EEPROM_OWN_SEM is no longer defined for new HW */
+#define CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM 0x00200000
+#define CSR_HW_IF_CONFIG_REG_PCI_OWN_SET 0x00400000
+#define CSR_HW_IF_CONFIG_REG_IAMT_UP 0x01000000
+#define CSR_HW_IF_CONFIG_REG_ME_OWN 0x02000000
+#define CSR_HW_IF_CONFIG_REG_WAKE_ME 0x08000000
+#define CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN 0x10000000
+#define CSR_HW_IF_CONFIG_REG_PERSISTENCE 0x40000000
#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
@@ -351,7 +353,6 @@ enum {
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
-#define CSR_HW_RF_ID_TYPE_MS (0x00111000)
#define CSR_HW_RF_ID_TYPE_FM (0x00112000)
#define CSR_HW_RF_ID_TYPE_WP (0x00113000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 1b9f16a31b54..bf52c2edaad1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2021 Intel Corporation
+ * Copyright(c) 2018 - 2021, 2024 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*****************************************************************************/
@@ -209,6 +209,7 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_DEV_POWER(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c620911a1193..352b6e73e08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -22,6 +22,7 @@
#include "iwl-modparams.h"
#include "fw/api/alive.h"
#include "fw/api/mac.h"
+#include "fw/api/mac-cfg.h"
/******************************************************************************
*
@@ -137,8 +138,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
memset(&drv->fw, 0, sizeof(drv->fw));
}
-static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
- struct fw_sec *sec)
+static int iwl_alloc_fw_desc(struct fw_desc *desc, struct fw_sec *sec)
{
void *data;
@@ -318,17 +318,6 @@ struct iwl_firmware_pieces {
size_t n_mem_tlv;
};
-/*
- * These functions are just to extract uCode section data from the pieces
- * structure.
- */
-static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type,
- int sec)
-{
- return &pieces->img[type].sec[sec];
-}
-
static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
enum iwl_ucode_type type,
int sec)
@@ -389,22 +378,18 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
/*
* Gets uCode section from tlv.
*/
-static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
- const void *data, enum iwl_ucode_type type,
- int size)
+static int iwl_store_ucode_sec(struct fw_img_parsing *img,
+ const void *data, int size)
{
- struct fw_img_parsing *img;
struct fw_sec *sec;
const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
- if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
- return -1;
+ if (WARN_ON(!img || !data))
+ return -EINVAL;
sec_parse = (const struct fw_sec_parsing *)data;
- img = &pieces->img[type];
-
alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
if (!sec)
@@ -900,18 +885,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_INIT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_WOWLAN:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_DEF_CALIB:
@@ -932,18 +917,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
FW_PHY_CFG_RX_CHAIN_POS;
break;
case IWL_UCODE_TLV_SECURE_SEC_RT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
- iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN],
+ tlv_data, tlv_len);
drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
@@ -1110,9 +1095,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
- iwl_store_ucode_sec(pieces, tlv_data,
- IWL_UCODE_REGULAR_USNIFFER,
- tlv_len);
+ iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR_USNIFFER],
+ tlv_data, tlv_len);
break;
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
@@ -1197,7 +1181,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
- IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+ IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
fseq_ver->version);
}
break;
@@ -1214,6 +1198,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->num_stations =
le32_to_cpup((const __le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_NUM_LINKS:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ if (le32_to_cpup((const __le32 *)tlv_data) >
+ IWL_FW_MAX_LINK_ID + 1) {
+ IWL_ERR(drv,
+ "%d is an invalid number of links\n",
+ le32_to_cpup((const __le32 *)tlv_data));
+ goto tlv_error;
+ }
+ capa->num_links =
+ le32_to_cpup((const __le32 *)tlv_data);
+ break;
case IWL_UCODE_TLV_FW_NUM_BEACONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
@@ -1337,26 +1334,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
-static int iwl_alloc_ucode(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode_mem(struct fw_img *out, struct fw_img_parsing *img)
{
- int i;
struct fw_desc *sec;
- sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+ sec = kcalloc(img->sec_counter, sizeof(*sec), GFP_KERNEL);
if (!sec)
return -ENOMEM;
- drv->fw.img[type].sec = sec;
- drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
- for (i = 0; i < pieces->img[type].sec_counter; i++)
- if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
+ out->sec = sec;
+ out->num_sec = img->sec_counter;
+
+ for (int i = 0; i < out->num_sec; i++)
+ if (iwl_alloc_fw_desc(&sec[i], &img->sec[i]))
return -ENOMEM;
return 0;
}
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
+{
+ return iwl_alloc_ucode_mem(&drv->fw.img[type], &pieces->img[type]);
+}
+
static int validate_sec_sizes(struct iwl_drv *drv,
struct iwl_firmware_pieces *pieces,
const struct iwl_cfg *cfg)
@@ -1429,18 +1431,21 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);
- if (op_mode)
+ if (!IS_ERR(op_mode))
return op_mode;
if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
break;
- IWL_ERR(drv, "retry init count %d\n", retry);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
#endif
+
+ if (PTR_ERR(op_mode) != -ETIMEDOUT)
+ break;
+
+ IWL_ERR(drv, "retry init count %d\n", retry);
}
return NULL;
@@ -1944,6 +1949,7 @@ module_init(iwl_drv_init);
static void __exit iwl_drv_exit(void)
{
iwl_pci_unregister_driver();
+ iwl_trans_free_restart_list();
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(iwl_dbgfs_root);
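With the _iwl_op_mode_start() change above, ops->start() now reports failure via ERR_PTR() instead of NULL, and only -ETIMEDOUT triggers another init retry. A hedged sketch of the resulting contract (all my_* names are hypothetical):

static struct iwl_op_mode *
my_op_mode_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		 const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct iwl_op_mode *op_mode = my_op_mode_alloc(trans, cfg, fw);

	if (!op_mode)
		return ERR_PTR(-ENOMEM);	/* fatal: no retry */
	if (my_fw_alive_timed_out(op_mode))	/* teardown elided */
		return ERR_PTR(-ETIMEDOUT);	/* driver retries init */
	return op_mode;
}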
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 060becfd64f3..0653ca8b974a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -526,5 +526,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
if (interrupts_enabled)
iwl_trans_interrupts(trans, true);
- iwl_trans_fw_error(trans, false);
+ iwl_trans_fw_error(trans, IWL_ERR_TYPE_NMI_FORCED);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d902121da009..9f7e013252fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -141,8 +141,10 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
/**
* enum iwl_nvm_channel_flags - channel flags in NVM
* @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @NVM_CHANNEL_IBSS: usable as an IBSS channel
- * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel; deprecated
+ * when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed; also allows IBSS
+ * when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
* @NVM_CHANNEL_RADAR: radar detection required
* @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
* @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 8ef5ed2db051..34eca1a568ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -45,6 +45,55 @@ struct iwl_cfg;
*/
/**
+ * enum iwl_fw_error_type - FW error types/sources
+ * @IWL_ERR_TYPE_IRQ: "normal" FW error through an IRQ
+ * @IWL_ERR_TYPE_NMI_FORCED: NMI was forced by driver
+ * @IWL_ERR_TYPE_RESET_HS_TIMEOUT: reset handshake timed out,
+ * any debug collection must happen synchronously as
+ * the device will be shut down
+ * @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full
+ */
+enum iwl_fw_error_type {
+ IWL_ERR_TYPE_IRQ,
+ IWL_ERR_TYPE_NMI_FORCED,
+ IWL_ERR_TYPE_RESET_HS_TIMEOUT,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL,
+};
+
+/**
+ * enum iwl_fw_error_context - error dump context
+ * @IWL_ERR_CONTEXT_WORKER: regular from worker context,
+ * opmode must acquire locks and must also check
+ * for @IWL_ERR_CONTEXT_ABORT after acquiring locks
+ * @IWL_ERR_CONTEXT_FROM_OPMODE: context is in a call
+ * originating from the opmode, e.g. while resetting
+ * or stopping the device, so opmode must not acquire
+ * any locks
+ * @IWL_ERR_CONTEXT_ABORT: after lock acquisition, indicates
+ * that the dump already happened via another callback
+ * (currently only while stopping the device) via the
+ * @IWL_ERR_CONTEXT_FROM_OPMODE context, and this call
+ * must be aborted
+ */
+enum iwl_fw_error_context {
+ IWL_ERR_CONTEXT_WORKER,
+ IWL_ERR_CONTEXT_FROM_OPMODE,
+ IWL_ERR_CONTEXT_ABORT,
+};
+
+/**
+ * struct iwl_fw_error_dump_mode - error dump mode for callback
+ * @type: The reason for the dump, per &enum iwl_fw_error_type.
+ * @context: The context for the dump, may also indicate this
+ * call needs to be skipped. This MUST be checked before
+ * and after acquiring any locks in the op-mode!
+ */
+struct iwl_fw_error_dump_mode {
+ enum iwl_fw_error_type type;
+ enum iwl_fw_error_context context;
+};
+
+/**
* struct iwl_op_mode_ops - op_mode specific operations
*
* The op_mode exports its ops so that external components can start it and
@@ -77,10 +126,11 @@ struct iwl_cfg;
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
* Must be atomic
- * @nic_error: error notification. Must be atomic and must be called with BH
- * disabled, unless the sync parameter is true.
- * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
- * called with BH disabled.
+ * @nic_error: error notification. Must be atomic, the op mode should handle
+ * the error (e.g. abort notification waiters) and print the error if
+ * applicable
+ * @dump_error: NIC error dump collection (can sleep, synchronous)
+ * @sw_reset: (maybe) initiate a software reset, return %true if started
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
@@ -104,8 +154,12 @@ struct iwl_op_mode_ops {
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
- void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
- void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
+ void (*nic_error)(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type);
+ void (*dump_error)(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode);
+ bool (*sw_reset)(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
void (*time_point)(struct iwl_op_mode *op_mode,
@@ -177,14 +231,19 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
op_mode->ops->free_skb(op_mode, skb);
}
-static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
+static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
- op_mode->ops->nic_error(op_mode, sync);
+ op_mode->ops->nic_error(op_mode, type);
}
-static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
+static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
{
- op_mode->ops->cmd_queue_full(op_mode);
+ might_sleep();
+
+ if (op_mode->ops->dump_error)
+ op_mode->ops->dump_error(op_mode, mode);
}
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
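The context handshake documented in enum iwl_fw_error_context implies a specific locking pattern inside an op mode's dump_error() implementation. A hedged sketch (my_* names are hypothetical; for worker-context calls, mode points into trans->restart.mode, which is why re-checking for ABORT after taking the lock is meaningful):

static void my_dump_error(struct iwl_op_mode *op_mode,
			  struct iwl_fw_error_dump_mode *mode)
{
	struct my_priv *priv = (void *)op_mode->op_mode_specific;

	if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
		/* called from our own stop/reset path: lock already held */
		my_collect_fw_dump(priv, mode->type);
		return;
	}

	mutex_lock(&priv->mutex);
	/* stop_device may have dumped already and flipped us to ABORT */
	if (mode->context != IWL_ERR_CONTEXT_ABORT)
		my_collect_fw_dump(priv, mode->type);
	mutex_unlock(&priv->mutex);
}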
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dc171c29eb7b..23b2009fbb28 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -381,6 +381,10 @@ enum {
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_SCU_REG_FOR_ECO_1 0xA26EF8
+#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN BIT(4)
+#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT BIT(5)
+
#define CNVI_PMU_STEP_FLOW 0xA2D588
#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
@@ -458,6 +462,7 @@ enum {
#define REG_CRF_ID_TYPE_GF 0x410
#define REG_CRF_ID_TYPE_FM 0x910
#define REG_CRF_ID_TYPE_WHP 0xA10
+#define REG_CRF_ID_TYPE_PE 0xA30
#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
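Reading the new ECO register presumably follows the usual PRPH access pattern; an illustrative sketch feeding the me_present tri-state added to struct iwl_trans later in this patch (when and where this read actually runs is an assumption):

u32 eco = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);

if (eco & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN)
	trans->me_present =
		!!(eco & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
else
	trans->me_present = -1;	/* still unknown, recheck later */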
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 3c9d91496c82..47854a36413e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
+#include <linux/list.h>
#include "fw/api/tx.h"
#include "iwl-trans.h"
@@ -16,13 +17,200 @@
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"
+struct iwl_trans_dev_restart_data {
+ struct list_head list;
+ unsigned int restart_count;
+ time64_t last_error;
+ char name[];
+};
+
+static LIST_HEAD(restart_data_list);
+static DEFINE_SPINLOCK(restart_data_lock);
+
+static struct iwl_trans_dev_restart_data *
+iwl_trans_get_restart_data(struct device *dev)
+{
+ struct iwl_trans_dev_restart_data *tmp, *data = NULL;
+ const char *name = dev_name(dev);
+
+ spin_lock(&restart_data_lock);
+ list_for_each_entry(tmp, &restart_data_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&restart_data_lock);
+
+ if (data)
+ return data;
+
+ data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strcpy(data->name, name);
+ spin_lock(&restart_data_lock);
+ list_add_tail(&data->list, &restart_data_list);
+ spin_unlock(&restart_data_lock);
+
+ return data;
+}
+
+static void iwl_trans_inc_restart_count(struct device *dev)
+{
+ struct iwl_trans_dev_restart_data *data;
+
+ data = iwl_trans_get_restart_data(dev);
+ if (data) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->restart_count++;
+ }
+}
+
+void iwl_trans_free_restart_list(void)
+{
+ struct iwl_trans_dev_restart_data *tmp;
+
+ while ((tmp = list_first_entry_or_null(&restart_data_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+struct iwl_trans_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void iwl_trans_reprobe_wk(struct work_struct *wk)
+{
+ struct iwl_trans_reprobe *reprobe;
+
+ reprobe = container_of(wk, typeof(*reprobe), work);
+
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
+#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */
+
+static enum iwl_reset_mode
+iwl_trans_determine_restart_mode(struct iwl_trans *trans)
+{
+ struct iwl_trans_dev_restart_data *data;
+ enum iwl_reset_mode at_least = 0;
+ unsigned int index;
+ static const enum iwl_reset_mode escalation_list[] = {
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_FUNC_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ /* FIXME: add TOP reset */
+ IWL_RESET_MODE_PROD_RESET,
+ };
+
+ if (trans->restart.during_reset)
+ at_least = IWL_RESET_MODE_REPROBE;
+
+ data = iwl_trans_get_restart_data(trans->dev);
+ if (!data)
+ return at_least;
+
+ if (ktime_get_boottime_seconds() - data->last_error >=
+ IWL_TRANS_RESET_OK_TIME)
+ data->restart_count = 0;
+
+ index = data->restart_count;
+ if (index >= ARRAY_SIZE(escalation_list))
+ index = ARRAY_SIZE(escalation_list) - 1;
+
+ return max(at_least, escalation_list[index]);
+}
+
+#define IWL_TRANS_RESET_DELAY (HZ * 60)
+
+static void iwl_trans_restart_wk(struct work_struct *wk)
+{
+ struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
+ struct iwl_trans_reprobe *reprobe;
+ enum iwl_reset_mode mode;
+
+ if (!trans->op_mode)
+ return;
+
+ /* might have been scheduled before marked as dead, re-check */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);
+
+ /*
+ * If the opmode stopped the device while we were trying to dump and
+ * reset, then we'll have done the dump already (synchronized by the
+ * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
+ * managed that via trans->restart.mode.
+ * Additionally, make sure that in such a case we won't attempt to do
+ * any resets now, since it's no longer requested.
+ */
+ if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
+ return;
+
+ if (!iwlwifi_mod_params.fw_restart)
+ return;
+
+ mode = iwl_trans_determine_restart_mode(trans);
+
+ iwl_trans_inc_restart_count(trans->dev);
+
+ switch (mode) {
+ case IWL_RESET_MODE_SW_RESET:
+ IWL_ERR(trans, "Device error - SW reset\n");
+ iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
+ break;
+ case IWL_RESET_MODE_REPROBE:
+ IWL_ERR(trans, "Device error - reprobe!\n");
+
+ /*
+ * get a module reference to avoid doing this while unloading
+ * anyway and to avoid scheduling a work with code that's
+ * being removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = get_device(trans->dev);
+ INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
+ schedule_work(&reprobe->work);
+ break;
+ default:
+ iwl_trans_pcie_reset(trans, mode);
+ break;
+ }
+}
+
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
- static struct lock_class_key __key;
+ static struct lock_class_key __sync_cmd_key;
#endif
trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
@@ -33,12 +221,14 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
- &__key, 0);
+ &__sync_cmd_key, 0);
#endif
trans->dev = dev;
trans->num_rx_queues = 1;
+ INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);
+
return trans;
}
@@ -81,6 +271,7 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
+ cancel_work_sync(&trans->restart.wk);
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -212,6 +403,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
+ cancel_work_sync(&trans->restart.wk);
+
trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
@@ -391,6 +584,34 @@ void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
+ /*
+ * See also the comment in iwl_trans_restart_wk().
+ *
+ * When the opmode stops the device while a reset is pending, the
+ * worker (iwl_trans_restart_wk) might not have run yet or, more
+ * likely, will be blocked on the opmode lock. Due to the locking,
+ * we can't just flush the worker.
+ *
+ * If this is the case, then the test_and_clear_bit() ensures that
+ * the worker won't attempt to do anything after the stop.
+ *
+ * The trans->restart.mode is a handshake with the opmode, we set
+ * the context there to ABORT so that when the worker can finally
+ * acquire the lock in the opmode, the code there won't attempt to
+ * do any dumps. Since we'd really like to have the dump though,
+ * also do it inline here (with the opmode locks already held),
+ * but use a separate mode struct to avoid races.
+ */
+ if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
+ struct iwl_fw_error_dump_mode mode;
+
+ mode = trans->restart.mode;
+ mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
+ trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;
+
+ iwl_op_mode_dump_error(trans->op_mode, &mode);
+ }
+
if (trans->trans_cfg->gen2)
iwl_trans_pcie_gen2_stop_device(trans);
else
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index c70da7281551..f6234065dbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -300,6 +300,10 @@ enum iwl_d3_status {
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
+ * @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
+ * via iwl_trans_finish_sw_reset()
+ * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
+ * the firmware state yet
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -311,6 +315,8 @@ enum iwl_trans_status {
STATUS_FW_ERROR,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ STATUS_IN_SW_RESET,
+ STATUS_RESET_PENDING,
};
static inline int
@@ -322,7 +328,6 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
- return get_order(8 * 1024);
case IWL_AMSDU_12K:
return get_order(16 * 1024);
default:
@@ -628,8 +633,6 @@ struct iwl_pc_data {
* @n_dest_reg: num of reg_ops in %dbg_dest_tlv
* @rec_on: true iff there is a fw debug recording currently active
* @dest_tlv: points to the destination TLV for debug
- * @conf_tlv: array of pointers to configuration TLVs for debug
- * @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
* @tcm_error_event_table: address(es) of TCM error table(s)
@@ -664,8 +667,6 @@ struct iwl_trans_debug {
bool rec_on;
const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
- const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
@@ -877,7 +878,16 @@ struct iwl_txq {
* @reduced_cap_sku: reduced capability supported SKU
* @no_160: device not supporting 160 MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @restart: restart worker data
+ * @restart.wk: restart worker
+ * @restart.mode: reset/restart error mode information
+ * @restart.during_reset: error occurred during previous software reset
+ * @me_recheck_wk: worker to recheck WiAMT/CSME presence
+ * @me_present: WiAMT/CSME is detected as present (1), not present (0)
+ * or unknown (-1, so can still use it as a boolean safely)
* @trans_specific: data for the specific transport this is allocated for/with
+ * @dsbr_urm_fw_dependent: switch to URM based on fw settings
+ * @dsbr_urm_permanent: switch to URM permanently
*/
struct iwl_trans {
bool csme_own;
@@ -902,6 +912,9 @@ struct iwl_trans {
bool reduced_cap_sku;
u8 no_160:1, step_urm:1;
+ u8 dsbr_urm_fw_dependent:1,
+ dsbr_urm_permanent:1;
+
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
@@ -944,6 +957,15 @@ struct iwl_trans {
struct iwl_dma_ptr invalid_tx_cmd;
+ struct {
+ struct work_struct wk;
+ struct iwl_fw_error_dump_mode mode;
+ bool during_reset;
+ } restart;
+
+ struct delayed_work me_recheck_wk;
+ s8 me_present;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[] __aligned(sizeof(void *));
@@ -1120,7 +1142,28 @@ bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);
-static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
+static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
+{
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ trans->restart.mode.type = type;
+ trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;
+
+ set_bit(STATUS_RESET_PENDING, &trans->status);
+
+ /*
+ * keep track of whether or not this happened while resetting,
+ * by the time the worker runs it might have finished
+ */
+ trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
+ &trans->status);
+ queue_work(system_unbound_wq, &trans->restart.wk);
+}
+
+static inline void iwl_trans_fw_error(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
{
if (WARN_ON_ONCE(!trans->op_mode))
return;
@@ -1128,10 +1171,24 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
trans->state = IWL_TRANS_NO_FW;
- iwl_op_mode_nic_error(trans->op_mode, sync);
+ iwl_op_mode_nic_error(trans->op_mode, type);
+ iwl_trans_schedule_reset(trans, type);
}
}
+static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
+ enum iwl_fw_error_type type)
+{
+ if (WARN_ON_ONCE(!trans->op_mode))
+ return;
+
+ set_bit(STATUS_IN_SW_RESET, &trans->status);
+
+ if (!trans->op_mode->ops->sw_reset ||
+ !trans->op_mode->ops->sw_reset(trans->op_mode, type))
+ clear_bit(STATUS_IN_SW_RESET, &trans->status);
+}
+
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
return trans->state == IWL_TRANS_FW_ALIVE;
@@ -1164,6 +1221,11 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
+static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
+{
+ clear_bit(STATUS_IN_SW_RESET, &trans->status);
+}
+
/*****************************************************
* transport helper functions
*****************************************************/
@@ -1178,12 +1240,27 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}
+void iwl_trans_free_restart_list(void);
+
/*****************************************************
* PCIe handling
*****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
+
+/* Note: order matters */
+enum iwl_reset_mode {
+ /* upper level modes: */
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ /* PCIE level modes: */
+ IWL_RESET_MODE_REMOVE_ONLY,
+ IWL_RESET_MODE_RESCAN,
+ IWL_RESET_MODE_FUNC_RESET,
+ IWL_RESET_MODE_PROD_RESET,
+};
+
+void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
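The "order matters" note on enum iwl_reset_mode is what lets iwl_trans_determine_restart_mode() combine its inputs with max(): the per-device restart_count indexes escalation_list[] (clamped to its last entry), and a reset already in flight can only escalate the outcome, never soften it. A worked example using that function's locals:

/* first error (restart_count == 0), but it hit during a SW reset:
 * at_least = IWL_RESET_MODE_REPROBE, escalation_list[0] = SW_RESET,
 * so max() escalates the result to IWL_RESET_MODE_REPROBE */
enum iwl_reset_mode mode = max(at_least, escalation_list[index]);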
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
new file mode 100644
index 000000000000..b14ec98e28b6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/gso.h>
+#include <linux/ieee80211.h>
+#include <net/ip.h>
+
+#include "iwl-drv.h"
+#include "iwl-utils.h"
+
+#ifdef CONFIG_INET
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs)
+{
+ struct sk_buff *tmp, *next;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ char cb[sizeof(skb->cb)];
+ u16 i = 0;
+ unsigned int tcp_payload_len;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ bool qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+ memcpy(cb, skb->cb, sizeof(cb));
+
+ next = skb_gso_segment(skb, netdev_flags);
+ skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
+ consume_skb(skb);
+
+ skb_list_walk_safe(next, tmp, next) {
+ memcpy(tmp->cb, cb, sizeof(tmp->cb));
+ /*
+ * Compute the length of all the data added for the A-MSDU.
+ * This will be used to compute the length to write in the TX
+ * command. We have: SNAP + IP + TCP for n - 1 subframes and
+ * ETH header for n subframes.
+ */
+ tcp_payload_len = skb_tail_pointer(tmp) -
+ skb_transport_header(tmp) -
+ tcp_hdrlen(tmp) + tmp->data_len;
+
+ if (ipv4)
+ ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+ if (tcp_payload_len > mss) {
+ skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
+ } else {
+ if (qos) {
+ u8 *qc;
+
+ if (ipv4)
+ ip_send_check(ip_hdr(tmp));
+
+ qc = ieee80211_get_qos_ctl((void *)tmp->data);
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ skb_shinfo(tmp)->gso_size = 0;
+ }
+
+ skb_mark_not_on_list(tmp);
+ __skb_queue_tail(mpdus_skbs, tmp);
+ i++;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_tx_tso_segment);
+#endif /* CONFIG_INET */
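A hedged usage sketch of the helper (mvm, num_subframes and my_tx_one_amsdu() are illustrative): the caller chooses num_subframes so that num_subframes * gso_size fits the negotiated A-MSDU size, then hands each resulting A-MSDU skb to the transport:

struct sk_buff_head mpdus_skbs;
struct sk_buff *mpdu;
int ret;

__skb_queue_head_init(&mpdus_skbs);

ret = iwl_tx_tso_segment(skb, num_subframes, netdev_flags, &mpdus_skbs);
if (ret)
	return ret;	/* the original skb was not consumed */

while ((mpdu = __skb_dequeue(&mpdus_skbs)))
	my_tx_one_amsdu(mvm, mpdu);	/* illustrative transmit hook */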
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.h b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
new file mode 100644
index 000000000000..8f1f11d06fbe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_utils_h__
+#define __iwl_utils_h__
+
+#include <net/cfg80211.h>
+
+#ifdef CONFIG_INET
+/**
+ * iwl_tx_tso_segment - Segments a TSO packet into subframes for A-MSDU.
+ * @skb: buffer to segment.
+ * @num_subframes: number of subframes to create.
+ * @netdev_flags: netdev feature flags.
+ * @mpdus_skbs: list to hold the segmented subframes.
+ *
+ * This function segments a large TCP packet into subframes.
+ * The subframes are added to the @mpdus_skbs list.
+ *
+ * Returns: 0 on success and a negative value on failure.
+ */
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs);
+#else
+static inline
+int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skbs)
+{
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
+static inline
+u32 iwl_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+{
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+ const u8 *ie;
+
+ if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
+ return 0;
+
+ frame_size -= mgmt->u.beacon.variable - beacon;
+
+ ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+ if (!ie)
+ return 0;
+
+ return ie - beacon;
+}
+
+#endif /* __iwl_utils_h__ */
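Illustrative use of iwl_find_ie_offset(): locating the TIM element in a beacon template so its offset can be handed to the firmware. beacon_skb and mvm are hypothetical here; a return of 0 doubles as "not found", since no element can start at offset 0 of a management frame:

u32 tim_offset = iwl_find_ie_offset(beacon_skb->data, WLAN_EID_TIM,
				    beacon_skb->len);

if (!tim_offset)
	IWL_DEBUG_TX(mvm, "no TIM element in beacon template\n");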
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 458b97930059..58e9a940024d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2012-2014, 2020 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022, 2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "fw-api.h"
@@ -158,9 +158,8 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt,
false);
- if (!ret)
- if (iwl_mvm_sf_update(mvm, vif, true))
- IWL_ERR(mvm, "Failed to update SF state\n");
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 36726ea4b822..21641d41a958 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -530,18 +530,15 @@ static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = _data;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
lockdep_assert_held(&mvm->mutex);
if (vif->type != NL80211_IFTYPE_STATION)
return;
- for (int link_id = 0;
- link_id < IEEE80211_MLD_MAX_NUM_LINKS;
- link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference_check(vif->link_conf[link_id],
- lockdep_is_held(&mvm->mutex));
+ for_each_vif_active_link(vif, link_conf, link_id) {
struct ieee80211_chanctx_conf *chanctx_conf =
rcu_dereference_check(link_conf->chanctx_conf,
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 7d973546c9fb..82ca7f8b1bb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1395,13 +1395,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret < 0) {
iwl_mvm_free_nd(mvm);
- if (!unified_image) {
- if (mvm->fw_restart > 0) {
- mvm->fw_restart--;
- ieee80211_restart_hw(mvm->hw);
- }
- }
-
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
}
out_noreset:
@@ -2498,12 +2491,6 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
u32 expected_len = sizeof(*data) +
data->num_mlo_link_keys * sizeof(status->mlo_keys[0]);
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
@@ -2555,12 +2542,6 @@ iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm,
u32 i;
u32 expected_len = sizeof(*data);
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (has_mlo_keys)
expected_len += (data->num_mlo_link_keys *
sizeof(status->mlo_keys[0]));
@@ -2609,12 +2590,6 @@ iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm,
{
u32 i;
- if (!data) {
- IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
- status = NULL;
- return;
- }
-
if (len < sizeof(*data)) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
@@ -3117,8 +3092,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
u32 err_id;
@@ -3126,29 +3107,35 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ IWL_WARN(mvm, "Rfkill was toggled during suspend\n");
+ if (vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+
+ return FW_NEEDS_RESET;
}
- return true;
+ return FW_ERROR;
}
/* check if we have lmac2 set and check for error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[1],
NULL))
- return true;
+ return FW_ERROR;
/* check for umac error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.umac_error_event_table,
NULL))
- return true;
+ return FW_ERROR;
- return false;
+ return FW_ALIVE;
}
/*
@@ -3517,6 +3504,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ enum rt_status rt_status;
bool keep = false;
mutex_lock(&mvm->mutex);
@@ -3540,13 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, vif)) {
+ rt_status = iwl_mvm_check_rt_status(mvm, vif);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
ret = 1;
goto err;
}
@@ -3703,6 +3697,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
.notif_expected =
IWL_D3_NOTIF_D3_END_NOTIF,
};
+ enum rt_status rt_status;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3712,15 +3707,20 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
mvm->last_reset_or_resume_time_jiffies = jiffies;
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, NULL)) {
- IWL_ERR(mvm,
- "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, NULL);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm,
+ "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
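A minimal sketch of how the new tri-state result is consumed, using the enum from the hunks above; the caller body is illustrative, not the driver's exact code:

    enum rt_status {
            FW_ALIVE,
            FW_NEEDS_RESET,
            FW_ERROR,
    };

    /* Only a genuine firmware error runs the dump machinery; an rfkill
     * toggle (FW_NEEDS_RESET) still fails the resume, but quietly. */
    static int handle_rt_status(enum rt_status rt_status)
    {
            if (rt_status == FW_ALIVE)
                    return 0;
            if (rt_status == FW_ERROR) {
                    /* dump NIC error log, collect FW-assert debug data */
            }
            return 1;       /* caller restarts or tears down the device */
    }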
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 91ca830a7b60..55d035b896e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -16,6 +16,7 @@
#include "debugfs.h"
#include "iwl-modparams.h"
#include "iwl-drv.h"
+#include "iwl-utils.h"
#include "fw/error-dump.h"
#include "fw/api/phy-ctxt.h"
@@ -462,7 +463,6 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta,
if (amsdu_len) {
mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len;
link_sta->agg.max_amsdu_len = amsdu_len;
- link_sta->agg.max_amsdu_len = amsdu_len;
for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++)
link_sta->agg.max_tid_amsdu_len[i] = amsdu_len;
} else {
@@ -537,43 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- char *buff, *pos, *endpos;
- static const size_t bufsz = 1024;
- int ret;
-
- buff = kmalloc(bufsz, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- pos = buff;
- endpos = pos + bufsz;
-
- pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
- mvm->fwrt.fw->fw_version);
- pos += scnprintf(pos, endpos - pos, "FW: %s\n",
- mvm->fwrt.fw->human_readable);
- pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- mvm->fwrt.trans->name);
- pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
- mvm->fwrt.dev->bus->name);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
- kfree(buff);
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- struct iwl_mvm_tas_status_resp tas_rsp;
- struct iwl_mvm_tas_status_resp *rsp = &tas_rsp;
+ struct iwl_mvm_tas_status_resp *rsp = NULL;
static const size_t bufsz = 1024;
char *buff, *pos, *endpos;
const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
@@ -609,6 +578,10 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
if (!iwl_mvm_firmware_running(mvm))
return -ENODEV;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DEBUG_GROUP, GET_TAS_STATUS,
+ 0) != 3)
+ return -EOPNOTSUPP;
+
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
mutex_unlock(&mvm->mutex);
@@ -659,6 +632,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
rsp->tas_fw_version);
pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n",
rsp->is_uhb_for_usa_enable ? "True" : "False");
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT))
+ pos += scnprintf(pos, endpos - pos,
+ "Is UHB enabled for CANADA?: %s\n",
+ rsp->uhb_allowed_flags &
+ TAS_UHB_ALLOWED_CANADA ? "True" : "False");
+
pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n",
le16_to_cpu(rsp->curr_mcc));
@@ -1159,10 +1140,6 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- /* allow one more restart that we're provoking here */
- if (mvm->fw_restart >= 0)
- mvm->fw_restart++;
-
if (count == 6 && !strcmp(buf, "nolog\n")) {
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status);
@@ -1409,9 +1386,9 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
if (iwl_fw_lookup_cmd_ver(mvm->fw,
BEACON_TEMPLATE_CMD, 0) >= 14) {
- u32 offset = iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_S1G_TWT,
- beacon->len);
+ u32 offset = iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
beacon_cmd.btwt_offset = cpu_to_le32(offset);
}
@@ -1495,22 +1472,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
return ret ?: count;
}
-static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- if (count == 0)
- return 0;
-
- iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
- NULL);
-
- iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
- (count - 1), NULL);
-
- return count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@@ -1518,6 +1479,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
+ /*
+ * If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (!iwl_mvm_firmware_running(mvm))
+ return count;
+
mutex_lock(&mvm->mutex);
iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
@@ -1964,14 +1932,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats);
-MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
@@ -2164,7 +2130,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
- MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
@@ -2173,7 +2138,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
- MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
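The reshaped debugfs handlers share a guard-clause ordering: bail out before allocating or locking anything. tas_get_status now also rejects any notification version other than 3, since that is the only response layout the parser understands. A condensed sketch; the two helpers are hypothetical stand-ins for the driver's checks:

    #include <errno.h>

    /* Hypothetical stand-ins for the driver's state checks. */
    static int firmware_running(void *mvm) { return mvm != 0; }
    static int notif_version(void *mvm) { (void)mvm; return 3; }

    static long example_status_read(void *mvm)
    {
            if (!firmware_running(mvm))
                    return -ENODEV;         /* nothing to query */
            if (notif_version(mvm) != 3)
                    return -EOPNOTSUPP;     /* unknown response layout */
            /* ... send GET_TAS_STATUS, format the response ... */
            return 0;
    }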
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 5ea684802ad1..d10877856049 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
&mvm->fw->ucode_capa);
if (ret) {
@@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return ret;
}
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
-
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
@@ -642,7 +642,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
/* if we needed reset then fail here, but notify and remove */
if (mvm->fw_product_reset) {
iwl_mei_alive_notif(false);
- iwl_trans_pcie_remove(mvm->trans, true);
+ iwl_trans_pcie_reset(mvm->trans,
+ IWL_RESET_MODE_RESCAN);
}
goto error;
@@ -1093,36 +1094,40 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
-static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
+static bool
+iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
{
- int i;
- u32 size = le32_to_cpu(*le_size);
-
/* Verify that there is room for another country */
- if (size >= IWL_WTAS_BLACK_LIST_MAX)
+ if (*size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
- for (i = 0; i < size; i++) {
- if (list[i] == cpu_to_le32(mcc))
+ for (u8 i = 0; i < *size; i++) {
+ if (list[i] == mcc)
return true;
}
- list[size++] = cpu_to_le32(mcc);
- *le_size = cpu_to_le32(size);
+ list[(*size)++] = mcc;
return true;
}
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
- int ret;
+ int fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ struct iwl_tas_selection_data selection_data = {};
+ struct iwl_tas_config_cmd_v2_v4 cmd_v2_v4 = {};
+ struct iwl_tas_config_cmd cmd_v5 = {};
struct iwl_tas_data data = {};
- struct iwl_tas_config_cmd cmd = {};
- int cmd_size, fw_ver;
+ void *cmd_data = &cmd_v2_v4;
+ int cmd_size;
+ int ret;
BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
IWL_WTAS_BLACK_LIST_MAX);
- BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v2_v4.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v5.block_list_array) !=
IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1138,7 +1143,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- if (ret == 0)
+ if (ret == 0 && fw_ver < 5)
return;
if (!iwl_is_tas_approved()) {
@@ -1161,27 +1166,49 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
-
- /* Set v3 or v4 specific parts. will be trunctated for fw_ver < 3 */
- if (fw_ver == 4) {
- cmd.v4.override_tas_iec = data.override_tas_iec;
- cmd.v4.enable_tas_iec = data.enable_tas_iec;
- cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ if (fw_ver < 5) {
+ selection_data = iwl_parse_tas_selection(data.tas_selection,
+ data.table_revision);
+ cmd_v2_v4.common.block_list_size =
+ cpu_to_le32(data.block_list_size);
+ for (u8 i = 0; i < data.block_list_size; i++)
+ cmd_v2_v4.common.block_list_array[i] =
+ cpu_to_le32(data.block_list_array[i]);
+ }
+
+ if (fw_ver == 5) {
+ cmd_size = sizeof(cmd_v5);
+ cmd_data = &cmd_v5;
+ cmd_v5.block_list_size = cpu_to_le16(data.block_list_size);
+ for (u16 i = 0; i < data.block_list_size; i++)
+ cmd_v5.block_list_array[i] =
+ cpu_to_le16(data.block_list_array[i]);
+ cmd_v5.tas_config_info.table_source = data.table_source;
+ cmd_v5.tas_config_info.table_revision = data.table_revision;
+ cmd_v5.tas_config_info.value = cpu_to_le32(data.tas_selection);
+ } else if (fw_ver == 4) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v4);
+ cmd_v2_v4.v4.override_tas_iec = selection_data.override_tas_iec;
+ cmd_v2_v4.v4.enable_tas_iec = selection_data.enable_tas_iec;
+ cmd_v2_v4.v4.usa_tas_uhb_allowed =
+ selection_data.usa_tas_uhb_allowed;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT) &&
+ selection_data.canada_tas_uhb_allowed)
+ cmd_v2_v4.v4.uhb_allowed_flags = TAS_UHB_ALLOWED_CANADA;
+ } else if (fw_ver == 3) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v3);
+ cmd_v2_v4.v3.override_tas_iec =
+ cpu_to_le16(selection_data.override_tas_iec);
+ cmd_v2_v4.v3.enable_tas_iec =
+ cpu_to_le16(selection_data.enable_tas_iec);
+ } else if (fw_ver == 2) {
+ cmd_size = sizeof(cmd_v2_v4.common);
} else {
- cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
- cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ return;
}
- cmd_size = sizeof(struct iwl_tas_config_cmd_common);
- if (fw_ver >= 3)
- /* v4 is the same size as v3 */
- cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, cmd_data);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
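Two details of the reworked TAS setup are easy to miss. The helper now takes the list size by pointer, where the parenthesized `(*size)++` matters: `*size++` parses as `*(size++)` and would advance a local pointer while leaving the caller's count at zero. A standalone illustration of the difference:

    #include <stdio.h>

    /* Appends mcc and bumps the caller's count: (*size)++ increments the
     * pointee; *size++ would post-increment the pointer instead. */
    static void append(unsigned short *list, unsigned char *size,
                       unsigned short mcc)
    {
            list[(*size)++] = mcc;
    }

    int main(void)
    {
            unsigned short list[4];
            unsigned char n = 0;

            append(list, &n, 0x1234);
            append(list, &n, 0x5678);
            printf("%u entries, first 0x%x\n", (unsigned)n,
                   (unsigned)list[0]);      /* prints: 2 entries, 0x1234 */
            return 0;
    }

The second detail is the dispatch: v5 builds its own little-endian u16 block list, v2-v4 share a common prefix plus a per-version tail, and an unknown version returns without sending anything.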
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 272da41567ef..851869c0bd50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -412,9 +412,8 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE);
- if (!ret)
- if (iwl_mvm_sf_update(mvm, vif, true))
- IWL_ERR(mvm, "Failed to update SF state\n");
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
return ret;
}
@@ -762,9 +761,8 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
return false;
- if (a->chandef->width != b->chandef->width ||
- !(a->chandef->chan->band == NL80211_BAND_6GHZ &&
- b->chandef->chan->band == NL80211_BAND_5GHZ))
+ if (a->chandef->chan->band == b->chandef->chan->band ||
+ a->chandef->width != b->chandef->width)
ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
if (ret) {
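The reworked pair check sets IWL_MVM_ESR_EXIT_BANDWIDTH whenever the two links share a band or differ in channel width; the old test only accepted the specific 6 GHz + 5 GHz combination. The acceptance condition, reduced to plain values for illustration:

    /* A pair passes the bandwidth criterion only when the links sit on
     * different bands and use equal channel width. */
    static int esr_bandwidth_ok(int band_a, int width_a,
                                int band_b, int width_b)
    {
            return band_a != band_b && width_a == width_b;
    }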
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 51ee62ae70fb..6b06732441c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -12,6 +12,7 @@
#include "fw-api.h"
#include "mvm.h"
#include "time-event.h"
+#include "iwl-utils.h"
const u8 iwl_mvm_ac_to_tx_fifo[] = {
IWL_MVM_TX_FIFO_VO,
@@ -868,23 +869,6 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
-{
- struct ieee80211_mgmt *mgmt = (void *)beacon;
- const u8 *ie;
-
- if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
- return 0;
-
- frame_size -= mgmt->u.beacon.variable - beacon;
-
- ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
- if (!ie)
- return 0;
-
- return ie - beacon;
-}
-
u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
@@ -1078,22 +1062,23 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_CHANNEL_SWITCH,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
beacon_cmd.ecsa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_EXT_CHANSWITCH_ANN,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx)
{
- if (IWL_MVM_DISABLE_AP_FILS)
+ if (vif->type != NL80211_IFTYPE_AP || IWL_MVM_DISABLE_AP_FILS)
return false;
if (cfg80211_channel_is_psc(ctx->def.chan))
@@ -1122,7 +1107,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
ctx = rcu_dereference(link_conf->chanctx_conf);
channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
WARN_ON(channel == 0);
- if (iwl_mvm_enable_fils(mvm, ctx)) {
+ if (iwl_mvm_enable_fils(mvm, vif, ctx)) {
flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
@@ -1151,20 +1136,20 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
beacon->data, beacon->len);
beacon_cmd.csa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_CHANNEL_SWITCH,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
beacon_cmd.ecsa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_EXT_CHANSWITCH_ANN,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
if (vif->type == NL80211_IFTYPE_AP &&
iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
beacon_cmd.btwt_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_S1G_TWT,
- beacon->len));
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
@@ -1767,7 +1752,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
data = sb_v2->data;
} else {
- struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data;
+ struct iwl_stored_beacon_notif *sb_v3 = (void *)pkt->data;
if (pkt_len < struct_size(sb_v3, data, size))
return;
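The removed iwl_mvm_find_ie_offset() moves into shared code as iwl_find_ie_offset(), per the new iwl-utils include. Assuming the shared helper keeps the semantics of the body deleted above, the computation is:

    /* Offset of element 'eid' from the start of a beacon frame, or 0 if
     * the element is absent or the frame can't hold the fixed header. */
    static u32 find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
    {
            struct ieee80211_mgmt *mgmt = (void *)beacon;
            const u8 *ie;

            if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
                    return 0;

            frame_size -= mgmt->u.beacon.variable - beacon;

            ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
            return ie ? ie - beacon : 0;
    }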
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 07778d55878b..af6644b7e95f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1153,7 +1153,7 @@ static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
* Delete the stale data to avoid issues later on.
*/
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
- link_id, false);
+ link_id);
}
}
}
@@ -1300,23 +1300,16 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
- iwlwifi_mod_params.fw_restart) {
+ iwlwifi_mod_params.fw_restart)
max_retry = IWL_MAX_INIT_RETRY;
- /*
- * This will prevent mac80211 recovery flows to trigger during
- * init failures
- */
- set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
- }
for (retry = 0; retry <= max_retry; retry++) {
ret = __iwl_mvm_mac_start(mvm);
- if (!ret)
+ if (ret != -ETIMEDOUT)
break;
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
- clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1347,6 +1340,11 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
iwl_mvm_teardown_tdls_peers(mvm);
IWL_INFO(mvm, "restart completed\n");
+ iwl_trans_finish_sw_reset(mvm->trans);
+
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mvm->add_stream_txqs))
+ schedule_work(&mvm->add_stream_wk);
}
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
@@ -1485,11 +1483,12 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
s16 tx_power)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
- u32 mac_id = iwl_mvm_vif_from_mac80211(link_conf->vif)->id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(link_conf->vif);
+ u32 mac_id = mvmvif->id;
int len;
struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
- .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .common.mac_context_id = cpu_to_le32(mac_id),
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+ .common.link_id = cpu_to_le32(mac_id),
};
struct iwl_dev_tx_power_cmd cmd_v9_v10;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
@@ -1500,9 +1499,16 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);
if (cmd_ver > 8) {
+ u32 link_id;
+
+ if (WARN_ON(!mvmvif->link[link_conf->link_id]))
+ return -ENODEV;
+
+ link_id = mvmvif->link[link_conf->link_id]->fw_link_id;
+
/* Those fields sit on the same place for v9 and v10 */
- cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
- cmd_v9_v10.common.mac_context_id = cpu_to_le32(mac_id);
+ cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK);
+ cmd_v9_v10.common.link_id = cpu_to_le32(link_id);
cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
cmd_data = &cmd_v9_v10;
}
@@ -1802,6 +1808,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ vif->driver_flags = IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
if (ret)
goto out;
@@ -2967,33 +2975,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
&mvm->status),
"Failed to update SF upon disassociation\n");
- /*
- * If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
- /* first remove remaining keys */
- iwl_mvm_sec_key_remove_ap(mvm, vif,
- &mvmvif->deflink, 0);
-
- /*
- * Remove AP station now that
- * the MAC is unassoc
- */
- ret = iwl_mvm_rm_sta_id(mvm, vif,
- mvmvif->deflink.ap_sta_id);
- if (ret)
- IWL_ERR(mvm,
- "failed to remove AP station\n");
-
- mvmvif->deflink.ap_sta_id = IWL_INVALID_STA;
- }
-
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -3913,7 +3894,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (sta->tdls &&
(vif->p2p ||
- iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT ||
+ iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_TDLS_STA_COUNT ||
iwl_mvm_phy_ctx_count(mvm) > 1)) {
IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
return -EBUSY;
@@ -4113,10 +4094,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
wiphy_delayed_work_cancel(mvm->hw->wiphy,
&mvmvif->unblock_esr_tmp_non_bss_wk);
-
- /* No need for the periodic statistics anymore */
- if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
- iwl_mvm_request_periodic_system_statistics(mvm, false);
}
return 0;
@@ -5004,34 +4981,46 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
return 0;
}
-struct iwl_mvm_ftm_responder_iter_data {
- bool responder;
+struct iwl_mvm_chanctx_usage_data {
+ struct iwl_mvm *mvm;
struct ieee80211_chanctx_conf *ctx;
+ bool use_def;
};
-static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_chanctx_usage_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm_ftm_responder_iter_data *data = _data;
+ struct iwl_mvm_chanctx_usage_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ int link_id;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx &&
- vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
- data->responder = true;
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
+ continue;
+
+ if (iwl_mvm_enable_fils(data->mvm, vif, data->ctx))
+ data->use_def = true;
+
+ if (vif->type == NL80211_IFTYPE_AP && link_conf->ftmr_params)
+ data->use_def = true;
+ }
}
-bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
{
- struct iwl_mvm_ftm_responder_iter_data data = {
- .responder = false,
+ struct iwl_mvm_chanctx_usage_data data = {
+ .mvm = mvm,
.ctx = ctx,
+ .use_def = false,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_ftm_responder_chanctx_iter,
- &data);
- return data.responder;
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_chanctx_usage_iter,
+ &data);
+
+ return data.use_def ? &ctx->def : &ctx->min_def;
}
static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
@@ -5415,7 +5404,7 @@ out_reassign:
out_restart:
/* things keep failing, better restart the hw */
- iwl_mvm_nic_restart(mvm, false);
+ iwl_force_nmi(mvm->trans);
return ret;
}
@@ -5451,7 +5440,7 @@ out_reassign:
out_restart:
/* things keep failing, better restart the hw */
- iwl_mvm_nic_restart(mvm, false);
+ iwl_force_nmi(mvm->trans);
return ret;
}
@@ -6278,7 +6267,7 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
guard(mvm)(mvm);
- if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id)
+ if (sta != mvmvif->ap_sta)
return;
if (iwl_mvm_request_statistics(mvm, false))
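The FTM-responder-only iterator above generalizes into a chanctx-usage query: one atomic pass over active interfaces records whether any vif bound to the context needs the full chandef (FILS or an FTM responder), and the caller then returns ctx->def or ctx->min_def accordingly. The iterate-with-context shape, stripped to essentials; the two predicates are hypothetical stand-ins:

    struct chanctx_usage {
            const void *ctx;        /* the chanctx being queried */
            int use_def;            /* set if any vif needs the full def */
    };

    /* Hypothetical stand-ins for the FILS/FTM-responder checks. */
    static int vif_uses_ctx(const void *vif, const void *ctx);
    static int vif_needs_full_def(const void *vif);

    /* Callback run by the atomic iterator: it must not sleep, so it
     * only records findings in the caller-owned struct. */
    static void usage_iter(void *_data, const void *vif)
    {
            struct chanctx_usage *data = _data;

            if (vif_uses_ctx(vif, data->ctx) && vif_needs_full_def(vif))
                    data->use_def = 1;
    }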
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index b807046144c0..341a2a7a49ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -18,6 +18,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->mvm = mvm;
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
/* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
* don't really have to check the types.
@@ -208,32 +210,6 @@ static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
return n_active;
}
-static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
-{
- struct ieee80211_sta *ap_sta = mvmvif->ap_sta;
- struct iwl_mvm_sta *mvmsta;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ap_sta)
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
- if (!mvmsta->mpdu_counters)
- return;
-
- for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
- spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
- memset(mvmsta->mpdu_counters[q].per_link, 0,
- sizeof(mvmsta->mpdu_counters[q].per_link));
- mvmsta->mpdu_counters[q].window_start = jiffies;
- spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
- }
-
- IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
-}
-
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -267,16 +243,6 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
else
mvmvif->primary_link = __ffs(vif->active_links);
- /* Needed for tracking RSSI */
- iwl_mvm_request_periodic_system_statistics(mvm, true);
-
- /*
- * Restart the MPDU counters and the counting window, so when the
- * statistics arrive (which is where we look at the counters) we
- * will be at the end of the window.
- */
- iwl_mvm_restart_mpdu_count(mvm, mvmvif);
-
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
NULL);
@@ -323,7 +289,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
- iwl_mvm_request_periodic_system_statistics(mvm, false);
goto out;
}
}
@@ -449,11 +414,6 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
break;
}
- iwl_mvm_request_periodic_system_statistics(mvm, false);
-
- /* Start a new counting window */
- iwl_mvm_restart_mpdu_count(mvm, mvmvif);
-
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
NULL);
@@ -831,30 +791,6 @@ static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif)
return false;
}
-static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int i, ret;
-
- if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- return;
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- struct iwl_mvm_vif_link_info *link = mvmvif->link[i];
-
- if (!link)
- continue;
-
- iwl_mvm_sec_key_remove_ap(mvm, vif, link, i);
- ret = iwl_mvm_mld_rm_sta_id(mvm, link->ap_sta_id);
- if (ret)
- IWL_ERR(mvm, "failed to remove AP station\n");
-
- link->ap_sta_id = IWL_INVALID_STA;
- }
-}
-
static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u64 changes)
@@ -881,8 +817,13 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (vif->cfg.assoc) {
mvmvif->session_prot_connection_loss = false;
- /* clear statistics to get clean beacon counter */
+ /*
+ * Clear statistics to get clean beacon counter, and ask for
+ * periodic statistics, as they are needed for link
+ * selection and RX OMI decisions.
+ */
iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
@@ -930,6 +871,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
} else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
iwl_mvm_mei_host_disassociated(mvm);
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
/* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
*/
@@ -938,15 +881,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status),
"Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
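Periodic system statistics now follow association state instead of eSR state: requested when the vif associates (the new comment notes they feed link selection and RX OMI decisions) and released again on disassociation. The pairing, using the function names from the hunks above in an illustrative wrapper:

    static void on_assoc_change(struct iwl_mvm *mvm, bool assoc,
                                bool had_valid_ap_sta)
    {
            if (assoc) {
                    iwl_mvm_request_statistics(mvm, true);
                    iwl_mvm_request_periodic_system_statistics(mvm, true);
            } else if (had_valid_ap_sta) {
                    iwl_mvm_request_periodic_system_statistics(mvm, false);
            }
    }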
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 019839604011..2f159024eeb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -518,14 +518,12 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+ unsigned int link_id)
{
lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
- RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
- is_in_fw ? ERR_PTR(-EINVAL) : NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL);
RCU_INIT_POINTER(mvm_sta->link[link_id], NULL);
@@ -546,7 +544,7 @@ static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm,
if (!link)
continue;
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id);
}
}
@@ -844,18 +842,11 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_link_sta *mvm_link_sta =
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- bool stay_in_fw;
+ iwl_mvm_sta_del(mvm, vif, sta, link_sta);
- stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret);
- if (ret)
- break;
-
- if (!stay_in_fw)
- ret = iwl_mvm_mld_rm_sta_from_fw(mvm,
- mvm_link_sta->sta_id);
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id);
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
- link_id, stay_in_fw);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
}
kfree(mvm_sta->mpdu_counters);
mvm_sta->mpdu_counters = NULL;
@@ -1122,8 +1113,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION)
mvm_vif_link->ap_sta_id = IWL_INVALID_STA;
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
- false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
}
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
@@ -1227,8 +1217,7 @@ err:
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
- false);
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
}
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2ad615293c75..ee769da72e68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -103,6 +103,7 @@ struct iwl_mvm_phy_ctxt {
u32 center_freq1;
bool rlc_disabled;
u32 channel_load_by_us;
+ u32 channel_load_not_by_us;
};
struct iwl_mvm_time_event_data {
@@ -1174,8 +1175,6 @@ struct iwl_mvm {
struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1];
- /* -1 for always, 0 for never, >0 for that many times */
- s8 fw_restart;
u8 *error_recovery_buf;
#ifdef CONFIG_IWLWIFI_LEDS
@@ -1401,8 +1400,6 @@ DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mu
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
- * @IWL_MVM_STATUS_STARTING: starting mac,
- * used to disable restart flow while in STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1414,7 +1411,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
- IWL_MVM_STATUS_STARTING,
};
struct iwl_mvm_csme_conn_info {
@@ -1736,12 +1732,19 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
{
- if ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) &&
- !CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ return false;
+
+ switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_FM:
/* Step A doesn't support eSR */
return CSR_HW_RFID_STEP(trans->hw_rf_id);
-
- return false;
+ case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
+ return true;
+ default:
+ return false;
+ }
}
static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
@@ -1824,7 +1827,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
-u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -2591,7 +2593,6 @@ void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
-void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2996,18 +2997,11 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
struct cfg80211_set_hw_timestamp *hwts);
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
-bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx);
-
-static inline struct cfg80211_chan_def *
-iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
-{
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- return use_def ? &ctx->def : &ctx->min_def;
-}
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
u32 duration_ms,
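The eSR capability test in the same header becomes a switch over the RF type: CDB parts never support it, FM needs a silicon step later than A (step A reads back as 0), and WH/PE always do. Reduced to plain values, with placeholder type tags standing in for the IWL_CFG_RF_TYPE_* constants:

    static int esr_supported(int is_cdb, int rf_type, int rf_step)
    {
            if (is_cdb)
                    return 0;

            switch (rf_type) {
            case 0: /* FM: step A (0) doesn't support eSR */
                    return rf_step != 0;
            case 1: /* WH */
            case 2: /* PE */
                    return 1;
            default:
                    return 0;
            }
    }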
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 30fcc733395e..984f407f7027 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -208,7 +208,8 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
return;
- vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
+ /* FIXME: should fetch the link and not the vif */
+ vif = iwl_mvm_get_vif_by_macid(mvm, notif->link_id);
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
@@ -408,7 +409,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC, struct iwl_time_event_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
- struct iwl_mvm_session_prot_notif),
+ struct iwl_session_prot_notif),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
@@ -1285,6 +1286,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int err;
/*
* We use IWL_STATION_COUNT_MAX to check the validity of the station
@@ -1302,7 +1304,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops :
&iwl_mvm_hw_ops);
if (!hw)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = 512;
@@ -1331,6 +1333,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
+ iwl_bios_setup_step(trans, &mvm->fwrt);
mvm->init_status = 0;
@@ -1346,11 +1349,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd_hdr_size =
sizeof(struct iwl_rx_mpdu_res_start);
- if (WARN_ON(trans->num_rx_queues > 1))
+ if (WARN_ON(trans->num_rx_queues > 1)) {
+ err = -EINVAL;
goto out_free;
+ }
}
- mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt);
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -1424,8 +1428,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
/* we only support up to version 9 */
- if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) {
+ err = -EINVAL;
goto out_free;
+ }
/*
* Populate the state variables that the transport layer needs
@@ -1474,9 +1480,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
- memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
- sizeof(trans->dbg.conf_tlv));
- trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1488,6 +1491,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->phy_db = iwl_phy_db_init(trans);
if (!mvm->phy_db) {
IWL_ERR(mvm, "Cannot init phy_db\n");
+ err = -ENOMEM;
goto out_free;
}
@@ -1500,8 +1504,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
- if (!mvm->scan_cmd)
+ if (!mvm->scan_cmd) {
+ err = -ENOMEM;
goto out_free;
+ }
mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
@@ -1530,7 +1536,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);
- if (iwl_mvm_start_get_nvm(mvm)) {
+ err = iwl_mvm_start_get_nvm(mvm);
+ if (err) {
/*
* Getting NVM failed while CSME is the owner, but we are
* registered to MEI, we'll get the NVM later when it'll be
@@ -1543,7 +1550,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
- if (iwl_mvm_start_post_nvm(mvm))
+ err = iwl_mvm_start_post_nvm(mvm);
+ if (err)
goto out_thermal_exit;
return op_mode;
@@ -1563,7 +1571,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
- return NULL;
+ return ERR_PTR(err);
}
void iwl_mvm_stop_device(struct iwl_mvm *mvm)
@@ -1998,27 +2006,62 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(mvm->hw, skb);
}
-struct iwl_mvm_reprobe {
- struct device *dev;
- struct work_struct work;
-};
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ iwl_dbg_tlv_del_timers(mvm->trans);
-static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+ IWL_ERR(mvm, "Command queue full!\n");
+ else if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
+ !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
+ &mvm->status))
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ /*
+ * This should be first thing before trying to collect any
+ * data to avoid endless loops if any HW error happens while
+ * collecting debug data.
+ * It might not actually be true that we'll restart, but the
+ * setting of the bit doesn't matter if we're going to be
+ * unbound either.
+ */
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+}
+
+static void iwl_mvm_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
{
- struct iwl_mvm_reprobe *reprobe;
-
- reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
- if (device_reprobe(reprobe->dev))
- dev_err(reprobe->dev, "reprobe failed!\n");
- put_device(reprobe->dev);
- kfree(reprobe);
- module_put(THIS_MODULE);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /* if we come in from opmode we have the mutex held */
+ if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+ lockdep_assert_held(&mvm->mutex);
+ iwl_fw_error_collect(&mvm->fwrt);
+ } else {
+ mutex_lock(&mvm->mutex);
+ if (mode->context != IWL_ERR_CONTEXT_ABORT)
+ iwl_fw_error_collect(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+ }
}
-void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
+static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
{
- iwl_abort_notification_waits(&mvm->notif_wait);
- iwl_dbg_tlv_del_timers(mvm->trans);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /*
+ * If the firmware crashes while we're already considering it
+ * to be dead then don't ask for a restart, that cannot do
+ * anything useful anyway.
+ */
+ if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
+ return false;
/*
* This is a bit racy, but worst case we tell mac80211 about
@@ -2033,52 +2076,11 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
iwl_mvm_report_scan_aborted(mvm);
/*
- * If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
* can't recover this since we're already half suspended.
*/
- if (!mvm->fw_restart && fw_error) {
- iwl_fw_error_collect(&mvm->fwrt, false);
- } else if (test_bit(IWL_MVM_STATUS_STARTING,
- &mvm->status)) {
- IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
- } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- struct iwl_mvm_reprobe *reprobe;
-
- IWL_ERR(mvm,
- "Firmware error during reconfiguration - reprobe!\n");
-
- /*
- * get a module reference to avoid doing this while unloading
- * anyway and to avoid scheduling a work with code that's
- * being removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(mvm, "Module is being unloaded - abort\n");
- return;
- }
-
- reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
- if (!reprobe) {
- module_put(THIS_MODULE);
- return;
- }
- reprobe->dev = get_device(mvm->trans->dev);
- INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
- schedule_work(&reprobe->work);
- } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status)) {
- IWL_ERR(mvm, "HW restart already requested, but not started\n");
- } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
- mvm->hw_registered &&
- !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
- /* This should be first thing before trying to collect any
- * data to avoid endless loops if any HW error happens while
- * collecting debug data.
- */
- set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
-
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) {
if (mvm->fw->ucode_capa.error_log_size) {
u32 src_size = mvm->fw->ucode_capa.error_log_size;
u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
@@ -2093,57 +2095,18 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
}
}
- iwl_fw_error_collect(&mvm->fwrt, false);
-
- if (fw_error && mvm->fw_restart > 0) {
- mvm->fw_restart--;
- ieee80211_restart_hw(mvm->hw);
- } else if (mvm->fwrt.trans->dbg.restart_required) {
+ if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
+ return true;
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
+ return true;
}
}
-}
-
-static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
- !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
- &mvm->status))
- iwl_mvm_dump_nic_error_log(mvm);
-
- if (sync) {
- iwl_fw_error_collect(&mvm->fwrt, true);
- /*
- * Currently, the only case for sync=true is during
- * shutdown, so just stop in this case. If/when that
- * changes, we need to be a bit smarter here.
- */
- return;
- }
-
- /*
- * If the firmware crashes while we're already considering it
- * to be dead then don't ask for a restart, that cannot do
- * anything useful anyway.
- */
- if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
- return;
-
- iwl_mvm_nic_restart(mvm, false);
-}
-
-static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- WARN_ON(1);
- iwl_mvm_nic_restart(mvm, true);
+ return false;
}
static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
@@ -2179,7 +2142,8 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
.hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
.free_skb = iwl_mvm_free_skb, \
.nic_error = iwl_mvm_nic_error, \
- .cmd_queue_full = iwl_mvm_cmd_queue_full, \
+ .dump_error = iwl_mvm_dump_error, \
+ .sw_reset = iwl_mvm_sw_reset, \
.nic_config = iwl_mvm_nic_config, \
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
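The old single restart path splits into three hooks with distinct jobs, wired into the ops table above: nic_error aborts notification waits and logs (unless suppressed or the bus is dead), dump_error collects the firmware dump with locking that depends on the calling context, and sw_reset decides whether mac80211 restarts the hardware. An illustrative summary of that contract; the struct shape is not the driver's:

    struct error_hooks {
            /* log the error, mark HW_RESTART_REQUESTED (except for a
             * reset-handshake timeout) */
            void (*nic_error)(void *op_mode, int type);
            /* collect the FW dump; takes the mvm mutex itself unless
             * the opmode caller already holds it */
            void (*dump_error)(void *op_mode, void *mode);
            /* returns true when a mac80211 HW restart was scheduled */
            int (*sw_reset)(void *op_mode, int type);
    };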
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 7cab5373c8ae..5e7e2926be0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -31,7 +31,7 @@ u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
/*
* Maps the driver specific control channel position (relative to the center
- * freq) definitions to the the fw values
+ * freq) definitions to the fw values
*/
u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index bc363e8427e4..a386b315e52f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -567,7 +567,7 @@ struct iwl_power_vifs {
bool monitor_active;
};
-static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
+static void iwl_mvm_power_disable_pm_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -575,7 +575,7 @@ static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
mvmvif->pm_enabled = false;
}
-static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
+static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 9e72db9bab40..2dbef7b46355 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -789,6 +789,8 @@ static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
continue;
mvm->phy_ctxts[i].channel_load_by_us =
le32_to_cpu(per_phy[i].channel_load_by_us);
+ mvm->phy_ctxts[i].channel_load_not_by_us =
+ le32_to_cpu(per_phy[i].channel_load_not_by_us);
}
}
@@ -962,6 +964,9 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
#define SEC_LINK_MIN_TX 3000
#define SEC_LINK_MIN_RX 400
+/* Accept a ~20% short window to avoid issues due to jitter */
+#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
+
static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
{
struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
@@ -971,6 +976,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
unsigned long sec_link_tx = 0, sec_link_rx = 0;
u8 sec_link_tx_perc, sec_link_rx_perc;
u8 sec_link;
+ bool skip = false;
lockdep_assert_held(&mvm->mutex);
@@ -1010,13 +1016,25 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
/*
* In EMLSR we have statistics every 5 seconds, so we can reset
* the counters upon every statistics notification.
+ * The FW sends the notification regularly, but it will be
+ * misaligned at the start. Skipping the measurement if it is
+ * short will synchronize us.
*/
+ if (jiffies - mvmsta->mpdu_counters[q].window_start <
+ IWL_MVM_TPT_MIN_COUNT_WINDOW)
+ skip = true;
+ mvmsta->mpdu_counters[q].window_start = jiffies;
memset(mvmsta->mpdu_counters[q].per_link, 0,
sizeof(mvmsta->mpdu_counters[q].per_link));
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
+ if (skip) {
+ IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
+ return;
+ }
+
IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
total_tx, total_rx);
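With statistics nominally every 5 seconds, the minimum window works out to 4/5 of that, so a notification arriving less than about 4 seconds after window_start measures a misaligned window and is skipped; resetting window_start then synchronizes the next measurement. The test itself is one comparison:

    /* Skip measurement windows shorter than 80% of the nominal period;
     * 'nominal' is the full window length in jiffies. */
    static int window_too_short(unsigned long now, unsigned long start,
                                unsigned long nominal)
    {
            return now - start < nominal * 4 / 5;
    }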
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a2f16bfaec44..14ea89f931bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -636,15 +636,21 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
baid, nssn);
- if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
- baid >= ARRAY_SIZE(mvm->baid_map)))
+ if (IWL_FW_CHECK(mvm,
+ baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map),
+ "invalid BAID from FW: %d\n", baid))
return;
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN(!ba_data, "BAID %d not found in map\n", baid))
+ if (!ba_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID %d but not allocated, invalid frame release!\n",
+ baid);
goto out;
+ }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -989,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
u32 rate_n_flags = phy_data->rate_n_flags;
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -1044,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu)
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
he->data6 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
@@ -2506,19 +2512,24 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bar_frame_release *release = (void *)pkt->data;
- unsigned int baid = le32_get_bits(release->ba_info,
- IWL_BAR_FRAME_RELEASE_BAID_MASK);
- unsigned int nssn = le32_get_bits(release->ba_info,
- IWL_BAR_FRAME_RELEASE_NSSN_MASK);
- unsigned int sta_id = le32_get_bits(release->sta_tid,
- IWL_BAR_FRAME_RELEASE_STA_MASK);
- unsigned int tid = le32_get_bits(release->sta_tid,
- IWL_BAR_FRAME_RELEASE_TID_MASK);
struct iwl_mvm_baid_data *baid_data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ unsigned int baid, nssn, sta_id, tid;
- if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+ if (IWL_FW_CHECK(mvm, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %d (expected %zu)\n",
+ pkt_len, sizeof(*release)))
return;
+ baid = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
+ nssn = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+ sta_id = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
+ tid = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
+
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
baid >= ARRAY_SIZE(mvm->baid_map)))
return;
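Both release paths above now validate before they parse: the packet length is checked before any field is read, and the firmware-supplied BAID is treated as untrusted input reported through IWL_FW_CHECK rather than a kernel WARN, since a bad value indicates a firmware problem, not a driver bug. The two checks condensed into one illustrative predicate:

    /* Nonzero only if the notification is long enough to hold the fields
     * being read and the FW-supplied BAID indexes inside the map. */
    static int release_notif_valid(unsigned long pkt_len, unsigned long need,
                                   unsigned int baid, unsigned int map_size)
    {
            if (pkt_len < need)
                    return 0;
            if (baid >= map_size)
                    return 0;
            return 1;
    }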
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 376b9b12fa62..60bd9c7e5f03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -462,7 +462,7 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
if (!ssid_list[i].len)
break;
if (ssid_list[i].len == ssid_len &&
- !memcmp(ssid_list->ssid, ssid, ssid_len))
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
return i;
}
return -1;
@@ -3477,7 +3477,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* restart_hw, so do not report if FW is about to be
* restarted.
*/
- if (!mvm->fw_restart)
+ if (!iwlwifi_mod_params.fw_restart)
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
@@ -3528,7 +3528,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* restarted.
*/
if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
- !mvm->fw_restart) {
+ !iwlwifi_mod_params.fw_restart) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
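The one-character scan fix deserves a note: `ssid_list->ssid` is `ssid_list[0].ssid`, so the old loop took the length from entry i but compared the bytes of entry 0, misdetecting duplicates. The corrected lookup, self-contained with an assumed struct shape:

    #include <string.h>

    struct ssid_ie {                /* shape assumed for illustration */
            unsigned char len;
            unsigned char ssid[32];
    };

    /* Both the length and the bytes must come from entry i. */
    static int ssid_find(const struct ssid_ie *list, int n,
                         const unsigned char *ssid, unsigned char len)
    {
            for (int i = 0; i < n; i++) {
                    if (!list[i].len)
                            break;
                    if (list[i].len == len &&
                        !memcmp(list[i].ssid, ssid, len))
                            return i;
            }
            return -1;
    }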
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index cd74c181c260..7a4844ec3c10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1520,7 +1520,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
add_stream_wk);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ /* will reschedule to run after restart */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
@@ -1564,8 +1569,6 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
-
- mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
@@ -2045,9 +2048,9 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
* Returns if we're done with removing the station, either
* with error or success
*/
-bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta, int *ret)
+ struct ieee80211_link_sta *link_sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif_link_info *mvm_link =
@@ -2063,38 +2066,12 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_is_held(&mvm->mutex));
sta_id = mvm_link_sta->sta_id;
- /* If there is a TXQ still marked as reserved - free it */
- if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
- u8 reserved_txq = mvm_sta->reserved_queue;
- enum iwl_mvm_queue_status *status;
-
- /*
- * If no traffic has gone through the reserved TXQ - it
- * is still marked as IWL_MVM_QUEUE_RESERVED, and
- * should be manually marked as free again
- */
- status = &mvm->queue_info[reserved_txq].status;
- if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
- (*status != IWL_MVM_QUEUE_FREE),
- "sta_id %d reserved txq %d status %d",
- sta_id, reserved_txq, *status)) {
- *ret = -EINVAL;
- return true;
- }
-
- *status = IWL_MVM_QUEUE_FREE;
- }
-
if (vif->type == NL80211_IFTYPE_STATION &&
mvm_link->ap_sta_id == sta_id) {
- /* if associated - we can't remove the AP STA now */
- if (vif->cfg.assoc)
- return true;
-
/* first remove remaining keys */
- iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);
+ iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link,
+ link_sta->link_id);
- /* unassoc - go ahead - remove the AP STA now */
mvm_link->ap_sta_id = IWL_INVALID_STA;
}
@@ -2106,8 +2083,6 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
-
- return false;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
@@ -2143,8 +2118,27 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
iwl_mvm_disable_sta_queues(mvm, vif, sta);
- if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
- return ret;
+ /* If there is a TXQ still marked as reserved - free it */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->deflink.sta_id, reserved_txq, *status))
+ return -EINVAL;
+
+ *status = IWL_MVM_QUEUE_FREE;
+ }
+
+ iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);
@@ -2912,7 +2906,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/*
* The division below will be OK if either the cache line size
* can be divided by the entry size (ALIGN will round up) or if
- * if the entry size can be divided by the cache line size, in
+ * the entry size can be divided by the cache line size, in
* which case the ALIGN() will do nothing.
*/
BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
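
The add_stream_wk hunk earlier in this file swaps the mutex_lock()/mutex_unlock() pair for guard(mvm)(mvm), the scope-based cleanup helper from <linux/cleanup.h>; the unlock then runs automatically on every return path, which is what makes the new early return for the restart case safe. A minimal sketch of the mechanism with hypothetical names (the driver defines its own guard type along these lines):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex mutex;
	bool busy;
};

/* defines guard(my_dev)(p): lock now, unlock when the scope ends */
DEFINE_GUARD(my_dev, struct my_dev *,
	     mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))

static void my_work(struct my_dev *dev)
{
	guard(my_dev)(dev);

	if (dev->busy)
		return;		/* mutex released here ... */

	dev->busy = true;
}				/* ... and here */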
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4a3799ae7c18..6856f7440ef3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -133,7 +133,7 @@ struct iwl_mvm_vif;
* and no TID data as this is also not needed.
 * One thing to note is that these stations have an ID in the fw, but not
* in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
- * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of
+ * we fill ERR_PTR(-EINVAL) in this mapping and all other dereferencing of
* pointers from this mapping need to check that the value is not error
* or NULL.
*
@@ -507,9 +507,9 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta);
-bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta, int *ret);
+ struct ieee80211_link_sta *link_sta);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -665,8 +665,7 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw);
+ unsigned int link_id);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 65927ebbabb7..36379b738de1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -24,7 +24,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
- if (!sta || IS_ERR(sta) || !sta->tdls)
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -47,7 +47,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
- if (!sta || IS_ERR(sta) || !sta->tdls)
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
continue;
if (vif) {
@@ -472,7 +472,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
- if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
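
The three tdls.c changes are a pure simplification: IS_ERR_OR_NULL() from <linux/err.h> folds the NULL check and the error-pointer check into one predicate. That matters here because, as the sta.h comment above notes, table slots can hold NULL, ERR_PTR(-EINVAL) or a live pointer. A sketch with a hypothetical type:

#include <linux/err.h>

struct my_sta;

static struct my_sta *my_lookup(struct my_sta **tbl, int i)
{
	struct my_sta *sta = tbl[i];

	/* one predicate replaces: if (!sta || IS_ERR(sta)) */
	if (IS_ERR_OR_NULL(sta))
		return NULL;

	return sta;
}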
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
index 1dc57e022191..d692f1813d44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
@@ -262,7 +262,7 @@ static const struct valid_link_pair_case {
.desc = "LB + HB, no BT.",
.chan_a = &chan_2ghz,
.chan_b = &chan_5ghz,
- .valid = false,
+ .valid = true,
},
{
.desc = "LB + HB, with BT.",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 72fa7ac86516..ebfa88b38b71 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -751,7 +751,7 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.conf_id = cpu_to_le32(id),
@@ -955,7 +955,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
SESSION_PROTECTION_NOTIF, 2);
@@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
+ /* set the bit so the ROC cleanup will actually clean up */
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
@@ -1148,7 +1150,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
enum ieee80211_roc_type type)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
@@ -1417,7 +1419,7 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
- struct iwl_mvm_session_prot_notif *resp;
+ struct iwl_session_prot_notif *resp;
int resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
@@ -1449,7 +1451,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
- struct iwl_mvm_session_prot_cmd cmd = {
+ struct iwl_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
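
The set_bit() added in iwl_mvm_rx_session_protect_notif() leans on the usual atomic-bitops handshake: the cleanup helper only does work when the running bit is set, so the notification path has to raise it first. A reduced sketch of that pattern, with a hypothetical flag name:

#include <linux/bitops.h>

enum {
	MY_STATUS_ROC_RUNNING,		/* bit number, not a mask */
};

static unsigned long my_status;

static void my_roc_notif(void)
{
	/* raise the bit so the cleanup below actually cleans up */
	set_bit(MY_STATUS_ROC_RUNNING, &my_status);
}

static void my_roc_finished(void)
{
	if (!test_and_clear_bit(MY_STATUS_ROC_RUNNING, &my_status))
		return;			/* nothing was running */

	/* ... tear down the time event ... */
}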
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c9867d26361b..f67afb66ef2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -13,6 +13,7 @@
#include "iwl-trans.h"
#include "iwl-nvm-utils.h"
+#include "iwl-utils.h"
#include "mvm.h"
#include "sta.h"
#include "time-sync.h"
@@ -938,78 +939,6 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
#ifdef CONFIG_INET
-static int
-iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
- netdev_features_t netdev_flags,
- struct sk_buff_head *mpdus_skb)
-{
- struct sk_buff *tmp, *next;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- char cb[sizeof(skb->cb)];
- u16 i = 0;
- unsigned int tcp_payload_len;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- bool ipv4 = (skb->protocol == htons(ETH_P_IP));
- bool qos = ieee80211_is_data_qos(hdr->frame_control);
- u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-
- skb_shinfo(skb)->gso_size = num_subframes * mss;
- memcpy(cb, skb->cb, sizeof(cb));
-
- next = skb_gso_segment(skb, netdev_flags);
- skb_shinfo(skb)->gso_size = mss;
- skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
-
- if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
- return -ENOMEM;
-
- if (WARN_ONCE(IS_ERR(next),
- "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
- return PTR_ERR(next);
-
- if (next)
- consume_skb(skb);
-
- skb_list_walk_safe(next, tmp, next) {
- memcpy(tmp->cb, cb, sizeof(tmp->cb));
- /*
- * Compute the length of all the data added for the A-MSDU.
- * This will be used to compute the length to write in the TX
- * command. We have: SNAP + IP + TCP for n -1 subframes and
- * ETH header for n subframes.
- */
- tcp_payload_len = skb_tail_pointer(tmp) -
- skb_transport_header(tmp) -
- tcp_hdrlen(tmp) + tmp->data_len;
-
- if (ipv4)
- ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
-
- if (tcp_payload_len > mss) {
- skb_shinfo(tmp)->gso_size = mss;
- skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
- SKB_GSO_TCPV6;
- } else {
- if (qos) {
- u8 *qc;
-
- if (ipv4)
- ip_send_check(ip_hdr(tmp));
-
- qc = ieee80211_get_qos_ctl((void *)tmp->data);
- *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- }
- skb_shinfo(tmp)->gso_size = 0;
- }
-
- skb_mark_not_on_list(tmp);
- __skb_queue_tail(mpdus_skb, tmp);
- i++;
- }
-
- return 0;
-}
-
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -1028,7 +957,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
!mvmsta->amsdu_enabled)
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
* Do not build AMSDU for IPv6 with extension headers.
@@ -1038,7 +967,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
IPPROTO_TCP) {
netdev_flags &= ~NETIF_F_CSUM_MASK;
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
}
tid = ieee80211_get_tid(hdr);
@@ -1052,7 +981,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
!(mvmsta->amsdu_enabled & BIT(tid)))
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
* Take the min of ieee80211 station and mvm station
@@ -1110,8 +1039,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* Trick the segmentation function to make it
* create SKBs that can fit into one A-MSDU.
*/
- return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
- mpdus_skb);
+ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb);
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -1698,8 +1626,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
int txq_id = SEQ_TO_QUEUE(sequence);
/* struct iwl_tx_resp_v3 is almost the same */
struct iwl_tx_resp *tx_resp = (void *)pkt->data;
- int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
- int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
struct agg_tx_status *agg_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
u32 status = le16_to_cpu(agg_status->status);
@@ -1880,7 +1808,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
+ if (tid < IWL_MAX_TID_COUNT)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
+ true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -1989,8 +1919,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_tx_resp *tx_resp = (void *)pkt->data;
- int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
- int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct iwl_mvm_sta *mvmsta;
int queue = SEQ_TO_QUEUE(sequence);
@@ -2193,7 +2123,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.flags = IEEE80211_TX_STAT_AMPDU;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_mvm_compressed_ba_notif *ba_res =
+ struct iwl_compressed_ba_notif *ba_res =
(void *)pkt->data;
u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
u16 tfd_cnt;
@@ -2241,8 +2171,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
/* Free per TID */
for (i = 0; i < tfd_cnt; i++) {
- struct iwl_mvm_compressed_ba_tfd *ba_tfd =
- &ba_res->tfd[i];
+ struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
tid = ba_tfd->tid;
if (tid == IWL_MGMT_TID)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 080a1587caa5..0f7fa6032c66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -104,9 +104,9 @@ static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
};
enum iwl_mvm_vendor_events_idx {
- /* 0x0 - 0x3 are deprecated */
- IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
- NUM_IWL_MVM_VENDOR_EVENT_IDX
+ /* 0x0 - 0x3 are deprecated */
+ IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
+ NUM_IWL_MVM_VENDOR_EVENT_IDX
};
static const struct nl80211_vendor_cmd_info
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index ae93a72542b2..838c426db7f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -106,6 +106,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
u32 control_flags = 0;
+ u32 control_flags_ext = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
@@ -130,6 +131,12 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
break;
}
+ if (trans->dsbr_urm_fw_dependent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
+
+ if (trans->dsbr_urm_permanent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
+
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
@@ -165,6 +172,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
&control_flags);
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
/* initialize the Step equalizer data */
prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8fb2aa282242..e0b657b2f74b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -540,6 +540,9 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
+
+/* Dr devices */
+ {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -1182,6 +1185,19 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2f, iwl_sc2f_name),
+/* Dr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_dr, iwl_dr_name),
+
+/* Br */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_br, iwl_br_name),
#endif /* CONFIG_IWLMVM */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
@@ -1286,6 +1302,9 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
case REG_CRF_ID_TYPE_WHP:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
break;
+ case REG_CRF_ID_TYPE_PE:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
+ break;
default:
ret = -EIO;
IWL_ERR(iwl_trans,
@@ -1391,6 +1410,47 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
+static void iwl_pcie_recheck_me_status(struct work_struct *wk)
+{
+ struct iwl_trans *trans = container_of(wk, typeof(*trans),
+ me_recheck_wk.work);
+ u32 val;
+
+ val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
+ trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
+}
+
+static void iwl_pcie_check_me_status(struct iwl_trans *trans)
+{
+ u32 val;
+
+ trans->me_present = -1;
+
+ INIT_DELAYED_WORK(&trans->me_recheck_wk,
+ iwl_pcie_recheck_me_status);
+
+ /* we don't have a good way of determining this until BZ */
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ return;
+
+ val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
+ if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
+ trans->me_present =
+ !!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
+ return;
+ }
+
+ val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
+ if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
+ CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
+ trans->me_present = 1;
+ return;
+ }
+
+ /* check again later, ME might still be initializing */
+ schedule_delayed_work(&trans->me_recheck_wk, HZ);
+}
+
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg_trans_params *trans;
@@ -1420,6 +1480,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ iwl_trans_pcie_check_product_reset_status(pdev);
+ iwl_trans_pcie_check_product_reset_mode(pdev);
+
/*
* Let's try to grab NIC access early here. Sometimes, NICs may
* fail to initialize, and if that happens it's better if we see
@@ -1566,6 +1629,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, iwl_trans);
+ iwl_pcie_check_me_status(iwl_trans);
+
/* try to get ownership so that we'll know if we don't own it */
iwl_pcie_prepare_card_hw(iwl_trans);
@@ -1593,6 +1658,8 @@ static void iwl_pci_remove(struct pci_dev *pdev)
if (!trans)
return;
+ cancel_delayed_work_sync(&trans->me_recheck_wk);
+
iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
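
The new ME detection in drv.c is a deferred-recheck pattern: probe reads the status once and, if the Management Engine may still be initializing, schedules a delayed work to look again after a second; the remove path cancels that work synchronously before teardown. A compressed sketch, with the register readers stubbed out as hypothetical helpers:

#include <linux/workqueue.h>

struct my_trans {
	struct delayed_work recheck_wk;
	s8 me_present;		/* -1 unknown, 0 absent, 1 present */
};

/* hypothetical hardware accessors, stubbed for illustration */
static int my_read_me_state(struct my_trans *t) { return 0; }
static bool my_me_state_known(struct my_trans *t) { return false; }

static void my_recheck(struct work_struct *wk)
{
	struct my_trans *t = container_of(wk, struct my_trans,
					  recheck_wk.work);

	t->me_present = my_read_me_state(t);
}

static void my_probe_check(struct my_trans *t)
{
	t->me_present = -1;
	INIT_DELAYED_WORK(&t->recheck_wk, my_recheck);

	if (my_me_state_known(t)) {
		t->me_present = my_read_me_state(t);
		return;
	}

	/* hardware may still be initializing: look again in a second */
	schedule_delayed_work(&t->recheck_wk, HZ);
}

/* the remove path must call cancel_delayed_work_sync(&t->recheck_wk) */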
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 27a7e0b5b3d5..45460f93d24a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -563,6 +563,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
__cond_lock(nic_access_nobh, \
likely(__iwl_trans_pcie_grab_nic_access(trans)))
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
+
/*****************************************************
* RX
******************************************************/
@@ -643,7 +646,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room);
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset);
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta);
@@ -1134,9 +1138,6 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index afb88eab8174..4a442d03d8d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1702,7 +1702,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
- iwl_trans_fw_error(trans, false);
+ iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans->wait_command_queue);
@@ -2297,7 +2297,9 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
inta_hw);
- /* TODO: PLDR flow required here for >= Bz */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_trans_pcie_reset(trans,
+ IWL_RESET_MODE_PROD_RESET);
}
/* Error detected by uCode */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 8903a5692dfb..793514a1852a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -43,7 +43,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
iwl_pcie_apm_config(trans);
@@ -68,8 +68,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE |
- CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -123,14 +123,21 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
"timeout waiting for FW reset ACK (inta_hw=0x%x)\n",
inta_hw);
- if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE))
- iwl_trans_fw_error(trans, true);
+ if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) {
+ struct iwl_fw_error_dump_mode mode = {
+ .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
+ .context = IWL_ERR_CONTEXT_FROM_OPMODE,
+ };
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_RESET_HS_TIMEOUT);
+ iwl_op_mode_dump_error(trans->op_mode, &mode);
+ }
}
trans_pcie->fw_reset_state = FW_RESET_IDLE;
}
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -139,9 +146,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (trans_pcie->is_down)
return;
- if (trans->state >= IWL_TRANS_FW_STARTED)
- if (trans_pcie->fw_reset_handshake)
- iwl_trans_pcie_fw_reset_handshake(trans);
+ if (trans->state >= IWL_TRANS_FW_STARTED &&
+ trans_pcie->fw_reset_handshake)
+ iwl_trans_pcie_fw_reset_handshake(trans);
trans_pcie->is_down = true;
@@ -287,9 +294,6 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
pos = scnprintf(buf, buflen, "HRCDB");
break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS):
- pos = scnprintf(buf, buflen, "MS");
- break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
pos = scnprintf(buf, buflen, "FM");
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 86f1d87a909c..c917ed4c19bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -24,6 +24,7 @@
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
+#include "fw/acpi.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -311,7 +312,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
iwl_pcie_apm_config(trans);
@@ -439,7 +440,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* SHRD_HW_RST is applied in S3.
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
/*
* Clear "initialization complete" bit to move adapter from
@@ -508,8 +509,8 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE |
- CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -581,12 +582,12 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
int ret;
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
HW_READY_TIMEOUT);
if (ret >= 0)
@@ -620,7 +621,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ CSR_HW_IF_CONFIG_REG_WAKE_ME);
do {
ret = iwl_pcie_set_hw_ready(trans);
@@ -1488,8 +1489,8 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
_iwl_trans_pcie_stop_device(trans, from_irq);
}
-void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
+static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset)
{
iwl_disable_interrupts(trans);
@@ -1566,7 +1567,7 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
ret = iwl_pcie_d3_handshake(trans, true);
if (ret)
@@ -2105,10 +2106,157 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_trans_free(trans);
}
+static union acpi_object *
+iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
+{
+#ifdef CONFIG_ACPI
+ struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
+ .cmd = cmd,
+ .value = value,
+ };
+ union acpi_object arg = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(pldr_arg),
+ .buffer.pointer = (void *)&pldr_arg,
+ };
+ static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+ 0x81, 0x4F, 0x75, 0xE4,
+ 0xDD, 0x26, 0xB5, 0xFD);
+
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET))
+ return ERR_PTR(-ENODEV);
+
+ return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET,
+ &arg, &dsm_guid);
+#else
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_MODE,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset mode is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,
+ bool integrated)
+{
+ union acpi_object *res;
+ u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;
+
+ if (!integrated)
+ mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
+ DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_SET_MODE,
+ mode);
+ if (IS_ERR(res)) {
+ if (enable)
+ IWL_ERR_DEV(&pdev->dev,
+ "ACPI _DSM not available (%d), cannot do product reset\n",
+ (int)PTR_ERR(res));
+ return;
+ }
+
+ ACPI_FREE(res);
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",
+ enable ? "En" : "Dis");
+ iwl_trans_pcie_check_product_reset_mode(pdev);
+}
+
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_STATUS,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset status is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p, *ref;
+ acpi_status status;
+ int ret = -EINVAL;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
+ "_PRR", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
+ goto out;
+ }
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
+ pci_err(pdev, "Bad _PRR return type\n");
+ goto out;
+ }
+
+ ref = &p->package.elements[0];
+ if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ pci_err(pdev, "_PRR wasn't a reference\n");
+ goto out;
+ }
+
+ status = acpi_evaluate_object(ref->reference.handle,
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pci_err(pdev,
+ "Failed to call _RST on object returned by _PRR (%d)\n",
+ status);
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(buffer.pointer);
+ if (!ret) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");
+ return;
+ }
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "No BIOS support, using pci_reset_function()\n");
+#endif
+ pci_reset_function(pdev);
+}
+
struct iwl_trans_pcie_removal {
struct pci_dev *pdev;
struct work_struct work;
- bool rescan;
+ enum iwl_reset_mode mode;
+ bool integrated;
};
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
@@ -2126,14 +2274,66 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
if (!bus)
goto out;
- dev_err(&pdev->dev, "Device gone - attempting removal\n");
-
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
+ if (removal->mode == IWL_RESET_MODE_PROD_RESET) {
+ struct pci_dev *bt = NULL;
+
+ if (!removal->integrated) {
+ /* discrete devices have WiFi/BT at function 0/1 */
+ int slot = PCI_SLOT(pdev->devfn);
+ int func = PCI_FUNC(pdev->devfn);
+
+ if (func == 0)
+ bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));
+ else
+ pci_info(pdev, "Unexpected function %d\n",
+ func);
+ } else {
+ /* on integrated we have to look up by ID (same bus) */
+ static const struct pci_device_id bt_device_ids[] = {
+#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }
+ BT_DEV(0xA876), /* LNL */
+ BT_DEV(0xE476), /* PTL-P */
+ BT_DEV(0xE376), /* PTL-H */
+ BT_DEV(0xD346), /* NVL-H */
+ BT_DEV(0x6E74), /* NVL-S */
+ BT_DEV(0x4D76), /* WCL */
+ BT_DEV(0xD246), /* RZL-H */
+ BT_DEV(0x6C46), /* RZL-M */
+ {}
+ };
+ struct pci_dev *tmp = NULL;
+
+ for_each_pci_dev(tmp) {
+ if (tmp->bus != bus)
+ continue;
+
+ if (pci_match_id(bt_device_ids, tmp)) {
+ bt = tmp;
+ break;
+ }
+ }
+ }
+
+ if (bt) {
+ pci_info(bt, "Removal by WiFi due to product reset\n");
+ pci_stop_and_remove_bus_device(bt);
+ pci_dev_put(bt);
+ }
+ }
+
+ iwl_trans_pcie_set_product_reset(pdev,
+ removal->mode ==
+ IWL_RESET_MODE_PROD_RESET,
+ removal->integrated);
+ if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)
+ iwl_trans_pcie_call_reset(pdev);
+
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
- if (removal->rescan) {
+ if (removal->mode >= IWL_RESET_MODE_RESCAN) {
if (bus->parent)
bus = bus->parent;
pci_rescan_bus(bus);
@@ -2146,14 +2346,27 @@ out:
module_put(THIS_MODULE);
}
-void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
+void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
{
struct iwl_trans_pcie_removal *removal;
+ char _msg = 0, *msg = &_msg;
+
+ if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY))
+ return;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
- IWL_ERR(trans, "Device gone - scheduling removal!\n");
+ if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
+ mode = IWL_RESET_MODE_FUNC_RESET;
+ if (trans->me_present < 0)
+ msg = " instead of product reset as ME may be present";
+ else
+ msg = " instead of product reset as ME is present";
+ }
+
+ IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);
+
iwl_pcie_dump_csr(trans);
/*
@@ -2180,12 +2393,13 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
- removal->rescan = rescan;
+ removal->mode = mode;
+ removal->integrated = trans->trans_cfg->integrated;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
}
-EXPORT_SYMBOL(iwl_trans_pcie_remove);
+EXPORT_SYMBOL(iwl_trans_pcie_reset);
/*
* This version doesn't disable BHs but rather assumes they're
@@ -2250,7 +2464,8 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
iwl_trans_pcie_dump_regs(trans);
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
- iwl_trans_pcie_remove(trans, false);
+ iwl_trans_pcie_reset(trans,
+ IWL_RESET_MODE_REMOVE_ONLY);
else
iwl_write32(trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
@@ -3037,12 +3252,47 @@ static ssize_t iwl_dbgfs_rf_read(struct file *file,
strlen(trans_pcie->rf_name));
}
+static ssize_t iwl_dbgfs_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ static const char * const modes[] = {
+ [IWL_RESET_MODE_SW_RESET] = "n/a",
+ [IWL_RESET_MODE_REPROBE] = "n/a",
+ [IWL_RESET_MODE_REMOVE_ONLY] = "remove",
+ [IWL_RESET_MODE_RESCAN] = "rescan",
+ [IWL_RESET_MODE_FUNC_RESET] = "function",
+ [IWL_RESET_MODE_PROD_RESET] = "product",
+ };
+ char buf[10] = {};
+ int mode;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ mode = sysfs_match_string(modes, buf);
+ if (mode < 0)
+ return mode;
+
+ if (mode < IWL_RESET_MODE_REMOVE_ONLY)
+ return -EINVAL;
+
+ iwl_trans_pcie_reset(trans, mode);
+
+ return count;
+}
+
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);
+DEBUGFS_WRITE_FILE_OPS(reset);
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
.owner = THIS_MODULE,
@@ -3071,6 +3321,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
DEBUGFS_ADD_FILE(rf, dir, 0400);
+ DEBUGFS_ADD_FILE(reset, dir, 0200);
}
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
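
One design point in the trans.c rework is easy to miss: the reset modes form an ordered scale, so the removal worker escalates with plain comparisons — any mode at or above FUNC_RESET also triggers the platform reset call, and any mode at or above RESCAN also rescans the bus afterwards. A sketch of the idiom; the enum mirrors the ordering above, and the helpers are hypothetical stubs:

static void my_platform_reset(void) { }		/* hypothetical stubs */
static void my_remove_device(void) { }
static void my_rescan_bus(void) { }

enum my_reset_mode {		/* ordered: later entries imply more work */
	MY_RESET_MODE_SW_RESET,
	MY_RESET_MODE_REPROBE,
	MY_RESET_MODE_REMOVE_ONLY,
	MY_RESET_MODE_RESCAN,
	MY_RESET_MODE_FUNC_RESET,
	MY_RESET_MODE_PROD_RESET,
};

static void my_do_reset(enum my_reset_mode mode)
{
	if (mode >= MY_RESET_MODE_FUNC_RESET)
		my_platform_reset();

	my_remove_device();

	if (mode >= MY_RESET_MODE_RESCAN)
		my_rescan_bus();
}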
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index b1846abb99b7..401919f9fe88 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len);
if (!sgt)
return -ENOMEM;
@@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
return tfd;
out_err:
+ iwl_pcie_free_tso_pages(trans, skb, out_meta);
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
return NULL;
}
@@ -1298,7 +1300,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_cmd_queue_full(trans->op_mode);
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
idx = -ENOSPC;
goto free_dup_buf;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 1ef14340953c..7c1dd5cc084a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1449,7 +1449,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_cmd_queue_full(trans->op_mode);
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
idx = -ENOSPC;
goto free_dup_buf;
}
@@ -1853,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
* @cmd_meta: command meta to store the scatter list information for unmapping
* @hdr: output argument for TSO headers
* @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
*
* Allocate space for a scatter gather list and TSO headers and map the SKB
* using the scatter gather list. The SKB is unmapped again when the page is
@@ -1862,9 +1865,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
*/
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room)
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset)
{
struct sg_table *sgt;
+ unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+ int orig_nents;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
@@ -1872,8 +1878,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
- (skb_shinfo(skb)->nr_frags + 1) *
- sizeof(struct scatterlist),
+ n_segments * sizeof(struct scatterlist),
skb);
if (!*hdr)
return NULL;
@@ -1881,14 +1886,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
sgt->sgl = (void *)(sgt + 1);
- sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+ sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
- skb->data_len);
- if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+ if (WARN_ON_ONCE(orig_nents <= 0))
return NULL;
+ sgt->orig_nents = orig_nents;
+
/* And map the entire SKB */
if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
return NULL;
@@ -1937,7 +1943,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len + iv_len);
if (!sgt)
return -ENOMEM;
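
The iwl_pcie_prep_tso() change rides on a layout trick the kernel-doc only hints at: the TSO headers, the struct sg_table and its scatterlist array all share one buffer, with PTR_ALIGN() dropping the struct right after the header room. A sketch of just the carving arithmetic; allocation is assumed done by the caller, as the comment below states:

#include <linux/align.h>
#include <linux/scatterlist.h>

/*
 * buf must hold at least: hdr_room + __alignof__(struct sg_table) +
 * sizeof(struct sg_table) + n_segments * sizeof(struct scatterlist)
 */
static struct sg_table *my_carve_sgt(u8 *buf, unsigned int hdr_room,
				     unsigned int n_segments)
{
	struct sg_table *sgt;

	sgt = (void *)PTR_ALIGN(buf + hdr_room,
				__alignof__(struct sg_table));
	sgt->sgl = (void *)(sgt + 1);	/* scatterlist follows the struct */
	sg_init_table(sgt->sgl, n_segments);

	return sgt;
}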
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index fca3eea7ee84..a099fdaafa45 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -410,7 +410,7 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
static int
mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index e06a0622973e..f79589cafe57 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -545,7 +545,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
- (10 * HZ)) <= 0) {
+ (5 * HZ)) <= 0) {
mwifiex_dbg(adapter, ERROR,
"hs_activate_wait_q terminated\n");
return false;
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index f7f2d9a8ab0f..87512d101a91 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o wed.o
+ tx.o agg-rx.o mcu.o wed.o scan.o channel.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
new file mode 100644
index 000000000000..6a35c6ebd823
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static struct mt76_vif_link *
+mt76_alloc_mlink(struct mt76_dev *dev, struct mt76_vif_data *mvif)
+{
+ struct mt76_vif_link *mlink;
+
+ mlink = kzalloc(dev->drv->link_data_size, GFP_KERNEL);
+ if (!mlink)
+ return NULL;
+
+ mlink->mvif = mvif;
+
+ return mlink;
+}
+
+static int
+mt76_phy_update_channel(struct mt76_phy *phy,
+ struct ieee80211_chanctx_conf *conf)
+{
+ phy->radar_enabled = conf->radar_enabled;
+ phy->main_chandef = conf->def;
+ phy->chanctx = (struct mt76_chanctx *)conf->drv_priv;
+
+ return __mt76_set_channel(phy, &phy->main_chandef, false);
+}
+
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ int ret = -EINVAL;
+
+ phy = ctx->phy = dev->band_phys[conf->def.chan->band];
+ if (WARN_ON_ONCE(!phy))
+ return ret;
+
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+ if (!phy->chanctx)
+ ret = mt76_phy_update_channel(phy, conf);
+ else
+ ret = 0;
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_add_chanctx);
+
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+
+ phy = ctx->phy;
+ if (WARN_ON_ONCE(!phy))
+ return;
+
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+ if (phy->chanctx == ctx)
+ phy->chanctx = NULL;
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_remove_chanctx);
+
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+
+ if (!(changed & (IEEE80211_CHANCTX_CHANGE_WIDTH |
+ IEEE80211_CHANCTX_CHANGE_RADAR)))
+ return;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ mt76_phy_update_channel(phy, conf);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_change_chanctx);
+
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ int link_id = link_conf->link_id;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+ bool mlink_alloc = false;
+ int ret = 0;
+
+ if (dev->scan.vif == vif)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ goto out;
+
+ mlink = mt76_vif_conf_link(dev, vif, link_conf);
+ if (!mlink) {
+ mlink = mt76_alloc_mlink(dev, mvif);
+ if (!mlink) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ mlink_alloc = true;
+ }
+
+ mlink->ctx = conf;
+ ret = dev->drv->vif_link_add(phy, vif, link_conf, mlink);
+ if (ret) {
+ if (mlink_alloc)
+ kfree(mlink);
+ goto out;
+ }
+
+ if (link_conf != &vif->bss_conf)
+ rcu_assign_pointer(mvif->link[link_id], mlink);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_assign_vif_chanctx);
+
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ int link_id = link_conf->link_id;
+ struct mt76_phy *phy = ctx->phy;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->scan.vif == vif)
+ mt76_abort_scan(dev);
+
+ mutex_lock(&dev->mutex);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ goto out;
+
+ mlink = mt76_vif_conf_link(dev, vif, link_conf);
+ if (!mlink)
+ goto out;
+
+ if (link_conf != &vif->bss_conf)
+ rcu_assign_pointer(mvif->link[link_id], NULL);
+
+ dev->drv->vif_link_remove(phy, vif, link_conf, mlink);
+ mlink->ctx = NULL;
+
+ if (link_conf != &vif->bss_conf)
+ kfree_rcu(mlink, rcu_head);
+
+out:
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_unassign_vif_chanctx);
+
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct mt76_chanctx *old_ctx = (struct mt76_chanctx *)vifs->old_ctx->drv_priv;
+ struct mt76_chanctx *new_ctx = (struct mt76_chanctx *)vifs->new_ctx->drv_priv;
+ struct ieee80211_chanctx_conf *conf = vifs->new_ctx;
+ struct mt76_phy *old_phy = old_ctx->phy;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ bool update_chan;
+ int i, ret = 0;
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS)
+ phy = new_ctx->phy = dev->band_phys[conf->def.chan->band];
+ else
+ phy = new_ctx->phy;
+ if (!phy)
+ return -EINVAL;
+
+ update_chan = phy->chanctx != new_ctx;
+ if (update_chan) {
+ if (dev->scan.phy == phy)
+ mt76_abort_scan(dev);
+
+ cancel_delayed_work_sync(&phy->mac_work);
+ }
+
+ mutex_lock(&dev->mutex);
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
+ phy != old_phy && old_phy->chanctx == old_ctx)
+ old_phy->chanctx = NULL;
+
+ if (update_chan)
+ ret = mt76_phy_update_channel(phy, vifs->new_ctx);
+
+ if (ret)
+ goto out;
+
+ if (old_phy == phy)
+ goto skip_link_replace;
+
+ for (i = 0; i < n_vifs; i++) {
+ mlink = mt76_vif_conf_link(dev, vifs[i].vif, vifs[i].link_conf);
+ if (!mlink)
+ continue;
+
+ dev->drv->vif_link_remove(old_phy, vifs[i].vif,
+ vifs[i].link_conf, mlink);
+
+ ret = dev->drv->vif_link_add(phy, vifs[i].vif,
+ vifs[i].link_conf, mlink);
+ if (ret)
+ goto out;
+
+ }
+
+skip_link_replace:
+ for (i = 0; i < n_vifs; i++) {
+ mlink = mt76_vif_conf_link(dev, vifs[i].vif, vifs[i].link_conf);
+ if (!mlink)
+ continue;
+
+ mlink->ctx = vifs->new_ctx;
+ }
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_switch_vif_chanctx);
+
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_dev *dev = phy->dev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+ mlink = mt76_dereference(mvif->link[i], dev);
+ if (!mlink)
+ continue;
+
+ if (mt76_vif_link_phy(mlink) == phy)
+ return mlink;
+ }
+
+ if (!dev->drv->vif_link_add)
+ return ERR_PTR(-EINVAL);
+
+ mlink = mt76_alloc_mlink(dev, mvif);
+ if (!mlink)
+ return ERR_PTR(-ENOMEM);
+
+ mlink->offchannel = true;
+ ret = dev->drv->vif_link_add(phy, vif, &vif->bss_conf, mlink);
+ if (ret) {
+ kfree(mlink);
+ return ERR_PTR(ret);
+ }
+
+ return mlink;
+}
+
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct mt76_vif_link *mlink)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
+ return;
+
+ dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
+ kfree(mlink);
+}
+
+static void mt76_roc_complete(struct mt76_phy *phy)
+{
+ struct mt76_vif_link *mlink = phy->roc_link;
+
+ if (!phy->roc_vif)
+ return;
+
+ if (mlink)
+ mlink->mvif->roc_phy = NULL;
+ if (phy->main_chandef.chan)
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
+ phy->roc_vif = NULL;
+ phy->roc_link = NULL;
+ ieee80211_remain_on_channel_expired(phy->hw);
+}
+
+void mt76_roc_complete_work(struct work_struct *work)
+{
+ struct mt76_phy *phy = container_of(work, struct mt76_phy, roc_work.work);
+ struct mt76_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mutex);
+ mt76_roc_complete(phy);
+ mutex_unlock(&dev->mutex);
+}
+
+void mt76_abort_roc(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ cancel_delayed_work_sync(&phy->roc_work);
+
+ mutex_lock(&dev->mutex);
+ mt76_roc_complete(phy);
+ mutex_unlock(&dev->mutex);
+}
+
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct cfg80211_chan_def chandef = {};
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ int ret = 0;
+
+ phy = dev->band_phys[chan->band];
+ if (!phy)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+
+ if (phy->roc_vif || dev->scan.phy == phy) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mlink = mt76_get_vif_phy_link(phy, vif);
+ if (IS_ERR(mlink)) {
+ ret = PTR_ERR(mlink);
+ goto out;
+ }
+
+ mlink->mvif->roc_phy = phy;
+ phy->roc_vif = vif;
+ phy->roc_link = mlink;
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ mt76_set_channel(phy, &chandef, true);
+ ieee80211_ready_on_channel(hw);
+ ieee80211_queue_delayed_work(phy->hw, &phy->roc_work,
+ msecs_to_jiffies(duration));
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_remain_on_channel);
+
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_phy *phy = mvif->roc_phy;
+
+ if (!phy)
+ return 0;
+
+ mt76_abort_roc(phy);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_cancel_remain_on_channel);
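
The handlers exported by the new channel.c match the signatures of the corresponding mac80211 callbacks, so a mt76 sub-driver can plug them straight into its ops table. A sketch of that wiring — this shows how the helpers are meant to be consumed, it is not code from this patch, and the other mandatory ops are elided:

#include <net/mac80211.h>
#include "mt76.h"

static const struct ieee80211_ops my_mt76_ops = {
	/* ... tx/start/stop/config etc. elided ... */
	.add_chanctx = mt76_add_chanctx,
	.remove_chanctx = mt76_remove_chanctx,
	.change_chanctx = mt76_change_chanctx,
	.assign_vif_chanctx = mt76_assign_vif_chanctx,
	.unassign_vif_chanctx = mt76_unassign_vif_chanctx,
	.switch_vif_chanctx = mt76_switch_vif_chanctx,
	.remain_on_channel = mt76_remain_on_channel,
	.cancel_remain_on_channel = mt76_cancel_remain_on_channel,
};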
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 5f46d6daeaa7..844af16ee551 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -631,7 +631,8 @@ free_skb:
return ret;
}
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->ndesc)
return 0;
- spin_lock_bh(&q->lock);
-
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf = {};
enum dma_data_direction dir;
@@ -674,6 +673,19 @@ done:
if (frames || mt76_queue_is_wed_rx(q))
mt76_dma_kick_queue(dev, q);
+ return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
+{
+ int frames;
+
+ if (!q->ndesc)
+ return 0;
+
+ spin_lock_bh(&q->lock);
+ frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
return frames;
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ mt76_dma_rx_fill_buf(dev, q, false);
}
static void
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
mt76_for_each_q_rx(dev, i) {
netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
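
The dma.c split is the common __unlocked-helper refactor: reset and init paths, which either already own the queue or run before it is live, call mt76_dma_rx_fill_buf() directly, while everyone else keeps the locking wrapper. Reduced to a sketch:

#include <linux/spinlock.h>

struct my_queue {
	spinlock_t lock;
	int queued, ndesc;
};

/* caller holds q->lock, or the queue is not live yet */
static int my_rx_fill_buf(struct my_queue *q)
{
	int frames = 0;

	while (q->queued < q->ndesc - 1) {
		/* ... refill one descriptor ... */
		q->queued++;
		frames++;
	}

	return frames;
}

static int my_rx_fill(struct my_queue *q)
{
	int frames;

	spin_lock_bh(&q->lock);
	frames = my_rx_fill_buf(q);
	spin_unlock_bh(&q->lock);

	return frames;
}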
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 9d5561f44134..508b472408c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -411,13 +411,16 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
}
if (found) {
- phy->chandef.chan = &sband->channels[0];
+ cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
+ NL80211_CHAN_HT20);
phy->chan_state = &msband->chan[0];
+ phy->dev->band_phys[band] = phy;
return;
}
sband->n_channels = 0;
- phy->hw->wiphy->bands[band] = NULL;
+ if (phy->hw->wiphy->bands[band] == sband)
+ phy->hw->wiphy->bands[band] = NULL;
}
static int
@@ -428,6 +431,10 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
INIT_LIST_HEAD(&phy->tx_list);
spin_lock_init(&phy->tx_lock);
+ INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);
+
+ if ((void *)phy != hw->priv)
+ return 0;
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
@@ -481,6 +488,28 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
}
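+/* Allocate a per-radio phy for a multi-radio device; it shares the
+ * primary phy's ieee80211_hw instead of registering its own.
+ */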
struct mt76_phy *
+mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+ u8 band_idx)
+{
+ struct ieee80211_hw *hw = dev->phy.hw;
+ unsigned int phy_size;
+ struct mt76_phy *phy;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->dev = dev;
+ phy->hw = hw;
+ phy->priv = (void *)phy + phy_size;
+ phy->band_idx = band_idx;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
+
+struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops, u8 band_idx)
{
@@ -552,9 +581,11 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
- ret = ieee80211_register_hw(phy->hw);
- if (ret)
- return ret;
+ if ((void *)phy == phy->hw->priv) {
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+ }
set_bit(MT76_STATE_REGISTERED, &phy->state);
phy->dev->phys[phy->band_idx] = phy;
@@ -690,6 +721,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->txwi_cache);
INIT_LIST_HEAD(&dev->rxwi_cache);
dev->token_size = dev->drv->token_size;
+ INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@@ -712,7 +744,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
int ret;
dev_set_drvdata(dev->dev, dev);
- mt76_wcid_init(&dev->global_wcid);
+ mt76_wcid_init(&dev->global_wcid, phy->band_idx);
ret = mt76_phy_init(phy, hw);
if (ret)
return ret;
@@ -784,6 +816,22 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
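+/* Resolve the phy a vif currently operates on: the assigned channel
+ * context on multi-radio wiphys, otherwise the hw's primary phy.
+ */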
+static struct mt76_phy *
+mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_chanctx *ctx;
+
+ if (!hw->wiphy->n_radio)
+ return hw->priv;
+
+ if (!mlink->ctx)
+ return NULL;
+
+ ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
+ return ctx->phy;
+}
+
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
struct sk_buff *skb = phy->rx_amsdu[q].head;
@@ -929,16 +977,13 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
- bool offchannel)
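+/* caller must hold dev->mutex */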
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
int timeout = HZ / 5;
int ret;
- cancel_delayed_work_sync(&phy->mac_work);
-
- mutex_lock(&dev->mutex);
set_bit(MT76_RESET, &phy->state);
mt76_worker_disable(&dev->tx_worker);
@@ -954,17 +999,30 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
phy->offchannel = offchannel;
if (!offchannel)
- phy->main_chan = chandef->chan;
+ phy->main_chandef = *chandef;
- if (chandef->chan != phy->main_chan)
+ if (chandef->chan != phy->main_chandef.chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
- mt76_worker_enable(&dev->tx_worker);
ret = dev->drv->set_channel(phy);
clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_enable(&dev->tx_worker);
mt76_worker_schedule(&dev->tx_worker);
+ return ret;
+}
+
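+/* locked wrapper around __mt76_set_channel() */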
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
+{
+ struct mt76_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ ret = __mt76_set_channel(phy, chandef, offchannel);
mutex_unlock(&dev->mutex);
return ret;
@@ -976,37 +1034,59 @@ int mt76_update_channel(struct mt76_phy *phy)
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ phy->radar_enabled = hw->conf.radar_enabled;
+
return mt76_set_channel(phy, chandef, offchannel);
}
EXPORT_SYMBOL_GPL(mt76_update_channel);
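+/* Map a cumulative survey index onto one of the phy's sbands, making
+ * *idx relative to the returned band; returns NULL once exhausted.
+ */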
+static struct mt76_sband *
+mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
+{
+ if (*idx < phy->sband_2g.sband.n_channels)
+ return &phy->sband_2g;
+
+ *idx -= phy->sband_2g.sband.n_channels;
+ if (*idx < phy->sband_5g.sband.n_channels)
+ return &phy->sband_5g;
+
+ *idx -= phy->sband_5g.sband.n_channels;
+ if (*idx < phy->sband_6g.sband.n_channels)
+ return &phy->sband_6g;
+
+ *idx -= phy->sband_6g.sband.n_channels;
+ return NULL;
+}
+
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- struct mt76_sband *sband;
+ struct mt76_sband *sband = NULL;
struct ieee80211_channel *chan;
struct mt76_channel_state *state;
+ int phy_idx = 0;
int ret = 0;
mutex_lock(&dev->mutex);
- if (idx == 0 && dev->drv->update_survey)
- mt76_update_survey(phy);
-
- if (idx >= phy->sband_2g.sband.n_channels +
- phy->sband_5g.sband.n_channels) {
- idx -= (phy->sband_2g.sband.n_channels +
- phy->sband_5g.sband.n_channels);
- sband = &phy->sband_6g;
- } else if (idx >= phy->sband_2g.sband.n_channels) {
- idx -= phy->sband_2g.sband.n_channels;
- sband = &phy->sband_5g;
- } else {
- sband = &phy->sband_2g;
+
+ for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
+ sband = NULL;
+ phy = dev->phys[phy_idx];
+ if (!phy || phy->hw != hw)
+ continue;
+
+ sband = mt76_get_survey_sband(phy, &idx);
+
+ if (idx == 0 && phy->dev->drv->update_survey)
+ mt76_update_survey(phy);
+
+ if (sband || !hw->wiphy->n_radio)
+ break;
}
- if (idx >= sband->sband.n_channels) {
+ if (!sband) {
ret = -ENOENT;
goto out;
}
@@ -1021,7 +1101,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (state->noise)
survey->filled |= SURVEY_INFO_NOISE_DBM;
- if (chan == phy->main_chan) {
+ if (chan == phy->main_chandef.chan) {
survey->filled |= SURVEY_INFO_IN_USE;
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
@@ -1462,21 +1542,20 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
- if (phy->band_idx == MT_BAND1)
- mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
- wcid->phy_idx = phy->band_idx;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+ phy->num_sta++;
- mt76_wcid_init(wcid);
+ mt76_wcid_init(wcid, phy->band_idx);
out:
mutex_unlock(&dev->mutex);
return ret;
}
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int i, idx = wcid->idx;
@@ -1489,16 +1568,18 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_wcid_cleanup(dev, wcid);
mt76_wcid_mask_clear(dev->wcid_mask, idx);
- mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
+ phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt76_dev *dev = phy->dev;
+
mutex_lock(&dev->mutex);
- __mt76_sta_remove(dev, vif, sta);
+ __mt76_sta_remove(phy, vif, sta);
mutex_unlock(&dev->mutex);
}
@@ -1511,13 +1592,17 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = phy->dev;
enum mt76_sta_event ev;
+ phy = mt76_vif_phy(hw, vif);
+ if (!phy)
+ return -EINVAL;
+
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
- mt76_sta_remove(dev, vif, sta);
+ mt76_sta_remove(phy, vif, sta);
if (!dev->drv->sta_event)
return 0;
@@ -1553,14 +1638,19 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
-void mt76_wcid_init(struct mt76_wcid *wcid)
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
+ wcid->hw_key_idx = -1;
+ wcid->phy_idx = band_idx;
+
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
+
+ INIT_LIST_HEAD(&wcid->poll_list);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
@@ -1595,13 +1685,29 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
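+/* queue a wcid for stats polling unless an MCU reset is in progress */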
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
+{
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+ return;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&wcid->poll_list))
+ list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
- struct mt76_phy *phy = hw->priv;
- int n_chains = hweight16(phy->chainmask);
- int delta = mt76_tx_power_nss_delta(n_chains);
+ struct mt76_phy *phy = mt76_vif_phy(hw, vif);
+ int n_chains, delta;
+
+ if (!phy)
+ return -EINVAL;
+ n_chains = hweight16(phy->chainmask);
+ delta = mt76_tx_power_nss_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
@@ -1776,10 +1882,14 @@ int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ int i;
mutex_lock(&dev->mutex);
- *tx_ant = phy->antenna_mask;
- *rx_ant = phy->antenna_mask;
+ *tx_ant = 0;
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
+ if (dev->phys[i] && dev->phys[i]->hw == hw)
+ *tx_ant |= dev->phys[i]->chainmask;
+ *rx_ant = *tx_ant;
mutex_unlock(&dev->mutex);
return 0;
@@ -1808,30 +1918,6 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy,
- struct ieee80211_vif *vif, int rateidx)
-{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = mvif->ctx ?
- &mvif->ctx->def :
- &phy->chandef;
- int offset = 0;
-
- if (chandef->chan->band != NL80211_BAND_2GHZ)
- offset = 4;
-
- /* pick the lowest rate for hidden nodes */
- if (rateidx < 0)
- rateidx = 0;
-
- rateidx += offset;
- if (rateidx >= ARRAY_SIZE(mt76_rates))
- rateidx = offset;
-
- return mt76_rates[rateidx].hw_value;
-}
-EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
-
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
struct mt76_sta_stats *stats, bool eht)
{
@@ -1892,7 +1978,7 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
test_bit(MT76_SCANNING, &phy->state))
return MT_DFS_STATE_DISABLED;
- if (!hw->conf.radar_enabled) {
+ if (!phy->radar_enabled) {
if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
(phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return MT_DFS_STATE_ACTIVE;
@@ -1906,3 +1992,15 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
+
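+/* drop the default link and abort any scan/ROC still using this vif */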
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ rcu_assign_pointer(mvif->link[0], NULL);
+ mt76_abort_scan(dev);
+ if (mvif->roc_phy)
+ mt76_abort_roc(mvif->roc_phy);
+}
+EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 0b75a45ad2e8..132148f7b107 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -50,6 +50,8 @@ struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;
+struct mt76_chanctx;
+struct mt76_vif_link;
struct mt76_reg_pair {
u32 reg;
@@ -497,6 +499,8 @@ struct mt76_driver_ops {
u16 token_size;
u8 mcs_rates;
+ unsigned int link_data_size;
+
void (*update_survey)(struct mt76_phy *phy);
int (*set_channel)(struct mt76_phy *phy);
@@ -528,6 +532,15 @@ struct mt76_driver_ops {
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+
+ int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
+
+ void (*vif_link_remove)(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
};
struct mt76_channel_state {
@@ -636,6 +649,7 @@ struct mt76_sdio {
u8 hw_ver;
wait_queue_head_t wait;
+ int pse_mcu_quota_max;
struct {
int pse_data_quota;
int ple_data_quota;
@@ -753,7 +767,7 @@ struct mt76_testmode_data {
} rx_stats;
};
-struct mt76_vif {
+struct mt76_vif_link {
u8 idx;
u8 omac_idx;
u8 band_idx;
@@ -763,7 +777,19 @@ struct mt76_vif {
u8 basic_rates_idx;
u8 mcast_rates_idx;
u8 beacon_rates_idx;
+ bool offchannel;
struct ieee80211_chanctx_conf *ctx;
+ struct mt76_wcid *wcid;
+ struct mt76_vif_data *mvif;
+ struct rcu_head rcu_head;
+};
+
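+/* per-interface state shared by all links of a vif */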
+struct mt76_vif_data {
+ struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct mt76_phy *roc_phy;
+ u16 valid_links;
+ u8 deflink_id;
};
struct mt76_phy {
@@ -772,6 +798,7 @@ struct mt76_phy {
void *priv;
unsigned long state;
+ unsigned int num_sta;
u8 band_idx;
spinlock_t tx_lock;
@@ -779,8 +806,15 @@ struct mt76_phy {
struct mt76_queue *q_tx[__MT_TXQ_MAX];
struct cfg80211_chan_def chandef;
- struct ieee80211_channel *main_chan;
+ struct cfg80211_chan_def main_chandef;
bool offchannel;
+ bool radar_enabled;
+
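+ /* remain-on-channel state, completed from roc_work */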
+ struct delayed_work roc_work;
+ struct ieee80211_vif *roc_vif;
+ struct mt76_vif_link *roc_link;
+
+ struct mt76_chanctx *chanctx;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -825,6 +859,7 @@ struct mt76_phy {
struct mt76_dev {
struct mt76_phy phy; /* must be first */
struct mt76_phy *phys[__MT_MAX_BAND];
+ struct mt76_phy *band_phys[NUM_NL80211_BANDS];
struct ieee80211_hw *hw;
@@ -880,7 +915,6 @@ struct mt76_dev {
spinlock_t status_lock;
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
- u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u64 vif_mask;
@@ -909,6 +943,16 @@ struct mt76_dev {
u32 rxfilter;
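+ /* state of the software-driven hw scan, advanced from scan_work */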
+ struct delayed_work scan_work;
+ struct {
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel *chan;
+ struct ieee80211_vif *vif;
+ struct mt76_vif_link *mlink;
+ struct mt76_phy *phy;
+ int chan_idx;
+ } scan;
+
#ifdef CONFIG_NL80211_TESTMODE
const struct mt76_testmode_ops *test_ops;
struct {
@@ -1036,6 +1080,10 @@ struct mt76_ethtool_worker_info {
int sta_count;
};
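+/* driver private data attached to an ieee80211_chanctx_conf */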
+struct mt76_chanctx {
+ struct mt76_phy *phy;
+};
+
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
@@ -1156,6 +1204,10 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
if ((dev)->q_rx[i].ndesc)
+
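+/* RCU dereference that is safe under dev->mutex */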
+#define mt76_dereference(p, dev) \
+ rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))
+
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
@@ -1165,6 +1217,8 @@ void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);
+struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+ u8 band_idx);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops,
u8 band_idx);
@@ -1191,8 +1245,6 @@ int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base, void *wed, u32 flags);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy,
- struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base, void *wed,
u32 flags)
@@ -1423,15 +1475,15 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm);
+ unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
@@ -1447,11 +1499,38 @@ void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck);
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req);
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed);
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type);
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1497,8 +1576,18 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
bool offchannel);
+void mt76_scan_work(struct work_struct *work);
+void mt76_abort_scan(struct mt76_dev *dev);
+void mt76_roc_complete_work(struct work_struct *work);
+void mt76_abort_roc(struct mt76_phy *phy);
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+ struct ieee80211_vif *vif);
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct mt76_vif_link *mlink);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -1734,7 +1823,54 @@ mt76_token_put(struct mt76_dev *dev, int token)
return txwi;
}
-void mt76_wcid_init(struct mt76_wcid *wcid);
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);
+
+static inline void
+mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+
+ mlink->mvif = mvif;
+ rcu_assign_pointer(mvif->link[0], mlink);
+}
+
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+
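+/* look up the link data for a link id; requires dev->mutex */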
+static inline struct mt76_vif_link *
+mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ return mt76_dereference(mvif->link[link_id], dev);
+}
+
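+/* same as mt76_vif_link(), but keyed by bss_conf; the default link
+ * lives directly in vif->drv_priv
+ */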
+static inline struct mt76_vif_link *
+mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+
+ if (link_conf == &vif->bss_conf)
+ return mlink;
+
+ return mt76_dereference(mvif->link[link_conf->link_id], dev);
+}
+
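+/* phy bound to the link's channel context, if one is assigned */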
+static inline struct mt76_phy *
+mt76_vif_link_phy(struct mt76_vif_link *mlink)
+{
+ struct mt76_chanctx *ctx;
+
+ if (!mlink->ctx)
+ return NULL;
+
+ ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
+
+ return ctx->phy;
+}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index dc8a77f0a1cc..413973d05b43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1277,12 +1277,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
msta = container_of(wcid, struct mt7603_sta, wcid);
sta = wcid_to_sta(wcid);
-
- if (list_empty(&msta->wcid.poll_list)) {
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
- }
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1484,14 +1479,13 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
napi_enable(&dev->mt76.napi[1]);
+
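+ /* all queues are enabled; schedule NAPI with BHs disabled */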
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.napi[0]);
napi_schedule(&dev->mt76.napi[1]);
local_bh_enable();
@@ -1793,7 +1787,7 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
mt7603_cca_stats_reset(dev);
- min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!min_signal) {
dev->sensitivity = 0;
dev->last_cca_adj = jiffies;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 574f74ad325d..3e8b1ec76169 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -66,11 +66,9 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->mt76.vif_mask |= BIT_ULL(mvif->idx);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.vif = mvif;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, 0);
eth_broadcast_addr(bc_addr);
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 7ba789834e8d..3ca4fae7c4b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -387,11 +387,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_sta *msta;
msta = container_of(status->wcid, struct mt7615_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
@@ -734,7 +730,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
u16 seqno = 0;
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -1514,11 +1510,7 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
msta = container_of(wcid, struct mt7615_sta, wcid);
sta = wcid_to_sta(wcid);
-
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 376975388007..2e7b05eeef7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -209,6 +209,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mvif->mt76.band_idx = ext_phy;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wcid = &mvif->sta.wcid;
if (ext_phy)
mvif->mt76.wmm_idx += 2;
@@ -224,9 +225,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, mvif->mt76.band_idx);
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -463,7 +462,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
int err;
@@ -1249,7 +1248,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
phy->mt76);
if (!mt7615_dev_running(dev))
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
mt7615_mutex_release(dev);
@@ -1271,7 +1270,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
if (!running) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
if (err < 0) {
mt7615_mutex_release(dev);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 96e34277fece..b8fcd4eb3fbb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -865,8 +865,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
mvif->sta_added = true;
}
conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- conn_state, new_entry);
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, &vif->bss_conf,
+ link_sta, conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1113,7 +1113,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
+ return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, &mvif->mt76,
&mvif->sta.wcid, enable);
}
@@ -1700,7 +1700,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
};
int ret;
- dev->mt76.mcu_ops = &mt7615_mcu_ops,
+ dev->mt76.mcu_ops = &mt7615_mcu_ops;
ret = mt7615_mcu_drv_pmctrl(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 530da48ce3ea..9bdd29e8d25e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -139,7 +139,7 @@ struct mt7615_sta {
};
struct mt7615_vif {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt7615_sta sta;
bool sta_added;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 9f43e673518b..68010e27f065 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -83,7 +83,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev);
if (hif_suspend) {
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
return err;
}
@@ -131,7 +131,7 @@ restore:
}
napi_enable(&mdev->tx_napi);
if (hif_suspend)
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err;
}
@@ -164,18 +164,22 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
+
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index fbb1181c58ff..c2e4e6aabd9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -48,7 +48,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
txp->bss_idx = mvif->idx;
}
@@ -262,12 +262,14 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index aebfc4576aa4..f56038cd4d3a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -191,7 +191,7 @@ static int mt7663s_suspend(struct device *dev)
mt7615_firmware_offload(mdev)) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true, true);
if (err < 0)
return err;
}
@@ -230,7 +230,7 @@ static int mt7663s_resume(struct device *dev)
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev))
- err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index b0094205ba95..a7b8acb2da83 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -147,7 +147,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
- dev->mt76.mcu_ops = &mt7663s_mcu_ops,
+ dev->mt76.mcu_ops = &mt7663s_mcu_ops;
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 5020af52c68c..4aa9fa1c4a23 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -225,7 +225,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
mt7615_firmware_offload(dev)) {
int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
if (err < 0)
return err;
}
@@ -253,7 +253,7 @@ static int mt7663u_resume(struct usb_interface *intf)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index a8b1a0f8b2d7..33c01f8ce8e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -72,7 +72,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
};
int ret;
- dev->mt76.mcu_ops = &mt7663u_mcu_ops,
+ dev->mt76.mcu_ops = &mt7663u_mcu_ops;
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 445d0f0ab779..455979476d11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -405,7 +405,7 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
mutex_unlock(&dev->mutex);
}
-void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss);
+void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss, enum nl80211_band band);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
int ring_base, void *wed, u32 flags);
@@ -427,7 +427,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
bool beacon, bool mcast);
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
index 92ad1ecf6c9d..2d300948308d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
@@ -231,7 +231,8 @@ void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
EHT_PREP(DATA0_PE_DISAMBIGUITY_OM, PE_DISAMBIG, rxv[5]) |
EHT_PREP(DATA0_LDPC_EXTRA_SYM_OM, LDPC_EXT_SYM, rxv[4]);
- eht->data[7] |= le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+ /* the radiotap EHT NSS fields are zero-based, matching the EHT-SIG encoding */
+ eht->data[7] |= le32_encode_bits(status->nss - 1, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
eht->user_info[0] |=
EHT_BITS(USER_INFO_MCS_KNOWN) |
@@ -240,7 +241,7 @@ void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
EHT_BITS(USER_INFO_BEAMFORMING_KNOWN_O) |
EHT_BITS(USER_INFO_DATA_FOR_USER) |
le32_encode_bits(status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
- le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+ le32_encode_bits(status->nss - 1, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
eht->user_info[0] |= EHT_BITS(USER_INFO_BEAMFORMING_O);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index a3db65254e37..e9ac8a7317a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -9,10 +9,13 @@
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
IEEE80211_RADIOTAP_HE_##f)
-void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss)
+void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss, enum nl80211_band band)
{
static const u8 ppet16_ppet8_ru3_ru0[] = { 0x1c, 0xc7, 0x71 };
- u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
+ u8 i, ppet_bits, ppet_size, ru_bit_mask = 0xf;
+
+ if (band == NL80211_BAND_2GHZ)
+ ru_bit_mask = 0x3;
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -291,27 +294,28 @@ EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
})
u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = mt76_vif_conf_link(mphy->dev, conf->vif, conf);
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &mphy->chandef;
u8 nss = 0, mode = 0, band = chandef->chan->band;
int rateidx = 0, mcast_rate;
+ int offset = 0;
- if (!vif)
+ if (!conf)
goto legacy;
if (is_mt7921(mphy->dev)) {
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(conf->basic_rates) - 1;
goto legacy;
}
if (beacon) {
struct cfg80211_bitrate_mask *mask;
- mask = &vif->bss_conf.beacon_tx_rate;
+ mask = &conf->beacon_tx_rate;
__bitrate_mask_check(he_mcs, HE_SU);
__bitrate_mask_check(vht_mcs, VHT);
@@ -323,14 +327,25 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
}
}
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = conf->mcast_rate[band];
if (mcast && mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(conf->basic_rates) - 1;
legacy:
- rateidx = mt76_calculate_default_rate(mphy, vif, rateidx);
+ if (band != NL80211_BAND_2GHZ)
+ offset = 4;
+
+ /* pick the lowest rate for hidden nodes */
+ if (rateidx < 0)
+ rateidx = 0;
+
+ rateidx += offset;
+ if (rateidx >= ARRAY_SIZE(mt76_rates))
+ rateidx = offset;
+
+ rateidx = mt76_rates[rateidx].hw_value;
mode = rateidx >> 8;
rateidx &= GENMASK(7, 0);
out:
@@ -493,7 +508,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
bool amsdu_en = wcid->amsdu;
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -569,7 +584,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
- u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, &vif->bss_conf, beacon,
multicast);
u32 val = MT_TXD6_FIXED_BW;
@@ -1162,11 +1177,7 @@ void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t,
if (wcid && wcid->sta) {
sta = container_of((void *)wcid, struct ieee80211_sta,
drv_priv);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&wcid->poll_list))
- list_add_tail(&wcid->poll_list,
- &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ mt76_wcid_add_poll(dev, wcid);
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 864246f94088..f30cf9e71610 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
u8 bss_idx;
u8 ps_state; /* 0: device awake
@@ -232,7 +232,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_beacon_loss_event *event = priv;
if (mvif->idx != event->bss_idx)
@@ -273,7 +273,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
@@ -329,7 +329,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
u8 omac_idx = mvif->omac_idx;
struct bss_info_omac *omac;
struct tlv *tlv;
@@ -369,10 +369,11 @@ void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_link_sta *link_sta,
int conn_state, bool newly)
{
+ struct ieee80211_vif *vif = link_conf->vif;
struct sta_rec_basic *basic;
struct tlv *tlv;
int conn_type;
@@ -390,8 +391,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- !is_zero_ether_addr(vif->bss_conf.bssid)) {
- memcpy(basic->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ !is_zero_ether_addr(link_conf->bssid)) {
+ memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
eth_broadcast_addr(basic->peer_addr);
@@ -497,7 +498,7 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
@@ -545,7 +546,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct wtbl_generic *generic;
struct wtbl_rx *rx;
struct wtbl_spe *spe;
@@ -849,7 +850,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_vif *vif,
u8 rcpi, u8 sta_state)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &mphy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -1041,7 +1042,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
- struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
struct ieee80211_link_sta *link_sta;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
@@ -1049,6 +1050,9 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct sk_buff *skb;
int conn_state;
+ if (!info->link_conf)
+ info->link_conf = &info->vif->bss_conf;
+
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1057,7 +1061,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
@@ -1137,10 +1141,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
struct ieee80211_bss_conf *bss_conf,
+ struct mt76_vif_link *mvif,
struct mt76_wcid *wcid,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -1202,6 +1206,9 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
case NL80211_IFTYPE_STATION:
basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_P2P_GO);
+ break;
case NL80211_IFTYPE_ADHOC:
basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
break;
@@ -1263,7 +1270,7 @@ int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update);
-int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx)
{
@@ -1364,7 +1371,7 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_bss_conf *conf,
enum nl80211_band band)
{
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -1375,9 +1382,9 @@ u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
mode |= PHY_MODE_AX_6G;
sband = phy->hw->wiphy->bands[band];
- eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, conf->vif->type);
- if (!eht_cap || !eht_cap->has_eht || !vif->bss_conf.eht_support)
+ if (!eht_cap || !eht_cap->has_eht || !conf->eht_support)
return mode;
switch (band) {
@@ -1401,7 +1408,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = mvif->ctx ?
&mvif->ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -1450,7 +1457,7 @@ mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}
-int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_chanctx_conf *ctx)
{
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
@@ -1538,7 +1545,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
bool enable,
struct ieee80211_chanctx_conf *ctx)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_dev *mdev = phy->dev;
@@ -1664,7 +1671,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_scan_request *sreq = &scan_req->req;
int n_ssids = 0, err, i, duration;
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
@@ -1770,7 +1777,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_hw_scan);
int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
u8 seq_num;
u8 is_ext_channel;
@@ -1796,7 +1803,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *sreq)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_sched_scan_req *req;
@@ -2208,7 +2215,7 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_bss_conf *info)
{
struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
struct mt76_phy *phy = hw->priv;
struct {
@@ -2318,7 +2325,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_gtk_rekey_tlv *gtk_tlv;
struct mt76_phy *phy = hw->priv;
struct sk_buff *skb;
@@ -2359,7 +2366,7 @@ static int
mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2385,7 +2392,7 @@ int
mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2414,7 +2421,7 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
bool enable, u8 mdtim,
bool wow_suspend)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
@@ -2445,7 +2452,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
u8 index, bool enable,
struct cfg80211_pkt_pattern *pattern)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_wow_pattern_tlv *ptlv;
struct sk_buff *skb;
struct req_hdr {
@@ -2477,7 +2484,7 @@ int
mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
bool suspend, struct cfg80211_wowlan *wowlan)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -2527,7 +2534,7 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl);
-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp)
{
struct {
struct {
@@ -2559,7 +2566,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
req.hdr.hif_type = 0;
return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req,
- sizeof(req), true);
+ sizeof(req), wait_resp);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
@@ -2686,7 +2693,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct sk_buff *skb;
int ret;
@@ -2708,7 +2715,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif_link *mvif)
{
struct bss_info_ext_bss *ext;
int ext_bss_idx, tsf_offset;
@@ -2732,7 +2739,7 @@ int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
struct bss_info_basic *bss;
struct tlv *tlv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 1b0e80dfc346..43237e518373 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -1043,12 +1043,14 @@ enum {
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
MCU_EXT_EVENT_WA_TX_STAT = 0x74,
MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
+ MCU_EXT_EVENT_WF_RF_PIN_CTRL = 0x9a,
MCU_EXT_EVENT_MURU_CTRL = 0x9f,
};
/* unified event table */
enum {
MCU_UNI_EVENT_RESULT = 0x01,
+ MCU_UNI_EVENT_HIF_CTRL = 0x03,
MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
MCU_UNI_EVENT_ACCESS_REG = 0x6,
MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
@@ -1251,6 +1253,7 @@ enum {
MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
+ MCU_EXT_CMD_WF_RF_PIN_CTRL = 0xbd,
};
enum {
@@ -1756,6 +1759,7 @@ struct mt76_sta_cmd_info {
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
bool offload_fw;
bool enable;
@@ -1876,10 +1880,10 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
-__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid, int len);
static inline struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct mt76_wcid *wcid)
{
return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
@@ -1901,7 +1905,7 @@ mt76_connac_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_link_sta *link_sta,
int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
@@ -1938,13 +1942,14 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
bool enable, bool tx);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
struct ieee80211_bss_conf *bss_conf,
+ struct mt76_vif_link *mvif,
struct mt76_wcid *wcid,
bool enable);
-int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
@@ -1975,7 +1980,7 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
+ struct mt76_vif_link *vif,
struct ieee80211_bss_conf *info);
int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
bool suspend);
@@ -1988,7 +1993,7 @@ int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
struct ieee80211_vif *vif,
bool enable, u8 mdtim,
bool wow_suspend);
-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
+int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_sta_state_dp(struct mt76_dev *dev,
@@ -2014,7 +2019,7 @@ mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band,
struct ieee80211_link_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_bss_conf *conf,
enum nl80211_band band);
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
@@ -2022,7 +2027,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
-void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif_link *mvif);
void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif);
int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 1eb955f3ca13..b456ccd00d58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -282,14 +282,16 @@ static int mt76x0e_resume(struct pci_dev *pdev)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
mt76_queue_rx_reset(dev, i);
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
-
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
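
The same reordering recurs across this series (the watchdog reset, MAC restart, and every resume path below): napi_enable() may no longer be called with bottom halves disabled, so enabling and scheduling are split into two passes. A minimal sketch of the pattern the hunks converge on, assuming the usual mt76 queue iterators:

	static void example_resume_napi(struct mt76_dev *mdev)
	{
		int i;

		/* pass 1: enable outside any BH-off section */
		mt76_for_each_q_rx(mdev, i)
			napi_enable(&mdev->napi[i]);
		napi_enable(&mdev->tx_napi);

		/* pass 2: kick everything inside one BH-off section */
		local_bh_disable();
		mt76_for_each_q_rx(mdev, i)
			napi_schedule(&mdev->napi[i]);
		napi_schedule(&mdev->tx_napi);
		local_bh_enable();
	}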
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index d543ef3de65b..ec554a059216 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -1071,7 +1071,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 4a49a3036a46..a82c75ba26e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -423,7 +423,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
priv = msta->vif;
vif = container_of(priv, struct ieee80211_vif, drv_priv);
- __mt76_sta_remove(&dev->mt76, vif, sta);
+ __mt76_sta_remove(&dev->mphy, vif, sta);
memset(msta, 0, sizeof(*msta));
}
@@ -504,12 +504,14 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_worker_enable(&dev->mt76.tx_worker);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 0e1ede9314d8..4840d0b500b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -264,8 +264,8 @@ void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
};
dev->beacon_ops = &beacon_ops;
- hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ hrtimer_setup(&dev->pre_tbtt_timer, mt76x02u_pre_tbtt_interrupt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
mt76x02_init_beacon_config(dev);
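
This is part of the tree-wide conversion to the combined initializer; a sketch of the equivalence, assuming the hrtimer_setup() helper from recent kernels:

	/* old two-step form left a window with no callback assigned */
	hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;

	/* new combined form: callback supplied at init time */
	hrtimer_setup(&dev->pre_tbtt_timer, mt76x02u_pre_tbtt_interrupt,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);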
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8020446be37b..4fb30589fa7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -287,8 +287,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
- mvif->group_wcid.hw_key_idx = -1;
- mt76_wcid_init(&mvif->group_wcid);
+ mt76_wcid_init(&mvif->group_wcid, 0);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
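
Here and in the mt7921/mt7925 hunks below, the per-caller hw_key_idx and phy_idx assignments disappear alongside this signature change; the assumed shape of the widened helper (a sketch, not the verbatim mt76 core):

	void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
	{
		wcid->hw_key_idx = -1;	/* was open-coded in every caller */
		wcid->phy_idx = band_idx;

		INIT_LIST_HEAD(&wcid->tx_list);
		INIT_LIST_HEAD(&wcid->poll_list);
		skb_queue_head_init(&wcid->tx_pending);
	}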
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 67c9d1caa0bd..727bfdd00b40 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -151,12 +151,15 @@ mt76x2e_resume(struct pci_dev *pdev)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index f84517d932dc..e2b4cf30dc44 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -280,7 +280,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, 0);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index bfdbc15abaa9..928e0b07a9bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -2,9 +2,14 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/firmware.h>
+#include <linux/moduleparam.h>
#include "mt7915.h"
#include "eeprom.h"
+static bool enable_6ghz;
+module_param(enable_6ghz, bool, 0644);
+MODULE_PARM_DESC(enable_6ghz, "Enable 6 GHz instead of 5 GHz on hardware that supports both");
+
static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
@@ -170,8 +175,20 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
phy->mt76->cap.has_6ghz = true;
return;
case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
- phy->mt76->cap.has_5ghz = true;
- phy->mt76->cap.has_6ghz = true;
+ if (enable_6ghz) {
+ phy->mt76->cap.has_6ghz = true;
+ u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_WIFI_CONF0_BAND_SEL);
+ } else {
+ phy->mt76->cap.has_5ghz = true;
+ u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_WIFI_CONF0_BAND_SEL);
+ }
+ /* force buffer mode */
+ dev->flash_mode = true;
+
return;
default:
phy->mt76->cap.has_2ghz = true;
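
The parameter is evaluated once, when the EEPROM band configuration is parsed at probe time, so it must be set when the module loads; a hypothetical invocation (mt7915e assumed as the PCI module name):

	modprobe mt7915e enable_6ghz=1

The 0644 permission makes the value visible under /sys/module, but flipping it after probe has no effect until the driver is reloaded.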
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 6bef96e3d2a3..bee4beabc4eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -53,7 +53,9 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
switch (i) {
case 0:
+ mutex_lock(&phy->dev->mt76.mutex);
temperature = mt7915_mcu_get_temperature(phy);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (temperature < 0)
return temperature;
/* display in millidegree Celsius */
@@ -82,7 +84,7 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
return ret;
mutex_lock(&phy->dev->mt76.mutex);
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 60 * 1000, 130 * 1000), 1000);
if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
@@ -95,9 +97,8 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
}
phy->throttle_temp[i - 1] = val;
- mutex_unlock(&phy->dev->mt76.mutex);
-
ret = mt7915_mcu_set_thermal_protect(phy);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (ret)
return ret;
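
The clamp reordering a few lines up is not cosmetic: DIV_ROUND_CLOSEST(x, 1000) adds half the divisor before dividing, so an unclamped value near the type maximum written through the hwmon attribute could overflow before the old range check ever ran. Clamping in millidegrees first keeps the intermediate sum in range:

	/* bounds applied to the raw millidegree value, then rounded */
	val = DIV_ROUND_CLOSEST(clamp_val(val, 60 * 1000, 130 * 1000), 1000);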
@@ -159,7 +160,9 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
* cooling_device convention: 0 = no cooling, more = more cooling
* mcu convention: 1 = max cooling, more = less cooling
*/
+ mutex_lock(&phy->dev->mt76.mutex);
ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
+ mutex_unlock(&phy->dev->mt76.mutex);
if (ret)
return ret;
@@ -512,6 +515,15 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
MT_WF_RMAC_MIB_QOS23_BACKOFF);
+ /* clear backoff time for Tx duration */
+ mt76_clear(dev, MT_WTBLOFF_TOP_ACR(band),
+ MT_WTBLOFF_TOP_ADM_BACKOFFTIME);
+
+ /* exclude estimated backoff time for Tx duration on MT7915 */
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_AGG_ATCR0(band),
+ MT_AGG_ATCR_MAC_BFF_TIME_EN);
+
/* clear backoff time and set software compensation for OBSS time */
mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
@@ -1114,7 +1126,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -1239,14 +1251,14 @@ int mt7915_register_device(struct mt7915_dev *dev)
if (ret)
goto unreg_dev;
- ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
-
if (phy2) {
ret = mt7915_register_ext_phy(dev, phy2);
if (ret)
goto unreg_thermal;
}
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
dev->recovery.hw_init_done = true;
ret = mt7915_init_debugfs(&dev->phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index cf77ce0c8759..2ba6eb3038ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -333,11 +333,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
if (status->wcid) {
msta = container_of(status->wcid, struct mt7915_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
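
Several hunks below replace the same open-coded poll-list insertion; the new helper presumably folds at least this locked pattern (sketch):

	static inline void
	mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
	{
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&wcid->poll_list))
			list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}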
@@ -927,11 +923,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
msta = container_of(wcid, struct mt7915_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
continue;
}
@@ -1040,10 +1032,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
out:
rcu_read_unlock();
@@ -1163,7 +1152,7 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
u8 band = phy->mt76->band_idx;
- int eifs_ofdm = 360, sifs = 10, offset;
+ int eifs_ofdm = 84, sifs = 10, offset;
bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
@@ -1367,10 +1356,15 @@ mt7915_mac_restart(struct mt7915_dev *dev)
mt7915_dma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
+ }
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc) {
napi_schedule(&dev->mt76.napi[i]);
}
}
@@ -1388,6 +1382,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
if (dev_is_pci(mdev->dev)) {
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
if (dev->hif2) {
+ mt76_wr(dev, MT_PCIE_RECOG_ID,
+ dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
if (is_mt7915(mdev))
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
else
@@ -1428,8 +1424,9 @@ out:
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
@@ -1442,9 +1439,11 @@ static void
mt7915_mac_full_reset(struct mt7915_dev *dev)
{
struct mt76_phy *ext_phy;
+ struct mt7915_phy *phy2;
int i;
ext_phy = dev->mt76.phys[MT_BAND1];
+ phy2 = ext_phy ? ext_phy->priv : NULL;
dev->recovery.hw_full_reset = true;
@@ -1474,6 +1473,9 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
dev->mt76.vif_mask = 0;
+ dev->phy.omac_mask = 0;
+ if (phy2)
+ phy2->omac_mask = 0;
i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
dev->mt76.global_wcid.idx = i;
@@ -1574,9 +1576,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
@@ -1585,8 +1590,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index c6f498fc81ff..3aa31c5cefa6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -233,6 +233,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = phy->mt76->band_idx;
+ mvif->mt76.wcid = &mvif->sta.wcid;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -246,16 +247,15 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
- if (idx < 0)
- return -ENOSPC;
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
INIT_LIST_HEAD(&mvif->sta.rc_list);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = ext_phy;
- mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ mt76_wcid_init(&mvif->sta.wcid, phy->mt76->band_idx);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -366,8 +366,12 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
- if (sta && !wcid->sta)
+ if (sta && !wcid->sta) {
+ if (cmd != SET_KEY)
+ return 0;
+
return -EOPNOTSUPP;
+ }
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -619,8 +623,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC)
set_bss_info = vif->cfg.assoc;
if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ info->enable_beacon &&
vif->type != NL80211_IFTYPE_AP)
- set_bss_info = set_sta = info->enable_beacon;
+ set_bss_info = set_sta = 1;
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
@@ -631,7 +636,11 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
+ int slottime = 9;
+
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ &&
+ !info->use_short_slot)
+ slottime = 20;
if (slottime != phy->slottime) {
phy->slottime = slottime;
@@ -758,6 +767,57 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
}
+struct drop_sta_iter {
+ struct mt7915_dev *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ u8 sta_addr[ETH_ALEN];
+};
+
+static void
+__mt7915_drop_sta(void *ptr, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct drop_sta_iter *data = ptr;
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+
+ if (vif == data->vif || vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(data->hw, data->sta_addr, mac);
+ if (!sta)
+ return;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ mt7915_mcu_add_sta(data->dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+}
+
+static void
+mt7915_drop_other_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *ext_phy = dev->mt76.phys[MT_BAND1];
+ struct drop_sta_iter data = {
+ .dev = dev,
+ .hw = dev->mphy.hw,
+ .vif = vif,
+ };
+
+ if (vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ memcpy(data.sta_addr, sta->addr, ETH_ALEN);
+ ieee80211_iterate_active_interfaces(data.hw, 0, __mt7915_drop_sta, &data);
+
+ if (!ext_phy)
+ return;
+
+ data.hw = ext_phy->hw;
+ ieee80211_iterate_active_interfaces(data.hw, 0, __mt7915_drop_sta, &data);
+}
+
int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
@@ -786,6 +846,7 @@ int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
case MT76_STA_EVENT_AUTHORIZE:
+ mt7915_drop_other_sta(dev, vif, sta);
return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
case MT76_STA_EVENT_DISASSOC:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 87d0dd040001..9d790f234e82 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -194,6 +194,25 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+static void
+mt7915_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
+{
+ if ((cmd & __MCU_CMD_FIELD_ID) != MCU_CMD_EXT_CID)
+ return;
+
+ switch (FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd)) {
+ case MCU_EXT_CMD_THERMAL_CTRL:
+ case MCU_EXT_CMD_GET_MIB_INFO:
+ case MCU_EXT_CMD_PHY_STAT_INFO:
+ case MCU_EXT_CMD_STA_REC_UPDATE:
+ case MCU_EXT_CMD_BSS_INFO_UPDATE:
+ mdev->mcu.timeout = 2 * HZ;
+ return;
+ default:
+ break;
+ }
+}
+
static int
mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -208,6 +227,8 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
qid = MT_MCUQ_WM;
+ mt7915_mcu_set_timeout(mdev, cmd);
+
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
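
The widened window only helps if the waiter picks it up; the assumed flow (a sketch, relying on the mt76 core using mdev->mcu.timeout for the response wait):

	mt7915_mcu_set_timeout(mdev, cmd);	/* slow commands: 2 * HZ */
	mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
	/* mt76_mcu_skb_send_and_get_msg() then waits up to
	 * mdev->mcu.timeout for the matching response */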
@@ -1678,7 +1699,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
conn_state, newly);
/* tag order is in accordance with firmware dependency. */
if (sta && conn_state != CONN_STATE_DISCONNECT) {
@@ -2388,7 +2409,7 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
- .max_retry = 3,
+ .max_retry = 1,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
@@ -3150,8 +3171,13 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);
#define __res_u64(s) le64_to_cpu(res[s].data)
- /* subtract Tx backoff time from Tx duration */
- cc_tx = is_mt7915(&dev->mt76) ? __res_u64(1) - __res_u64(4) : __res_u64(1);
+ /* subtract Tx backoff time from Tx duration for MT7915 */
+ if (is_mt7915(&dev->mt76)) {
+ u64 backoff = (__res_u64(4) & 0xffff) * 79; /* 16us + 9us * 7 */
+ cc_tx = __res_u64(1) - backoff;
+ } else {
+ cc_tx = __res_u64(1);
+ }
if (chan_switch)
goto out;
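
A worked number for the constant, assuming the low 16 bits of res[4] count backoff events as the mask suggests: each event costs one 16 us SIFS plus seven 9 us slots, i.e. 16 + 7 * 9 = 79 us.

	u64 events  = __res_u64(4) & 0xffff;	/* e.g. 1000 events       */
	u64 backoff = events * 79;		/* -> 79000 us of backoff */
	cc_tx       = __res_u64(1) - backoff;	/* net Tx airtime         */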
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 44e112b8b5b3..876f0692850a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -138,6 +138,7 @@ static const u32 mt7915_offs[] = {
[AGG_ACR0] = 0x084,
[AGG_ACR4] = 0x08c,
[AGG_MRCR] = 0x098,
+ [AGG_ATCR0] = 0x0ec,
[AGG_ATCR1] = 0x0f0,
[AGG_ATCR3] = 0x0f4,
[LPON_UTTR0] = 0x080,
@@ -212,6 +213,7 @@ static const u32 mt7916_offs[] = {
[AGG_ACR0] = 0x054,
[AGG_ACR4] = 0x05c,
[AGG_MRCR] = 0x068,
+ [AGG_ATCR0] = 0x1a4,
[AGG_ATCR1] = 0x1a8,
[AGG_ATCR3] = 0x080,
[LPON_UTTR0] = 0x360,
@@ -484,7 +486,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
continue;
ofs = addr - dev->reg.map[i].phys;
- if (ofs > dev->reg.map[i].size)
+ if (ofs >= dev->reg.map[i].size)
continue;
return dev->reg.map[i].maps + ofs;
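
The old ">" accepted ofs == size, the first byte past the mapped window; a worked case (hypothetical numbers):

	/* map: phys = 0x54000000, size = 0x1000; addr = 0x54001000   */
	/* ofs = 0x1000: old "ofs > size"  is false -> bogus remap    */
	/*               new "ofs >= size" is true  -> entry skipped  */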
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index ac0b1f0eb27c..533939f2b7ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -166,7 +166,7 @@ struct mt7915_vif_cap {
};
struct mt7915_vif {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt7915_vif_cap cap;
struct mt7915_sta sta;
@@ -191,6 +191,7 @@ struct mt7915_hif {
struct device *dev;
void __iomem *regs;
int irq;
+ u32 index;
};
struct mt7915_phy {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 39132894e8ea..07b0a5766eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -42,6 +42,7 @@ static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
continue;
get_device(hif->dev);
+ hif->index = idx;
goto out;
}
hif = NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 89ac8e6707b8..c5ec63a25a42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -66,6 +66,7 @@ enum offs_rev {
AGG_ACR0,
AGG_ACR4,
AGG_MRCR,
+ AGG_ATCR0,
AGG_ATCR1,
AGG_ATCR3,
LPON_UTTR0,
@@ -254,6 +255,9 @@ enum offs_rev {
#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+#define MT_WTBLOFF_TOP_ACR(_band) MT_WTBLOFF_TOP(_band, 0x010)
+#define MT_WTBLOFF_TOP_ADM_BACKOFFTIME BIT(29)
+
/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@@ -505,6 +509,9 @@ enum offs_rev {
#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
+#define MT_AGG_ATCR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR0))
+#define MT_AGG_ATCR_MAC_BFF_TIME_EN BIT(30)
+
#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index d1d64fa7d35d..14e17dc90256 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -137,6 +137,13 @@ mt7921_regd_notifier(struct wiphy *wiphy,
dev->mt76.region = request->dfs_region;
dev->country_ie_env = request->country_ie_env;
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER) {
+ if (dev->mt76.alpha2[0] == '0' && dev->mt76.alpha2[1] == '0')
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ else
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
if (pm->suspended)
return;
@@ -227,6 +234,7 @@ static void mt7921_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7921_set_stream_he_caps(&dev->phy);
+ mt792x_config_mac_addr_list(dev);
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 047106b65d2b..5dd57de59f27 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -216,11 +216,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (status->wcid) {
mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
msta = container_of(mlink, struct mt792x_sta, deflink);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
}
mt792x_get_status_freq_info(status, chfreq);
@@ -479,10 +475,7 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
out:
rcu_read_unlock();
@@ -529,11 +522,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
continue;
mlink = container_of(wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
continue;
}
@@ -647,6 +636,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->bss_conf.mt76,
&mvif->sta.deflink.wcid, true);
mt7921_mcu_set_tx(dev, vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index a7f5bfbc02ed..13e58c328aff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -147,7 +147,7 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -252,6 +252,11 @@ int __mt7921_start(struct mt792x_phy *phy)
return err;
}
+ if (phy->chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN) {
+ mt7921_mcu_wf_rf_pin_ctrl(phy, WF_RF_PIN_INIT);
+ wiphy_rfkill_start_polling(mphy->hw->wiphy);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__mt7921_start);
@@ -308,6 +313,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->bss_conf.mt76,
&mvif->sta.deflink.wcid, true);
if (ret)
goto out;
@@ -319,10 +325,8 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list);
mvif->sta.deflink.wcid.idx = idx;
- mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
- mvif->sta.deflink.wcid.hw_key_idx = -1;
mvif->sta.deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.deflink.wcid);
+ mt76_wcid_init(&mvif->sta.deflink.wcid, mvif->bss_conf.mt76.band_idx);
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -338,6 +342,9 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN)
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ INIT_WORK(&mvif->csa_work, mt7921_csa_work);
+ timer_setup(&mvif->csa_timer, mt792x_csa_timer, 0);
out:
mt792x_mutex_release(dev);
@@ -360,9 +367,9 @@ void mt7921_roc_abort_sync(struct mt792x_dev *dev)
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_roc_iter, (void *)phy);
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, (void *)phy);
}
EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
@@ -531,7 +538,13 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
} else {
if (idx == *wcid_keyidx)
*wcid_keyidx = -1;
- goto out;
+
+ /* For security reasons we don't trigger key deletion when
+ * reassociating, but we should still run the deletion process
+ * to avoid using an incorrect cipher after disconnection.
+ */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->cfg.assoc)
+ goto out;
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
@@ -858,6 +871,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ mt7921_roc_abort_sync(dev);
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
@@ -1334,6 +1348,9 @@ static int
mt7921_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ dev->new_ctx = ctx;
return 0;
}
@@ -1341,6 +1358,10 @@ static void
mt7921_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ if (dev->new_ctx == ctx)
+ dev->new_ctx = NULL;
}
static void
@@ -1391,6 +1412,101 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
mt7921_abort_roc(mvif->phy, mvif);
}
+static int mt7921_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ return mt792x_assign_vif_chanctx(hw, vifs->vif, vifs->link_conf,
+ vifs->new_ctx);
+}
+
+void mt7921_csa_work(struct work_struct *work)
+{
+ struct mt792x_vif *mvif;
+ struct mt792x_dev *dev;
+ struct ieee80211_vif *vif;
+ int ret;
+
+ mvif = (struct mt792x_vif *)container_of(work, struct mt792x_vif,
+ csa_work);
+ dev = mvif->phy->dev;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mt792x_mutex_acquire(dev);
+ ret = mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76,
+ dev->new_ctx);
+ mt792x_mutex_release(dev);
+
+ ieee80211_chswitch_done(vif, !ret, 0);
+}
+
+static int mt7921_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return -EOPNOTSUPP;
+
+ /* Avoid beacon loss due to the CAC (Channel Availability Check) time
+ * of the AP.
+ */
+ if (!cfg80211_chandef_usable(hw->wiphy, &chsw->chandef,
+ IEEE80211_CHAN_RADAR))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void mt7921_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ u16 beacon_interval = vif->bss_conf.beacon_int;
+
+ mvif->csa_timer.expires = TU_TO_EXP_TIME(beacon_interval * chsw->count);
+ add_timer(&mvif->csa_timer);
+}
+
+static void mt7921_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ del_timer_sync(&mvif->csa_timer);
+ cancel_work_sync(&mvif->csa_work);
+}
+
+static void mt7921_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ u16 beacon_interval = vif->bss_conf.beacon_int;
+
+ if (cfg80211_chandef_identical(&chsw->chandef,
+ &dev->new_ctx->def) &&
+ chsw->count) {
+ mod_timer(&mvif->csa_timer,
+ TU_TO_EXP_TIME(beacon_interval * chsw->count));
+ }
+}
+
+static void mt7921_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret = 0;
+
+ mt792x_mutex_acquire(phy->dev);
+ ret = mt7921_mcu_wf_rf_pin_ctrl(phy, WF_RF_PIN_POLL);
+ mt792x_mutex_release(phy->dev);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, !ret);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt792x_tx,
.start = mt7921_start,
@@ -1441,6 +1557,7 @@ const struct ieee80211_ops mt7921_ops = {
#endif /* CONFIG_PM */
.flush = mt792x_flush,
.set_sar_specs = mt7921_set_sar_specs,
+ .rfkill_poll = mt7921_rfkill_poll,
.remain_on_channel = mt7921_remain_on_channel,
.cancel_remain_on_channel = mt7921_cancel_remain_on_channel,
.add_chanctx = mt7921_add_chanctx,
@@ -1450,6 +1567,11 @@ const struct ieee80211_ops mt7921_ops = {
.unassign_vif_chanctx = mt792x_unassign_vif_chanctx,
.mgd_prepare_tx = mt7921_mgd_prepare_tx,
.mgd_complete_tx = mt7921_mgd_complete_tx,
+ .switch_vif_chanctx = mt7921_switch_vif_chanctx,
+ .pre_channel_switch = mt7921_pre_channel_switch,
+ .channel_switch = mt7921_channel_switch,
+ .abort_channel_switch = mt7921_abort_channel_switch,
+ .channel_switch_rx_beacon = mt7921_channel_switch_rx_beacon,
};
EXPORT_SYMBOL_GPL(mt7921_ops);
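
A worked number for the timer arming in mt7921_channel_switch() above: one TU is 1024 us, so the CSA work fires beacon_int * count TUs after the request (sketch):

	u16 beacon_int = 100;	/* TUs, a typical AP beacon interval  */
	u8  count      = 5;	/* beacons remaining until the switch */
	u32 delay_us   = beacon_int * count * 1024;	/* = 512000 us */
	/* i.e. mt7921_csa_work() runs ~512 ms later, when the AP is
	 * expected to have moved to dev->new_ctx */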
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 02c1de8620a7..86bd33b916a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -61,6 +61,12 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
skb_pull(skb, sizeof(*rxd));
event = (struct mt76_connac_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
+ } else if (cmd == MCU_EXT_CMD(WF_RF_PIN_CTRL)) {
+ struct mt7921_wf_rf_pin_ctrl_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7921_wf_rf_pin_ctrl_event *)skb->data;
+ ret = (int)event->result;
} else {
skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
@@ -174,7 +180,7 @@ static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_connac_beacon_loss_event *event = priv;
if (mvif->idx != event->bss_idx)
@@ -507,7 +513,10 @@ static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev,
tx_res = (struct mt7921_tx_resource *)skb->data;
sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
- sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota);
+ sdio->pse_mcu_quota_max = le32_to_cpu(tx_res->pse_mcu_quota);
+ /* the MCU quota consumed by this command itself must be taken into account */
+ sdio->sched.pse_mcu_quota =
+ sdio->sched.pse_mcu_quota ? sdio->pse_mcu_quota_max : sdio->pse_mcu_quota_max - 1;
sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
sdio->sched.deficit = tx_res->pp_padding;
@@ -1122,7 +1131,7 @@ int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr)
int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct {
u8 band_idx;
@@ -1424,6 +1433,21 @@ int mt7921_mcu_get_temperature(struct mt792x_phy *phy)
sizeof(req), true);
}
+int mt7921_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy, u8 action)
+{
+ struct mt792x_dev *dev = phy->dev;
+ struct {
+ u8 action;
+ u8 value;
+ } req = {
+ .action = action,
+ .value = 0,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WF_RF_PIN_CTRL), &req,
+ sizeof(req), !!action);
+}
+
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index f9a259ee6b82..2834c6c53e58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -74,6 +74,11 @@ struct mt7921_txpwr_event {
struct mt7921_txpwr txpwr;
} __packed;
+struct mt7921_wf_rf_pin_ctrl_event {
+ u8 result;
+ u8 value;
+} __packed;
+
enum {
TM_SWITCH_MODE,
TM_SET_AT_CMD,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 16c89815c0b8..c88793fcec64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -31,6 +31,9 @@
#define EXT_CMD_RADIO_ON_LED 0x2
#define EXT_CMD_RADIO_OFF_LED 0x3
+#define WF_RF_PIN_INIT 0x0
+#define WF_RF_PIN_POLL 0x1
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -202,6 +205,7 @@ void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb);
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value);
+int mt7921_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy, u8 action);
static inline u32
mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr)
@@ -273,6 +277,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
bool enable);
void mt7921_scan_work(struct work_struct *work);
void mt7921_roc_work(struct work_struct *work);
+void mt7921_csa_work(struct work_struct *work);
int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 67723c22aea6..a0c9df3c2cc7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -42,6 +42,10 @@ static void mt7921e_unregister_device(struct mt792x_dev *dev)
{
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ if (dev->phy.chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_stop_polling(hw->wiphy);
cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
@@ -435,7 +439,7 @@ static int mt7921_pci_suspend(struct device *device)
if (err < 0)
goto restore_suspend;
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
goto restore_suspend;
@@ -481,7 +485,7 @@ restore_napi:
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_suspend:
pm->suspended = false;
@@ -519,12 +523,15 @@ static int mt7921_pci_resume(struct device *device)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
@@ -532,7 +539,7 @@ static int mt7921_pci_resume(struct device *device)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
if (err < 0)
goto failed;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 2452b1a2d118..881812ba03ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -81,9 +81,12 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
mt792x_wpdma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
@@ -115,8 +118,8 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 95f526f7bb99..45b9f35aab17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -240,7 +240,7 @@ static int mt7921s_suspend(struct device *__dev)
mt76s_txqs_empty(&dev->mt76), 5 * HZ);
/* The SDIO bus is supposed to be idle at this point */
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
if (err)
goto restore_worker;
@@ -258,7 +258,7 @@ static int mt7921s_suspend(struct device *__dev)
restore_txrx_worker:
mt76_worker_enable(&mdev->sdio.net_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_worker:
mt76_worker_enable(&mdev->tx_worker);
@@ -302,7 +302,7 @@ static int mt7921s_resume(struct device *__dev)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(mdev, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8aa4f0203208..fe9751851ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
/* Netgear, Inc. [A8000,AXE3000] */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* TP-Link TXE50UH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -257,7 +260,7 @@ static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
pm->suspended = true;
flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
if (err)
goto failed;
@@ -307,7 +310,7 @@ static int mt7921u_resume(struct usb_interface *intf)
if (err < 0)
goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 039949b344b9..f41ca4248497 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -57,6 +57,22 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
mt7925_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
}
+
+void mt7925_regd_update(struct mt792x_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_hw *hw = mdev->hw;
+
+ if (!dev->regd_change)
+ return;
+
+ mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
+ mt7925_mcu_set_channel_domain(hw->priv);
+ mt7925_set_tx_sar_pwr(hw, NULL);
+ dev->regd_change = false;
+}
+EXPORT_SYMBOL_GPL(mt7925_regd_update);
+
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@@ -64,6 +80,7 @@ mt7925_regd_notifier(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_connac_pm *pm = &dev->pm;
/* allow world regdom at the first boot only */
if (!memcmp(req->alpha2, "00", 2) &&
@@ -78,12 +95,17 @@ mt7925_regd_notifier(struct wiphy *wiphy,
memcpy(mdev->alpha2, req->alpha2, 2);
mdev->region = req->dfs_region;
dev->country_ie_env = req->country_ie_env;
+ dev->regd_change = true;
+ if (pm->suspended)
+ return;
+
+ dev->regd_in_progress = true;
mt792x_mutex_acquire(dev);
- mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env);
- mt7925_mcu_set_channel_domain(hw->priv);
- mt7925_set_tx_sar_pwr(hw, NULL);
+ mt7925_regd_update(dev);
mt792x_mutex_release(dev);
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
}
static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev)
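
The shape of the deferral this introduces, assuming the resume path replays the change via the exported mt7925_regd_update() (sketch):

	dev->regd_change = true;
	if (pm->suspended)
		return;		/* replayed from mt7925_regd_update() */

	dev->regd_in_progress = true;
	mt792x_mutex_acquire(dev);
	mt7925_regd_update(dev);	/* CLC + channel domain + SAR */
	mt792x_mutex_release(dev);
	dev->regd_in_progress = false;
	wake_up(&dev->wait);	/* waiters can key off regd_in_progress */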
@@ -178,6 +200,7 @@ static void mt7925_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7925_set_stream_he_eht_caps(&dev->phy);
+ mt792x_config_mac_addr_list(dev);
ret = mt7925_init_mlo_caps(&dev->phy);
if (ret) {
@@ -225,6 +248,7 @@ int mt7925_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ init_waitqueue_head(&dev->wait);
spin_lock_init(&dev->pm.txq_lock);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index 634c42bbf23f..c871d2f9688b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -49,7 +49,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
break;
mlink = list_first_entry(&sta_poll_list,
struct mt792x_link_sta, wcid.poll_list);
- msta = container_of(mlink, struct mt792x_sta, deflink);
+ msta = mlink->sta;
spin_lock_bh(&dev->mt76.sta_poll_lock);
list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
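
container_of() only recovers the parent mt792x_sta when mlink is the embedded deflink; for a secondary MLO link it would point into unrelated memory, hence the explicit back-pointer (set in mt7925_mac_link_sta_add() below):

	/* wrong for non-default links:
	 *   msta = container_of(mlink, struct mt792x_sta, deflink);
	 * safe for every link: */
	msta = mlink->sta;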
@@ -395,11 +395,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (status->wcid) {
mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
}
mt792x_get_status_freq_info(status, chfreq);
@@ -734,7 +730,7 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif *mvif;
+ struct mt76_vif_link *mvif;
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -743,7 +739,7 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
wcid->link_id) : NULL;
- mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL;
+ mvif = mconf ? (struct mt76_vif_link *)&mconf->mt76 : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
@@ -1054,10 +1050,7 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
out:
rcu_read_unlock();
@@ -1135,11 +1128,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
continue;
mlink = container_of(wcid, struct mt792x_link_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&mlink->wcid.poll_list))
- list_add_tail(&mlink->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
continue;
}
@@ -1271,6 +1260,7 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
struct mt792x_dev *dev = mvif->phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
struct ieee80211_bss_conf *bss_conf;
+ struct mt792x_bss_conf *mconf;
int i;
if (vif->type == NL80211_IFTYPE_STATION)
@@ -1278,8 +1268,9 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mconf = mt792x_vif_to_link(mvif, i);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
&mvif->sta.deflink.wcid, true);
mt7925_mcu_set_tx(dev, bss_conf);
}
@@ -1309,6 +1300,7 @@ void mt7925_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ dev->sar_inited = false;
for (i = 0; i < 10; i++) {
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 791c8b00e112..98daf80ac131 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -130,7 +130,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -310,6 +310,7 @@ void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy)
int __mt7925_start(struct mt792x_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
+ struct mt792x_dev *dev = phy->dev;
int err;
err = mt7925_mcu_set_channel_domain(mphy);
@@ -320,9 +321,12 @@ int __mt7925_start(struct mt792x_phy *phy)
if (err)
return err;
- err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
- if (err)
- return err;
+ if (!dev->sar_inited) {
+ err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
+ if (err)
+ return err;
+ dev->sar_inited = true;
+ }
mt792x_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@@ -365,29 +369,22 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
0 : mconf->mt76.idx;
mconf->mt76.band_idx = 0xff;
- mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
+ 0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
else
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
- &mlink->wcid, true);
- if (ret)
- goto out;
-
dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
idx = MT792x_WTBL_RESERVED - mconf->mt76.idx;
- INIT_LIST_HEAD(&mlink->wcid.poll_list);
mlink->wcid.idx = idx;
- mlink->wcid.phy_idx = mconf->mt76.band_idx;
- mlink->wcid.hw_key_idx = -1;
mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mlink->wcid);
+ mt76_wcid_init(&mlink->wcid, 0);
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -395,6 +392,12 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
ewma_rssi_init(&mconf->rssi);
rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
+
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
+ &mlink->wcid, true);
+ if (ret)
+ goto out;
+
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
@@ -801,12 +804,12 @@ static u8
mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_phy *mphy = hw->priv;
u16 rate;
u8 i, idx, ht;
- rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast);
+ rate = mt76_connac2_mac_tx_rate_val(mphy, &vif->bss_conf, beacon, mcast);
ht = FIELD_GET(MT_TX_RATE_MODE, rate) > MT_PHY_TYPE_OFDM;
if (beacon && ht) {
@@ -837,6 +840,7 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
u8 link_id = link_sta->link_id;
struct mt792x_link_sta *mlink;
struct mt792x_sta *msta;
+ struct mt76_wcid *wcid;
int ret, idx;
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
@@ -847,14 +851,22 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
return -ENOSPC;
mconf = mt792x_vif_to_link(mvif, link_id);
- INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mt76_wcid_init(&mlink->wcid, 0);
mlink->wcid.sta = 1;
mlink->wcid.idx = idx;
- mlink->wcid.phy_idx = mconf->mt76.band_idx;
mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
mlink->last_txs = jiffies;
mlink->wcid.link_id = link_sta->link_id;
mlink->wcid.link_valid = !!link_sta->sta->valid_links;
+ mlink->sta = msta;
+
+ wcid = &mlink->wcid;
+ ewma_signal_init(&wcid->rssi);
+ rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+ mt76_wcid_init(wcid, 0);
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
+ memset(mlink->airtime_ac, 0,
+ sizeof(msta->deflink.airtime_ac));
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
@@ -866,9 +878,14 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
link_conf = mt792x_vif_to_bss_conf(vif, link_id);
/* should update bss info before STA add */
- if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
- link_conf, link_sta, false);
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ if (ieee80211_vif_is_mld(vif))
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, link_sta != mlink->pri_link);
+ else
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, false);
+ }
if (ieee80211_vif_is_mld(vif) &&
link_sta == mlink->pri_link) {
@@ -904,7 +921,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, unsigned long new_links)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct mt76_wcid *wcid;
unsigned int link_id;
int err = 0;
@@ -921,14 +937,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
err = -ENOMEM;
break;
}
-
- wcid = &mlink->wcid;
- ewma_signal_init(&wcid->rssi);
- rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
- mt76_wcid_init(wcid);
- ewma_avg_signal_init(&mlink->avg_ack_signal);
- memset(mlink->airtime_ac, 0,
- sizeof(msta->deflink.airtime_ac));
}
msta->valid_links |= BIT(link_id);
@@ -1141,8 +1149,7 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct mt792x_bss_conf *mconf;
mconf = mt792x_link_conf_to_mconf(link_conf);
- mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
- link_sta, false);
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
}
spin_lock_bh(&mdev->sta_poll_lock);
@@ -1185,7 +1192,6 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
if (link_sta != mlink->pri_link) {
mt76_wcid_cleanup(mdev, wcid);
mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx);
- mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx);
}
if (msta->deflink_id == link_id)
@@ -1200,12 +1206,45 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 link_idx; /* hw link idx */
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .hdr = {
+ .omac_idx = 0,
+ .band_idx = 0,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = true,
+ },
+ };
unsigned long rem;
rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ if (ieee80211_vif_is_mld(vif)) {
+ mt7925_mcu_set_dbdc(&dev->mphy, false);
+
+ /* recover the OMAC address for the legacy interface */
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+ &dev_req, sizeof(dev_req), true);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
@@ -1250,22 +1289,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
- mt7925_mcu_uni_rx_ba(dev, params, true);
+ mt7925_mcu_uni_rx_ba(dev, vif, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
- mt7925_mcu_uni_rx_ba(dev, params, false);
+ mt7925_mcu_uni_rx_ba(dev, vif, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7925_mcu_uni_tx_ba(dev, params, true);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, params, false);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, false);
break;
case IEEE80211_AMPDU_TX_START:
set_bit(tid, &msta->deflink.wcid.ampdu_state);
@@ -1274,7 +1313,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, params, false);
+ mt7925_mcu_uni_tx_ba(dev, vif, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -1895,6 +1934,13 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7925_mcu_set_tx(dev, info);
+ if (changed & BSS_CHANGED_BSSID) {
+ if (ieee80211_vif_is_mld(vif) &&
+ hweight16(mvif->valid_links) == 2)
+ /* indicate that the secondary link setup is done */
+ mt7925_mcu_uni_bss_bcnft(dev, info, true);
+ }
+
mt792x_mutex_release(dev);
}
@@ -1946,6 +1992,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
GFP_KERNEL);
mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
GFP_KERNEL);
+ if (!mconf || !mlink)
+ return -ENOMEM;
}
mconfs[link_id] = mconf;
@@ -1974,6 +2022,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto free;
if (mconf != &mvif->bss_conf) {
+ mt7925_mcu_set_bss_pm(dev, link_conf, true);
+
err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
vif->active_links);
if (err < 0)
@@ -2071,18 +2121,16 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct ieee80211_bss_conf *pri_link_conf;
struct mt792x_bss_conf *mconf;
mutex_lock(&dev->mt76.mutex);
if (ieee80211_vif_is_mld(vif)) {
mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
- pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
if (vif->type == NL80211_IFTYPE_STATION &&
mconf == &mvif->bss_conf)
- mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, link_conf,
NULL, false);
} else {
mconf = &mvif->bss_conf;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 748ea6adbc6b..15815ad84713 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -39,7 +39,6 @@ int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd,
} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
- cmd == MCU_UNI_CMD(HIF_CTRL) ||
cmd == MCU_UNI_CMD(OFFLOAD) ||
cmd == MCU_UNI_CMD(SUSPEND)) {
struct mt7925_mcu_uni_event *event;
@@ -123,10 +122,8 @@ EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *mvif = container_of((void *)link_conf->vif,
- struct ieee80211_vif,
- drv_priv);
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct ieee80211_vif *mvif = link_conf->vif;
struct sk_buff *skb;
int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
@@ -166,7 +163,7 @@ static int
mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
bool suspend, struct cfg80211_wowlan *wowlan)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -221,7 +218,7 @@ mt7925_mcu_set_wow_pattern(struct mt76_dev *dev,
u8 index, bool enable,
struct cfg80211_pkt_pattern *pattern)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_wow_pattern_tlv *tlv;
struct sk_buff *skb;
struct {
@@ -276,7 +273,7 @@ static void
mt7925_mcu_connection_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_uni_beacon_loss_event *event = priv;
if (mvif->idx != event->hdr.bss_idx)
@@ -306,7 +303,7 @@ mt7925_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb)
static void
mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct mt7925_roc_grant_tlv *grant = priv;
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION)
@@ -344,6 +341,51 @@ static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev,
}
static void
+mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
+{
+ struct mt7925_mcu_hif_ctrl_basic_tlv *basic;
+
+ basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
+
+ if (basic->hifsuspend) {
+ if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
+ basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
+ /* success */
+ dev->hif_idle = true;
+ else
+ /* traffic still busy or event status invalid */
+ dev->hif_idle = false;
+ } else {
+ dev->hif_resumed = true;
+ }
+ wake_up(&dev->wait);
+}
+
+static void
+mt7925_mcu_uni_hif_ctrl_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct tlv *tlv;
+ u32 tlv_len;
+
+ skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
+ tlv = (struct tlv *)skb->data;
+ tlv_len = skb->len;
+
+ while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) {
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_HIF_CTRL_BASIC:
+ mt7925_mcu_handle_hif_ctrl_basic(dev, tlv);
+ break;
+ default:
+ break;
+ }
+ tlv_len -= le16_to_cpu(tlv->len);
+ tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len));
+ }
+}
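The walker above trusts tlv->len as reported by the firmware; a zero-length TLV would make it spin forever. A defensive variant of the same loop, as a sketch (assuming the generic struct tlv { __le16 tag; __le16 len; } layout used here):

static void walk_tlvs(const u8 *data, u32 data_len)
{
	const struct tlv *tlv = (const struct tlv *)data;

	while (data_len >= sizeof(*tlv)) {
		u16 len = le16_to_cpu(tlv->len);

		if (len < sizeof(*tlv) || len > data_len)
			break;	/* malformed TLV: stop parsing */

		/* dispatch on le16_to_cpu(tlv->tag) here */

		data_len -= len;
		tlv = (const struct tlv *)((const u8 *)tlv + len);
	}
}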
+
+static void
mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct tlv *tlv;
@@ -388,7 +430,7 @@ mt7925_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt7925_mcu_txs_event {
u8 ver;
u8 rsv[3];
- u8 data[0];
+ u8 data[];
} __packed * txs;
struct tlv *tlv;
u32 tlv_len;
@@ -489,6 +531,9 @@ mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev,
rxd = (struct mt7925_mcu_rxd *)skb->data;
switch (rxd->eid) {
+ case MCU_UNI_EVENT_HIF_CTRL:
+ mt7925_mcu_uni_hif_ctrl_event(dev, skb);
+ break;
case MCU_UNI_EVENT_FW_LOG_2_HOST:
mt7925_mcu_uni_debug_msg_event(dev, skb);
break;
@@ -530,11 +575,11 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static int
-mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
+ struct mt76_wcid *wcid,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -562,28 +607,60 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
/** starec & wtbl **/
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct mt76_wcid *wcid;
+ u8 link_id;
+ int ret = 0;
- if (enable && !params->amsdu)
- msta->deflink.wcid.amsdu = false;
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+
+ if (enable && !params->amsdu)
+ mlink->wcid.amsdu = false;
+
+ ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+ enable, true);
+ if (ret < 0)
+ break;
+ }
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
- enable, true);
+ return ret;
}
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct mt76_wcid *wcid;
+ u8 link_id;
+ int ret = 0;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+
+ ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+ enable, false);
+ if (ret < 0)
+ break;
+ }
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
- enable, false);
+ return ret;
}
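Both BA paths now share the same MLO fan-out: walk every usable link of the vif and issue the command against that link's wcid/bss pair, stopping at the first failure. The pattern in isolation, as a sketch (cmd_fn() standing in for mt7925_mcu_sta_ba() and assumed to return 0 or a negative errno):

static int for_each_usable_link_cmd(struct ieee80211_vif *vif,
				    int (*cmd_fn)(u8 link_id))
{
	unsigned long usable_links = ieee80211_vif_usable_links(vif);
	int ret = 0;
	u8 link_id;

	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		ret = cmd_fn(link_id);
		if (ret < 0)
			break;	/* ret must stay signed for this test */
	}

	return ret;	/* 0 when no usable link is set */
}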
static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
@@ -638,7 +715,7 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
- if (clc->idx > ARRAY_SIZE(phy->clc))
+ if (clc->idx >= ARRAY_SIZE(phy->clc))
break;
/* do not init buf again if chip reset triggered */
@@ -823,7 +900,7 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
mt7925_mcu_parse_phy_cap(dev, tlv->data);
break;
case MT_NIC_CAP_CHIP_CAP:
- memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64));
+ dev->phy.chip_cap = le64_to_cpu(*(__le64 *)tlv->data);
break;
case MT_NIC_CAP_EML_CAP:
mt7925_mcu_parse_eml_cap(dev, tlv->data);
@@ -1153,7 +1230,12 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
u8 rsv[4];
} __packed hdr;
struct roc_acquire_tlv roc[2];
- } __packed req;
+ } __packed req = {
+ .roc[0].tag = cpu_to_le16(UNI_ROC_NUM),
+ .roc[0].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
+ .roc[1].tag = cpu_to_le16(UNI_ROC_NUM),
+ .roc[1].len = cpu_to_le16(sizeof(struct roc_acquire_tlv))
+ };
if (!mconf || hweight16(vif->valid_links) < 2 ||
hweight16(sel_links) != 2)
@@ -1200,6 +1282,8 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
req.roc[i].center_chan = center_ch;
req.roc[i].center_chan_from_ap = center_ch;
+ req.roc[i].center_chan2 = 0;
+ req.roc[i].center_chan2_from_ap = 0;
/* STR : 0xfe indicates BAND_ALL with enabling DBDC
* EMLSR : 0xff indicates (BAND_AUTO) without DBDC
@@ -1215,7 +1299,7 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
}
return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
@@ -1264,7 +1348,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
}
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
@@ -1294,7 +1378,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
@@ -1357,7 +1441,7 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
&ps_req, sizeof(ps_req), true);
}
-static int
+int
mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
struct ieee80211_bss_conf *link_conf, bool enable)
{
@@ -1447,12 +1531,12 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
int err;
err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
- &req1, sizeof(req1), false);
+ &req1, sizeof(req1), true);
if (err < 0 || !enable)
return err;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
static void
@@ -1770,7 +1854,7 @@ static int
mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
- struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
int conn_state;
@@ -1783,7 +1867,7 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
conn_state = info->enable ? CONN_STATE_PORT_SECURE :
CONN_STATE_DISCONNECT;
if (info->link_sta)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
conn_state, info->newly);
if (info->link_sta && info->enable) {
@@ -1837,7 +1921,7 @@ mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
return PTR_ERR(skb);
if (info->enable)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
info->enable, info->newly);
@@ -1883,6 +1967,7 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
struct mt76_sta_cmd_info info = {
.link_sta = link_sta,
.vif = vif,
+ .link_conf = &vif->bss_conf,
.enable = enable,
.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
.state = state,
@@ -1898,7 +1983,11 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
mlink = mt792x_sta_to_link(msta, link_sta->link_id);
}
info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
- info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
+
+ if (link_sta)
+ info.newly = state != MT76_STA_INFO_STATE_ASSOC;
+ else
+ info.newly = state != MT76_STA_INFO_STATE_ASSOC;
if (ieee80211_vif_is_mld(vif))
err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
@@ -1914,32 +2003,21 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
{
#define MT7925_FIF_BIT_CLR BIT(1)
#define MT7925_FIF_BIT_SET BIT(0)
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- unsigned long valid = ieee80211_vif_is_mld(vif) ?
- mvif->valid_links : BIT(0);
- struct ieee80211_bss_conf *bss_conf;
int err = 0;
- int i;
if (enable) {
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
- err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
- if (err < 0)
- return err;
- }
+ err = mt7925_mcu_uni_bss_bcnft(dev, &vif->bss_conf, true);
+ if (err < 0)
+ return err;
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_SET,
MT_WF_RFCR_DROP_OTHER_BEACON);
}
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
- err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
- if (err)
- return err;
- }
+ err = mt7925_mcu_set_bss_pm(dev, &vif->bss_conf, false);
+ if (err < 0)
+ return err;
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_CLR,
@@ -1976,8 +2054,6 @@ int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txp
int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
-
struct {
struct {
u8 band_idx;
@@ -1991,7 +2067,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} __packed req = {
.hdr = {
- .band_idx = mvif->bss_conf.mt76.band_idx,
+ .band_idx = 0,
},
.enable = {
.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
@@ -2050,7 +2126,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->bss_conf.mt76.band_idx,
+ .band_idx = 0,
},
.tlv = {
.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
@@ -2179,11 +2255,27 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
req = (struct bss_rlm_tlv *)tlv;
req->control_channel = chandef->chan->hw_value;
req->center_chan = ieee80211_frequency_to_channel(freq1);
- req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->center_chan2 = 0;
req->tx_streams = hweight8(phy->antenna_mask);
req->ht_op_info = 4; /* set HT 40M allowed */
req->rx_streams = hweight8(phy->antenna_mask);
- req->band = band;
+ req->sco = 0;
+ req->band = 1;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ req->band = 1;
+ break;
+ case NL80211_BAND_5GHZ:
+ req->band = 2;
+ break;
+ case NL80211_BAND_6GHZ:
+ req->band = 3;
+ break;
+ default:
+ break;
+ }
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@@ -2194,6 +2286,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
break;
case NL80211_CHAN_WIDTH_80P80:
req->bw = CMD_CBW_8080MHZ;
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
break;
case NL80211_CHAN_WIDTH_160:
req->bw = CMD_CBW_160MHZ;
@@ -2219,7 +2312,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
}
static struct sk_buff *
-__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
+__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int len)
{
struct bss_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -2235,7 +2328,7 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
@@ -2388,7 +2481,7 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb,
struct ieee80211_bss_conf *link_conf)
{
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
- struct mt76_vif *mvif = &mconf->mt76;
+ struct mt76_vif_link *mvif = &mconf->mt76;
struct bss_sec_tlv {
__le16 tag;
__le16 len;
@@ -2439,7 +2532,7 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
&link_conf->chanreq.oper;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
enum nl80211_band band = chandef->chan->band;
- struct mt76_vif *mvif = &mconf->mt76;
+ struct mt76_vif_link *mvif = &mconf->mt76;
struct bss_rate_tlv *bmc;
struct tlv *tlv;
u8 idx = mvif->mcast_rates_idx ?
@@ -2463,6 +2556,7 @@ static void
mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
struct ieee80211_bss_conf *link_conf)
{
+ struct ieee80211_vif *vif = link_conf->vif;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct bss_mld_tlv *mld;
@@ -2483,7 +2577,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
IEEE80211_EML_CAP_EMLSR_SUPP);
- memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
}
static void
@@ -2614,7 +2708,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
+int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
{
struct mt76_dev *mdev = phy->dev;
@@ -2634,7 +2728,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf));
conf = (struct mbmc_conf_tlv *)tlv;
- conf->mbmc_en = 1;
+ conf->mbmc_en = enable;
conf->band = 0; /* unused */
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
@@ -2643,14 +2737,12 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
return err;
}
-#define MT76_CONNAC_SCAN_CHANNEL_TIME 60
-
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct cfg80211_scan_request *sreq = &scan_req->req;
- int n_ssids = 0, err, i, duration;
+ int n_ssids = 0, err, i;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
struct mt76_connac_mcu_scan_channel *chan;
@@ -2686,14 +2778,6 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->scan_type = sreq->n_ssids ? 1 : 0;
req->probe_req_num = sreq->n_ssids ? 2 : 0;
- duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
- /* increase channel time for passive scan */
- if (!sreq->n_ssids)
- duration *= 2;
- req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
- req->channel_min_dwell_time = cpu_to_le16(duration);
- req->channel_dwell_time = cpu_to_le16(duration);
-
tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid));
ssid = (struct scan_ssid_tlv *)tlv;
for (i = 0; i < sreq->n_ssids; i++) {
@@ -2765,7 +2849,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *sreq)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_dev *mdev = phy->dev;
@@ -2901,7 +2985,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
struct ieee80211_vif *vif)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct {
struct scan_hdr {
u8 seq_num;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index ac53bdc99332..1e47d2c61b54 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -616,7 +616,7 @@ mt7925_mcu_get_cipher(int cipher)
}
}
-int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
+int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
@@ -637,10 +637,13 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf);
+int mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf, bool enable);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index f5c02e5f5066..8707b5d04743 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -27,6 +27,26 @@
#define MCU_UNI_EVENT_ROC 0x27
+#define HIF_TRAFFIC_IDLE 0x2
+
+enum {
+ UNI_EVENT_HIF_CTRL_BASIC = 0,
+ UNI_EVENT_HIF_CTRL_TAG_NUM
+};
+
+struct mt7925_mcu_hif_ctrl_basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cid;
+ u8 pad[3];
+ __le32 status;
+ u8 hif_type;
+ u8 hif_tx_traffic_status;
+ u8 hif_rx_traffic_status;
+ u8 hifsuspend;
+ u8 rsv[4];
+} __packed;
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -215,6 +235,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+void mt7925_regd_update(struct mt792x_dev *dev);
int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -242,9 +263,11 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+ struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
void mt7925_scan_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 9aec675450f2..c7b5dc1dbb34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -442,9 +442,10 @@ static int mt7925_pci_suspend(struct device *device)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
- int i, err;
+ int i, err, ret;
pm->suspended = true;
+ dev->hif_resumed = false;
flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -455,14 +456,21 @@ static int mt7925_pci_suspend(struct device *device)
if (err < 0)
goto restore_suspend;
+ wait_event_timeout(dev->wait,
+ !dev->regd_in_progress, 5 * HZ);
+
/* always enable deep sleep during suspend to reduce
* power consumption
*/
mt7925_mcu_set_deep_sleep(dev, true);
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
- if (err)
+ mt76_connac_mcu_set_hif_suspend(mdev, true, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_idle, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
goto restore_suspend;
+ }
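The suspend path above replaces the synchronous MCU call with a fire-and-wait handshake: HIF_CTRL is sent without waiting for a direct response, and completion arrives as the unsolicited MCU_UNI_EVENT_HIF_CTRL event that sets dev->hif_idle and wakes dev->wait. Reduced to a sketch (flag cleared inline here for brevity; the driver clears it on the opposite transition; 3 * HZ timeout as in the driver):

static int hif_suspend_handshake(struct mt792x_dev *dev)
{
	dev->hif_idle = false;

	/* request suspend; the event handler flips hif_idle */
	mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, false);

	if (!wait_event_timeout(dev->wait, dev->hif_idle, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}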
napi_disable(&mdev->tx_napi);
mt76_worker_disable(&mdev->tx_worker);
@@ -506,8 +514,11 @@ restore_napi:
if (!pm->ds_enable)
mt7925_mcu_set_deep_sleep(dev, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false);
-
+ mt76_connac_mcu_set_hif_suspend(mdev, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret)
+ err = -ETIMEDOUT;
restore_suspend:
pm->suspended = false;
@@ -523,8 +534,9 @@ static int mt7925_pci_resume(struct device *device)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
- int i, err;
+ int i, err, ret;
+ dev->hif_idle = false;
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto failed;
@@ -544,21 +556,31 @@ static int mt7925_pci_resume(struct device *device)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
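The reordering above is deliberate: napi_enable() may sleep (it can take locks on recent kernels) and therefore must not run inside a local_bh_disable() section, while napi_schedule() merely raises a softirq and is what the BH-disabled region is for. The resulting shape:

/* process context: enabling may sleep */
mt76_for_each_q_rx(mdev, i)
	napi_enable(&mdev->napi[i]);
napi_enable(&mdev->tx_napi);

/* softirq-safe region: scheduling only */
local_bh_disable();
mt76_for_each_q_rx(mdev, i)
	napi_schedule(&mdev->napi[i]);
napi_schedule(&mdev->tx_napi);
local_bh_enable();

The same split is applied to mt7925e_mac_reset() below.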
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
+ goto failed;
+ }
/* restore previous ds setting */
if (!pm->ds_enable)
mt7925_mcu_set_deep_sleep(dev, false);
+ mt7925_regd_update(dev);
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
index faedbf766d1a..4578d16bf456 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
@@ -101,12 +101,15 @@ int mt7925e_mac_reset(struct mt792x_dev *dev)
mt792x_wpdma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
- napi_schedule(&dev->mt76.napi[i]);
}
napi_enable(&dev->mt76.tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_schedule(&dev->mt76.napi[i]);
+ }
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 682db1bab21c..4dfbc1b6cfdd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -243,14 +243,19 @@ static int mt7925u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt792x_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
- int err;
+ int err, ret;
pm->suspended = true;
+ dev->hif_resumed = false;
flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
- if (err)
+ mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_idle, 3 * HZ);
+ if (!ret) {
+ err = -ETIMEDOUT;
goto failed;
+ }
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
@@ -271,8 +276,9 @@ static int mt7925u_resume(struct usb_interface *intf)
struct mt792x_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
bool reinit = true;
- int err, i;
+ int err, i, ret;
+ dev->hif_idle = false;
for (i = 0; i < 10; i++) {
u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
@@ -298,7 +304,11 @@ static int mt7925u_resume(struct usb_interface *intf)
if (err < 0)
goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, false);
+ ret = wait_event_timeout(dev->wait,
+ dev->hif_resumed, 3 * HZ);
+ if (!ret)
+ err = -ETIMEDOUT;
failed:
pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index ab12616ec2b8..32ed01a96bf7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -28,6 +28,7 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
+#define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN BIT(3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -116,7 +117,7 @@ struct mt792x_chanctx {
};
struct mt792x_bss_conf {
- struct mt76_vif mt76; /* must be first */
+ struct mt76_vif_link mt76; /* must be first */
struct mt792x_vif *vif;
struct ewma_rssi rssi;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
@@ -133,6 +134,9 @@ struct mt792x_vif {
struct mt792x_phy *phy;
u16 valid_links;
u8 deflink_id;
+
+ struct work_struct csa_work;
+ struct timer_list csa_timer;
};
struct mt792x_phy {
@@ -206,6 +210,8 @@ struct mt792x_dev {
struct mt76_phy mphy;
};
+ struct mac_address macaddr_list[8];
+
const struct mt76_bus_ops *bus_ops;
struct mt792x_phy phy;
@@ -216,6 +222,10 @@ struct mt792x_dev {
bool has_eht:1;
bool regd_in_progress:1;
bool aspm_supported:1;
+ bool hif_idle:1;
+ bool hif_resumed:1;
+ bool sar_inited:1;
+ bool regd_change:1;
wait_queue_head_t wait;
struct work_struct init_work;
@@ -235,12 +245,15 @@ struct mt792x_dev {
enum environment_cap country_ie_env;
u32 backup_l1;
u32 backup_l2;
+
+ struct ieee80211_chanctx_conf *new_ctx;
};
static inline struct mt792x_bss_conf *
mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
{
struct ieee80211_vif *vif;
+ struct mt792x_bss_conf *bss_conf;
vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
@@ -248,8 +261,10 @@ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
link_id >= IEEE80211_LINK_UNSPECIFIED)
return &mvif->bss_conf;
- return rcu_dereference_protected(mvif->link_conf[link_id],
- lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+ bss_conf = rcu_dereference_protected(mvif->link_conf[link_id],
+ lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+
+ return bss_conf ? bss_conf : &mvif->bss_conf;
}
static inline struct mt792x_link_sta *
@@ -364,6 +379,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 timestamp);
void mt792x_tx_worker(struct mt76_worker *w);
void mt792x_roc_timer(struct timer_list *timer);
+void mt792x_csa_timer(struct timer_list *timer);
void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -414,6 +430,7 @@ int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
struct mt792x_bss_conf *mconf,
struct mt792x_link_sta *mlink);
+void mt792x_config_mac_addr_list(struct mt792x_dev *dev);
static inline char *mt792x_ram_name(struct mt792x_dev *dev)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 78fe37c2e07b..8799627f6292 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -38,6 +38,10 @@ static const struct ieee80211_iface_limit if_limits_chanctx[] = {
.max = 1,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
}
};
@@ -45,7 +49,7 @@ static const struct ieee80211_iface_combination if_comb_chanctx[] = {
{
.limits = if_limits_chanctx,
.n_limits = ARRAY_SIZE(if_limits_chanctx),
- .max_interfaces = 2,
+ .max_interfaces = 3,
.num_different_channels = 2,
.beacon_int_infra_match = false,
}
@@ -147,7 +151,8 @@ void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
+ &mlink->wcid, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
@@ -284,6 +289,14 @@ void mt792x_roc_timer(struct timer_list *timer)
}
EXPORT_SYMBOL_GPL(mt792x_roc_timer);
+void mt792x_csa_timer(struct timer_list *timer)
+{
+ struct mt792x_vif *mvif = from_timer(mvif, timer, csa_timer);
+
+ ieee80211_queue_work(mvif->phy->mt76->hw, &mvif->csa_work);
+}
+EXPORT_SYMBOL_GPL(mt792x_csa_timer);
+
void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -325,6 +338,11 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
mctx->bss_conf = NULL;
mvif->bss_conf.mt76.ctx = NULL;
mutex_unlock(&dev->mt76.mutex);
+
+ if (vif->bss_conf.csa_active) {
+ del_timer_sync(&mvif->csa_timer);
+ cancel_work_sync(&mvif->csa_work);
+ }
}
EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
@@ -614,7 +632,8 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
wiphy->max_remain_on_channel_duration = 5000;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
@@ -646,6 +665,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -911,6 +931,28 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+void mt792x_config_mac_addr_list(struct mt792x_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mphy.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+}
+EXPORT_SYMBOL_GPL(mt792x_config_mac_addr_list);
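A worked example of the address derivation above, assuming a hypothetical base address of 00:11:22:33:44:55: entry 0 keeps the base, while entries 1..7 set the locally-administered bit (BIT(1) of octet 0) and XOR in a per-index pattern, yielding eight distinct addresses for multi-interface use:

/*   i=0: 00:11:22:33:44:55               (base, unmodified)
 *   i=1: 02:11:22:33:44:55   (0x00 | 0x02) ^ (0 << 2)
 *   i=2: 06:11:22:33:44:55   (0x00 | 0x02) ^ (1 << 2)
 *   i=3: 0a:11:22:33:44:55   (0x00 | 0x02) ^ (2 << 2)
 *   ...
 *   i=7: 1a:11:22:33:44:55   (0x00 | 0x02) ^ (6 << 2)
 */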
+
MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index 106273935b26..05978d9c7b91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -153,7 +153,7 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
return NULL;
link = container_of(wcid, struct mt792x_link_sta, wcid);
- sta = container_of(link, struct mt792x_sta, deflink);
+ sta = link->sta;
if (!sta->vif)
return NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 62c03d088925..7b2bb72b407d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -51,12 +51,10 @@ static ssize_t
mt7996_sys_recovery_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct mt7996_phy *phy = file->private_data;
- struct mt7996_dev *dev = phy->dev;
- bool band = phy->mt76->band_idx;
- char buf[16];
+ struct mt7996_dev *dev = file->private_data;
+ char buf[16], *sep;
int ret = 0;
- u16 val;
+ u16 band, val;
if (count >= sizeof(buf))
return -EINVAL;
@@ -69,21 +67,26 @@ mt7996_sys_recovery_set(struct file *file, const char __user *user_buf,
else
buf[count] = '\0';
- if (kstrtou16(buf, 0, &val))
+ sep = strchr(buf, ',');
+ if (!sep)
+ return -EINVAL;
+
+ *sep = 0;
+ if (kstrtou16(buf, 0, &band) || kstrtou16(sep + 1, 0, &val))
return -EINVAL;
switch (val) {
/*
- * 0: grab firmware current SER state.
- * 1: trigger & enable system error L1 recovery.
- * 2: trigger & enable system error L2 recovery.
- * 3: trigger & enable system error L3 rx abort.
- * 4: trigger & enable system error L3 tx abort
- * 5: trigger & enable system error L3 tx disable.
- * 6: trigger & enable system error L3 bf recovery.
- * 7: trigger & enable system error L4 mdp recovery.
- * 8: trigger & enable system error full recovery.
- * 9: trigger firmware crash.
+ * <band>,0: grab firmware current SER state.
+ * <band>,1: trigger & enable system error L1 recovery.
+ * <band>,2: trigger & enable system error L2 recovery.
+ * <band>,3: trigger & enable system error L3 rx abort.
+ * <band>,4: trigger & enable system error L3 tx abort
+ * <band>,5: trigger & enable system error L3 tx disable.
+ * <band>,6: trigger & enable system error L3 bf recovery.
+ * <band>,7: trigger & enable system error L4 mdp recovery.
+ * <band>,8: trigger & enable system error full recovery.
+ * <band>,9: trigger firmware crash.
*/
case UNI_CMD_SER_QUERY:
ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_QUERY, 0, band);
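With the reworked parser, every write must carry the band index before the action, e.g. (debugfs path hypothetical, it depends on the registered phy name):

	echo "0,8" > /sys/kernel/debug/ieee80211/phy0/mt76/sys_recovery

which requests a full SER recovery on band 0.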
@@ -126,8 +129,7 @@ static ssize_t
mt7996_sys_recovery_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct mt7996_phy *phy = file->private_data;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private_data;
char *buff;
int desc = 0;
ssize_t ret;
@@ -141,25 +143,25 @@ mt7996_sys_recovery_get(struct file *file, char __user *user_buf,
desc += scnprintf(buff + desc, bufsz - desc,
"Please echo the correct value ...\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "0: grab firmware transient SER state\n");
+ "<band>,0: grab firmware transient SER state\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "1: trigger system error L1 recovery\n");
+ "<band>,1: trigger system error L1 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "2: trigger system error L2 recovery\n");
+ "<band>,2: trigger system error L2 recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "3: trigger system error L3 rx abort\n");
+ "<band>,3: trigger system error L3 rx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "4: trigger system error L3 tx abort\n");
+ "<band>,4: trigger system error L3 tx abort\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "5: trigger system error L3 tx disable\n");
+ "<band>,5: trigger system error L3 tx disable\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "6: trigger system error L3 bf recovery\n");
+ "<band>,6: trigger system error L3 bf recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "7: trigger system error L4 mdp recovery\n");
+ "<band>,7: trigger system error L4 mdp recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "8: trigger system error full recovery\n");
+ "<band>,8: trigger system error full recovery\n");
desc += scnprintf(buff + desc, bufsz - desc,
- "9: trigger firmware crash\n");
+ "<band>,9: trigger firmware crash\n");
/* SER statistics */
desc += scnprintf(buff + desc, bufsz - desc,
@@ -524,16 +526,12 @@ mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s)
seq_puts(s, "\n");
}
-static int
-mt7996_tx_stats_show(struct seq_file *file, void *data)
+static void
+mt7996_tx_stats_show_phy(struct seq_file *file, struct mt7996_phy *phy)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
struct mt76_mib_stats *mib = &phy->mib;
- int i;
u32 attempts, success, per;
-
- mutex_lock(&dev->mt76.mutex);
+ int i;
mt7996_mac_update_stats(phy);
mt7996_ampdu_stat_read_phy(phy, file);
@@ -558,6 +556,23 @@ mt7996_tx_stats_show(struct seq_file *file, void *data)
else
seq_puts(file, "\n");
}
+}
+
+static int
+mt7996_tx_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy = &dev->phy;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_tx_stats_show_phy(file, phy);
+ phy = mt7996_phy2(dev);
+ if (phy)
+ mt7996_tx_stats_show_phy(file, phy);
+ phy = mt7996_phy3(dev);
+ if (phy)
+ mt7996_tx_stats_show_phy(file, phy);
mutex_unlock(&dev->mt76.mutex);
@@ -601,7 +616,7 @@ static void
mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
struct seq_file *s = data;
u8 ac;
@@ -621,15 +636,15 @@ mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
- msta->vif->mt76.wmm_idx, ac, qlen);
+ msta->vif->deflink.mt76.wmm_idx, ac, qlen);
}
}
static int
mt7996_hw_queues_show(struct seq_file *file, void *data)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy = &dev->phy;
static const struct hw_queue_map ple_queue_map[] = {
{ "CPU_Q0", 0, 1, MT_CTX0 },
{ "CPU_Q1", 1, 1, MT_CTX0 + 1 },
@@ -685,6 +700,15 @@ mt7996_hw_queues_show(struct seq_file *file, void *data)
/* iterate per-sta ple queue */
ieee80211_iterate_stations_atomic(phy->mt76->hw,
mt7996_sta_hw_queue_read, file);
+ phy = mt7996_phy2(dev);
+ if (phy)
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7996_sta_hw_queue_read, file);
+ phy = mt7996_phy3(dev);
+ if (phy)
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7996_sta_hw_queue_read, file);
+
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
mt7996_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
@@ -698,19 +722,29 @@ DEFINE_SHOW_ATTRIBUTE(mt7996_hw_queues);
static int
mt7996_xmit_queues_show(struct seq_file *file, void *data)
{
- struct mt7996_phy *phy = file->private;
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = file->private;
+ struct mt7996_phy *phy;
struct {
struct mt76_queue *q;
char *queue;
} queue_map[] = {
- { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" },
+ { dev->mphy.q_tx[MT_TXQ_BE], " MAIN0" },
+ { NULL, " MAIN1" },
+ { NULL, " MAIN2" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" },
{ dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" },
};
int i;
+ phy = mt7996_phy2(dev);
+ if (phy)
+ queue_map[1].q = phy->mt76->q_tx[MT_TXQ_BE];
+
+ phy = mt7996_phy3(dev);
+ if (phy)
+ queue_map[2].q = phy->mt76->q_tx[MT_TXQ_BE];
+
seq_puts(file, " queue | hw-queued | head | tail |\n");
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
struct mt76_queue *q = queue_map[i].q;
@@ -785,20 +819,20 @@ mt7996_rf_regval_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
mt7996_rf_regval_set, "0x%08llx\n");
-int mt7996_init_debugfs(struct mt7996_phy *phy)
+int mt7996_init_debugfs(struct mt7996_dev *dev)
{
- struct mt7996_dev *dev = phy->dev;
struct dentry *dir;
- dir = mt76_register_debugfs_fops(phy->mt76, NULL);
+ dir = mt76_register_debugfs_fops(&dev->mphy, NULL);
if (!dir)
return -ENOMEM;
- debugfs_create_file("hw-queues", 0400, dir, phy,
+
+ debugfs_create_file("hw-queues", 0400, dir, dev,
&mt7996_hw_queues_fops);
- debugfs_create_file("xmit-queues", 0400, dir, phy,
+ debugfs_create_file("xmit-queues", 0400, dir, dev,
&mt7996_xmit_queues_fops);
- debugfs_create_file("tx_stats", 0400, dir, phy, &mt7996_tx_stats_fops);
- debugfs_create_file("sys_recovery", 0600, dir, phy,
+ debugfs_create_file("tx_stats", 0400, dir, dev, &mt7996_tx_stats_fops);
+ debugfs_create_file("sys_recovery", 0600, dir, dev,
&mt7996_sys_recovery_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
@@ -812,17 +846,13 @@ int mt7996_init_debugfs(struct mt7996_phy *phy)
mt7996_twt_stats);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
- if (phy->mt76->cap.has_5ghz) {
- debugfs_create_u32("dfs_hw_pattern", 0400, dir,
- &dev->hw_pattern);
- debugfs_create_file("radar_trigger", 0200, dir, dev,
- &fops_radar_trigger);
- debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
- mt7996_rdd_monitor);
- }
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7996_rdd_monitor);
- if (phy == &dev->phy)
- dev->debugfs_dir = dir;
+ dev->debugfs_dir = dir;
return 0;
}
@@ -899,7 +929,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
#define LONG_PREAMBLE 1
struct ieee80211_sta *sta = file->private_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
struct ra_rate phy = {};
char buf[100];
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 4a8237118287..53dfac02f8af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -25,17 +25,108 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
static char *mt7996_eeprom_name(struct mt7996_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- return MT7996_EEPROM_DEFAULT;
case 0x7992:
- return MT7992_EEPROM_DEFAULT;
+ switch (dev->var.type) {
+ case MT7992_VAR_TYPE_23:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7992_EEPROM_DEFAULT_23_INT;
+ return MT7992_EEPROM_DEFAULT_23;
+ case MT7992_VAR_TYPE_44:
+ default:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7992_EEPROM_DEFAULT_INT;
+ if (dev->var.fem == MT7996_FEM_MIX)
+ return MT7992_EEPROM_DEFAULT_MIX;
+ return MT7992_EEPROM_DEFAULT;
+ }
+ case 0x7990:
+ default:
+ switch (dev->var.type) {
+ case MT7996_VAR_TYPE_233:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7996_EEPROM_DEFAULT_233_INT;
+ return MT7996_EEPROM_DEFAULT_233;
+ case MT7996_VAR_TYPE_444:
+ default:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7996_EEPROM_DEFAULT_INT;
+ return MT7996_EEPROM_DEFAULT;
+ }
+ }
+}
+
+static void
+mt7996_eeprom_parse_stream(const u8 *eeprom, u8 band_idx, u8 *path,
+ u8 *rx_path, u8 *nss)
+{
+ switch (band_idx) {
+ case MT_BAND1:
+ *path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
+ case MT_BAND2:
+ *path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF4_RX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 4]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
default:
- return MT7996_EEPROM_DEFAULT;
+ *path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 1]);
+ *rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ *nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 4]);
+ break;
}
}
+static bool mt7996_eeprom_variant_valid(struct mt7996_dev *dev, const u8 *def)
+{
+#define FEM_INT 0
+#define FEM_EXT 3
+ u8 *eeprom = dev->mt76.eeprom.data, fem[2];
+ int i;
+
+ for (i = 0; i < 2; i++)
+ fem[i] = u8_get_bits(eeprom[MT_EE_WIFI_CONF + 6 + i],
+ MT_EE_WIFI_PA_LNA_CONFIG);
+
+ if (dev->var.fem == MT7996_FEM_EXT &&
+ !(fem[0] == FEM_EXT && fem[1] == FEM_EXT))
+ return false;
+ else if (dev->var.fem == MT7996_FEM_INT &&
+ !(fem[0] == FEM_INT && fem[1] == FEM_INT))
+ return false;
+ else if (dev->var.fem == MT7996_FEM_MIX &&
+ !(fem[0] == FEM_INT && fem[1] == FEM_EXT))
+ return false;
+
+ for (i = 0; i < __MT_MAX_BAND; i++) {
+ u8 path, rx_path, nss;
+ u8 def_path, def_rx_path, def_nss;
+
+ if (!dev->mt76.phys[i])
+ continue;
+
+ mt7996_eeprom_parse_stream(eeprom, i, &path, &rx_path, &nss);
+ mt7996_eeprom_parse_stream(def, i, &def_path, &def_rx_path,
+ &def_nss);
+ if (path > def_path || rx_path > def_rx_path || nss > def_nss)
+ return false;
+ }
+
+ return true;
+}
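For reference, the FIELD_GET() decodes used by mt7996_eeprom_parse_stream() shift and mask by a constant bitfield. A worked example, assuming (hypothetically) MT_EE_WIFI_CONF2_TX_PATH_BAND1 is GENMASK(5, 3) and the EEPROM byte reads 0xb4:

/* FIELD_GET(GENMASK(5, 3), 0xb4)
 *   = (0xb4 & 0x38) >> 3
 *   = 0x30 >> 3
 *   = 6	-> six TX paths reported for band 1
 */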
+
static int
-mt7996_eeprom_load_default(struct mt7996_dev *dev)
+mt7996_eeprom_check_or_use_default(struct mt7996_dev *dev, bool use_default)
{
u8 *eeprom = dev->mt76.eeprom.data;
const struct firmware *fw = NULL;
@@ -51,6 +142,10 @@ mt7996_eeprom_load_default(struct mt7996_dev *dev)
goto out;
}
+ if (!use_default && mt7996_eeprom_variant_valid(dev, fw->data))
+ goto out;
+
+ dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
memcpy(eeprom, fw->data, MT7996_EEPROM_SIZE);
dev->flash_mode = true;
@@ -62,43 +157,68 @@ out:
static int mt7996_eeprom_load(struct mt7996_dev *dev)
{
+ bool use_default = false;
int ret;
ret = mt76_eeprom_init(&dev->mt76, MT7996_EEPROM_SIZE);
if (ret < 0)
return ret;
- if (ret) {
+ if (ret && !mt7996_check_eeprom(dev)) {
dev->flash_mode = true;
- } else {
- u8 free_block_num;
- u32 block_num, i;
+ goto out;
+ }
+
+ if (!dev->flash_mode) {
u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE;
+ u32 block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
+ u8 free_block_num;
+ int i;
+ memset(dev->mt76.eeprom.data, 0, MT7996_EEPROM_SIZE);
ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
if (ret < 0)
return ret;
/* efuse info isn't enough */
- if (free_block_num >= 59)
- return -EINVAL;
-
- /* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
- for (i = 0; i < block_num; i++) {
- ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size);
- if (ret < 0)
- return ret;
+ if (free_block_num >= 59) {
+ use_default = true;
+ goto out;
+ }
+
+ /* check if eeprom data from fw is valid */
+ if (mt7996_mcu_get_eeprom(dev, 0, NULL, 0) ||
+ mt7996_check_eeprom(dev)) {
+ use_default = true;
+ goto out;
+ }
+
+ /* read eeprom data from fw */
+ for (i = 1; i < block_num; i++) {
+ u32 len = eeprom_blk_size;
+
+ if (i == block_num - 1)
+ len = MT7996_EEPROM_SIZE % eeprom_blk_size;
+ ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size,
+ NULL, len);
+ if (ret && ret != -EINVAL) {
+ use_default = true;
+ goto out;
+ }
}
}
- return mt7996_check_eeprom(dev);
+out:
+ return mt7996_eeprom_check_or_use_default(dev, use_default);
}
-static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
+static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_phy *phy,
+ u8 *path, u8 *rx_path, u8 *nss)
{
#define MODE_HE_ONLY BIT(0)
#define WTBL_SIZE_GROUP GENMASK(31, 28)
+#define STREAM_CAP(_offs) ((cap & (0x7 << (_offs))) >> (_offs))
+ struct mt7996_dev *dev = phy->dev;
u32 cap = 0;
int ret;
@@ -107,13 +227,17 @@ static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
return ret;
if (cap) {
+ u8 band_offs = phy->mt76->band_idx * 3;
+
dev->has_eht = !(cap & MODE_HE_ONLY);
dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
+ *nss = min_t(u8, *nss, STREAM_CAP(1 + band_offs));
+ *path = min_t(u8, *path, STREAM_CAP(10 + band_offs));
+ *rx_path = min_t(u8, *rx_path, STREAM_CAP(19 + band_offs));
}
- if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4 ||
- is_mt7992(&dev->mt76))
- dev->wtbl_size_group = 2; /* set default */
+ if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4)
+ dev->wtbl_size_group = is_mt7996(&dev->mt76) ? 4 : 2;
return 0;
}
@@ -163,32 +287,10 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
int max_path = 5, max_nss = 4;
int ret;
- switch (band_idx) {
- case MT_BAND1:
- path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
- eeprom[MT_EE_WIFI_CONF + 2]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND1,
- eeprom[MT_EE_WIFI_CONF + 3]);
- nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
- eeprom[MT_EE_WIFI_CONF + 5]);
- break;
- case MT_BAND2:
- path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
- eeprom[MT_EE_WIFI_CONF + 2]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF4_RX_PATH_BAND2,
- eeprom[MT_EE_WIFI_CONF + 4]);
- nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
- eeprom[MT_EE_WIFI_CONF + 5]);
- break;
- default:
- path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
- eeprom[MT_EE_WIFI_CONF + 1]);
- rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
- eeprom[MT_EE_WIFI_CONF + 4]);
- break;
- }
+ mt7996_eeprom_parse_stream(eeprom, band_idx, &path, &rx_path, &nss);
+ ret = mt7996_eeprom_parse_efuse_hw_cap(phy, &path, &rx_path, &nss);
+ if (ret)
+ return ret;
if (!path || path > max_path)
path = max_path;
@@ -203,15 +305,12 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
mphy->antenna_mask = BIT(nss) - 1;
mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
+ phy->orig_chainmask = mphy->chainmask;
dev->chainmask |= mphy->chainmask;
if (band_idx < MT_BAND2)
dev->chainshift[band_idx + 1] = dev->chainshift[band_idx] +
hweight16(mphy->chainmask);
- ret = mt7996_eeprom_parse_efuse_hw_cap(dev);
- if (ret)
- return ret;
-
return mt7996_eeprom_parse_band_config(phy);
}
@@ -220,15 +319,8 @@ int mt7996_eeprom_init(struct mt7996_dev *dev)
int ret;
ret = mt7996_eeprom_load(dev);
- if (ret < 0) {
- if (ret != -EINVAL)
- return ret;
-
- dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
- ret = mt7996_eeprom_load_default(dev);
- if (ret)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = mt7996_eeprom_parse_hw_cap(dev, &dev->phy);
if (ret < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
index 412d6e2f8014..7a771ca2434c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
@@ -40,6 +40,8 @@ enum mt7996_eeprom_field {
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1 GENMASK(2, 0)
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2 GENMASK(5, 3)
+#define MT_EE_WIFI_PA_LNA_CONFIG GENMASK(1, 0)
+
#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 5e96973226bb..6b660424aedc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -14,11 +14,30 @@
#include "coredump.h"
#include "eeprom.h"
+static const struct ieee80211_iface_limit if_limits_global = {
+ .max = MT7996_MAX_INTERFACES * MT7996_MAX_RADIOS,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC)
+ | BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+};
+
+static const struct ieee80211_iface_combination if_comb_global = {
+ .limits = &if_limits_global,
+ .n_limits = 1,
+ .max_interfaces = MT7996_MAX_INTERFACES * MT7996_MAX_RADIOS,
+ .num_different_channels = MT7996_MAX_RADIOS,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+};
+
static const struct ieee80211_iface_limit if_limits[] = {
{
- .max = 1,
- .types = BIT(NL80211_IFTYPE_ADHOC)
- }, {
.max = 16,
.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
@@ -30,20 +49,18 @@ static const struct ieee80211_iface_limit if_limits[] = {
}
};
-static const struct ieee80211_iface_combination if_comb[] = {
- {
- .limits = if_limits,
- .n_limits = ARRAY_SIZE(if_limits),
- .max_interfaces = MT7996_MAX_INTERFACES,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160),
- .beacon_int_min_gcd = 100,
- }
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = MT7996_MAX_INTERFACES,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
};
static ssize_t mt7996_thermal_temp_show(struct device *dev,
@@ -85,7 +102,7 @@ static ssize_t mt7996_thermal_temp_store(struct device *dev,
return ret;
mutex_lock(&phy->dev->mt76.mutex);
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 40, 130);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 40 * 1000, 130 * 1000), 1000);
/* add a safety margin ~10 */
if ((i - 1 == MT7996_CRIT_TEMP_IDX &&
@@ -180,28 +197,32 @@ static const struct thermal_cooling_device_ops mt7996_thermal_ops = {
static void mt7996_unregister_thermal(struct mt7996_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ char name[sizeof("cooling_deviceXXX")];
if (!phy->cdev)
return;
- sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ snprintf(name, sizeof(name), "cooling_device%d", phy->mt76->band_idx);
+ sysfs_remove_link(&wiphy->dev.kobj, name);
thermal_cooling_device_unregister(phy->cdev);
}
static int mt7996_thermal_init(struct mt7996_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ char cname[sizeof("cooling_deviceXXX")];
struct thermal_cooling_device *cdev;
struct device *hwmon;
const char *name;
- name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s",
- wiphy_name(wiphy));
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s.%d",
+ wiphy_name(wiphy), phy->mt76->band_idx);
+ snprintf(cname, sizeof(cname), "cooling_device%d", phy->mt76->band_idx);
cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops);
if (!IS_ERR(cdev)) {
if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
- "cooling_device") < 0)
+ cname) < 0)
thermal_cooling_device_unregister(cdev);
else
phy->cdev = cdev;
@@ -333,28 +354,88 @@ mt7996_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
- if (dev->mt76.region == NL80211_DFS_UNSET)
- mt7996_mcu_rdd_background_enable(phy, NULL);
+ mt7996_for_each_phy(dev, phy) {
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7996_mcu_rdd_background_enable(phy, NULL);
- mt7996_init_txpower(phy);
+ mt7996_init_txpower(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ mt7996_dfs_init_radar_detector(phy);
+ }
+}
+
+static void
+mt7996_init_wiphy_band(struct ieee80211_hw *hw, struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct wiphy *wiphy = hw->wiphy;
+ int n_radios = hw->wiphy->n_radio;
+ struct wiphy_radio_freq_range *freq = &dev->radio_freqs[n_radios];
+ struct wiphy_radio *radio = &dev->radios[n_radios];
+
+ phy->slottime = 9;
+ phy->beacon_rate = -1;
- phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
- mt7996_dfs_init_radar_detector(phy);
+ if (phy->mt76->cap.has_2ghz) {
+ phy->mt76->sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_2;
+ freq->start_freq = 2400000;
+ freq->end_freq = 2500000;
+ } else if (phy->mt76->cap.has_5ghz) {
+ phy->mt76->sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_1;
+
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ freq->start_freq = 5000000;
+ freq->end_freq = 5900000;
+ } else if (phy->mt76->cap.has_6ghz) {
+ freq->start_freq = 5900000;
+ freq->end_freq = 7200000;
+ } else {
+ return;
+ }
+
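+	/* Expose this band as a separate radio on the shared wiphy. */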
+ dev->radio_phy[n_radios] = phy;
+ radio->freq_range = freq;
+ radio->n_freq_range = 1;
+ radio->iface_combinations = &if_comb;
+ radio->n_iface_combinations = 1;
+ hw->wiphy->n_radio++;
+
+ wiphy->available_antennas_rx |= phy->mt76->chainmask;
+ wiphy->available_antennas_tx |= phy->mt76->chainmask;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
+ mt7996_init_txpower(phy);
}
static void
mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt76_dev *mdev = &phy->dev->mt76;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
struct wiphy *wiphy = hw->wiphy;
- u16 max_subframes = phy->dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
- IEEE80211_MAX_AMPDU_BUF_HE;
+ u16 max_subframes = dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
+ IEEE80211_MAX_AMPDU_BUF_HE;
hw->queues = 4;
hw->max_rx_aggregation_subframes = max_subframes;
@@ -366,14 +447,15 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
- phy->slottime = 9;
- phy->beacon_rate = -1;
-
hw->sta_data_size = sizeof(struct mt7996_sta);
hw->vif_data_size = sizeof(struct mt7996_vif);
+ hw->chanctx_data_size = sizeof(struct mt76_chanctx);
+
+ wiphy->iface_combinations = &if_comb_global;
+ wiphy->n_iface_combinations = 1;
+
+ wiphy->radio = dev->radios;
- wiphy->iface_combinations = if_comb;
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7996_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->mbssid_max_interfaces = 16;
@@ -390,57 +472,31 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
- if (!mdev->dev->of_node ||
- !of_property_read_bool(mdev->dev->of_node,
- "mediatek,disable-radar-background"))
+ if (mt7996_has_background_radar(dev) &&
+ (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background")))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_RADAR_BACKGROUND);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
- ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->max_tx_fragments = 4;
- if (phy->mt76->cap.has_2ghz) {
- phy->mt76->sband_2g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_2;
- }
-
- if (phy->mt76->cap.has_5ghz) {
- phy->mt76->sband_5g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
-
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_1;
-
- ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
- }
-
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- phy->mt76->leds.cdev.brightness_set = mt7996_led_set_brightness;
- phy->mt76->leds.cdev.blink_set = mt7996_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink;
}
- mt76_set_stream_caps(phy->mt76, true);
- mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_eht_caps(phy);
- mt7996_init_txpower(phy);
+ wiphy->max_scan_ssids = 4;
+ wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- wiphy->available_antennas_rx = phy->mt76->antenna_mask;
- wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+ mt7996_init_wiphy_band(hw, &dev->phy);
}
static void
@@ -459,6 +515,10 @@ mt7996_mac_init_band(struct mt7996_dev *dev, u8 band)
mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
MT_WF_RMAC_MIB_QOS23_BACKOFF);
+ /* clear backoff time for Tx duration */
+ mt76_clear(dev, MT_WTBLOFF_ACR(band),
+ MT_WTBLOFF_ADM_BACKOFFTIME);
+
/* clear backoff time and set software compensation for OBSS time */
mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
@@ -557,18 +617,15 @@ int mt7996_txbf_init(struct mt7996_dev *dev)
return mt7996_mcu_set_txbf(dev, BF_HW_EN_UPDATE);
}
-static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
- enum mt76_band_id band)
+static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
{
+ struct mt7996_phy *phy;
struct mt76_phy *mphy;
u32 mac_ofs, hif1_ofs = 0;
int ret;
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- if (!mt7996_band_valid(dev, band) || band == MT_BAND0)
- return 0;
-
- if (phy)
+ if (!mt7996_band_valid(dev, band))
return 0;
if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
@@ -576,7 +633,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
wed = &dev->mt76.mmio.wed_hif2;
}
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
+ mphy = mt76_alloc_radio_phy(&dev->mt76, sizeof(*phy), band);
if (!mphy)
return -ENOMEM;
@@ -607,7 +664,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
mt76_eeprom_override(mphy);
/* init wiphy according to mphy and phy */
- mt7996_init_wiphy(mphy->hw, wed);
+ mt7996_init_wiphy_band(mphy->hw, phy);
ret = mt7996_init_tx_queues(mphy->priv,
MT_TXQ_ID(band),
MT7996_TX_RING_SIZE,
@@ -621,14 +678,6 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
if (ret)
goto error;
- ret = mt7996_thermal_init(phy);
- if (ret)
- goto error;
-
- ret = mt7996_init_debugfs(phy);
- if (ret)
- goto error;
-
if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
@@ -640,24 +689,14 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
error:
mphy->dev->phys[band] = NULL;
- ieee80211_free_hw(mphy->hw);
return ret;
}
static void
-mt7996_unregister_phy(struct mt7996_phy *phy, enum mt76_band_id band)
+mt7996_unregister_phy(struct mt7996_phy *phy)
{
- struct mt76_phy *mphy;
-
- if (!phy)
- return;
-
- mt7996_unregister_thermal(phy);
-
- mphy = phy->dev->mt76.phys[band];
- mt76_unregister_phy(mphy);
- ieee80211_free_hw(mphy->hw);
- phy->dev->mt76.phys[band] = NULL;
+ if (phy)
+ mt7996_unregister_thermal(phy);
}
static void mt7996_init_work(struct work_struct *work)
@@ -884,6 +923,76 @@ out:
#endif
}
+static int mt7996_variant_type_init(struct mt7996_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_PAD_GPIO);
+ u8 var_type;
+
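+	/* The package variant is strapped via PAD GPIO bits. */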
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7990:
+ if (val & MT_PAD_GPIO_2ADIE_TBTC)
+ var_type = MT7996_VAR_TYPE_233;
+ else
+ var_type = MT7996_VAR_TYPE_444;
+ break;
+ case 0x7992:
+ if (val & MT_PAD_GPIO_ADIE_SINGLE)
+ var_type = MT7992_VAR_TYPE_23;
+ else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992))
+ var_type = MT7992_VAR_TYPE_44;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev->var.type = var_type;
+ return 0;
+}
+
+static int mt7996_variant_fem_init(struct mt7996_dev *dev)
+{
+#define MT7976C_EFUSE_OFFSET 0x470
+ u8 buf[MT7996_EEPROM_BLOCK_SIZE], idx, adie_idx, adie_comb;
+ u32 regval, val = mt76_rr(dev, MT_PAD_GPIO);
+ u16 adie_id, adie_ver;
+ bool is_7976c;
+ int ret;
+
+ if (is_mt7992(&dev->mt76)) {
+ adie_idx = (val & MT_PAD_GPIO_ADIE_SINGLE) ? 0 : 1;
+ adie_comb = u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992);
+ } else {
+ adie_idx = 0;
+ adie_comb = u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB);
+ }
+
+ ret = mt7996_mcu_rf_regval(dev, MT_ADIE_CHIP_ID(adie_idx), &regval, false);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_get_eeprom(dev, MT7976C_EFUSE_OFFSET, buf, sizeof(buf));
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ adie_ver = u32_get_bits(regval, MT_ADIE_VERSION_MASK);
+ idx = MT7976C_EFUSE_OFFSET % MT7996_EEPROM_BLOCK_SIZE;
+ is_7976c = adie_ver == 0x8a10 || adie_ver == 0x8b00 ||
+ adie_ver == 0x8c10 || buf[idx] == 0xc;
+
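+	/* Classify the FEM type from the adie chip ID and combination. */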
+ adie_id = u32_get_bits(regval, MT_ADIE_CHIP_ID_MASK);
+ if (adie_id == 0x7975 || adie_id == 0x7979 ||
+ (adie_id == 0x7976 && is_7976c))
+ dev->var.fem = MT7996_FEM_INT;
+ else if (adie_id == 0x7977 && adie_comb == 1)
+ dev->var.fem = MT7996_FEM_MIX;
+ else
+ dev->var.fem = MT7996_FEM_EXT;
+
+ return 0;
+}
+
static int mt7996_init_hardware(struct mt7996_dev *dev)
{
int ret, idx;
@@ -899,6 +1008,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
INIT_LIST_HEAD(&dev->wed_rro.poll_list);
spin_lock_init(&dev->wed_rro.lock);
+ ret = mt7996_variant_type_init(dev);
+ if (ret)
+ return ret;
+
ret = mt7996_dma_init(dev);
if (ret)
return ret;
@@ -913,6 +1026,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
if (ret)
return ret;
+ ret = mt7996_variant_fem_init(dev);
+ if (ret)
+ return ret;
+
ret = mt7996_eeprom_init(dev);
if (ret < 0)
return ret;
@@ -963,10 +1080,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
static void
mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
- struct ieee80211_sta_he_cap *he_cap, int vif)
+ struct ieee80211_sta_he_cap *he_cap, int vif,
+ enum nl80211_band band)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
int sts = hweight16(phy->mt76->chainmask);
+ bool non_2g = band != NL80211_BAND_2GHZ;
u8 c;
#ifdef CONFIG_MAC80211_MESH
@@ -996,10 +1115,10 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
if (is_mt7996(phy->mt76->dev))
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
else
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 * non_2g);
elem->phy_cap_info[4] |= c;
@@ -1025,8 +1144,9 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
- FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
- sts - 1);
+ (FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ sts - 1) * non_2g);
+
elem->phy_cap_info[5] |= c;
if (vif != NL80211_IFTYPE_AP)
@@ -1038,8 +1158,10 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
- c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
- IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ c = 0;
+ if (non_2g)
+ c |= IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
elem->phy_cap_info[7] |= c;
}
@@ -1080,6 +1202,9 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ he_cap_elem->phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+
switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
@@ -1119,8 +1244,7 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
@@ -1143,12 +1267,12 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype);
+ mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype, band);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss, band);
} else {
he_cap_elem->phy_cap_info[9] |=
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
@@ -1190,7 +1314,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->mac_cap_info[0] =
IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
eht_cap_elem->phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
@@ -1205,13 +1331,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->phy_cap_info[1] =
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK);
+
+ if (band != NL80211_BAND_2GHZ) {
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+ }
if (band == NL80211_BAND_6GHZ) {
eht_cap_elem->phy_cap_info[0] |=
@@ -1233,21 +1366,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
- u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+ u8_encode_bits(u8_get_bits(1, GENMASK(1, 0)),
IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
width == NL80211_CHAN_WIDTH_160 ? 0x7 :
width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
eht_cap_elem->phy_cap_info[6] =
- u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
- IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
@@ -1273,8 +1405,13 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap_elem->phy_cap_info[7] =
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ;
+
+ if (band == NL80211_BAND_2GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
if (band != NL80211_BAND_6GHZ)
@@ -1333,6 +1470,7 @@ void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy)
int mt7996_register_device(struct mt7996_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
int ret;
dev->phy.dev = dev;
@@ -1354,28 +1492,27 @@ int mt7996_register_device(struct mt7996_dev *dev)
mt7996_init_wiphy(hw, &dev->mt76.mmio.wed);
- ret = mt76_register_device(&dev->mt76, true, mt76_rates,
- ARRAY_SIZE(mt76_rates));
+ ret = mt7996_register_phy(dev, MT_BAND1);
if (ret)
return ret;
- ret = mt7996_thermal_init(&dev->phy);
+ ret = mt7996_register_phy(dev, MT_BAND2);
if (ret)
return ret;
- ret = mt7996_register_phy(dev, mt7996_phy2(dev), MT_BAND1);
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
- ret = mt7996_register_phy(dev, mt7996_phy3(dev), MT_BAND2);
- if (ret)
- return ret;
+ mt7996_for_each_phy(dev, phy)
+ mt7996_thermal_init(phy);
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
dev->recovery.hw_init_done = true;
- ret = mt7996_init_debugfs(&dev->phy);
+ ret = mt7996_init_debugfs(dev);
if (ret)
goto error;
@@ -1394,8 +1531,8 @@ error:
void mt7996_unregister_device(struct mt7996_dev *dev)
{
cancel_work_sync(&dev->wed_rro.work);
- mt7996_unregister_phy(mt7996_phy3(dev), MT_BAND2);
- mt7996_unregister_phy(mt7996_phy2(dev), MT_BAND1);
+ mt7996_unregister_phy(mt7996_phy3(dev));
+ mt7996_unregister_phy(mt7996_phy2(dev));
mt7996_unregister_thermal(&dev->phy);
mt7996_coredump_unregister(dev);
mt76_unregister_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0d21414e2c88..019c925ae600 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -72,7 +72,7 @@ static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
if (!sta->vif)
return NULL;
- return &sta->vif->sta.wcid;
+ return &sta->vif->deflink.sta.wcid;
}
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -182,7 +182,7 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rssi[3] = to_rssi(GENMASK(31, 14), val);
msta->ack_signal =
- mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+ mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);
ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
}
@@ -196,7 +196,7 @@ void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
u32 addr;
- addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
+ addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
if (enable)
mt76_set(dev, addr, BIT(5));
else
@@ -478,11 +478,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (status->wcid) {
msta = container_of(status->wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -679,14 +675,25 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
} else {
status->flag |= RX_FLAG_8023;
mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
*info);
}
- if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ if (rxv && !(status->flag & RX_FLAG_8023)) {
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
+ break;
+ case RX_ENC_HE:
+ mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ break;
+ default:
+ break;
+ }
+ }
if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
return 0;
@@ -819,12 +826,13 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif *mvif;
+ struct mt76_vif_link *mvif;
u16 tx_count = 15;
u32 val;
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -832,7 +840,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
- mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
+ mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -886,8 +894,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
if (is_mt7996(&dev->mt76))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
- else
+ else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
+
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
@@ -897,7 +906,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
u8 idx = MT7996_BASIC_RATES_TBL;
@@ -977,7 +985,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- txp->fw.bss_idx = mvif->mt76.idx;
+ txp->fw.bss_idx = mvif->deflink.mt76.idx;
}
txp->fw.token = cpu_to_le16(id);
@@ -1138,11 +1146,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
continue;
msta = container_of(wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -1368,10 +1372,7 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
out:
rcu_read_unlock();
@@ -1593,7 +1594,7 @@ mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
break;
default:
break;
@@ -1694,7 +1695,6 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt7996_dma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
mt76_queue_is_wed_rro(&mdev->q_rx[i]))
@@ -1702,10 +1702,11 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
}
- local_bh_enable();
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
@@ -1738,19 +1739,19 @@ mt7996_mac_restart(struct mt7996_dev *dev)
ret = mt7996_txbf_init(dev);
if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(dev->mphy.hw);
+ ret = mt7996_run(&dev->phy);
if (ret)
goto out;
}
if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2->mt76->hw);
+ ret = mt7996_run(phy2);
if (ret)
goto out;
}
if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3->mt76->hw);
+ ret = mt7996_run(phy3);
if (ret)
goto out;
}
@@ -1763,8 +1764,8 @@ out:
if (phy3)
clear_bit(MT76_RESET, &phy3->mt76->state);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
@@ -1957,23 +1958,23 @@ void mt7996_mac_reset_work(struct work_struct *work)
if (phy3)
clear_bit(MT76_RESET, &phy3->mt76->state);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
continue;
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
- local_bh_enable();
tasklet_schedule(&dev->mt76.irq_tasklet);
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 2b34ae5e0cb5..69dd565d8319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -7,76 +7,44 @@
#include "mcu.h"
#include "mac.h"
-static bool mt7996_dev_running(struct mt7996_dev *dev)
+int mt7996_run(struct mt7996_phy *phy)
{
- struct mt7996_phy *phy;
-
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
- return true;
-
- phy = mt7996_phy2(dev);
- if (phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
- return true;
-
- phy = mt7996_phy3(dev);
-
- return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
-}
-
-int mt7996_run(struct ieee80211_hw *hw)
-{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- bool running;
+ struct mt7996_dev *dev = phy->dev;
int ret;
- running = mt7996_dev_running(dev);
- if (!running) {
- ret = mt7996_mcu_set_hdr_trans(dev, true);
- if (ret)
- goto out;
-
- if (is_mt7992(&dev->mt76)) {
- u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
-
- ret = mt7996_mcu_cp_support(dev, queue);
- if (ret)
- goto out;
- }
- }
-
mt7996_mac_enable_nf(dev, phy->mt76->band_idx);
ret = mt7996_mcu_set_rts_thresh(phy, 0x92b);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_radio_en(phy, true);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_thermal_throttling(phy, MT7996_THERMAL_THROTTLE_MAX);
if (ret)
- goto out;
+ return ret;
ret = mt7996_mcu_set_thermal_protect(phy, true);
if (ret)
- goto out;
+ return ret;
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(dev->mphy.hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
- if (!running)
+ if (!phy->counter_reset) {
mt7996_mac_reset_counters(phy);
+ phy->counter_reset = true;
+ }
-out:
- return ret;
+ return 0;
}
static int mt7996_start(struct ieee80211_hw *hw)
@@ -87,16 +55,23 @@ static int mt7996_start(struct ieee80211_hw *hw)
flush_work(&dev->init_work);
mutex_lock(&dev->mt76.mutex);
- ret = mt7996_run(hw);
+ ret = mt7996_mcu_set_hdr_trans(dev, true);
+ if (!ret && is_mt7992(&dev->mt76)) {
+ u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
+
+ ret = mt7996_mcu_cp_support(dev, queue);
+ }
mutex_unlock(&dev->mt76.mutex);
return ret;
}
-static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
+static void mt7996_stop_phy(struct mt7996_phy *phy)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+	struct mt7996_dev *dev;
+
+	if (!phy || !test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+		return;
+
+	dev = phy->dev;
cancel_delayed_work_sync(&phy->mt76->mac_work);
@@ -109,6 +84,10 @@ static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&dev->mt76.mutex);
}
+static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
+{
+}
+
static inline int get_free_idx(u32 mask, u8 start, u8 end)
{
return ffs(~mask & GENMASK(end, start));
@@ -157,73 +136,133 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
-static void mt7996_init_bitrate_mask(struct ieee80211_vif *vif)
+static void
+mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
int i;
- for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
- mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
- mvif->bitrate_mask.control[i].he_gi = 0xff;
- mvif->bitrate_mask.control[i].he_ltf = 0xff;
- mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
- memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].ht_mcs));
- memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].vht_mcs));
- memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
- sizeof(mvif->bitrate_mask.control[i].he_mcs));
+ for (i = 0; i < ARRAY_SIZE(mlink->bitrate_mask.control); i++) {
+ mlink->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+ mlink->bitrate_mask.control[i].he_gi = 0xff;
+ mlink->bitrate_mask.control[i].he_ltf = 0xff;
+ mlink->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+ memset(mlink->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].ht_mcs));
+ memset(mlink->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].vht_mcs));
+ memset(mlink->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(mlink->bitrate_mask.control[i].he_mcs));
}
}
-static int mt7996_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int
+mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct mt7996_vif_link *mlink, struct ieee80211_key_conf *key)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt76_txq *mtxq;
- u8 band_idx = phy->mt76->band_idx;
- int idx, ret = 0;
+ struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
+ &mlink->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
+ struct mt7996_phy *phy;
+ int idx = key->keyidx;
- mutex_lock(&dev->mt76.mutex);
+ phy = mt7996_vif_link_phy(mlink);
+ if (!phy)
+ return -EINVAL;
- if (vif->type == NL80211_IFTYPE_MONITOR &&
- is_zero_ether_addr(vif->addr))
- phy->monitor_vif = vif;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= mt7996_max_interface_num(dev)) {
- ret = -ENOSPC;
- goto out;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ }
+ break;
+ default:
+ break;
}
- idx = get_omac_idx(vif->type, phy->omac_mask);
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
+ if (cmd == SET_KEY && !sta && !mlink->mt76.cipher) {
+ mlink->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(phy, vif, &vif->bss_conf, &mlink->mt76, true);
+ }
+
+ if (cmd == SET_KEY) {
+ *wcid_keyidx = idx;
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ return 0;
}
- mvif->mt76.omac_idx = idx;
- mvif->phy = phy;
- mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
- ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (key->keyidx == 6 || key->keyidx == 7)
+ return mt7996_mcu_bcn_prot_enable(dev, vif, key);
+
+ return mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
+}
+
+static void
+mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct mt7996_vif_link *mlink = data;
+
+ if (sta)
+ return;
+
+ WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, mlink, key));
+}
+
+int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink)
+{
+ struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_phy *phy = mphy->priv;
+ struct mt7996_dev *dev = phy->dev;
+ u8 band_idx = phy->mt76->band_idx;
+ struct mt76_txq *mtxq;
+ int idx, ret;
+
+ mlink->idx = __ffs64(~dev->mt76.vif_mask);
+ if (mlink->idx >= mt7996_max_interface_num(dev))
+ return -ENOSPC;
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0)
+ return -ENOSPC;
+
+ link->phy = phy;
+ mlink->omac_idx = idx;
+ mlink->band_idx = band_idx;
+ mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
+ mlink->wcid = &link->sta.wcid;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
if (ret)
- goto out;
+ return ret;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ phy->omac_mask |= BIT_ULL(mlink->omac_idx);
- idx = MT7996_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT7996_WTBL_RESERVED - mlink->idx;
- INIT_LIST_HEAD(&mvif->sta.rc_list);
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&link->sta.rc_list);
+ link->sta.wcid.idx = idx;
+ link->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&link->sta.wcid, band_idx);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -234,54 +273,50 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
}
if (vif->type != NL80211_IFTYPE_AP &&
- (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
+ (!mlink->omac_idx || mlink->omac_idx > 3))
vif->offload_flags = 0;
- vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
- mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL + 4;
+ mlink->basic_rates_idx = MT7996_BASIC_RATES_TBL + 4;
else
- mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL;
+ mlink->basic_rates_idx = MT7996_BASIC_RATES_TBL;
- mt7996_init_bitrate_mask(vif);
+ mt7996_init_bitrate_mask(vif, link);
- mt7996_mcu_add_bss_info(phy, vif, true);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, true);
/* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
* interface, since firmware only records BSSID when the entry is new
*/
if (vif->type != NL80211_IFTYPE_STATION)
- mt7996_mcu_add_sta(dev, vif, NULL, true, true);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_PORT_SECURE, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &link->sta.wcid);
-out:
- mutex_unlock(&dev->mt76.mutex);
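+	/* Program keys that were deferred until this link existed. */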
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, link);
- return ret;
+ return 0;
}
-static void mt7996_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = &mvif->sta;
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int idx = msta->wcid.idx;
-
- mt7996_mcu_add_sta(dev, vif, NULL, false, false);
- mt7996_mcu_add_bss_info(phy, vif, false);
+ struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_phy *phy = mphy->priv;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_sta *msta;
+ int idx;
- if (vif == phy->monitor_vif)
- phy->monitor_vif = NULL;
+ msta = &link->sta;
+ idx = msta->wcid.idx;
+ mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_DISCONNECT, false);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, false);
- mt7996_mcu_add_dev_info(phy, vif, false);
+ mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
- phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
- mutex_unlock(&dev->mt76.mutex);
+ dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
@@ -291,6 +326,124 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
+static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 filter = phy->rxfilter;
+
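+	/* Outside monitor mode (other-unicast dropped), also drop control
+	 * frames and FCS failures that only a sniffer needs.
+	 */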
+ if (filter & MT_WF_RFCR_DROP_OTHER_UC) {
+ filter |= MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_FCSFAIL;
+ }
+
+ mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), filter);
+ if (filter & MT_WF_RFCR_DROP_CTL_RSV)
+ mt76_set(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+ else
+ mt76_clear(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+}
+
+static void mt7996_set_monitor(struct mt7996_phy *phy, bool enabled)
+{
+	struct mt7996_dev *dev;
+
+	if (!phy)
+		return;
+
+	dev = phy->dev;
+
+ if (enabled == !(phy->rxfilter & MT_WF_RFCR_DROP_OTHER_UC))
+ return;
+
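+	/* Monitor state is tracked via the OTHER_UC drop bit in the rx filter. */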
+ if (!enabled)
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx),
+ MT_DMA_DCR0_RXD_G5_EN, enabled);
+ mt7996_phy_set_rxfilter(phy);
+ mt7996_mcu_set_sniffer_mode(phy, enabled);
+}
+
+static int mt7996_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ int i, err = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
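+	/* Bring up every radio covered by this interface's radio mask. */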
+ for (i = 0; i < MT7996_MAX_RADIOS; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!phy || !(wdev->radio_mask & BIT(i)) ||
+ test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
+
+ err = mt7996_run(phy);
+ if (err)
+ goto out;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mt7996_set_monitor(phy, true);
+ }
+
+ mt76_vif_init(vif, &mvif->mt76);
+
+ vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+struct mt7996_radio_data {
+ u32 active_mask;
+ u32 monitor_mask;
+};
+
+static void mt7996_remove_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+ struct mt7996_radio_data *rdata = data;
+
+ rdata->active_mask |= wdev->radio_mask;
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ rdata->monitor_mask |= wdev->radio_mask;
+}
+
+static void mt7996_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_radio_data rdata = {};
+ int i;
+
+ ieee80211_iterate_active_interfaces_mtx(hw, 0, mt7996_remove_iter,
+ &rdata);
+ mt76_vif_cleanup(&dev->mt76, vif);
+
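+	/* Disable monitor mode and stop phys no remaining interface uses. */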
+ for (i = 0; i < MT7996_MAX_RADIOS; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!phy)
+ continue;
+ if (!(rdata.monitor_mask & BIT(i)))
+ mt7996_set_monitor(phy, false);
+ if (!(rdata.active_mask & BIT(i)))
+ mt7996_stop_phy(phy);
+ }
+}
+
int mt7996_set_channel(struct mt76_phy *mphy)
{
struct mt7996_phy *phy = mphy->priv;
@@ -304,6 +457,10 @@ int mt7996_set_channel(struct mt76_phy *mphy)
if (ret)
goto out;
+ ret = mt7996_mcu_set_txpower_sku(phy);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -322,14 +479,9 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
- &mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
- int idx = key->keyidx;
- int err = 0;
+ struct mt7996_vif_link *mlink = &mvif->deflink;
+ int err;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -354,11 +506,8 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ if (key->keyidx == 6 || key->keyidx == 7)
break;
- }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -366,30 +515,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- mutex_lock(&dev->mt76.mutex);
-
- if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(phy, vif, true);
- }
+ if (!mt7996_vif_link_phy(mlink))
+ return 0; /* defer until after link add */
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- goto out;
- }
-
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (key->keyidx == 6 || key->keyidx == 7)
- err = mt7996_mcu_bcn_prot_enable(dev, vif, key);
- else
- err = mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
-out:
+ mutex_lock(&dev->mt76.mutex);
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, mlink, key);
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -397,40 +527,6 @@ out:
static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int ret;
-
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = mt76_update_channel(phy->mt76);
- if (ret)
- return ret;
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- ret = mt7996_mcu_set_txpower_sku(phy);
- if (ret)
- return ret;
- }
-
- mutex_lock(&dev->mt76.mutex);
-
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
- if (!enabled)
- phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
- else
- phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
- mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx),
- MT_DMA_DCR0_RXD_G5_EN, enabled);
- mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
- }
-
- mutex_unlock(&dev->mt76.mutex);
-
return 0;
}
@@ -439,7 +535,8 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_vif_link *mlink = mt7996_vif_link(dev, vif, link_id);
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
@@ -448,7 +545,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
};
/* firmware uses access class index */
- mvif->queue_params[mq_to_aci[queue]] = *params;
+ mlink->queue_params[mq_to_aci[queue]] = *params;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
return 0;
@@ -460,34 +557,18 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
- MT_WF_RFCR1_DROP_BF_POLL |
- MT_WF_RFCR1_DROP_BA |
- MT_WF_RFCR1_DROP_CFEND |
- MT_WF_RFCR1_DROP_CFACK;
+ struct mt7996_phy *phy;
+ u32 filter_mask = 0, filter_set = 0;
u32 flags = 0;
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- phy->rxfilter &= ~(_hw); \
- phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ filter_mask |= (_hw); \
+ filter_set |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mutex_lock(&dev->mt76.mutex);
- phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
-
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
MT_WF_RFCR_DROP_A3_BSSID);
@@ -496,57 +577,42 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
- if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
- else
- mt76_set(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+ mt7996_for_each_phy(dev, phy) {
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI |
+ filter_mask);
+ phy->rxfilter |= filter_set;
+ mt7996_phy_set_rxfilter(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
}
-static void
-mt7996_update_bss_color(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_he_bss_color *bss_color)
-{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP: {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-
- if (mvif->mt76.omac_idx > HW_BSSID_MAX)
- return;
- fallthrough;
- }
- case NL80211_IFTYPE_STATION:
- mt7996_mcu_update_bss_color(dev, vif, bss_color);
- break;
- default:
- break;
- }
-}
-
static u8
-mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt76_phy *mphy = hw->priv;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt76_vif_link *mvif = mt76_vif_conf_link(&dev->mt76, conf->vif, conf);
u16 rate;
u8 i, idx;
- rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast);
+ rate = mt76_connac2_mac_tx_rate_val(phy->mt76, conf, beacon, mcast);
if (beacon) {
- struct mt7996_phy *phy = mphy->priv;
-
/* odd index for driver, even index for firmware */
idx = MT7996_BEACON_RATES_TBL + 2 * phy->mt76->band_idx;
if (phy->beacon_rate != rate)
@@ -569,7 +635,7 @@ mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- u8 band = mvif->mt76.band_idx;
+ u8 band = mvif->deflink.mt76.band_idx;
u32 *mu;
mu = (u32 *)info->mu_group.membership;
@@ -588,20 +654,31 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info,
u64 changed)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt76_vif_link *mvif;
+ struct mt7996_phy *phy;
+ struct mt76_phy *mphy;
mutex_lock(&dev->mt76.mutex);
+ mvif = mt76_vif_conf_link(&dev->mt76, vif, info);
+ if (!mvif)
+ goto out;
+
+ mphy = mt76_vif_link_phy(mvif);
+ if (!mphy)
+ goto out;
+
+ phy = mphy->priv;
+
/* station mode uses BSSID to map the wlan entry to a peer,
* and then peer references bss_info_rfch to set bandwidth cap.
*/
if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ||
(changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
- mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true,
+ mt7996_mcu_add_bss_info(phy, vif, info, mvif, true);
+ mt7996_mcu_add_sta(dev, vif, mvif, NULL, CONN_STATE_PORT_SECURE,
!!(changed & BSS_CHANGED_BSSID));
}
@@ -613,34 +690,39 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt7996_mcu_set_timing(phy, vif);
+ mt7996_mcu_set_timing(phy, vif, info);
}
}
if (changed & BSS_CHANGED_MCAST_RATE)
mvif->mcast_rates_idx =
- mt7996_get_rates_table(hw, vif, false, true);
+ mt7996_get_rates_table(phy, info, false, true);
if (changed & BSS_CHANGED_BASIC_RATES)
mvif->basic_rates_idx =
- mt7996_get_rates_table(hw, vif, false, false);
+ mt7996_get_rates_table(phy, info, false, false);
/* ensure that enable txcmd_mode after bss_info */
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
- mt7996_mcu_set_tx(dev, vif);
+ mt7996_mcu_set_tx(dev, vif, info);
if (changed & BSS_CHANGED_HE_OBSS_PD)
mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
- if (changed & BSS_CHANGED_HE_BSS_COLOR)
- mt7996_update_bss_color(hw, vif, &info->he_bss_color);
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if ((vif->type == NL80211_IFTYPE_AP &&
+ mvif->omac_idx <= HW_BSSID_MAX) ||
+ vif->type == NL80211_IFTYPE_STATION)
+ mt7996_mcu_update_bss_color(dev, mvif,
+ &info->he_bss_color);
+ }
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) {
mvif->beacon_rates_idx =
- mt7996_get_rates_table(hw, vif, true, false);
+ mt7996_get_rates_table(phy, info, true, false);
- mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+ mt7996_mcu_add_beacon(hw, vif, info);
}
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -650,6 +732,13 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MU_GROUPS)
mt7996_update_mu_group(hw, vif, info);
+ if (changed & BSS_CHANGED_TXPOWER &&
+ info->txpower != phy->txpower) {
+ phy->txpower = info->txpower;
+ mt7996_mcu_set_txpower_sku(phy);
+ }
+
+out:
mutex_unlock(&dev->mt76.mutex);
}
@@ -661,7 +750,7 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mcu_add_beacon(hw, vif, true);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
mutex_unlock(&dev->mt76.mutex);
}
@@ -671,8 +760,9 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u8 band_idx = mvif->phy->mt76->band_idx;
- int ret, idx;
+ struct mt7996_vif_link *link = &mvif->deflink;
+ u8 band_idx = link->phy->mt76->band_idx;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
if (idx < 0)
@@ -684,18 +774,59 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.sta = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7996_mcu_add_sta(dev, vif, &link->mt76, sta, CONN_STATE_DISCONNECT,
+ true);
- ret = mt7996_mcu_add_sta(dev, vif, sta, true, true);
- if (ret)
- return ret;
+ return 0;
+}
+
+int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_link *link = &mvif->deflink;
+ int i, ret;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
- return mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
+ ret = mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_PORT_SECURE, false);
+
+ case MT76_STA_EVENT_DISASSOC:
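+		/* Tear down active TWT flows before disconnecting the STA. */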
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7996_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
+ CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+
+ return 0;
+ }
+
+ return 0;
}
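
The new mt7996_mac_sta_event() maps mac80211 station events onto firmware connection states: ASSOC programs CONN_STATE_CONNECT plus rate control, AUTHORIZE promotes to CONN_STATE_PORT_SECURE, and DISASSOC tears down TWT flows and returns to CONN_STATE_DISCONNECT. A minimal standalone sketch of that ladder — plain C, not kernel code; the enum names only mirror the driver's:

#include <stdio.h>

enum conn_state { CONN_DISCONNECT, CONN_CONNECT, CONN_PORT_SECURE };
enum sta_event  { EV_ASSOC, EV_AUTHORIZE, EV_DISASSOC };

/* Each event moves the station one step along the ladder. */
static enum conn_state apply_event(enum conn_state cur, enum sta_event ev)
{
	switch (ev) {
	case EV_ASSOC:     return CONN_CONNECT;     /* firmware entry created */
	case EV_AUTHORIZE: return CONN_PORT_SECURE; /* 802.1X port opened */
	case EV_DISASSOC:  return CONN_DISCONNECT;  /* entry torn down */
	}
	return cur;
}

int main(void)
{
	enum conn_state s = CONN_DISCONNECT;

	s = apply_event(s, EV_ASSOC);
	s = apply_event(s, EV_AUTHORIZE);
	printf("state=%d\n", s); /* prints 2 (CONN_PORT_SECURE) */
	return 0;
}
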
void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -703,16 +834,10 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- int i;
-
- mt7996_mcu_add_sta(dev, vif, sta, false, false);
mt7996_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7996_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -731,6 +856,22 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ if (vif) {
+ struct mt7996_vif *mvif;
+
+ mvif = (struct mt7996_vif *)vif->drv_priv;
+ wcid = &mvif->deflink.sta.wcid;
+
+ if (mvif->mt76.roc_phy &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
+ mphy = mvif->mt76.roc_phy;
+ if (mphy->roc_link)
+ wcid = mphy->roc_link->wcid;
+ } else {
+ mphy = mt76_vif_link_phy(&mvif->deflink.mt76);
+ }
+ }
+
if (control->sta) {
struct mt7996_sta *sta;
@@ -738,11 +879,9 @@ static void mt7996_tx(struct ieee80211_hw *hw,
wcid = &sta->wcid;
}
- if (vif && !control->sta) {
- struct mt7996_vif *mvif;
-
- mvif = (struct mt7996_vif *)vif->drv_priv;
- wcid = &mvif->sta.wcid;
+ if (!mphy) {
+ ieee80211_free_txskb(hw, skb);
+ return;
}
mt76_tx(mphy, control->sta, wcid, skb);
@@ -750,12 +889,20 @@ static void mt7996_tx(struct ieee80211_hw *hw,
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int ret;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ int i, ret;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
- mutex_lock(&phy->dev->mt76.mutex);
- ret = mt7996_mcu_set_rts_thresh(phy, val);
- mutex_unlock(&phy->dev->mt76.mutex);
+ ret = mt7996_mcu_set_rts_thresh(phy, val);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
@@ -818,35 +965,24 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7996_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7996_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7996_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt76_mib_stats *mib = &phy->mib;
+ int i;
mutex_lock(&dev->mt76.mutex);
- stats->dot11RTSSuccessCount = mib->rts_cnt;
- stats->dot11RTSFailureCount = mib->rts_retries_cnt;
- stats->dot11FCSErrorCount = mib->fcs_err_cnt;
- stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+ struct mt76_mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount += mib->rts_cnt;
+ stats->dot11RTSFailureCount += mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount += mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount += mib->ack_fail_cnt;
+ }
mutex_unlock(&dev->mt76.mutex);
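
With one ieee80211_hw now fronting several radios, mt7996_get_stats() zeroes the caller's struct once and then accumulates each phy's MIB counters. A standalone sketch of the same zero-then-sum pattern, with illustrative types rather than the driver's:

#include <stdio.h>
#include <string.h>

struct mib   { unsigned long rts_cnt, fcs_err_cnt; };
struct stats { unsigned long rts_ok, fcs_err; };

static void aggregate(struct stats *out, const struct mib *mibs, int n_radio)
{
	memset(out, 0, sizeof(*out));	/* stale totals would double-count */
	for (int i = 0; i < n_radio; i++) {
		out->rts_ok  += mibs[i].rts_cnt;
		out->fcs_err += mibs[i].fcs_err_cnt;
	}
}

int main(void)
{
	struct mib m[3] = { {10, 1}, {20, 2}, {30, 3} };
	struct stats s;

	aggregate(&s, m, 3);
	printf("rts=%lu fcs=%lu\n", s.rts_ok, s.fcs_err); /* rts=60 fcs=6 */
	return 0;
}
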
@@ -856,17 +992,20 @@ mt7996_get_stats(struct ieee80211_hw *hw,
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf;
u16 n;
+ if (!phy)
+ return 0;
+
lockdep_assert_held(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
@@ -896,17 +1035,20 @@ mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software overwrite */
@@ -922,17 +1064,20 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
- n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->mt76.omac_idx;
+ n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->deflink.mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software adjust */
@@ -945,12 +1090,14 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
mutex_lock(&dev->mt76.mutex);
- phy->coverage_class = max_t(s16, coverage_class, 0);
- mt7996_mac_set_coverage_class(phy);
+ mt7996_for_each_phy(dev, phy) {
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7996_mac_set_coverage_class(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
}
@@ -958,33 +1105,33 @@ static int
mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- int max_nss = hweight8(hw->wiphy->available_antennas_tx);
- u8 band_idx = phy->mt76->band_idx, shift = dev->chainshift[band_idx];
+ int i;
- if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ if (tx_ant != rx_ant)
return -EINVAL;
- if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
- tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+
+ if (!(tx_ant & phy->orig_chainmask))
+ return -EINVAL;
+ }
mutex_lock(&dev->mt76.mutex);
- phy->mt76->antenna_mask = tx_ant;
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ struct mt7996_phy *phy = dev->radio_phy[i];
+ u8 band_idx = phy->mt76->band_idx;
+ u8 shift = dev->chainshift[band_idx];
- /* restore to the origin chainmask which might have auxiliary path */
- if (hweight8(tx_ant) == max_nss && band_idx < MT_BAND2)
- phy->mt76->chainmask = ((dev->chainmask >> shift) &
- (BIT(dev->chainshift[band_idx + 1] - shift) - 1)) << shift;
- else if (hweight8(tx_ant) == max_nss)
- phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
- else
- phy->mt76->chainmask = tx_ant << shift;
+ phy->mt76->chainmask = tx_ant & phy->orig_chainmask;
+ phy->mt76->antenna_mask = phy->mt76->chainmask >> shift;
- mt76_set_stream_caps(phy->mt76, true);
- mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_eht_caps(phy);
- mt7996_mcu_set_txpower_sku(phy);
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
+ mt7996_mcu_set_txpower_sku(phy);
+ }
mutex_unlock(&dev->mt76.mutex);
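
The reworked mt7996_set_antenna() treats the user mask as a global chain bitmap: each radio keeps only the bits inside its original chainmask and derives its local antenna_mask by shifting that slice down by the per-band chainshift. A sketch with made-up mask values:

#include <stdio.h>

int main(void)
{
	unsigned int tx_ant = 0x3f;          /* requested chains, all bands */
	unsigned int orig_chainmask = 0x30;  /* this radio owns chains 4..5 */
	unsigned int shift = 4;              /* dev->chainshift[band_idx] */

	unsigned int chainmask = tx_ant & orig_chainmask;  /* 0x30 */
	unsigned int antenna_mask = chainmask >> shift;    /* 0x3: two local streams */

	printf("chainmask=0x%x antenna_mask=0x%x\n", chainmask, antenna_mask);
	return 0;
}
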
@@ -996,7 +1143,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct rate_info *txrate = &msta->wcid.rate;
@@ -1030,7 +1177,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
- if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
@@ -1048,7 +1195,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
u32 *changed = data;
spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -1063,9 +1210,8 @@ static void mt7996_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
mt7996_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1075,12 +1221,11 @@ static int
mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
- mvif->bitrate_mask = *mask;
+ mvif->deflink.bitrate_mask = *mask;
/* if multiple rates across different preambles are given we can
* reconfigure this info with all peers using sta_rec command with
@@ -1109,6 +1254,9 @@ static void mt7996_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
}
@@ -1125,6 +1273,9 @@ static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
}
@@ -1258,7 +1409,7 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
struct mt76_ethtool_worker_info *wi = wi_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- if (msta->vif->mt76.idx != wi->idx)
+ if (msta->vif->deflink.mt76.idx != wi->idx)
return;
mt76_ethtool_worker(wi, &msta->wcid.stats, true);
@@ -1270,16 +1421,19 @@ void mt7996_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
struct mt76_mib_stats *mib = &phy->mib;
struct mt76_ethtool_worker_info wi = {
.data = data,
- .idx = mvif->mt76.idx,
+ .idx = mvif->deflink.mt76.idx,
};
/* See mt7996_ampdu_stat_read_phy, etc */
int i, ei = 0;
+ if (!phy)
+ return;
+
mutex_lock(&dev->mt76.mutex);
mt7996_mac_update_stats(phy);
@@ -1373,11 +1527,18 @@ static int
mt7996_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
int ret = -EINVAL;
bool running;
+ if (chandef)
+ phy = mt7996_band_phy(dev, chandef->chan->band);
+ else
+ phy = dev->rdd2_phy;
+ if (!phy)
+ return -EINVAL;
+
mutex_lock(&dev->mt76.mutex);
if (dev->mt76.region == NL80211_DFS_UNSET)
@@ -1428,9 +1589,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_vif_link *mlink = &mvif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mt7996_phy *phy;
+
+ phy = mt7996_vif_link_phy(mlink);
+ if (!phy)
+ return -ENODEV;
if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
wed = &dev->mt76.mmio.wed_hif2;
@@ -1438,13 +1604,13 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!mtk_wed_device_active(wed))
return -ENODEV;
- if (msta->wcid.idx > MT7996_WTBL_STA)
+ if (!msta->wcid.sta || msta->wcid.idx > MT7996_WTBL_STA)
return -EIO;
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.bss = mvif->deflink.mt76.idx;
path->mtk_wdma.queue = 0;
path->mtk_wdma.wcid = msta->wcid.idx;
@@ -1457,10 +1623,12 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
- .add_chanctx = ieee80211_emulate_add_chanctx,
- .remove_chanctx = ieee80211_emulate_remove_chanctx,
- .change_chanctx = ieee80211_emulate_change_chanctx,
- .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ .add_chanctx = mt76_add_chanctx,
+ .remove_chanctx = mt76_remove_chanctx,
+ .change_chanctx = mt76_change_chanctx,
+ .assign_vif_chanctx = mt76_assign_vif_chanctx,
+ .unassign_vif_chanctx = mt76_unassign_vif_chanctx,
+ .switch_vif_chanctx = mt76_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
@@ -1470,16 +1638,17 @@ const struct ieee80211_ops mt7996_ops = {
.conf_tx = mt7996_conf_tx,
.configure_filter = mt7996_configure_filter,
.bss_info_changed = mt7996_bss_info_changed,
- .sta_add = mt7996_sta_add,
- .sta_remove = mt7996_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.link_sta_rc_update = mt7996_sta_rc_update,
.set_key = mt7996_set_key,
.ampdu_action = mt7996_ampdu_action,
.set_rts_threshold = mt7996_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
- .sw_scan_start = mt76_sw_scan,
- .sw_scan_complete = mt76_sw_scan_complete,
+ .hw_scan = mt76_hw_scan,
+ .cancel_hw_scan = mt76_cancel_hw_scan,
+ .remain_on_channel = mt76_remain_on_channel,
+ .cancel_remain_on_channel = mt76_cancel_remain_on_channel,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7996_channel_switch_beacon,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 6c445a9dbc03..e4569d032221 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -14,11 +14,23 @@
char *_fw; \
switch (mt76_chip(&(_dev)->mt76)) { \
case 0x7992: \
- _fw = MT7992_##name; \
+ switch ((_dev)->var.type) { \
+ case MT7992_VAR_TYPE_23: \
+ _fw = MT7992_##name##_23; \
+ break; \
+ default: \
+ _fw = MT7992_##name; \
+ } \
break; \
case 0x7990: \
default: \
- _fw = MT7996_##name; \
+ switch ((_dev)->var.type) { \
+ case MT7996_VAR_TYPE_233: \
+ _fw = MT7996_##name##_233; \
+ break; \
+ default: \
+ _fw = MT7996_##name; \
+ } \
break; \
} \
_fw; \
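
The firmware-name macro above relies on a GNU C statement expression, so a full switch on chip ID and hardware variant can still yield a value. A compilable sketch of the construct, with invented variant flag and file names:

#include <stdio.h>

/* ({ ... }) is a GNU C statement expression: the block evaluates to its
 * last expression, letting a macro run control flow and still "return"
 * a value. Chip IDs and paths below are illustrative only. */
#define fw_name(chip, variant)                          \
({                                                      \
	const char *_fw;                                \
	switch (chip) {                                 \
	case 0x7992:                                    \
		_fw = (variant) ? "mt7992_wm_23.bin"    \
				: "mt7992_wm.bin";      \
		break;                                  \
	default:                                        \
		_fw = (variant) ? "mt7996_wm_233.bin"   \
				: "mt7996_wm.bin";      \
		break;                                  \
	}                                               \
	_fw;                                            \
})

int main(void)
{
	printf("%s\n", fw_name(0x7992, 1)); /* mt7992_wm_23.bin */
	return 0;
}
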
@@ -110,8 +122,8 @@ mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
u16 mcs_map)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ enum nl80211_band band = msta->vif->deflink.phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->deflink.bitrate_mask.control[band].he_mcs;
int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++) {
@@ -744,8 +756,7 @@ mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
}
static void
-mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy)
+mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
static const u8 rlm_ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
@@ -775,8 +786,7 @@ mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy)
+mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
struct bss_ra_tlv *ra;
struct tlv *tlv;
@@ -789,6 +799,7 @@ mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
static void
mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct mt7996_phy *phy)
{
#define DEFAULT_HE_PE_DURATION 4
@@ -802,11 +813,11 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_uni_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ he->he_pe_duration = link_conf->htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -816,13 +827,13 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, int enable)
+mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
+ bool enable)
{
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator && enable)
+ if (!link_conf->bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -830,23 +841,22 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
mbssid = (struct bss_info_uni_mbssid *)tlv;
if (enable) {
- mbssid->max_indicator = vif->bss_conf.bssid_indicator;
- mbssid->mbss_idx = vif->bss_conf.bssid_index;
+ mbssid->max_indicator = link_conf->bssid_indicator;
+ mbssid->mbss_idx = link_conf->bssid_index;
mbssid->tx_bss_omac_idx = 0;
}
}
static void
-mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink,
struct mt7996_phy *phy)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct bss_rate_tlv *bmc;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- u8 idx = mvif->mcast_rates_idx ?
- mvif->mcast_rates_idx : mvif->basic_rates_idx;
+ u8 idx = mlink->mcast_rates_idx ?
+ mlink->mcast_rates_idx : mlink->basic_rates_idx;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_RATE, sizeof(*bmc));
@@ -870,9 +880,8 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
@@ -880,33 +889,28 @@ mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
mld = (struct bss_mld_tlv *)tlv;
mld->group_mld_id = 0xff;
- mld->own_mld_id = mvif->mt76.idx;
+ mld->own_mld_id = mlink->idx;
mld->remap_idx = 0xff;
}
static void
-mt7996_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_sec_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct bss_sec_tlv *sec;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_SEC, sizeof(*sec));
sec = (struct bss_sec_tlv *)tlv;
- sec->cipher = mvif->cipher;
+ sec->cipher = mlink->cipher;
}
static int
-mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
- bool bssid, bool enable)
+mt7996_mcu_muar_config(struct mt7996_dev *dev, struct mt76_vif_link *mlink,
+ const u8 *addr, bool bssid, bool enable)
{
#define UNI_MUAR_ENTRY 2
- struct mt7996_dev *dev = phy->dev;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
- const u8 *addr = vif->addr;
-
+ u32 idx = mlink->omac_idx - REPEATER_BSSID_START;
struct {
struct {
u8 band;
@@ -923,7 +927,7 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
u8 addr[ETH_ALEN];
u8 __rsv[2];
} __packed req = {
- .hdr.band = phy->mt76->band_idx,
+ .hdr.band = mlink->band_idx,
.tag = cpu_to_le16(UNI_MUAR_ENTRY),
.len = cpu_to_le16(sizeof(req) - sizeof(req.hdr)),
.smesh = false,
@@ -931,9 +935,6 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
.entry_add = true,
};
- if (bssid)
- addr = vif->bss_conf.bssid;
-
if (enable)
memcpy(req.addr, addr, ETH_ALEN);
@@ -942,10 +943,8 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
struct bss_ifs_time_tlv *ifs_time;
struct tlv *tlv;
bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
@@ -972,15 +971,16 @@ mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
static int
mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mvif,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct cfg80211_chan_def *chandef = &phy->chandef;
struct mt76_connac_bss_basic_tlv *bss;
u32 type = CONNECTION_INFRA_AP;
u16 sta_wlan_idx = wlan_idx;
+ struct ieee80211_sta *sta;
struct tlv *tlv;
int idx;
@@ -992,9 +992,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
case NL80211_IFTYPE_STATION:
if (enable) {
rcu_read_lock();
- if (!sta)
- sta = ieee80211_find_sta(vif,
- vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
if (sta) {
struct mt76_wcid *wcid;
@@ -1017,8 +1015,8 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*bss));
bss = (struct mt76_connac_bss_basic_tlv *)tlv;
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
+ bss->dtim_period = link_conf->dtim_period;
bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
bss->sta_idx = cpu_to_le16(sta_wlan_idx);
bss->conn_type = cpu_to_le32(type);
@@ -1036,19 +1034,19 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
return 0;
}
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ memcpy(bss->bssid, link_conf->bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(link_conf->beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
bss->phymode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
- bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, vif,
+ bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, &vif->bss_conf,
chandef->chan->band);
return 0;
}
static struct sk_buff *
-__mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
+__mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int len)
{
struct bss_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -1064,71 +1062,72 @@ __mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
-int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, int enable)
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, int enable)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
struct sk_buff *skb;
- if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
- mt7996_mcu_muar_config(phy, vif, false, enable);
- mt7996_mcu_muar_config(phy, vif, true, enable);
+ if (mlink->omac_idx >= REPEATER_BSSID_START) {
+ mt7996_mcu_muar_config(dev, mlink, link_conf->addr, false, enable);
+ mt7996_mcu_muar_config(dev, mlink, link_conf->bssid, true, enable);
}
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_basic must be first */
- mt7996_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
- mvif->sta.wcid.idx, enable);
- mt7996_mcu_bss_sec_tlv(skb, vif);
+ mt7996_mcu_bss_basic_tlv(skb, vif, link_conf, mlink, phy->mt76,
+ mlink->wcid->idx, enable);
+ mt7996_mcu_bss_sec_tlv(skb, mlink);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
if (enable) {
- mt7996_mcu_bss_rfch_tlv(skb, vif, phy);
- mt7996_mcu_bss_bmc_tlv(skb, vif, phy);
- mt7996_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7996_mcu_bss_rfch_tlv(skb, phy);
+ mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
+ mt7996_mcu_bss_ra_tlv(skb, phy);
mt7996_mcu_bss_txcmd_tlv(skb, true);
- mt7996_mcu_bss_ifs_timing_tlv(skb, vif);
+ mt7996_mcu_bss_ifs_timing_tlv(skb, phy);
if (vif->bss_conf.he_support)
- mt7996_mcu_bss_he_tlv(skb, vif, phy);
+ mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary whether or not the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, vif);
+ mt7996_mcu_bss_mld_tlv(skb, mlink);
}
- mt7996_mcu_bss_mbssid_tlv(skb, vif, phy, enable);
+ mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
+int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink = mt76_vif_conf_link(&dev->mt76, vif, link_conf);
struct sk_buff *skb;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7996_mcu_bss_ifs_timing_tlv(skb, vif);
+ mt7996_mcu_bss_ifs_timing_tlv(skb, phy);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
static int
-mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif *mvif,
+mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
@@ -1168,7 +1167,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, true);
+ return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
@@ -1178,7 +1177,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
struct mt7996_vif *mvif = msta->vif;
- return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, false);
+ return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, false);
}
static void
@@ -1461,17 +1460,21 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf, struct mt7996_phy *phy)
{
bf->sounding_phy = MT_PHY_TYPE_OFDM;
bf->ndp_rate = 0; /* mcs0 */
- bf->ndpa_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ if (is_mt7996(phy->mt76->dev))
+ bf->ndpa_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ else
+ bf->ndpa_rate = MT7992_CFEND_RATE_DEFAULT; /* ofdm 6m */
+
bf->rept_poll_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
}
static void
mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf)
+ struct sta_rec_bf *bf, bool explicit)
{
struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u8 n = 0;
@@ -1491,7 +1494,8 @@ mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
bf->nrow = hweight8(phy->mt76->antenna_mask) - 1;
bf->ncol = min_t(u8, bf->nrow, n);
- bf->ibf_ncol = n;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, n);
}
static void
@@ -1509,7 +1513,7 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
if (explicit) {
u8 sts, snd_dim;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
pc->cap);
@@ -1517,14 +1521,14 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
vc->cap);
bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, bf->ncol);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = nss_mcs;
+ bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
@@ -1533,7 +1537,8 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
static void
mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf)
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
@@ -1549,7 +1554,7 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->tx_mode = MT_PHY_TYPE_HE_SU;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
pe->phy_cap_info[6]);
@@ -1561,7 +1566,8 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
pe->phy_cap_info[4]);
bf->nrow = min_t(u8, snd_dim, sts);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
return;
@@ -1596,7 +1602,8 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
static void
mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf)
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
@@ -1610,7 +1617,7 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->tx_mode = MT_PHY_TYPE_EHT_MU;
- mt7996_mcu_sta_sounding_rate(bf);
+ mt7996_mcu_sta_sounding_rate(bf, phy);
bf->trigger_su = EHT_PHY(CAP3_TRIG_SU_BF_FDBK, pe->phy_cap_info[3]);
bf->trigger_mu = EHT_PHY(CAP3_TRIG_MU_BF_PART_BW_FDBK, pe->phy_cap_info[3]);
@@ -1619,7 +1626,8 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
(EHT_PHY(CAP1_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[1]) << 1);
bf->nrow = min_t(u8, snd_dim, sts);
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
- bf->ibf_ncol = bf->ncol;
+ bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
+ min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
return;
@@ -1654,12 +1662,15 @@ static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+#define EBF_MODE BIT(0)
+#define IBF_MODE BIT(1)
+#define BF_MAT_ORDER 4
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
+ struct mt7996_phy *phy = mvif->deflink.phy;
int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- static const u8 matrix[4][4] = {
+ static const u8 matrix[BF_MAT_ORDER][BF_MAT_ORDER] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1677,35 +1688,44 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
- /* he/eht: eBF only, in accordance with spec
+ /* he/eht: eBF only, except mt7992 with 5T on 5GHz, which also supports iBF
 * vht: supports both eBF and iBF
 * ht: iBF only, since mac80211 lacks eBF support
*/
- if (sta->deflink.eht_cap.has_eht && ebf)
- mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf);
- else if (sta->deflink.he_cap.has_he && ebf)
- mt7996_mcu_sta_bfer_he(sta, vif, phy, bf);
+ if (sta->deflink.eht_cap.has_eht)
+ mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf, ebf);
+ else if (sta->deflink.he_cap.has_he)
+ mt7996_mcu_sta_bfer_he(sta, vif, phy, bf, ebf);
else if (sta->deflink.vht_cap.vht_supported)
mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
else if (sta->deflink.ht_cap.ht_supported)
- mt7996_mcu_sta_bfer_ht(sta, phy, bf);
+ mt7996_mcu_sta_bfer_ht(sta, phy, bf, ebf);
else
return;
- bf->bf_cap = ebf ? ebf : dev->ibf << 1;
+ bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0);
+ if (is_mt7992(&dev->mt76) && tx_ant == 4)
+ bf->bf_cap |= IBF_MODE;
bf->bw = sta->deflink.bandwidth;
bf->ibf_dbw = sta->deflink.bandwidth;
bf->ibf_nrow = tx_ant;
- if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
- bf->ibf_timeout = 0x48;
+ if (sta->deflink.eht_cap.has_eht || sta->deflink.he_cap.has_he)
+ bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT :
+ MT7992_IBF_TIMEOUT;
+ else if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY;
else
- bf->ibf_timeout = 0x18;
+ bf->ibf_timeout = MT7996_IBF_TIMEOUT;
- if (ebf && bf->nrow != tx_ant)
- bf->mem_20m = matrix[tx_ant][bf->ncol];
- else
- bf->mem_20m = matrix[bf->nrow][bf->ncol];
+ if (bf->ncol < BF_MAT_ORDER) {
+ if (ebf)
+ bf->mem_20m = tx_ant < BF_MAT_ORDER ?
+ matrix[tx_ant][bf->ncol] : 0;
+ else
+ bf->mem_20m = bf->nrow < BF_MAT_ORDER ?
+ matrix[bf->nrow][bf->ncol] : 0;
+ }
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
@@ -1726,7 +1746,7 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->phy;
+ struct mt7996_phy *phy = mvif->deflink.phy;
int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
@@ -1783,11 +1803,9 @@ mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
static void
mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif, struct mt76_wcid *wcid)
{
struct sta_rec_hdr_trans *hdr_trans;
- struct mt76_wcid *wcid;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDR_TRANS, sizeof(*hdr_trans));
@@ -1799,10 +1817,9 @@ mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- if (!sta)
+ if (!wcid)
return;
- wcid = (struct mt76_wcid *)sta->drv_priv;
hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
hdr_trans->to_ds = true;
@@ -1867,7 +1884,7 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -1903,8 +1920,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ struct cfg80211_chan_def *chandef = &mvif->deflink.phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_phy_uni phy = {};
int ret, nrates = 0;
@@ -1991,9 +2008,9 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
{
#define INIT_RCPI 180
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_phy *mphy = mvif->phy->mt76;
+ struct mt76_phy *mphy = mvif->deflink.phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra_uni *ra;
struct tlv *tlv;
@@ -2070,7 +2087,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
cap |= STA_CAP_VHT_TX_STBC;
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (vif->bss_conf.vht_ldpc &&
+ if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
@@ -2099,7 +2116,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -2146,10 +2163,10 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
.len = cpu_to_le16(sizeof(req) - 4),
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->mt76.idx % 16),
+ .val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
req.wlan_idx = cpu_to_le16(msta->wcid.idx);
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
@@ -2157,34 +2174,35 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
}
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, bool newly)
+ struct mt76_vif_link *mlink,
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct ieee80211_link_sta *link_sta;
- struct mt7996_sta *msta;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct mt76_wcid *wcid = mlink->wcid;
struct sk_buff *skb;
- int conn_state;
int ret;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
- link_sta = sta ? &sta->deflink : NULL;
+ if (sta) {
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid,
+ wcid = &msta->wcid;
+ link_sta = &sta->deflink;
+ }
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
conn_state, newly);
- if (!enable)
+ if (conn_state == CONN_STATE_DISCONNECT)
goto out;
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, wcid);
/* starec tx proc */
mt7996_mcu_sta_tx_proc_tlv(skb);
@@ -2273,7 +2291,7 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
struct sk_buff *skb;
int ret;
@@ -2299,7 +2317,7 @@ static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct tlv *tlv;
int ret;
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &mvif->sta.wcid);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, &mvif->deflink.sta.wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2335,7 +2353,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
sizeof(struct mt7996_mcu_bcn_prot_tlv);
int ret;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2375,11 +2393,12 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
-int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, bool enable)
+
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, bool enable)
{
struct mt7996_dev *dev = phy->dev;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct {
struct req_hdr {
u8 omac_idx;
@@ -2395,8 +2414,8 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
} __packed tlv;
} data = {
.hdr = {
- .omac_idx = mvif->mt76.omac_idx,
- .band_idx = mvif->mt76.band_idx,
+ .omac_idx = mlink->omac_idx,
+ .band_idx = mlink->band_idx,
},
.tlv = {
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
@@ -2405,18 +2424,18 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
},
};
- if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
- return mt7996_mcu_muar_config(phy, vif, false, enable);
+ if (mlink->omac_idx >= REPEATER_BSSID_START)
+ return mt7996_mcu_muar_config(dev, mlink, link_conf->addr, false, enable);
- memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ memcpy(data.tlv.omac_addr, link_conf->addr, ETH_ALEN);
return mt76_mcu_send_msg(&dev->mt76, MCU_WMWA_UNI_CMD(DEV_INFO_UPDATE),
&data, sizeof(data), true);
}
static void
-mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
- struct sk_buff *skb,
- struct ieee80211_mutable_offsets *offs)
+mt7996_mcu_beacon_cntdwn(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_mutable_offsets *offs,
+ bool csa)
{
struct bss_bcn_cntdwn_tlv *info;
struct tlv *tlv;
@@ -2425,7 +2444,7 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
if (!offs->cntdwn_counter_offs[0])
return;
- tag = vif->bss_conf.csa_active ? UNI_BSS_INFO_BCN_CSA : UNI_BSS_INFO_BCN_BCC;
+ tag = csa ? UNI_BSS_INFO_BCN_CSA : UNI_BSS_INFO_BCN_BCC;
tlv = mt7996_mcu_add_uni_tlv(rskb, tag, sizeof(*info));
@@ -2435,16 +2454,13 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
static void
mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct bss_bcn_content_tlv *bcn,
+ struct bss_bcn_content_tlv *bcn,
struct ieee80211_mutable_offsets *offs)
{
struct bss_bcn_mbss_tlv *mbss;
const struct element *elem;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
- return;
-
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_MBSSID, sizeof(*mbss));
mbss = (struct bss_bcn_mbss_tlv *)tlv;
@@ -2487,7 +2503,8 @@ mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
}
static void
-mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_bcn_content_tlv *bcn,
struct ieee80211_mutable_offsets *offs)
@@ -2501,9 +2518,9 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (offs->cntdwn_counter_offs[0]) {
u16 offset = offs->cntdwn_counter_offs[0];
- if (vif->bss_conf.csa_active)
+ if (link_conf->csa_active)
bcn->csa_ie_pos = cpu_to_le16(offset - 4);
- if (vif->bss_conf.color_change_active)
+ if (link_conf->color_change_active)
bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
}
@@ -2514,56 +2531,63 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
-int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int en)
+int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = mt76_vif_conf_link(&dev->mt76, vif, link_conf);
struct ieee80211_mutable_offsets offs;
struct ieee80211_tx_info *info;
struct sk_buff *skb, *rskb;
struct tlv *tlv;
struct bss_bcn_content_tlv *bcn;
- int len;
+ int len, extra_len = 0;
- if (vif->bss_conf.nontransmitted)
+ if (link_conf->nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ if (!mlink)
+ return -EINVAL;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
- if (!skb) {
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, link_conf->link_id);
+ if (link_conf->enable_beacon && !skb) {
dev_kfree_skb(rskb);
return -EINVAL;
}
- if (skb->len > MT7996_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
- dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
+ if (skb) {
+ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+ extra_len = skb->len;
+ }
- len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
+ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + extra_len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
- bcn->enable = en;
- if (!en)
+ bcn->enable = link_conf->enable_beacon;
+ if (!bcn->enable)
goto out;
- mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
- mt7996_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
- mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mlink->band_idx);
+
+ mt7996_mcu_beacon_cont(dev, link_conf, rskb, skb, bcn, &offs);
+ if (link_conf->bssid_indicator)
+ mt7996_mcu_beacon_mbss(rskb, skb, bcn, &offs);
+ mt7996_mcu_beacon_cntdwn(rskb, skb, &offs, link_conf->csa_active);
out:
dev_kfree_skb(skb);
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, rskb,
MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
}
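
The beacon TLV above is sized with ALIGN(sizeof(*bcn) + MT_TXD_SIZE + extra_len, 4), and extra_len stays 0 when no template exists, so a disabled beacon still yields a well-formed, 4-byte-aligned TLV. The round-up itself is the usual power-of-two trick; a sketch with illustrative sizes (the real struct and TXD sizes differ):

#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two); this is
 * what the kernel's ALIGN() reduces to for this case. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Illustrative: 6-byte TLV header + 36-byte TXD + 110-byte
	 * template = 152, already aligned; 153..155 round up to 156. */
	printf("%u %u\n", ALIGN_UP(152u, 4u), ALIGN_UP(153u, 4u));
	return 0;
}
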
@@ -2573,22 +2597,28 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
#define OFFLOAD_TX_MODE_SU BIT(0)
#define OFFLOAD_TX_MODE_MU BIT(1)
struct ieee80211_hw *hw = mt76_hw(dev);
- struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
- enum nl80211_band band = chandef->chan->band;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct bss_inband_discovery_tlv *discov;
struct ieee80211_tx_info *info;
struct sk_buff *rskb, *skb = NULL;
+ struct cfg80211_chan_def *chandef;
+ enum nl80211_band band;
struct tlv *tlv;
u8 *buf, interval;
int len;
+ if (!phy)
+ return -EINVAL;
+
+ chandef = &phy->mt76->chandef;
+ band = chandef->chan->band;
+
if (vif->bss_conf.nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
@@ -3147,7 +3177,8 @@ int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans)
MCU_WM_UNI_CMD(RX_HDR_TRANS), true);
}
-int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
#define MCU_EDCA_AC_PARAM 0
#define WMM_AIFS_SET BIT(0)
@@ -3156,12 +3187,12 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
WMM_CW_MAX_SET | WMM_TXOP_SET)
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
struct {
u8 bss_idx;
u8 __rsv[3];
} __packed hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = link->mt76.idx,
};
struct sk_buff *skb;
int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca);
@@ -3174,7 +3205,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
skb_put_data(skb, &hdr, sizeof(hdr));
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &link->queue_params[ac];
struct edca *e;
struct tlv *tlv;
@@ -3548,7 +3579,7 @@ int mt7996_mcu_set_eeprom(struct mt7996_dev *dev)
&req, sizeof(req), true);
}
-int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len)
{
struct {
u8 _rsv[4];
@@ -3577,15 +3608,21 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
valid = le32_to_cpu(*(__le32 *)(skb->data + 16));
if (valid) {
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
- u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
+
+ if (!buf)
+ buf = (u8 *)dev->mt76.eeprom.data + addr;
+ if (!buf_len || buf_len > MT7996_EEPROM_BLOCK_SIZE)
+ buf_len = MT7996_EEPROM_BLOCK_SIZE;
skb_pull(skb, 48);
- memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
+ memcpy(buf, skb->data, buf_len);
+ } else {
+ ret = -EINVAL;
}
dev_kfree_skb(skb);
- return 0;
+ return ret;
}
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num)
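
mt7996_mcu_get_eeprom() now lets callers pass their own destination: a NULL buf falls back to the cached EEPROM image at the reported address, and buf_len is clamped to one EEPROM block so the memcpy can never overrun. A standalone sketch of that defaulting and clamping, with an invented block size:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16	/* stand-in for MT7996_EEPROM_BLOCK_SIZE */

static void copy_block(unsigned char *cache, unsigned int addr,
		       const unsigned char *src,
		       unsigned char *buf, unsigned int buf_len)
{
	if (!buf)
		buf = cache + addr;	/* default: write through the cache */
	if (!buf_len || buf_len > BLOCK_SIZE)
		buf_len = BLOCK_SIZE;	/* never copy past one block */
	memcpy(buf, src, buf_len);
}

int main(void)
{
	unsigned char cache[64] = {0}, src[BLOCK_SIZE] = {1, 2, 3};

	copy_block(cache, 16, src, NULL, 0);
	printf("%d %d\n", cache[16], cache[17]); /* 1 2 */
	return 0;
}
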
@@ -3666,6 +3703,13 @@ int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
{
+ enum {
+ IDX_TX_TIME,
+ IDX_RX_TIME,
+ IDX_OBSS_AIRTIME,
+ IDX_NON_WIFI_TIME,
+ IDX_NUM
+ };
struct {
struct {
u8 band;
@@ -3675,16 +3719,15 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
__le16 tag;
__le16 len;
__le32 offs;
- } data[4];
+ } data[IDX_NUM];
} __packed req = {
.hdr.band = phy->mt76->band_idx,
};
- /* strict order */
static const u32 offs[] = {
- UNI_MIB_TX_TIME,
- UNI_MIB_RX_TIME,
- UNI_MIB_OBSS_AIRTIME,
- UNI_MIB_NON_WIFI_TIME,
+ [IDX_TX_TIME] = UNI_MIB_TX_TIME,
+ [IDX_RX_TIME] = UNI_MIB_RX_TIME,
+ [IDX_OBSS_AIRTIME] = UNI_MIB_OBSS_AIRTIME,
+ [IDX_NON_WIFI_TIME] = UNI_MIB_NON_WIFI_TIME,
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
@@ -3693,7 +3736,7 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
struct sk_buff *skb;
int i, ret;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < IDX_NUM; i++) {
req.data[i].tag = cpu_to_le16(UNI_CMD_MIB_DATA);
req.data[i].len = cpu_to_le16(sizeof(req.data[i]));
req.data[i].offs = cpu_to_le32(offs[i]);
@@ -3712,17 +3755,24 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
goto out;
#define __res_u64(s) le64_to_cpu(res[s].data)
- state->cc_tx += __res_u64(1) - state_ts->cc_tx;
- state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
- state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
- state->cc_busy += __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3) -
+ state->cc_tx += __res_u64(IDX_TX_TIME) - state_ts->cc_tx;
+ state->cc_bss_rx += __res_u64(IDX_RX_TIME) - state_ts->cc_bss_rx;
+ state->cc_rx += __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) -
+ state_ts->cc_rx;
+ state->cc_busy += __res_u64(IDX_TX_TIME) +
+ __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) +
+ __res_u64(IDX_NON_WIFI_TIME) -
state_ts->cc_busy;
-
out:
- state_ts->cc_tx = __res_u64(1);
- state_ts->cc_bss_rx = __res_u64(2);
- state_ts->cc_rx = __res_u64(2) + __res_u64(3);
- state_ts->cc_busy = __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3);
+ state_ts->cc_tx = __res_u64(IDX_TX_TIME);
+ state_ts->cc_bss_rx = __res_u64(IDX_RX_TIME);
+ state_ts->cc_rx = __res_u64(IDX_RX_TIME) + __res_u64(IDX_OBSS_AIRTIME);
+ state_ts->cc_busy = __res_u64(IDX_TX_TIME) +
+ __res_u64(IDX_RX_TIME) +
+ __res_u64(IDX_OBSS_AIRTIME) +
+ __res_u64(IDX_NON_WIFI_TIME);
#undef __res_u64
dev_kfree_skb(skb);
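
The hunk above swaps the magic indices 0..3 (and the fragile "strict order" comment) for an enum plus designated initializers, so each airtime counter is named at both the request and response side. The underlying C idiom, with placeholder offsets:

#include <stdio.h>

enum { IDX_TX, IDX_RX, IDX_OBSS, IDX_NONWIFI, IDX_NUM };

static const unsigned int offs[IDX_NUM] = {
	[IDX_TX]      = 0x100,	/* offsets are placeholders */
	[IDX_RX]      = 0x104,
	[IDX_OBSS]    = 0x108,
	[IDX_NONWIFI] = 0x10c,
};

int main(void)
{
	unsigned long res[IDX_NUM] = { 5, 7, 2, 1 };
	unsigned long busy = res[IDX_TX] + res[IDX_RX] +
			     res[IDX_OBSS] + res[IDX_NONWIFI];

	/* Reordering the enum reorders both tables consistently. */
	printf("busy=%lu offs[rx]=0x%x\n", busy, offs[IDX_RX]);
	return 0;
}
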
@@ -4023,7 +4073,7 @@ mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
- u8 omac = mvif->mt76.omac_idx;
+ u8 omac = mvif->deflink.mt76.omac_idx;
struct {
u8 band_idx;
u8 __rsv[3];
@@ -4137,16 +4187,16 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
return mt7996_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
}
-int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
+ struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color)
{
int len = sizeof(struct bss_req_hdr) + sizeof(struct bss_color_tlv);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_color_tlv *bss_color;
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -4196,12 +4246,12 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
.len = cpu_to_le16(sizeof(req) - 4),
.tbl_idx = flow->table_id,
.cmd = cmd,
- .own_mac_idx = mvif->mt76.omac_idx,
+ .own_mac_idx = mvif->deflink.mt76.omac_idx,
.flowid = flow->id,
.peer_id = cpu_to_le16(flow->wcid),
.duration = flow->duration,
- .bss = mvif->mt76.idx,
- .bss_idx = mvif->mt76.idx,
+ .bss = mvif->deflink.mt76.idx,
+ .bss_idx = mvif->deflink.mt76.idx,
.start_tsf = cpu_to_le64(flow->tsf),
.mantissa = flow->mantissa,
.exponent = flow->exp,
@@ -4297,16 +4347,16 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct mt7996_sta *msta;
struct sk_buff *skb;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
&msta->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta->wcid);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
@@ -4484,12 +4534,32 @@ int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id)
sizeof(req), true);
}
+int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct {
+ u8 band_idx;
+ u8 _rsv[3];
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 _pad[3];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = 0,
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .enable = enabled,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SNIFFER), &req,
+ sizeof(req), true);
+}
+
int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
{
#define TX_POWER_LIMIT_TABLE_RATE 0
struct mt7996_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
struct tx_power_limit_table_ctrl {
u8 __rsv1[4];
@@ -4509,7 +4579,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
struct sk_buff *skb;
int i, tx_power;
- tx_power = mt7996_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt7996_get_power_bound(phy, phy->txpower);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&la, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 40e45fb2b626..7a8ee6c75cf2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -177,7 +177,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
continue;
ofs = addr - dev->reg.map[i].phys;
- if (ofs > dev->reg.map[i].size)
+ if (ofs >= dev->reg.map[i].size)
continue;
return dev->reg.map[i].mapped + ofs;
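Editor's note: this one-character change fixes an off-by-one in the register window lookup; an offset exactly equal to the window size previously passed the check and mapped one byte past the region. Worked boundary case, assuming a 0x1000-byte window:

    /* phys = 0x54000000, size = 0x1000, addr = 0x54001000 -> ofs = 0x1000
     * old: ofs >  size  -> false, entry wrongly used (maps past the window)
     * new: ofs >= size  -> true,  entry skipped and the search continues
     */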
@@ -605,6 +605,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
+ .link_data_size = sizeof(struct mt7996_vif_link),
.drv_flags = MT_DRV_TXWI_NO_FREE |
MT_DRV_AMSDU_OFFLOAD |
MT_DRV_HW_MGMT_TXQ,
@@ -618,9 +619,12 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
.sta_add = mt7996_mac_sta_add,
+ .sta_event = mt7996_mac_sta_event,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
+ .vif_link_add = mt7996_vif_link_add,
+ .vif_link_remove = mt7996_vif_link_remove,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index ab8c9070630b..29fabb9b04ae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -11,6 +11,7 @@
#include "../mt76_connac.h"
#include "regs.h"
+#define MT7996_MAX_RADIOS 3
#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64)
@@ -34,13 +35,32 @@
#define MT7996_FIRMWARE_DSP "mediatek/mt7996/mt7996_dsp.bin"
#define MT7996_ROM_PATCH "mediatek/mt7996/mt7996_rom_patch.bin"
+#define MT7996_FIRMWARE_WA_233 "mediatek/mt7996/mt7996_wa_233.bin"
+#define MT7996_FIRMWARE_WM_233 "mediatek/mt7996/mt7996_wm_233.bin"
+#define MT7996_FIRMWARE_DSP_233 MT7996_FIRMWARE_DSP
+#define MT7996_ROM_PATCH_233 "mediatek/mt7996/mt7996_rom_patch_233.bin"
+
#define MT7992_FIRMWARE_WA "mediatek/mt7996/mt7992_wa.bin"
#define MT7992_FIRMWARE_WM "mediatek/mt7996/mt7992_wm.bin"
#define MT7992_FIRMWARE_DSP "mediatek/mt7996/mt7992_dsp.bin"
#define MT7992_ROM_PATCH "mediatek/mt7996/mt7992_rom_patch.bin"
+#define MT7992_FIRMWARE_WA_23 "mediatek/mt7996/mt7992_wa_23.bin"
+#define MT7992_FIRMWARE_WM_23 "mediatek/mt7996/mt7992_wm_23.bin"
+#define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin"
+#define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin"
+
#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
+#define MT7996_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7996_eeprom_2i5i6i.bin"
+#define MT7996_EEPROM_DEFAULT_233 "mediatek/mt7996/mt7996_eeprom_233.bin"
+#define MT7996_EEPROM_DEFAULT_233_INT "mediatek/mt7996/mt7996_eeprom_233_2i5i6i.bin"
+
#define MT7992_EEPROM_DEFAULT "mediatek/mt7996/mt7992_eeprom.bin"
+#define MT7992_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7992_eeprom_2i5i.bin"
+#define MT7992_EEPROM_DEFAULT_MIX "mediatek/mt7996/mt7992_eeprom_2i5e.bin"
+#define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin"
+#define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin"
+
#define MT7996_EEPROM_SIZE 7680
#define MT7996_EEPROM_BLOCK_SIZE 16
#define MT7996_TOKEN_SIZE 16384
@@ -48,6 +68,12 @@
#define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7996_IBF_MAX_NC 2
+#define MT7996_IBF_TIMEOUT 0x18
+#define MT7996_IBF_TIMEOUT_LEGACY 0x48
+
+#define MT7992_CFEND_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7992_IBF_TIMEOUT 0xff
#define MT7996_SKU_RATE_NUM 417
#define MT7996_SKU_PATH_NUM 494
@@ -95,6 +121,22 @@ enum mt7996_ram_type {
MT7996_RAM_TYPE_DSP,
};
+enum mt7996_var_type {
+ MT7996_VAR_TYPE_444,
+ MT7996_VAR_TYPE_233,
+};
+
+enum mt7992_var_type {
+ MT7992_VAR_TYPE_44,
+ MT7992_VAR_TYPE_23,
+};
+
+enum mt7996_fem_type {
+ MT7996_FEM_EXT,
+ MT7996_FEM_INT,
+ MT7996_FEM_MIX,
+};
+
enum mt7996_txq_id {
MT7996_TXQ_FWDL = 16,
MT7996_TXQ_MCU_WM,
@@ -164,8 +206,8 @@ struct mt7996_sta {
} twt;
};
-struct mt7996_vif {
- struct mt76_vif mt76; /* must be first */
+struct mt7996_vif_link {
+ struct mt76_vif_link mt76; /* must be first */
struct mt7996_sta sta;
struct mt7996_phy *phy;
@@ -174,6 +216,11 @@ struct mt7996_vif {
struct cfg80211_bitrate_mask bitrate_mask;
};
+struct mt7996_vif {
+ struct mt7996_vif_link deflink; /* must be first */
+ struct mt76_vif_data mt76;
+};
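Editor's note: the per-vif state is split for multi-link operation; everything link-specific moves into mt7996_vif_link and mt7996_vif embeds a default link as its first member. Because both "must be first" comments hold, one drv_priv pointer is simultaneously valid as all three views. A sketch of the cast chain the deflink conversions elsewhere in this patch rely on (the helper name is illustrative):

    static struct mt76_vif_link *
    example_default_mlink(struct ieee80211_vif *vif)
    {
            struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

            /* deflink sits at offset 0 of mt7996_vif, and mt76 sits at
             * offset 0 of mt7996_vif_link, so all three structures share
             * the same base address.
             */
            return &mvif->deflink.mt76;
    }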
+
/* crash-dump */
struct mt7996_crash_data {
guid_t guid;
@@ -211,8 +258,6 @@ struct mt7996_phy {
struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
- struct ieee80211_vif *monitor_vif;
-
struct thermal_cooling_device *cdev;
u8 cdev_state;
u8 throttle_state;
@@ -232,11 +277,15 @@ struct mt7996_phy {
u32 rx_ampdu_ts;
u32 ampdu_ref;
+ int txpower;
struct mt76_mib_stats mib;
struct mt76_channel_state state_ts;
+ u16 orig_chainmask;
+
bool has_aux_rx;
+ bool counter_reset;
};
struct mt7996_dev {
@@ -245,6 +294,10 @@ struct mt7996_dev {
struct mt76_phy mphy;
};
+ struct mt7996_phy *radio_phy[MT7996_MAX_RADIOS];
+ struct wiphy_radio radios[MT7996_MAX_RADIOS];
+ struct wiphy_radio_freq_range radio_freqs[MT7996_MAX_RADIOS];
+
struct mt7996_hif *hif2;
struct mt7996_reg_desc reg;
u8 q_id[MT7996_MAX_QUEUE];
@@ -329,6 +382,10 @@ struct mt7996_dev {
spinlock_t reg_lock;
u8 wtbl_size_group;
+ struct {
+ u8 type:4;
+ u8 fem:4;
+ } var;
};
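Editor's note: the new var field packs the hardware variant and front-end-module kind into one byte using the enums introduced earlier in this header; presumably it is what mt7996_has_background_radar() below and the variant-specific firmware/EEPROM selection key off. Illustrative assignment (values are examples, not taken from a real probe path):

    dev->var.type = MT7996_VAR_TYPE_233;   /* 2+3+3 antenna variant */
    dev->var.fem  = MT7996_FEM_INT;        /* internal front-end module */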
enum {
@@ -360,14 +417,6 @@ enum mt7996_rdd_cmd {
RDD_IRQ_OFF,
};
-static inline struct mt7996_phy *
-mt7996_hw_phy(struct ieee80211_hw *hw)
-{
- struct mt76_phy *phy = hw->priv;
-
- return phy->priv;
-}
-
static inline struct mt7996_dev *
mt7996_hw_dev(struct ieee80211_hw *hw)
{
@@ -405,14 +454,69 @@ mt7996_band_valid(struct mt7996_dev *dev, u8 band)
if (is_mt7992(&dev->mt76))
return band <= MT_BAND1;
- /* tri-band support */
- if (band <= MT_BAND2 &&
- mt76_get_field(dev, MT_PAD_GPIO, MT_PAD_GPIO_ADIE_COMB) <= 1)
- return true;
+ return band <= MT_BAND2;
+}
- return band == MT_BAND0 || band == MT_BAND2;
+static inline bool
+mt7996_has_background_radar(struct mt7996_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7990:
+ if (dev->var.type == MT7996_VAR_TYPE_233)
+ return false;
+ break;
+ case 0x7992:
+ if (dev->var.type == MT7992_VAR_TYPE_23)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
}
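Editor's note: the helper encodes a small capability matrix, presumably because the cut-down variants lack the extra RX path needed for background radar. Spelled out, directly from the switch above:

    /* chip    variant              background radar
     * 0x7990  MT7996_VAR_TYPE_444  yes
     * 0x7990  MT7996_VAR_TYPE_233  no
     * 0x7992  MT7992_VAR_TYPE_44   yes
     * 0x7992  MT7992_VAR_TYPE_23   no
     * other   -                    no
     */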
+static inline struct mt7996_phy *
+mt7996_band_phy(struct mt7996_dev *dev, enum nl80211_band band)
+{
+ struct mt76_phy *mphy;
+
+ mphy = dev->mt76.band_phys[band];
+ if (!mphy)
+ return NULL;
+
+ return mphy->priv;
+}
+
+static inline struct mt7996_vif_link *
+mt7996_vif_link(struct mt7996_dev *dev, struct ieee80211_vif *vif, int link_id)
+{
+ return (struct mt7996_vif_link *)mt76_vif_link(&dev->mt76, vif, link_id);
+}
+
+static inline struct mt7996_phy *
+mt7996_vif_link_phy(struct mt7996_vif_link *link)
+{
+ struct mt76_phy *mphy = mt76_vif_link_phy(&link->mt76);
+
+ if (!mphy)
+ return NULL;
+
+ return mphy->priv;
+}
+
+static inline struct mt7996_vif_link *
+mt7996_vif_conf_link(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return (struct mt7996_vif_link *)mt76_vif_conf_link(&dev->mt76, vif,
+ link_conf);
+}
+
+#define mt7996_for_each_phy(dev, phy) \
+ for (int __i = 0; __i < ARRAY_SIZE((dev)->radio_phy); __i++) \
+ if (((phy) = (dev)->radio_phy[__i]) != NULL)
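Editor's note: mt7996_for_each_phy() is the common for-plus-if iterator idiom; the for declares the index, the if both loads the slot and skips NULL entries. Usage sketch with a function declared further down in this header:

    struct mt7996_phy *phy;

    mt7996_for_each_phy(dev, phy)
            mt7996_init_txpower(phy);   /* runs once per populated radio */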
+
extern const struct ieee80211_ops mt7996_ops;
extern struct pci_driver mt7996_pci_driver;
extern struct pci_driver mt7996_hif_driver;
@@ -424,6 +528,12 @@ irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
int mt7996_register_device(struct mt7996_dev *dev);
void mt7996_unregister_device(struct mt7996_dev *dev);
+int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
+void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink);
int mt7996_eeprom_init(struct mt7996_dev *dev);
int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy);
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
@@ -439,29 +549,33 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
void mt7996_init_txpower(struct mt7996_phy *phy);
int mt7996_txbf_init(struct mt7996_dev *dev);
void mt7996_reset(struct mt7996_dev *dev);
-int mt7996_run(struct ieee80211_hw *hw);
+int mt7996_run(struct mt7996_phy *phy);
int mt7996_mcu_init(struct mt7996_dev *dev);
int mt7996_mcu_init_firmware(struct mt7996_dev *dev);
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
struct mt7996_vif *mvif,
struct mt7996_twt_flow *flow,
int cmd);
-int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, bool enable);
-int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
- struct ieee80211_vif *vif, int enable);
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, bool enable);
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt76_vif_link *mlink, int enable);
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, bool newly);
+ struct mt76_vif_link *mlink,
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
+ struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int enable);
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
struct ieee80211_vif *vif, u32 changed);
int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -470,13 +584,14 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
-int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version);
int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *data, u32 field);
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
-int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap);
int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band);
@@ -488,7 +603,8 @@ int mt7996_mcu_set_radar_th(struct mt7996_dev *dev, int index,
const struct mt7996_dfs_pattern *pattern);
int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable);
int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val);
-int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif);
+int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch);
int mt7996_mcu_get_temperature(struct mt7996_phy *phy);
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state);
@@ -511,6 +627,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
void mt7996_mcu_exit(struct mt7996_dev *dev);
int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
+int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled);
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
@@ -575,6 +692,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy);
int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7996_mac_work(struct work_struct *work);
@@ -602,7 +721,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy);
void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy);
void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy);
void mt7996_update_channel(struct mt76_phy *mphy);
-int mt7996_init_debugfs(struct mt7996_phy *phy);
+int mt7996_init_debugfs(struct mt7996_dev *dev);
void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len);
bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 47b429d8bfbe..1876a968c92d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -175,6 +175,9 @@ enum offs_rev {
#define MT_WTBLOFF_RSCR_RCPI_MODE GENMASK(31, 30)
#define MT_WTBLOFF_RSCR_RCPI_PARAM GENMASK(25, 24)
+#define MT_WTBLOFF_ACR(_band) MT_WTBLOFF(_band, 0x010)
+#define MT_WTBLOFF_ADM_BACKOFFTIME BIT(29)
+
/* ETBF: band 0(0x820ea000), band 1(0x820fa000), band 2(0x830ea000) */
#define MT_WF_ETBF_BASE(_band) __BASE(WF_ETBF_BASE, (_band))
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@@ -660,8 +663,17 @@ enum offs_rev {
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+/* ADIE */
+#define MT_ADIE_CHIP_ID(_idx) (0x0f00002c + ((_idx) << 28))
+#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+
#define MT_PAD_GPIO 0x700056f0
#define MT_PAD_GPIO_ADIE_COMB GENMASK(16, 15)
+#define MT_PAD_GPIO_2ADIE_TBTC BIT(19)
+/* for mt7992 */
+#define MT_PAD_GPIO_ADIE_COMB_7992 GENMASK(17, 16)
+#define MT_PAD_GPIO_ADIE_SINGLE BIT(15)
#define MT_HW_REV 0x70010204
#define MT_HW_REV1 0x8a00
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
new file mode 100644
index 000000000000..1c4f9deaaada
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2024 Felix Fietkau <nbd@nbd.name>
+ */
+#include "mt76.h"
+
+static void mt76_scan_complete(struct mt76_dev *dev, bool abort)
+{
+ struct mt76_phy *phy = dev->scan.phy;
+ struct cfg80211_scan_info info = {
+ .aborted = abort,
+ };
+
+ if (!phy)
+ return;
+
+ clear_bit(MT76_SCANNING, &phy->state);
+
+ if (dev->scan.chan && phy->main_chandef.chan)
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink);
+ memset(&dev->scan, 0, sizeof(dev->scan));
+ ieee80211_scan_completed(phy->hw, &info);
+}
+
+void mt76_abort_scan(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->scan_work);
+ mt76_scan_complete(dev, true);
+}
+
+static void
+mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
+{
+ struct cfg80211_scan_request *req = dev->scan.req;
+ struct ieee80211_vif *vif = dev->scan.vif;
+ struct mt76_vif_link *mvif = dev->scan.mlink;
+ enum nl80211_band band = dev->scan.chan->band;
+ struct mt76_phy *phy = dev->scan.phy;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ skb = ieee80211_probereq_get(phy->hw, vif->addr, ssid->ssid,
+ ssid->ssid_len, req->ie_len);
+ if (!skb)
+ return;
+
+ if (is_unicast_ether_addr(req->bssid)) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ ether_addr_copy(hdr->addr1, req->bssid);
+ ether_addr_copy(hdr->addr3, req->bssid);
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ if (req->ie_len)
+ skb_put_data(skb, req->ie, req->ie_len);
+
+ skb->priority = 7;
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+
+ rcu_read_lock();
+ if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL))
+ mt76_tx(phy, NULL, mvif->wcid, skb);
+ else
+ ieee80211_free_txskb(phy->hw, skb);
+ rcu_read_unlock();
+}
+
+void mt76_scan_work(struct work_struct *work)
+{
+ struct mt76_dev *dev = container_of(work, struct mt76_dev,
+ scan_work.work);
+ struct cfg80211_scan_request *req = dev->scan.req;
+ struct cfg80211_chan_def chandef = {};
+ struct mt76_phy *phy = dev->scan.phy;
+ int duration = HZ / 9; /* ~110 ms */
+ int i;
+
+ if (dev->scan.chan_idx >= req->n_channels) {
+ mt76_scan_complete(dev, false);
+ return;
+ }
+
+ if (dev->scan.chan && phy->num_sta) {
+ dev->scan.chan = NULL;
+ mt76_set_channel(phy, &phy->main_chandef, false);
+ goto out;
+ }
+
+ dev->scan.chan = req->channels[dev->scan.chan_idx++];
+ cfg80211_chandef_create(&chandef, dev->scan.chan, NL80211_CHAN_HT20);
+ mt76_set_channel(phy, &chandef, true);
+
+ if (!req->n_ssids ||
+ chandef.chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
+ goto out;
+
+ duration = HZ / 16; /* ~60 ms */
+ local_bh_disable();
+ for (i = 0; i < req->n_ssids; i++)
+ mt76_scan_send_probe(dev, &req->ssids[i]);
+ local_bh_enable();
+
+out:
+ if (!duration)
+ return;
+
+ if (dev->scan.chan)
+ duration = max_t(int, duration,
+ msecs_to_jiffies(req->duration +
+ (req->duration >> 5)));
+
+ ieee80211_queue_delayed_work(dev->phy.hw, &dev->scan_work, duration);
+}
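Editor's note: the scan work visits one channel per invocation and re-arms itself, so dwell time is simply the requeue delay. The selection logic above, distilled:

    /* passive channel:           HZ / 9   (~110 ms)
     * active channel (probing):  HZ / 16  (~60 ms)
     * with a requested duration: max(above, msecs_to_jiffies(d + d / 32)),
     *                            i.e. the user's dwell plus ~3% headroom
     */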
+
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_link *mlink;
+ int ret = 0;
+
+ if (hw->wiphy->n_radio > 1) {
+ phy = dev->band_phys[req->req.channels[0]->band];
+ if (!phy)
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->scan.req || phy->roc_vif) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mlink = mt76_get_vif_phy_link(phy, vif);
+ if (IS_ERR(mlink)) {
+ ret = PTR_ERR(mlink);
+ goto out;
+ }
+
+ memset(&dev->scan, 0, sizeof(dev->scan));
+ dev->scan.req = &req->req;
+ dev->scan.vif = vif;
+ dev->scan.phy = phy;
+ dev->scan.mlink = mlink;
+ ieee80211_queue_delayed_work(dev->phy.hw, &dev->scan_work, 0);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_hw_scan);
+
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ mt76_abort_scan(phy->dev);
+}
+EXPORT_SYMBOL_GPL(mt76_cancel_hw_scan);
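Editor's note: taken together, the new file implements a small software-scan state machine; everything funnels through mt76_scan_complete(), which restores the operating channel and releases the temporary vif link whether the scan finished or was aborted. Flow sketch:

    /* mt76_hw_scan()       take dev->mutex, record req/vif/phy/mlink,
     *                      queue scan_work with no delay
     * mt76_scan_work()     hop to the next channel, optionally send
     *                      probe requests, re-arm itself
     * mt76_scan_complete() restore main_chandef, put the vif link,
     *                      call ieee80211_scan_completed()
     * mt76_abort_scan()    cancel_delayed_work_sync() + complete(abort)
     */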
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index ddd8c0cc744d..0a927a7313a6 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -46,6 +46,10 @@ static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
return 0;
sdio->sched.pse_mcu_quota += pse_mcu_quota;
+ if (sdio->pse_mcu_quota_max &&
+ sdio->sched.pse_mcu_quota > sdio->pse_mcu_quota_max) {
+ sdio->sched.pse_mcu_quota = sdio->pse_mcu_quota_max;
+ }
sdio->sched.pse_data_quota += pse_data_quota;
sdio->sched.ple_data_quota += ple_data_quota;
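Editor's note: the clamp keeps the MCU quota accumulator from growing without bound when firmware keeps reporting refills the host never spends. An equivalent form using the kernel min() helper, assuming both fields share a type:

    if (sdio->pse_mcu_quota_max)
            sdio->sched.pse_mcu_quota = min(sdio->sched.pse_mcu_quota,
                                            sdio->pse_mcu_quota_max);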
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index ce193e625666..af0c50c983ec 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -489,7 +489,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
do {
if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
- return -EBUSY;
+ break;
if (stop || mt76_txq_stopped(q))
break;
@@ -522,24 +522,16 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
- struct mt76_queue *q = phy->q_tx[qid];
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
+ struct mt76_queue *q;
int ret = 0;
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
- return -EBUSY;
-
- if (dev->queue_ops->tx_cleanup &&
- q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
- dev->queue_ops->tx_cleanup(dev, q, false);
- }
-
txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -549,6 +541,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
+ phy = mt76_dev_phy(dev, wcid->phy_idx);
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+ continue;
+
+ q = phy->q_tx[qid];
+ if (dev->queue_ops->tx_cleanup &&
+ q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+ dev->queue_ops->tx_cleanup(dev, q, false);
+ }
+
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
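Editor's note: the scheduling loop now resolves the queue per station rather than once per call, since a txq handed out by mac80211 may belong to a station parked on a different radio than the phy that started the pass. Dispatch sketch:

    /* wcid->phy_idx -> mt76_dev_phy(dev, ...) -> that phy's q_tx[qid];
     * phys that are mid-reset or off-channel are skipped with continue,
     * instead of aborting the whole scheduling pass with -EBUSY.
     */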
@@ -578,7 +580,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4 || phy->offchannel)
+ if (qid >= 4)
return;
local_bh_disable();
@@ -680,9 +682,14 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
void mt76_txq_schedule_all(struct mt76_phy *phy)
{
+ struct mt76_phy *main_phy = &phy->dev->phy;
int i;
mt76_txq_schedule_pending(phy);
+
+ if (phy != main_phy && phy->hw == main_phy->hw)
+ return;
+
for (i = 0; i <= MT_TXQ_BK; i++)
mt76_txq_schedule(phy, i);
}
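Editor's note: with several band phys sharing one ieee80211_hw, the mac80211 queues are per-hw, so only the main phy may drain them; secondary phys on the same hw stop after flushing their own pending queue. The rule, made explicit:

    /* phy == &dev->phy           main phy: schedule mac80211 queues
     * phy->hw == main_phy->hw    same hw:  return early (main covers it)
     * phy->hw != main_phy->hw    own hw:   schedule its own queues
     */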
@@ -693,6 +700,7 @@ void mt76_tx_worker_run(struct mt76_dev *dev)
struct mt76_phy *phy;
int i;
+ mt76_txq_schedule_all(&dev->phy);
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
phy = dev->phys[i];
if (!phy)
@@ -748,9 +756,6 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &phy->state))
- return;
-
mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 58ff06823389..f9e67b8c3b3c 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -33,9 +33,9 @@ int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -EPROTO)
set_bit(MT76_REMOVED, &dev->phy.state);
- if (ret >= 0 || ret == -ENODEV)
+ if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
return ret;
usleep_range(5000, 10000);
}
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index d6c01a2dd198..95b3dc96e4c4 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -64,7 +64,7 @@ int mt76_wcid_alloc(u32 *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx)
{
struct mt76_wcid *wcid;
int i, j, min_rssi = 0;
@@ -75,20 +75,16 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
u32 mask = dev->wcid_mask[i];
- u32 phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * 32; mask; j++, mask >>= 1, phy_mask >>= 1) {
+ for (j = i * 32; mask; j++, mask >>= 1) {
if (!(mask & 1))
continue;
- if (!!(phy_mask & 1) != ext_phy)
- continue;
-
wcid = rcu_dereference(dev->wcid[j]);
- if (!wcid)
+ if (!wcid || wcid->phy_idx != phy_idx)
continue;
spin_lock(&dev->rx_lock);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e96736cc7259..e7aa0f991923 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1669,7 +1669,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
int ret;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 7e84fc0fd911..af298021e050 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -925,8 +925,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
- wiphy_unregister(wilc->wiphy);
- wiphy_free(wilc->wiphy);
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 5262c8846c13..af970f999111 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -193,7 +193,7 @@ static int wilc_sdio_probe(struct sdio_func *func,
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto dispose_irq;
+ goto unregister_wiphy;
}
wilc_sdio_deinit(wilc);
@@ -202,15 +202,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto dispose_irq;
+ goto unregister_wiphy;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
+unregister_wiphy:
+ wiphy_unregister(wilc->wiphy);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
+ wiphy_free(wilc->wiphy);
free:
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
@@ -223,6 +226,8 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc_sdio *sdio_priv = wilc->bus_data;
wilc_netdev_cleanup(wilc);
+ wiphy_unregister(wilc->wiphy);
+ wiphy_free(wilc->wiphy);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index ce2a9cdd6aa7..5bcabb7decea 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -256,7 +256,7 @@ static int wilc_bus_probe(struct spi_device *spi)
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto power_down;
+ goto unregister_wiphy;
}
wilc_wlan_power(wilc, false);
@@ -264,14 +264,17 @@ static int wilc_bus_probe(struct spi_device *spi)
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto power_down;
+ goto unregister_wiphy;
}
return 0;
+unregister_wiphy:
+ wiphy_unregister(wilc->wiphy);
power_down:
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
+ wiphy_free(wilc->wiphy);
free:
kfree(spi_priv);
return ret;
@@ -283,6 +286,8 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc_spi *spi_priv = wilc->bus_data;
wilc_netdev_cleanup(wilc);
+ wiphy_unregister(wilc->wiphy);
+ wiphy_free(wilc->wiphy);
kfree(spi_priv);
}
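Editor's note: across the wilc1000 hunks above, ownership of the wiphy moves out of wilc_netdev_cleanup() and into the bus drivers, so probe error paths can unregister it independently of netdev teardown. The resulting order, identical for SDIO and SPI:

    /* probe failure:  wiphy_unregister();        (unregister_wiphy label)
     *                 ...bus-specific undo...
     *                 wilc_netdev_cleanup();
     *                 wiphy_free();
     *
     * remove:         wilc_netdev_cleanup();
     *                 wiphy_unregister();
     *                 wiphy_free();
     */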
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8b97accf6638..0b2282528342 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -881,7 +881,7 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index b375a4751580..a377d85c2451 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -102,7 +102,7 @@ struct qtnf_wmac {
struct qtnf_mac_info macinfo;
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
- struct mutex mac_lock; /* lock during wmac speicific ops */
+ struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
struct platform_device *pdev;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index f66eb43094d4..3adcfac2886f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -204,7 +204,7 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
if (!priv->msi_enabled) {
pr_warn("legacy PCIE interrupts enabled\n");
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
}
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 60c2a12e9d5e..e5f553a1ea24 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -8882,13 +8882,10 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) {
if (ch_idx == 0) {
- rfval = rfb0r1 & (~0x3);
rfval = rfb0r1 | 0x1;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
- rfval = rfb0r2 & (~0x33);
rfval = rfb0r2 | 0x11;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
- rfval = rfb0r42 & (~0x50);
rfval = rfb0r42 | 0x10;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
@@ -8901,13 +8898,10 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00);
} else {
- rfval = rfb0r1 & (~0x3);
rfval = rfb0r1 | 0x2;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
- rfval = rfb0r2 & (~0x33);
rfval = rfb0r2 | 0x22;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
- rfval = rfb0r42 & (~0x50);
rfval = rfb0r42 | 0x40;
rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 5323acff962a..45775ecdf221 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -842,7 +842,7 @@ int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800mmio_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 160bef79acdb..b51a23300ba2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -618,7 +618,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800usb_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 9e7d9dbe954c..432ddfac2c33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1391,8 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
- hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
+ hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
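Editor's note: hrtimer_setup() requires a callback at init time, so the generic probe installs hrtimer_dummy_timeout and the bus-specific probe_hw hunks above swap in the real handler once the device type is known. Pattern sketch using only the APIs visible in these hunks:

    hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout,
                  CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    /* later, in rt2800usb/rt2800mmio probe_hw: */
    hrtimer_update_function(&rt2x00dev->txstatus_timer,
                            rt2800usb_tx_sta_fifo_timeout);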
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
index 3d04df0f5bf4..766a7a7c7d28 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
@@ -1860,7 +1860,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.set_crystal_cap = rtl8188f_set_crystal_cap,
.cck_rssi = rtl8188e_cck_rssi,
.led_classdev_brightness_set = rtl8188eu_led_brightness_set,
- .writeN_block_size = 128,
+ .writeN_block_size = 196,
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.has_tx_report = 1,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index f95898f68d68..4ce0c05c5129 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
@@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
@@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
@@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
@@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
@@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index aab4605de9c4..ff61867d142f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
void rtl_deinit_core(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtl_c2hcmd_launcher(hw, 0);
rtl_free_entries_from_scan_list(hw);
rtl_free_entries_from_ack_queue(hw, false);
+ if (rtlpriv->works.rtl_wq) {
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+ rtlpriv->works.rtl_wq = NULL;
+ }
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var rtl_global_var = {};
-EXPORT_SYMBOL_GPL(rtl_global_var);
-
static int __init rtl_core_module_init(void)
{
BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
@@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
/* add debugfs */
rtl_debugfs_add_topdir();
- /* init some global vars */
- INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
- spin_lock_init(&rtl_global_var.glb_list_lock);
-
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index f081a9a90563..f3a6a43a42ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 11709b6c83f1..0eafc4d125f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
return status;
}
-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_priv *tpriv = NULL, *iter;
- struct rtl_pci_priv *tpcipriv = NULL;
-
- if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
- list) {
- tpcipriv = (struct rtl_pci_priv *)iter->priv;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if (pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber &&
- pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber &&
- pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber) {
- tpriv = iter;
- break;
- }
- }
- }
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", tpriv != NULL);
-
- if (tpriv)
- *buddy_priv = tpriv;
-
- return tpriv != NULL;
-}
-
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
-
- destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
@@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
- list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
return true;
}
@@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &rtl_global_var;
rtl_efuse_ops_init(hw);
/* MEM map */
@@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
rtl_init_sw_leds(hw);
@@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
err = rtl_pci_init(hw, pdev);
if (err) {
pr_err("Failed to init PCI\n");
- goto fail3;
+ goto fail4;
}
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
- goto fail3;
+ goto fail5;
}
rtlpriv->mac80211.mac80211_registered = 1;
@@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
-fail3:
- pci_set_drvdata(pdev, NULL);
+fail5:
+ rtl_pci_deinit(hw);
+fail4:
rtl_deinit_core(hw);
+fail3:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
- complete(&rtlpriv->firmware_loading_complete);
fail1:
if (hw)
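Editor's note: the relabelled error path now unwinds strictly in reverse order of setup, and an ieee80211_register_hw() failure finally tears down the PCI init it used to leak. The resulting ladder, a sketch assembled from the hunks above:

    /* fail5: rtl_pci_deinit(hw)                     undoes rtl_pci_init()
     * fail4: rtl_deinit_core(hw)                    undoes core init
     * fail3: wait_for_completion(...);              let the async firmware
     *        rtlpriv->cfg->ops->deinit_sw_vars(hw)  callback finish first
     * fail2: pci_iounmap(); pci_release_regions()
     * fail1: free the hw
     */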
@@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
if (rtlpci->using_msi)
pci_disable_msi(rtlpci->pdev);
- list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
@@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
const struct rtl_intf_ops rtl_pci_ops = {
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
- .check_buddy_priv = rtl_pci_check_buddy_priv,
.adapter_tx = rtl_pci_tx,
.flush = rtl_pci_flush,
.reset_trx_ring = rtl_pci_reset_trx_ring,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index bbf8ff63dced..e63c67b1861b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
rtlpriv->max_fw_size = 0;
release_firmware(firmware);
- return;
+ goto exit;
}
pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
pfirmware->sz_fw_tmpbufferlen = firmware->size;
release_firmware(firmware);
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
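Editor's note: complete() previously fired before the firmware image was copied, so a waiter could proceed while pfirmware was still being filled in; routing both error paths through exit: and completing last gives waiters a usable postcondition. Consumer-side sketch, matching the new probe error paths elsewhere in this series:

    wait_for_completion(&rtlpriv->firmware_loading_complete);
    /* here: pfirmware is fully copied, or max_fw_size == 0 on failure */
    rtlpriv->cfg->ops->deinit_sw_vars(hw);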
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index c269942b3f4a..af8d17b9e012 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -197,9 +197,9 @@ enum rtl8821a_h2c_cmd {
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
- u8p_replace_bits(__cmd + 1, __value, BIT(0))
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \
- u8p_replace_bits(__cmd + 1, __value, BIT(1))
+ u8p_replace_bits(__cmd, __value, BIT(1))
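Editor's note: the MSRRPT macros were writing the opmode/macid bits into byte 1 of the H2C parameter buffer, while the fix places them in byte 0. The effect, concretely:

    u8 cmd[2] = {};

    SET_H2CCMD_MSRRPT_PARM_OPMODE(cmd, 1);
    /* old: cmd[1] |= BIT(0)   (wrong byte)
     * new: cmd[0] |= BIT(0)
     */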
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 1be51ea3f3c8..9eddbada8af1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (!_rtl8821ae_check_condition(hw, v1)) {
i += 2; /* skip the pair of expression*/
v2 = array[i+1];
- while (v2 != 0xDEAD)
+ while (v2 != 0xDEAD) {
i += 3;
+ v2 = array[i + 1];
+ }
}
}
}
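Editor's note: the scan for the 0xDEAD terminator never reloaded v2 inside the loop, so any table whose next value was not already 0xDEAD spun forever. Shape after the fix:

    v2 = array[i + 1];
    while (v2 != 0xDEAD) {
            i += 3;              /* skip one three-word entry */
            v2 = array[i + 1];   /* previously missing: v2 never changed */
    }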
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d37a017b2b81..f5718e570011 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
tasklet_kill(&rtlusb->rx_work_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
- if (rtlpriv->works.rtl_wq) {
- destroy_workqueue(rtlpriv->works.rtl_wq);
- rtlpriv->works.rtl_wq = NULL;
- }
-
skb_queue_purge(&rtlusb->rx_queue);
while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
- goto error_out;
+ goto error_init_vars;
}
rtlpriv->mac80211.mac80211_registered = 1;
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
+error_init_vars:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
error_out:
+ rtl_usb_deinit(hw);
rtl_deinit_core(hw);
error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
- complete(&rtlpriv->firmware_loading_complete);
kfree(rtlpriv->usb_data);
ieee80211_free_hw(hw);
return -ENODEV;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index ae6e351bc83c..f1830ddcdd8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
/*com */
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
- bool (*check_buddy_priv)(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv);
int (*adapter_tx)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -2514,14 +2512,6 @@ struct dig_t {
u32 rssi_max;
};
-struct rtl_global_var {
- /* from this list we can get
- * other adapter's rtl_priv
- */
- struct list_head glb_priv_list;
- spinlock_t glb_list_lock;
-};
-
#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */
struct rtl_btc_info {
@@ -2667,9 +2657,7 @@ struct rtl_scan_list {
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
- struct list_head list;
struct rtl_priv *buddy_priv;
- struct rtl_global_var *glb_var;
struct rtl_dmsp_ctl dmsp_ctl;
struct rtl_locks locks;
struct rtl_works works;
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index 733b3e58da51..ab21b8059e0b 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -238,4 +238,9 @@ config RTW88_DEBUGFS
If unsure, say Y to simplify debug problems
+config RTW88_LEDS
+ bool
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
endif
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index f0b49f5a8a5a..0cbbb366e393 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -20,6 +20,8 @@ rtw88_core-y += main.o \
rtw88_core-$(CONFIG_PM) += wow.o
+rtw88_core-$(CONFIG_RTW88_LEDS) += led.o
+
obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e6e9946fbf44..02389b7c6876 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -332,6 +332,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_RA_RPT:
rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
break;
+ case C2H_ADAPTIVITY:
+ rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
+ break;
default:
rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
break;
@@ -367,10 +370,6 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
rtw_fw_scan_result(rtwdev, c2h->payload, len);
dev_kfree_skb_any(skb);
break;
- case C2H_ADAPTIVITY:
- rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
- dev_kfree_skb_any(skb);
- break;
default:
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
diff --git a/drivers/net/wireless/realtek/rtw88/led.c b/drivers/net/wireless/realtek/rtw88/led.c
new file mode 100644
index 000000000000..25aa6cbaa728
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/led.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include "main.h"
+#include "debug.h"
+#include "led.h"
+
+static int rtw_led_set_blocking(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+
+ rtwdev->chip->ops->led_set(led, brightness);
+
+ return 0;
+}
+
+void rtw_led_init(struct rtw_dev *rtwdev)
+{
+ static const struct ieee80211_tpt_blink rtw_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+ };
+ struct led_classdev *led = &rtwdev->led_cdev;
+ int err;
+
+ if (!rtwdev->chip->ops->led_set)
+ return;
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ led->brightness_set = rtwdev->chip->ops->led_set;
+ else
+ led->brightness_set_blocking = rtw_led_set_blocking;
+
+ snprintf(rtwdev->led_name, sizeof(rtwdev->led_name),
+ "rtw88-%s", dev_name(rtwdev->dev));
+
+ led->name = rtwdev->led_name;
+ led->max_brightness = LED_ON;
+ led->default_trigger =
+ ieee80211_create_tpt_led_trigger(rtwdev->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ rtw_tpt_blink,
+ ARRAY_SIZE(rtw_tpt_blink));
+
+ err = led_classdev_register(rtwdev->dev, led);
+ if (err) {
+ rtw_warn(rtwdev, "Failed to register the LED, error %d\n", err);
+ return;
+ }
+
+ rtwdev->led_registered = true;
+}
+
+void rtw_led_deinit(struct rtw_dev *rtwdev)
+{
+ struct led_classdev *led = &rtwdev->led_cdev;
+
+ if (!rtwdev->led_registered)
+ return;
+
+ rtwdev->chip->ops->led_set(led, LED_OFF);
+ led_classdev_unregister(led);
+}
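Editor's note: PCIe register writes are safe in atomic context, so the chip op is wired to brightness_set directly; USB and SDIO accesses may sleep, so they go through the brightness_set_blocking wrapper. A hypothetical chip op, shown only for shape; the real implementations live in the per-chip files and program chip-specific LED registers not visible in this hunk:

    static void example_led_set(struct led_classdev *led,
                                enum led_brightness brightness)
    {
            struct rtw_dev *rtwdev = container_of(led, struct rtw_dev,
                                                  led_cdev);

            /* A real op would program the chip's LED control register
             * here; which register and polarity depend on the chip.
             */
            rtw_dbg(rtwdev, RTW_DBG_FW, "LED -> %d\n", brightness);
    }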
diff --git a/drivers/net/wireless/realtek/rtw88/led.h b/drivers/net/wireless/realtek/rtw88/led.h
new file mode 100644
index 000000000000..fa64002b0215
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/led.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW_LED_H
+#define __RTW_LED_H
+
+#ifdef CONFIG_RTW88_LEDS
+
+void rtw_led_init(struct rtw_dev *rtwdev);
+void rtw_led_deinit(struct rtw_dev *rtwdev);
+
+#else
+
+static inline void rtw_led_init(struct rtw_dev *rtwdev)
+{
+}
+
+static inline void rtw_led_deinit(struct rtw_dev *rtwdev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index e91530ed05a0..0cee0fd8c0ef 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -19,6 +19,7 @@
#include "bf.h"
#include "sar.h"
#include "sdio.h"
+#include "led.h"
bool rtw_disable_lps_deep_mode;
EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
@@ -1217,7 +1218,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
- u8 rf_type = RF_1T1R;
u8 stbc_en = 0;
u8 ldpc_en = 0;
u8 tx_num = 1;
@@ -1302,13 +1302,10 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
break;
}
- if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000)
tx_num = 2;
- rf_type = RF_2T2R;
- } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000)
tx_num = 2;
- rf_type = RF_2T2R;
- }
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
@@ -1319,7 +1316,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
si->ldpc_en = ldpc_en;
- si->rf_type = rf_type;
si->sgi_enable = is_support_sgi;
si->vht_enable = is_vht_enable;
si->ra_mask = ra_mask;
@@ -2297,16 +2293,18 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
return ret;
}
+ rtw_led_init(rtwdev);
+
ret = ieee80211_register_hw(hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
- return ret;
+ goto led_deinit;
}
ret = rtw_regd_hint(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to hint regd\n");
- return ret;
+ goto led_deinit;
}
rtw_debugfs_init(rtwdev);
@@ -2315,6 +2313,10 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtwdev->bf_info.bfer_su_cnt = 0;
return 0;
+
+led_deinit:
+ rtw_led_deinit(rtwdev);
+ return ret;
}
EXPORT_SYMBOL(rtw_register_hw);
@@ -2325,6 +2327,7 @@ void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
rtw_debugfs_deinit(rtwdev);
+ rtw_led_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw_unregister_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index cd09fb6f7b8b..62cd4c526301 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -510,12 +510,12 @@ struct rtw_5g_txpwr_idx {
struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
-};
+} __packed;
struct rtw_txpwr_idx {
struct rtw_2g_txpwr_idx pwr_idx_2g;
struct rtw_5g_txpwr_idx pwr_idx_5g;
-};
+} __packed;
struct rtw_channel_params {
u8 center_chan;
@@ -757,7 +757,6 @@ struct rtw_sta_info {
u8 mac_id;
u8 rate_id;
enum rtw_bandwidth bw_mode;
- enum rtw_rf_type rf_type;
u8 stbc_en:2;
u8 ldpc_en:2;
bool sgi_enable;
@@ -888,6 +887,7 @@ struct rtw_chip_ops {
bool is_tx2_path);
void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
+ void (*led_set)(struct led_classdev *led, enum led_brightness brightness);
/* for USB/SDIO only */
void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
@@ -2098,6 +2098,10 @@ struct rtw_dev {
struct completion fw_scan_density;
bool ap_active;
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+
/* hci related data, must be last */
u8 priv[] __aligned(sizeof(void *));
};
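
The main.h hunk tags the TX power index structures __packed because they are overlaid onto raw efuse bytes, where the compiler must not insert padding. A small stand-alone illustration (generic structs, not the driver's) of what the attribute changes:

#include <stdio.h>
#include <stdint.h>

struct plain  { uint8_t a; uint32_t b; };
struct packed { uint8_t a; uint32_t b; } __attribute__((packed));

int main(void)
{
	/* With padding the compiler aligns 'b', typically giving 8;
	 * __packed forces the raw byte layout of exactly 5. */
	printf("plain:  %zu\n", sizeof(struct plain));
	printf("packed: %zu\n", sizeof(struct packed));
	return 0;
}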
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e4d506cf9c33..e438405fba56 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -78,7 +78,19 @@
#define BIT_PAPE_SEL_EN BIT(25)
#define BIT_DPDT_WL_SEL BIT(24)
#define BIT_DPDT_SEL_EN BIT(23)
+#define BIT_GPIO13_14_WL_CTRL_EN BIT(22)
+#define BIT_LED2_SV BIT(19)
+#define BIT_LED2_CM GENMASK(18, 16)
+#define BIT_LED1_SV BIT(11)
+#define BIT_LED1_CM GENMASK(10, 8)
+#define BIT_LED0_SV BIT(3)
+#define BIT_LED0_CM GENMASK(2, 0)
+#define BIT_LED_MODE_SW_CTRL 0
+#define BIT_LED_MODE_RX 6
+#define BIT_LED_MODE_TX 4
+#define BIT_LED_MODE_TRX 2
#define REG_LEDCFG2 0x004E
+#define REG_GPIO_PIN_CTRL_2 0x0060
#define REG_PAD_CTRL1 0x0064
#define BIT_BT_BTG_SEL BIT(31)
#define BIT_PAPE_WLBT_SEL BIT(29)
@@ -871,7 +883,17 @@
#define REG_USB_MOD 0xf008
#define REG_USB3_RXITV 0xf050
+#define REG_USB2_PHY_ADR 0xfe40
+#define REG_USB2_PHY_DAT 0xfe41
+#define REG_USB2_PHY_CMD 0xfe42
+#define BIT_USB2_PHY_CMD_TRG 0x81
#define REG_USB_HRPWM 0xfe58
+#define REG_USB3_PHY_ADR 0xff0c
+#define REG_USB3_PHY_DAT_L 0xff0d
+#define REG_USB3_PHY_DAT_H 0xff0e
+#define BIT_USB3_PHY_ADR_WR BIT(7)
+#define BIT_USB3_PHY_ADR_RD BIT(6)
+#define BIT_USB3_PHY_ADR_MASK GENMASK(5, 0)
#define RF_MODE 0x00
#define RF_MODOPT 0x01
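
The new LED register defines follow the kernel's GENMASK()/FIELD_PREP() bitfield conventions from linux/bits.h and linux/bitfield.h. A userspace re-implementation, for illustration only, showing how a 3-bit mode field like BIT_LED2_CM is built and filled:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel macros. */
#define GENMASK(h, l)	((~0u - (1u << (l)) + 1) & (~0u >> (31 - (h))))
#define BIT_LED2_CM	GENMASK(18, 16)

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask; /* mask & -mask = field LSB */
}

int main(void)
{
	printf("BIT_LED2_CM     = 0x%08x\n", BIT_LED2_CM);	/* 0x00070000 */
	printf("prep(mask, 0x6) = 0x%08x\n",
	       field_prep(BIT_LED2_CM, 0x6));			/* 0x00060000 */
	return 0;
}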
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index a19b94d022ee..1d232adbdd7e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -903,7 +903,7 @@ static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
- GENMASK(31, 20), 0x0);
+ GENMASK(31, 30), 0x0);
rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
break;
@@ -1198,9 +1198,9 @@ static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
- rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
- rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
/* LOK setting */
@@ -1372,7 +1372,7 @@ void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
return;
tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
- tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]);
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_Y]);
rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
index e93bfce994bf..a99af527c92c 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
@@ -47,7 +47,7 @@ struct rtw8723xe_efuse {
u8 device_id[2];
u8 sub_vendor_id[2];
u8 sub_device_id[2];
-};
+} __packed;
struct rtw8723xu_efuse {
u8 res4[48]; /* 0xd0 */
@@ -56,12 +56,12 @@ struct rtw8723xu_efuse {
u8 usb_option; /* 0x104 */
u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
-};
+} __packed;
struct rtw8723xs_efuse {
u8 res4[0x4a]; /* 0xd0 */
u8 mac_addr[ETH_ALEN]; /* 0x11a */
-};
+} __packed;
struct rtw8723x_efuse {
__le16 rtl_id;
@@ -96,7 +96,7 @@ struct rtw8723x_efuse {
struct rtw8723xu_efuse u;
struct rtw8723xs_efuse s;
};
-};
+} __packed;
#define RTW8723X_IQK_ADDA_REG_NUM 16
#define RTW8723X_IQK_MAC8_REG_NUM 3
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index 482edd31823d..f9ba2aa2928a 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -868,6 +868,22 @@ static void rtw8812a_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8812a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u8 ledcfg;
+
+ ledcfg = rtw_read8(rtwdev, REG_LED_CFG);
+ ledcfg &= BIT(6) | BIT(4);
+ ledcfg |= BIT(5);
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT(3);
+
+ rtw_write8(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8812a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -916,6 +932,7 @@ static const struct rtw_chip_ops rtw8812a_ops = {
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
+ .led_set = rtw8812a_led_set,
.fill_txdesc_checksum = rtw8812a_fill_txdesc_checksum,
.coex_set_init = rtw8812a_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -985,6 +1002,9 @@ static const struct rtw_rfe_def rtw8812a_rfe_defs[] = {
[1] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl,
.txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
.pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, },
+ [2] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
+ .pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, },
[3] = { .phy_pg_tbl = &rtw8812a_bb_pg_rfe3_tbl,
.txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl,
.pwr_track_tbl = &rtw8812a_rtw_pwr_track_rfe3_tbl, },
@@ -1024,7 +1044,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = {
.rx_buf_desc_sz = 8,
.phy_efuse_size = 512,
.log_efuse_size = 512,
- .ptct_efuse_size = 96 + 1, /* TODO or just 18? */
+ .ptct_efuse_size = 0,
.txff_size = 131072,
.rxff_size = 16128,
.rsvd_drv_pg_num = 9,
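
Each new led_set op receives only the embedded led_classdev and recovers the owning rtw_dev with container_of(). A minimal self-contained sketch of that pattern, with toy types in place of the driver's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct led_classdev { int brightness; };

struct rtw_dev {
	int id;
	struct led_classdev led_cdev;	/* embedded, as in the driver */
};

static void led_set(struct led_classdev *led)
{
	struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);

	printf("led_set on device %d\n", rtwdev->id);
}

int main(void)
{
	struct rtw_dev dev = { .id = 42 };

	led_set(&dev.led_cdev);		/* prints 42 */
	return 0;
}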
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
index 4da69590a423..e18995f4cc78 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
@@ -9,8 +9,74 @@
#include "usb.h"
static const struct usb_device_id rtw_8812au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0409, 0x0408, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* NEC */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x025d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0952, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1106, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1109, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3426, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ZyXEL */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Logitec */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Abocom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9051, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17d2, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ASUS */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0074, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Sitecom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0022, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Hawking */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0632, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* WD */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x003f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Linksys */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x148f, 0x9097, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Amped Wireless */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1740, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* EnGenius */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3313, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3315, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3316, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab30, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0101, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0103, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0122, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Tenda */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa822, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index db242c9ad68f..f68239b07319 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -706,6 +706,31 @@ static void rtw8821a_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8821a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 gpio8_cfg;
+ u8 ledcfg;
+
+ if (brightness == LED_OFF) {
+ gpio8_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+ gpio8_cfg &= ~BIT(24);
+ gpio8_cfg |= BIT(16) | BIT(8);
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, gpio8_cfg);
+ } else {
+ ledcfg = rtw_read8(rtwdev, REG_LED_CFG + 2);
+ gpio8_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+
+ ledcfg &= BIT(7) | BIT(6);
+ rtw_write8(rtwdev, REG_LED_CFG + 2, ledcfg);
+
+ gpio8_cfg &= ~(BIT(24) | BIT(8));
+ gpio8_cfg |= BIT(16);
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, gpio8_cfg);
+ }
+}
+
static void rtw8821a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -853,6 +878,7 @@ static const struct rtw_chip_ops rtw8821a_ops = {
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
+ .led_set = rtw8821a_led_set,
.fill_txdesc_checksum = rtw8821a_fill_txdesc_checksum,
.coex_set_init = rtw8821a_coex_cfg_init,
.coex_set_ant_switch = rtw8821a_coex_cfg_ant_switch,
@@ -1118,7 +1144,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = {
.rx_buf_desc_sz = 8,
.phy_efuse_size = 512,
.log_efuse_size = 512,
- .ptct_efuse_size = 96 + 1, /* TODO or just 18? */
+ .ptct_efuse_size = 0,
.txff_size = 65536,
.rxff_size = 16128,
.rsvd_drv_pg_num = 8,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
index 730018773e1c..a01744b64e8d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -9,8 +9,58 @@
#include "usb.h"
static const struct usb_device_id rtw_8821au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0820, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8822, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0823, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x0242, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x029b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0953, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4007, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9052, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0023, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* HAWKING */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3314, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3318, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab32, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x804b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0120, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3823, 0x6249, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Obihai */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa813, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb611, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 0270225b9c20..eb7e34c545d0 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1206,6 +1206,24 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_pd_default + new_lvl * 2);
}
+static void rtw8821c_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8821c_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -1655,6 +1673,7 @@ static const struct rtw_chip_ops rtw8821c_ops = {
.config_bfee = rtw8821c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .led_set = rtw8821c_led_set,
.fill_txdesc_checksum = rtw8821c_fill_txdesc_checksum,
.coex_set_init = rtw8821c_coex_cfg_init,
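
rtw8821c_led_set() (and the identical 8822b/8822c versions below) uses u32p_replace_bits() to rewrite only the LED2 mode field of REG_LED_CFG. A hedged userspace model of that read-modify-write, with the kernel helper re-implemented for illustration:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u - (1u << (l)) + 1) & (~0u >> (31 - (h))))
#define BIT_LED2_CM	GENMASK(18, 16)
#define BIT_LED2_SV	(1u << 19)

/* Stand-in for the kernel's u32p_replace_bits(). */
static void u32p_replace_bits(uint32_t *p, uint32_t val, uint32_t mask)
{
	*p = (*p & ~mask) | ((val * (mask & -mask)) & mask);
}

int main(void)
{
	uint32_t ledcfg = 0xffffffff;	/* pretend register readback */

	u32p_replace_bits(&ledcfg, 0, BIT_LED2_CM);	/* SW control mode */
	ledcfg |= BIT_LED2_SV;		/* as in the driver: LED_OFF sets SV */
	printf("ledcfg = 0x%08x\n", ledcfg);
	return 0;
}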
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index 7a33ebd612ed..954e93c8020d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -27,7 +27,7 @@ struct rtw8821cu_efuse {
u8 res11[0xcf];
u8 package_type; /* 0x1fb */
u8 res12[0x4];
-};
+} __packed;
struct rtw8821ce_efuse {
u8 mac_addr[ETH_ALEN]; /* 0xd0 */
@@ -47,7 +47,8 @@ struct rtw8821ce_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 res4[3];
@@ -63,7 +64,7 @@ struct rtw8821ce_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8821cs_efuse {
u8 res4[0x4a]; /* 0xd0 */
@@ -101,7 +102,7 @@ struct rtw8821c_efuse {
struct rtw8821cu_efuse u;
struct rtw8821cs_efuse s;
};
-};
+} __packed;
static inline void
_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 739809f4cab5..7f03903ddf4b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1566,6 +1566,24 @@ static void rtw8822b_adaptivity(struct rtw_dev *rtwdev)
rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
}
+static void rtw8822b_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8822b_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -2146,6 +2164,7 @@ static const struct rtw_chip_ops rtw8822b_ops = {
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
.adaptivity_init = rtw8822b_adaptivity_init,
.adaptivity = rtw8822b_adaptivity,
+ .led_set = rtw8822b_led_set,
.fill_txdesc_checksum = rtw8822b_fill_txdesc_checksum,
.coex_set_init = rtw8822b_coex_cfg_init,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 0514958fb57c..9fca9ba67c90 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -27,7 +27,7 @@ struct rtw8822bu_efuse {
u8 res11[0xcf];
u8 package_type; /* 0x1fb */
u8 res12[0x4];
-};
+} __packed;
struct rtw8822be_efuse {
u8 mac_addr[ETH_ALEN]; /* 0xd0 */
@@ -47,7 +47,8 @@ struct rtw8822be_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 res4[3];
@@ -63,7 +64,7 @@ struct rtw8822be_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8822bs_efuse {
u8 res4[0x4a]; /* 0xd0 */
@@ -103,7 +104,7 @@ struct rtw8822b_efuse {
struct rtw8822bu_efuse u;
struct rtw8822bs_efuse s;
};
-};
+} __packed;
static inline void
_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index ab620a0b1dfc..8883300fc6ad 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -67,6 +67,12 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-805UBH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4011, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index af6b76937f1d..ec362a817f5f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4537,6 +4537,24 @@ static void rtw8822c_adaptivity(struct rtw_dev *rtwdev)
rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
}
+static void rtw8822c_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 ledcfg;
+
+ ledcfg = rtw_read32(rtwdev, REG_LED_CFG);
+ u32p_replace_bits(&ledcfg, BIT_LED_MODE_SW_CTRL, BIT_LED2_CM);
+ ledcfg &= ~BIT_GPIO13_14_WL_CTRL_EN;
+
+ if (brightness == LED_OFF)
+ ledcfg |= BIT_LED2_SV;
+ else
+ ledcfg &= ~BIT_LED2_SV;
+
+ rtw_write32(rtwdev, REG_LED_CFG, ledcfg);
+}
+
static void rtw8822c_fill_txdesc_checksum(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *txdesc)
@@ -4964,6 +4982,7 @@ static const struct rtw_chip_ops rtw8822c_ops = {
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
.config_txrx_mode = rtw8822c_config_trx_mode,
+ .led_set = rtw8822c_led_set,
.fill_txdesc_checksum = rtw8822c_fill_txdesc_checksum,
.coex_set_init = rtw8822c_coex_cfg_init,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index e2b383d633cd..fc62b67a15f2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -14,7 +14,7 @@ struct rtw8822cu_efuse {
u8 res1[3];
u8 mac_addr[ETH_ALEN]; /* 0x157 */
u8 res2[0x3d];
-};
+} __packed;
struct rtw8822cs_efuse {
u8 res0[0x4a]; /* 0x120 */
@@ -39,7 +39,8 @@ struct rtw8822ce_efuse {
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
- u8 res2:3;
+ u8 res2_1:1;
+ u8 res2_2:2;
u8 obff_cap:2;
u8 res3:4;
u8 class_code[3];
@@ -55,7 +56,7 @@ struct rtw8822ce_efuse {
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
-};
+} __packed;
struct rtw8822c_efuse {
__le16 rtl_id;
@@ -102,7 +103,7 @@ struct rtw8822c_efuse {
struct rtw8822cu_efuse u;
struct rtw8822cs_efuse s;
};
-};
+} __packed;
enum rtw8822c_dpk_agc_phase {
RTW_DPK_GAIN_CHECK,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 799230eb5f16..e024061bdbf7 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1192,6 +1192,8 @@ static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hw *hw = rtwdev->hw;
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
/* enqueue to wait for tx report */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 8d6db68246f1..c4908db4ff0e 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -7,6 +7,7 @@
#include <linux/mutex.h>
#include "main.h"
#include "debug.h"
+#include "mac.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
@@ -547,49 +548,58 @@ static void rtw_usb_rx_handler(struct work_struct *work)
{
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
struct rtw_dev *rtwdev = rtwusb->rtwdev;
- const struct rtw_chip_info *chip = rtwdev->chip;
- u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
struct ieee80211_rx_status rx_status;
- u32 pkt_offset, next_pkt, urb_len;
struct rtw_rx_pkt_stat pkt_stat;
- struct sk_buff *next_skb;
+ struct sk_buff *rx_skb;
struct sk_buff *skb;
+ u32 pkt_desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u32 max_skb_len = pkt_desc_sz + PHY_STATUS_SIZE * 8 +
+ IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ u32 pkt_offset, next_pkt, skb_len;
u8 *rx_desc;
int limit;
for (limit = 0; limit < 200; limit++) {
- skb = skb_dequeue(&rtwusb->rx_queue);
- if (!skb)
+ rx_skb = skb_dequeue(&rtwusb->rx_queue);
+ if (!rx_skb)
break;
if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(rx_skb);
continue;
}
- urb_len = skb->len;
+ rx_desc = rx_skb->data;
do {
- rx_desc = skb->data;
rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat,
&rx_status);
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
pkt_stat.shift;
- next_pkt = round_up(pkt_stat.pkt_len + pkt_offset, 8);
+ skb_len = pkt_stat.pkt_len + pkt_offset;
+ if (skb_len > max_skb_len) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "skipping too big packet: %u\n",
+ skb_len);
+ goto skip_packet;
+ }
- if (urb_len >= next_pkt + pkt_desc_sz)
- next_skb = skb_clone(skb, GFP_KERNEL);
- else
- next_skb = NULL;
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (!skb) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "failed to allocate RX skb of size %u\n",
+ skb_len);
+ goto skip_packet;
+ }
+
+ skb_put_data(skb, rx_desc, skb_len);
if (pkt_stat.is_c2h) {
- skb_trim(skb, pkt_stat.pkt_len + pkt_offset);
rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
} else {
skb_pull(skb, pkt_offset);
- skb_trim(skb, pkt_stat.pkt_len);
rtw_update_rx_freq_for_invalid(rtwdev, skb,
&rx_status,
&pkt_stat);
@@ -598,37 +608,75 @@ static void rtw_usb_rx_handler(struct work_struct *work)
ieee80211_rx_irqsafe(rtwdev->hw, skb);
}
- skb = next_skb;
- if (skb)
- skb_pull(skb, next_pkt);
+skip_packet:
+ next_pkt = round_up(skb_len, 8);
+ rx_desc += next_pkt;
+ } while (rx_desc + pkt_desc_sz < rx_skb->data + rx_skb->len);
- urb_len -= next_pkt;
- } while (skb);
+ if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW_USB_RX_SKB_NUM)
+ dev_kfree_skb_any(rx_skb);
+ else
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
}
}
static void rtw_usb_read_port_complete(struct urb *urb);
-static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb, struct rx_usb_ctrl_block *rxcb)
+static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb,
+ struct rx_usb_ctrl_block *rxcb,
+ gfp_t gfp)
{
struct rtw_dev *rtwdev = rtwusb->rtwdev;
+ struct sk_buff *rx_skb;
int error;
- rxcb->rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_ATOMIC);
- if (!rxcb->rx_skb)
- return;
+ rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
+ if (!rx_skb)
+ rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, gfp);
+
+ if (!rx_skb)
+ goto try_later;
+
+ skb_reset_tail_pointer(rx_skb);
+ rx_skb->len = 0;
+
+ rxcb->rx_skb = rx_skb;
usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
usb_rcvbulkpipe(rtwusb->udev, rtwusb->pipe_in),
rxcb->rx_skb->data, RTW_USB_MAX_RECVBUF_SZ,
rtw_usb_read_port_complete, rxcb);
- error = usb_submit_urb(rxcb->rx_urb, GFP_ATOMIC);
+ error = usb_submit_urb(rxcb->rx_urb, gfp);
if (error) {
- kfree_skb(rxcb->rx_skb);
+ skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);
+
if (error != -ENODEV)
rtw_err(rtwdev, "Err sending rx data urb %d\n",
error);
+
+ if (error == -ENOMEM)
+ goto try_later;
+ }
+
+ return;
+
+try_later:
+ rxcb->rx_skb = NULL;
+ queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
+}
+
+static void rtw_usb_rx_resubmit_work(struct work_struct *work)
+{
+ struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_urb_work);
+ struct rx_usb_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+
+ if (!rxcb->rx_skb)
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
}
}
@@ -644,15 +692,16 @@ static void rtw_usb_read_port_complete(struct urb *urb)
urb->actual_length < 24) {
rtw_err(rtwdev, "failed to get urb length:%d\n",
urb->actual_length);
- if (skb)
- dev_kfree_skb_any(skb);
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
} else {
skb_put(skb, urb->actual_length);
skb_queue_tail(&rtwusb->rx_queue, skb);
queue_work(rtwusb->rxwq, &rtwusb->rx_work);
}
- rtw_usb_rx_resubmit(rtwusb, rxcb);
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
} else {
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
+
switch (urb->status) {
case -EINVAL:
case -EPIPE:
@@ -670,8 +719,6 @@ static void rtw_usb_read_port_complete(struct urb *urb)
rtw_err(rtwdev, "status %d\n", urb->status);
break;
}
- if (skb)
- dev_kfree_skb_any(skb);
}
}
@@ -789,6 +836,30 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}
+static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ u8 size, timeout;
+ u16 val16;
+
+ if (!enable) {
+ size = 0x0;
+ timeout = 0x1;
+ } else if (rtwusb->udev->speed == USB_SPEED_SUPER) {
+ size = 0x6;
+ timeout = 0x1a;
+ } else {
+ size = 0x5;
+ timeout = 0x20;
+ }
+
+ val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
+ u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);
+
+ rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
+ rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
+}
+
static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
switch (rtwdev->chip->id) {
@@ -797,6 +868,10 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8821C:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
+ case RTW_CHIP_TYPE_8821A:
+ case RTW_CHIP_TYPE_8812A:
+ rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
+ break;
case RTW_CHIP_TYPE_8723D:
/* Doesn't like aggregation. */
break;
@@ -831,16 +906,26 @@ static struct rtw_hci_ops rtw_usb_ops = {
static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct sk_buff *rx_skb;
+ int i;
- rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
+ rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
if (!rtwusb->rxwq) {
rtw_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
}
skb_queue_head_init(&rtwusb->rx_queue);
+ skb_queue_head_init(&rtwusb->rx_free_queue);
INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+ INIT_WORK(&rtwusb->rx_urb_work, rtw_usb_rx_resubmit_work);
+
+ for (i = 0; i < RTW_USB_RX_SKB_NUM; i++) {
+ rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_KERNEL);
+ if (rx_skb)
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
+ }
return 0;
}
@@ -853,7 +938,7 @@ static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
- rtw_usb_rx_resubmit(rtwusb, rxcb);
+ rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_KERNEL);
}
}
@@ -865,6 +950,8 @@ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
flush_workqueue(rtwusb->rxwq);
destroy_workqueue(rtwusb->rxwq);
+
+ skb_queue_purge(&rtwusb->rx_free_queue);
}
static int rtw_usb_init_tx(struct rtw_dev *rtwdev)
@@ -930,6 +1017,32 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
+static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ enum usb_device_speed cur_speed = rtwusb->udev->speed;
+ u8 hci_opt;
+
+ if (cur_speed == USB_SPEED_HIGH) {
+ hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);
+
+ if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
+ rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
+ rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
+ rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
+ rtw_write8(rtwdev, 0x3d, 0x3);
+ /* usb disconnect */
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);
+ return 1;
+ }
+ } else if (cur_speed == USB_SPEED_SUPER) {
+ rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
+ rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
+ }
+
+ return 0;
+}
+
static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
enum usb_device_speed cur_speed;
@@ -979,11 +1092,22 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
return 1;
}
+static bool rtw_usb3_chip_old(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8812A;
+}
+
+static bool rtw_usb3_chip_new(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8822C ||
+ chip_id == RTW_CHIP_TYPE_8822B;
+}
+
static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
u8 id = rtwdev->chip->id;
- if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
+ if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
return 0;
if (!rtwdev->efuse.usb_mode_switch) {
@@ -998,7 +1122,75 @@ static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
return 0;
}
- return rtw_usb_switch_mode_new(rtwdev);
+ if (rtw_usb3_chip_old(id))
+ return rtw_usb_switch_mode_old(rtwdev);
+ else
+ return rtw_usb_switch_mode_new(rtwdev);
+}
+
+#define USB_REG_PAGE 0xf4
+#define USB_PHY_PAGE0 0x9b
+#define USB_PHY_PAGE1 0xbb
+
+static void rtw_usb_phy_write(struct rtw_dev *rtwdev, u8 addr, u16 data,
+ enum usb_device_speed speed)
+{
+ if (speed == USB_SPEED_SUPER) {
+ rtw_write8(rtwdev, REG_USB3_PHY_DAT_L, data & 0xff);
+ rtw_write8(rtwdev, REG_USB3_PHY_DAT_H, data >> 8);
+ rtw_write8(rtwdev, REG_USB3_PHY_ADR, addr | BIT_USB3_PHY_ADR_WR);
+ } else if (speed == USB_SPEED_HIGH) {
+ rtw_write8(rtwdev, REG_USB2_PHY_DAT, data);
+ rtw_write8(rtwdev, REG_USB2_PHY_ADR, addr);
+ rtw_write8(rtwdev, REG_USB2_PHY_CMD, BIT_USB2_PHY_CMD_TRG);
+ }
+}
+
+static void rtw_usb_page_switch(struct rtw_dev *rtwdev,
+ enum usb_device_speed speed, u8 page)
+{
+ if (speed == USB_SPEED_SUPER)
+ return;
+
+ rtw_usb_phy_write(rtwdev, USB_REG_PAGE, page, speed);
+}
+
+static void rtw_usb_phy_cfg(struct rtw_dev *rtwdev,
+ enum usb_device_speed speed)
+{
+ const struct rtw_intf_phy_para *para = NULL;
+ u16 offset;
+
+ if (!rtwdev->chip->intf_table)
+ return;
+
+ if (speed == USB_SPEED_SUPER)
+ para = rtwdev->chip->intf_table->usb3_para;
+ else if (speed == USB_SPEED_HIGH)
+ para = rtwdev->chip->intf_table->usb2_para;
+
+ if (!para)
+ return;
+
+ for ( ; para->offset != 0xffff; para++) {
+ if (!(para->cut_mask & BIT(rtwdev->hal.cut_version)))
+ continue;
+
+ offset = para->offset;
+
+ if (para->ip_sel == RTW_IP_SEL_MAC) {
+ rtw_write8(rtwdev, offset, para->value);
+ } else {
+ if (offset > 0x100)
+ rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE1);
+ else
+ rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE0);
+
+ offset &= 0xff;
+
+ rtw_usb_phy_write(rtwdev, offset, para->value, speed);
+ }
+ }
}
int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -1056,6 +1248,9 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ rtw_usb_phy_cfg(rtwdev, USB_SPEED_HIGH);
+ rtw_usb_phy_cfg(rtwdev, USB_SPEED_SUPER);
+
ret = rtw_usb_switch_mode(rtwdev);
if (ret) {
/* Not a fail, but we do need to skip rtw_register_hw. */
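
The reworked rtw_usb_rx_handler() above walks one aggregated URB buffer in place, copying each packet out and advancing the descriptor pointer by the packet length rounded up to 8 bytes. A toy walk over hypothetical packet lengths showing the stride arithmetic:

#include <stdio.h>
#include <stdint.h>

#define round_up(x, a)	((((x) + (a) - 1) / (a)) * (a))
#define DESC_SZ		24	/* hypothetical RX descriptor size */

int main(void)
{
	uint32_t skb_len[] = { 60, 1500, 29 };	/* desc + payload, made up */
	uint32_t urb_len = 1664;		/* pretend URB payload size */
	uint32_t off = 0;

	for (int i = 0; i < 3 && off + DESC_SZ < urb_len; i++) {
		printf("packet %d at offset %u, len %u\n", i, off, skb_len[i]);
		off += round_up(skb_len[i], 8);	/* packets are 8-byte aligned */
	}
	return 0;
}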
diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h
index 86697a5c0103..9b695b688b24 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.h
+++ b/drivers/net/wireless/realtek/rtw88/usb.h
@@ -38,6 +38,7 @@
#define RTW_USB_RXAGG_TIMEOUT 10
#define RTW_USB_RXCB_NUM 4
+#define RTW_USB_RX_SKB_NUM 8
#define RTW_USB_EP_MAX 4
@@ -81,7 +82,9 @@ struct rtw_usb {
struct rx_usb_ctrl_block rx_cb[RTW_USB_RXCB_NUM];
struct sk_buff_head rx_queue;
+ struct sk_buff_head rx_free_queue;
struct work_struct rx_work;
+ struct work_struct rx_urb_work;
};
static inline struct rtw_usb_tx_data *rtw_usb_get_tx_data(struct sk_buff *skb)
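
The new rx_free_queue turns completed RX buffers into a small pool (RTW_USB_RX_SKB_NUM deep) so the URB completion path rarely has to allocate. A toy pool with plain malloc/free in place of sk_buffs, sizes hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define POOL_NUM	8	/* mirrors RTW_USB_RX_SKB_NUM */
#define BUF_SZ		4096	/* hypothetical buffer size */

static void *pool[POOL_NUM];
static int pool_len;

static void *buf_get(void)
{
	if (pool_len)
		return pool[--pool_len];	/* reuse a recycled buffer */
	return malloc(BUF_SZ);			/* pool empty: allocate */
}

static void buf_put(void *buf)
{
	if (pool_len < POOL_NUM)
		pool[pool_len++] = buf;		/* recycle for the next URB */
	else
		free(buf);			/* pool full: really free */
}

int main(void)
{
	void *b = buf_get();

	buf_put(b);
	printf("pool now holds %d buffer(s)\n", pool_len);
	return 0;
}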
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index d2a3361669d7..b1c86cdd9c0e 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -96,17 +96,19 @@ config RTW89_8852CE
802.11ax PCIe wireless network (Wi-Fi 6E) adapter
config RTW89_8922AE
- tristate "Realtek 8922AE PCI wireless network (Wi-Fi 7) adapter"
+ tristate "Realtek 8922AE/8922AE-VS PCI wireless network (Wi-Fi 7) adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
select RTW89_8922A
help
- Select this option will enable support for 8922AE chipset
+	  Select this option to enable support for the 8922AE/8922AE-VS chipset
802.11be PCIe wireless network (Wi-Fi 7) adapter
supporting 2x2 2GHz/5GHz/6GHz 4096-QAM 160MHz channels.
+	  The 8922AE-VS variant has the same features except that it is limited to 1024-QAM.
+
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 908e980a4b72..f5dedb12c129 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -148,3 +148,50 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
ACPI_FREE(obj);
return ret;
}
+
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle root, handle;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 buf_len;
+ int ret = 0;
+
+ root = ACPI_HANDLE(rtwdev->dev);
+ if (!root)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = buf.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len != sizeof(*res)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
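
rtw89_acpi_evaluate_rtag() accepts the ACPI buffer only if its length matches the packed result struct exactly, then copies it wholesale. A sketch of that validate-then-copy step with made-up array dimensions (the real RTW89_ANT_GAIN_* constants live elsewhere in the driver):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CHAIN_NUM	2	/* hypothetical */
#define SUBBAND_NR	8	/* hypothetical */

struct rtag_result {
	uint8_t tag[4];
	uint8_t revision;
	uint32_t domain;	/* __le32 in the driver */
	uint8_t ant_gain[CHAIN_NUM][SUBBAND_NR];
} __attribute__((packed));

static int parse_rtag(const uint8_t *buf, size_t len, struct rtag_result *res)
{
	if (len != sizeof(*res))
		return -1;	/* mirrors the driver's -EINVAL path */
	memcpy(res, buf, sizeof(*res));
	return 0;
}

int main(void)
{
	uint8_t raw[sizeof(struct rtag_result)] = { 'R', 'T', 'A', 'G', 1 };
	struct rtag_result res;

	if (parse_rtag(raw, sizeof(raw), &res))
		return 1;
	printf("tag %.4s revision %u\n", (const char *)res.tag,
	       (unsigned)res.revision);
	return 0;
}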
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index d274be1775bf..b43ab106e44d 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -63,8 +63,17 @@ struct rtw89_acpi_dsm_result {
} u;
};
+struct rtw89_acpi_rtag_result {
+ u8 tag[4];
+ u8 revision;
+ __le32 domain;
+ u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+} __packed;
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8ef59994c0db..8fa1e6c1ce13 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -135,8 +135,8 @@ again:
}
static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
- struct rtw89_sec_cam_entry *sec_cam,
- struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam,
+ const struct ieee80211_key_conf *key,
u8 *key_idx)
{
u8 idx;
@@ -246,8 +246,8 @@ static int __rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct ieee80211_key_conf *key,
- struct rtw89_sec_cam_entry *sec_cam)
+ const struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam)
{
struct rtw89_addr_cam_entry *addr_cam;
u8 key_idx = 0;
@@ -286,6 +286,22 @@ static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return 0;
}
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ const struct rtw89_sec_cam_entry *sec_cam;
+
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
+ if (!sec_cam)
+ return -ENOENT;
+
+ return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link, rtwsta_link,
+ sec_cam->key_conf, sec_cam);
+}
+
static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -306,6 +322,9 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = vif_to_rtwvif(vif);
+ if (rtwsta)
+ clear_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
rtwsta_link = rtwsta ? rtwsta->links[link_id] : NULL;
if (rtwsta && !rtwsta_link)
@@ -369,6 +388,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
+ set_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
return 0;
}
@@ -410,6 +431,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
sec_cam->len = RTW89_SEC_CAM_LEN;
sec_cam->ext_key = ext_key;
memcpy(sec_cam->key, key->key, key->keylen);
+
+ sec_cam->key_conf = key;
+
ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam);
if (ret) {
rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 3134ebf08825..8fd2d776408e 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -578,4 +578,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index fb9449930c40..4df4e04c3e67 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -391,11 +391,12 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
list_del(&role->mgnt_entry);
list_add(&role->mgnt_entry, &mgnt->active_list);
- break;
+ goto fill;
}
}
}
+fill:
list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
if (unlikely(pos >= RTW89_MAX_INTERFACE_NUM)) {
rtw89_warn(rtwdev,
@@ -801,7 +802,7 @@ fill:
mcc_role->limit.max_toa = max_toa_us / 1024;
mcc_role->limit.max_tob = max_tob_us / 1024;
- mcc_role->limit.max_dur = max_dur_us / 1024;
+ mcc_role->limit.max_dur = mcc_role->limit.max_toa + mcc_role->limit.max_tob;
mcc_role->limit.enable = true;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
@@ -2530,7 +2531,25 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
hal->entity_pause = true;
}
-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+static void rtw89_chanctx_proceed_cb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *parm)
+{
+ int ret;
+
+ if (!parm || !parm->cb)
+ return;
+
+ ret = parm->cb(rtwdev, parm->data);
+ if (ret)
+ rtw89_warn(rtwdev, "%s (%s): cb failed: %d\n", __func__,
+ parm->caller ?: "unknown", ret);
+}
+
+/* Pass @cb_parm if there is a @cb_parm->cb which needs to be invoked right
+ * after calling rtw89_set_channel() and right before proceeding with the
+ * entity according to mode.
+ */
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *cb_parm)
{
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
@@ -2538,14 +2557,18 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
lockdep_assert_held(&rtwdev->mutex);
- if (!hal->entity_pause)
+ if (unlikely(!hal->entity_pause)) {
+ rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
return;
+ }
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");
hal->entity_pause = false;
rtw89_set_channel(rtwdev);
+ rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
+
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC:
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 2eb31dff2083..092a6f676894 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,12 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_chanctx_cb_parm {
+ int (*cb)(struct rtw89_dev *rtwdev, void *data);
+ void *data;
+ const char *caller;
+};
+
struct rtw89_entity_weight {
unsigned int active_chanctxs;
unsigned int active_roles;
@@ -100,7 +106,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_pause_reasons rsn);
-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev);
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_cb_parm *cb_parm);
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
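
rtw89_chanctx_proceed() now takes an optional rtw89_chanctx_cb_parm so a caller can run a hook right after the channel is set. A minimal model of that optional-callback-struct pattern (toy types, not the driver's):

#include <stdio.h>

struct cb_parm {
	int (*cb)(void *data);
	void *data;
	const char *caller;	/* for diagnostics, may be NULL */
};

static void proceed(const struct cb_parm *parm)
{
	int ret;

	/* ... the channel switch would happen here ... */

	if (!parm || !parm->cb)
		return;		/* the callback is optional */

	ret = parm->cb(parm->data);
	if (ret)
		printf("%s: cb failed: %d\n",
		       parm->caller ? parm->caller : "unknown", ret);
}

static int my_cb(void *data)
{
	return *(int *)data ? 0 : -1;
}

int main(void)
{
	int ok = 0;	/* force the failure report */
	struct cb_parm parm = { .cb = my_cb, .data = &ok, .caller = "main" };

	proceed(&parm);
	proceed(NULL);	/* legacy callers simply pass NULL */
	return 0;
}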
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index e5b2968c1431..85f739f1173d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -203,6 +203,55 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
+#define RTW89_6GHZ_SPAN_HEAD 6145
+#define RTW89_6GHZ_SPAN_IDX(center_freq) \
+ ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
+
+#define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \
+ [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
+ .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
+ .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
+ .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
+ }
+
+/* Since 6GHz subbands are not edge aligned, some cases span two subbands.
+ * In the following, we describe each of them with rtw89_6ghz_span.
+ */
+static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = {
+ RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
+};
+
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
+{
+ int idx;
+
+ if (center_freq >= RTW89_6GHZ_SPAN_HEAD) {
+ idx = RTW89_6GHZ_SPAN_IDX(center_freq);
+		/* To decrease the size of rtw89_overlapping_6ghz[],
+		 * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs
+		 * to make the first span index 0 of the table. So, if the
+		 * center frequency is less than the first one, the index
+		 * will be negative.
+		 */
+ if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz))
+ return &rtw89_overlapping_6ghz[idx];
+ }
+
+ return NULL;
+}
+
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
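
The span lookup above is pure arithmetic: RTW89_6GHZ_SPAN_IDX() maps a center frequency to a table slot in 10 MHz steps from 6145 MHz, and anything below the head yields a negative index that the range check rejects. A few worked values:

#include <stdio.h>

#define SPAN_HEAD		6145
#define SPAN_IDX(center_freq)	((((int)(center_freq) - SPAN_HEAD) / 5) / 2)

int main(void)
{
	printf("6145 MHz -> %d\n", SPAN_IDX(6145));	/*  0 */
	printf("6165 MHz -> %d\n", SPAN_IDX(6165));	/*  2 */
	printf("6505 MHz -> %d\n", SPAN_IDX(6505));	/* 36 */
	printf("6135 MHz -> %d\n", SPAN_IDX(6135));	/* -1: below head, rejected */
	return 0;
}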
@@ -931,6 +980,11 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
bool is_bmc;
u16 seq;
+ if (tx_req->sta)
+ desc_info->mlo = tx_req->sta->mlo;
+ else if (tx_req->vif)
+ desc_info->mlo = ieee80211_vif_is_mld(tx_req->vif);
+
seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
tx_type = rtw89_core_get_tx_type(rtwdev, skb);
@@ -938,7 +992,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
addr_cam = rtw89_get_addr_cam_of(tx_req->rtwvif_link,
tx_req->rtwsta_link);
- if (addr_cam->valid)
+ if (addr_cam->valid && desc_info->mlo)
upd_wlan_hdr = true;
}
is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
@@ -1078,6 +1132,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
tx_req.skb = skb;
+ tx_req.vif = vif;
+ tx_req.sta = sta;
tx_req.rtwvif_link = rtwvif_link;
tx_req.rtwsta_link = rtwsta_link;
@@ -2140,6 +2196,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (phy_ppdu)
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
+
+ pkt_stat->beacon_rate = desc_info->data_rate;
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -2317,6 +2375,12 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
+{
+ if (!rx_status->signal)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2333,6 +2397,8 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
+ rtw89_core_validate_rx_signal(rx_status);
+
/* In low power mode, it does RX in thread context. */
local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
@@ -2468,6 +2534,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
+ struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
struct rtw89_rxdesc_short_v2 *rxd_s;
struct rtw89_rxdesc_long_v2 *rxd_l;
u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
@@ -2515,6 +2582,12 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
desc_info->ready = true;
+ if (phy_rtp_len == sizeof(*rxd_rpt)) {
+ rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
+ desc_info->rxd_len);
+ desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
+ }
+
if (!desc_info->long_rxdesc)
return;
@@ -2657,6 +2730,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->flag |= RX_FLAG_MACTIME_START;
rx_status->mactime = desc_info->free_run_cnt;
+ rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
}
@@ -2664,10 +2738,6 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- /* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. */
- if (rtwdev->support_mlo)
- return RTW89_PS_MODE_NONE;
-
if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
@@ -2700,6 +2770,41 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
}
}
+static
+void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb,
+ const struct rtw89_rx_desc_info *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u8 macid = desc->mac_id;
+
+ if (!refcount_read(&rtwdev->refcount_ap_info))
+ return;
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (!rtwsta_link)
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags))
+ goto out;
+
+ sta = rtwsta_to_sta(rtwsta);
+ if (ieee80211_is_pspoll(hdr->frame_control))
+ ieee80211_sta_pspoll(sta);
+ else if (ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))
+ ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr));
+
+out:
+ rcu_read_unlock();
+}
+
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb)
@@ -2722,6 +2827,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
+ rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
if (desc_info->long_rxdesc &&
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
@@ -3131,6 +3237,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -3146,7 +3253,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
goto out;
}
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -3257,7 +3364,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
roc->state = RTW89_ROC_IDLE;
rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
- rtw89_chanctx_proceed(rtwdev);
+ rtw89_chanctx_proceed(rtwdev, NULL);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
@@ -3367,21 +3474,10 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
return tfc_changed;
}
-static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
-{
- if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
- rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
- rtw89_enter_lps(rtwdev, rtwvif_link, true);
-}
-
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link;
+ struct ieee80211_vif *vif;
struct rtw89_vif *rtwvif;
- unsigned int link_id;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (rtwvif->tdls_peer)
@@ -3393,8 +3489,13 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
continue;
- rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_vif_enter_lps(rtwdev, rtwvif_link);
+ vif = rtwvif_to_vif(rtwvif);
+
+ if (!(vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_P2P_CLIENT))
+ continue;
+
+ rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@@ -3699,6 +3800,8 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
{
const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ rtw89_assoc_link_clr(rtwsta_link);
+
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
@@ -3748,6 +3851,22 @@ int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
+static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+ if (!bss_conf->he_support ||
+ bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE)
+ return false;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta) &&
+ !rtw89_sta_link_has_er_su_4xhe08(link_sta))
+ return false;
+
+ return true;
+}
+
int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@@ -3758,12 +3877,11 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtwsta_link);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct ieee80211_link_sta *link_sta;
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
if (sta->tdls) {
- struct ieee80211_link_sta *link_sta;
-
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
@@ -3814,9 +3932,8 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
- if (bss_conf->he_support &&
- !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
- rtwsta_link->er_cap = true;
+ link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
+ rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta);
rcu_read_unlock();
@@ -3834,6 +3951,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
}
+ rtw89_assoc_link_set(rtwsta_link);
return ret;
}
@@ -4117,13 +4235,17 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_eht_mcs_nss_supp *eht_nss;
struct ieee80211_sta_eht_cap *eht_cap;
struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_mcs_12_13 = true;
bool support_320mhz = false;
+ u8 val, val_mcs13;
int sts = 8;
- u8 val;
if (chip->chip_gen == RTW89_CHIP_AX)
return;
+ if (hal->no_mcs_12_13)
+ support_mcs_12_13 = false;
+
if (band == NL80211_BAND_6GHZ &&
chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
support_320mhz = true;
@@ -4181,16 +4303,18 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ val_mcs13 = support_mcs_12_13 ? val : 0;
+
eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val_mcs13;
eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val_mcs13;
if (support_320mhz) {
eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
- eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val_mcs13;
}
}
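A worked example of the NSS encoding above, assuming the usual 4-bit IEEE80211_EHT_MCS_NSS_RX/IEEE80211_EHT_MCS_NSS_TX masks (0x0f/0xf0); the values are illustrative:

	/* a 2x2 chip: hal->rx_nss = hal->tx_nss = 2 */
	val = u8_encode_bits(2, IEEE80211_EHT_MCS_NSS_RX) |
	      u8_encode_bits(2, IEEE80211_EHT_MCS_NSS_TX);	/* 0x22 */
	val_mcs13 = support_mcs_12_13 ? val : 0;		/* 0x22 or 0x00 */

With no_mcs_12_13 set, every rx_tx_mcs13_max_nss field advertises zero spatial streams, so peers negotiate EHT MCS 0-11 only while the MCS 9/11 NSS fields stay untouched.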
@@ -4433,6 +4557,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_phy_dm_init(rtwdev);
rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
+ rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
rtw89_tas_reset(rtwdev);
@@ -4755,6 +4880,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
rtw89_tas_init(rtwdev);
+ rtw89_phy_ant_gain_init(rtwdev);
return 0;
}
@@ -5100,6 +5226,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ ieee80211_hw_set(hw, AP_LINK_PS);
+
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -5220,7 +5349,8 @@ EXPORT_SYMBOL(rtw89_core_unregister);
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
- const struct rtw89_chip_info *chip)
+ const struct rtw89_chip_info *chip,
+ const struct rtw89_chip_variant *variant)
{
struct rtw89_fw_info early_fw = {};
const struct firmware *firmware;
@@ -5278,6 +5408,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->dev = device;
rtwdev->ops = ops;
rtwdev->chip = chip;
+ rtwdev->variant = variant;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
rtwdev->support_mlo = support_mlo;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 5ad32eacd0d5..ff4894c7fa8a 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -830,6 +830,7 @@ enum rtw89_phy_idx {
};
#define __RTW89_MLD_MAX_LINK_NUM 2
+#define RTW89_MLD_NON_STA_LINK_NUM 1
enum rtw89_chanctx_idx {
RTW89_CHANCTX_0 = 0,
@@ -1083,6 +1084,7 @@ struct rtw89_rx_desc_info {
u16 offset;
u16 rxd_len;
bool ready;
+ u16 rssi;
};
struct rtw89_rxdesc_short {
@@ -1125,6 +1127,11 @@ struct rtw89_rxdesc_long_v2 {
__le32 dword9;
} __packed;
+struct rtw89_rxdesc_phy_rpt_v2 {
+ __le32 dword0;
+ __le32 dword1;
+} __packed;
+
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
@@ -1163,12 +1170,15 @@ struct rtw89_tx_desc_info {
bool stbc;
bool ldpc;
bool upd_wlan_hdr;
+ bool mlo;
};
struct rtw89_core_tx_request {
enum rtw89_core_tx_type tx_type;
struct sk_buff *skb;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
struct rtw89_tx_desc_info desc_info;
@@ -3358,6 +3368,8 @@ struct rtw89_sec_cam_entry {
u8 spp_mode : 1;
/* 256 bits */
u8 key[32];
+
+ struct ieee80211_key_conf *key_conf;
};
struct rtw89_sta_link {
@@ -3621,6 +3633,9 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
+ void (*phy_rpt_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
@@ -4255,6 +4270,7 @@ struct rtw89_chip_info {
u16 support_bandwidths;
bool support_unii4;
bool support_rnr;
+ bool support_ant_gain;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@@ -4296,6 +4312,7 @@ struct rtw89_chip_info {
const struct rtw89_rfe_parms *dflt_parms;
const struct rtw89_chanctx_listener *chanctx_listener;
+ u8 txpwr_factor_bb;
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -4350,12 +4367,18 @@ struct rtw89_chip_info {
const struct rtw89_xtal_info *xtal_info;
};
+struct rtw89_chip_variant {
+ bool no_mcs_12_13: 1;
+ u32 fw_min_ver_code;
+};
+
union rtw89_bus_info {
const struct rtw89_pci_info *pci;
};
struct rtw89_driver_info {
const struct rtw89_chip_info *chip;
+ const struct rtw89_chip_variant *variant;
const struct dmi_system_id *quirks;
union rtw89_bus_info bus;
};
@@ -4448,8 +4471,13 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
RTW89_FW_FEATURE_RFK_RXDCK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
+ RTW89_FW_FEATURE_NOTIFY_AP_INFO,
+ RTW89_FW_FEATURE_CH_INFO_BE_V0,
+ RTW89_FW_FEATURE_LPS_CH_INFO,
+ RTW89_FW_FEATURE_NO_PHYCAP_P1,
};
struct rtw89_fw_suit {
@@ -4597,6 +4625,44 @@ struct rtw89_sar_info {
};
};
+enum rtw89_ant_gain_subband {
+ RTW89_ANT_GAIN_2GHZ_SUBBAND,
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_1, /* U-NII-1 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2, /* U-NII-2 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ RTW89_ANT_GAIN_SUBBAND_NR,
+};
+
+enum rtw89_ant_gain_domain_type {
+ RTW89_ANT_GAIN_ETSI = 0,
+
+ RTW89_ANT_GAIN_DOMAIN_NUM,
+};
+
+#define RTW89_ANT_GAIN_CHAIN_NUM 2
+struct rtw89_ant_gain_info {
+ s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+ u32 regd_enabled;
+};
+
+struct rtw89_6ghz_span {
+ enum rtw89_sar_subband sar_subband_low;
+ enum rtw89_sar_subband sar_subband_high;
+ enum rtw89_ant_gain_subband ant_gain_subband_low;
+ enum rtw89_ant_gain_subband ant_gain_subband_high;
+};
+
+#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
+#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
+
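Each span row pairs the SAR and antenna-gain subbands that one stretch of 6 GHz channels maps onto; a zero *_subband_high marks an empty row, which is exactly what the two *_VALID() macros test. A hypothetical table entry, assuming the driver's existing RTW89_SAR_6GHZ_SUBBAND_* names (values illustrative, not from this patch):

	{	/* a span straddling the U-NII-5 low/high split */
		.sar_subband_low = RTW89_SAR_6GHZ_SUBBAND_5_L,
		.sar_subband_high = RTW89_SAR_6GHZ_SUBBAND_5_H,
		.ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L,
		.ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H,
	},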
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
@@ -4672,7 +4738,7 @@ enum rtw89_dm_type {
};
#define RTW89_THERMAL_PROT_LV_MAX 5
-#define RTW89_THERMAL_PROT_STEP 19 /* -19% for each level */
+#define RTW89_THERMAL_PROT_STEP 5 /* -5% for each level */
struct rtw89_hal {
u32 rx_fltr;
@@ -4687,6 +4753,8 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
+ bool no_mcs_12_13;
+
atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
@@ -4781,6 +4849,7 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
+ u8 beacon_rate;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
@@ -5544,6 +5613,7 @@ struct rtw89_dev {
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
+ const struct rtw89_chip_variant *variant;
const struct rtw89_pci_info *pci_info;
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
@@ -5556,6 +5626,9 @@ struct rtw89_dev {
struct rtw89_rfe_data *rfe_data;
enum rtw89_custid custid;
+ struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
+ refcount_t refcount_ap_info;
+
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
struct list_head rtwvifs_list;
@@ -5636,6 +5709,7 @@ struct rtw89_dev {
struct rtw89_regulatory_info regulatory;
struct rtw89_sar_info sar;
struct rtw89_tas_info tas;
+ struct rtw89_ant_gain_info ant_gain;
struct rtw89_btc btc;
enum rtw89_ps_mode ps_mode;
@@ -5654,10 +5728,17 @@ struct rtw89_dev {
u8 priv[] __aligned(sizeof(void *));
};
+struct rtw89_link_conf_container {
+ struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+#define RTW89_VIF_IDLE_LINK_ID 0
+
struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct list_head list;
struct list_head mgnt_entry;
+ struct rtw89_link_conf_container __rcu *snap_link_confs;
u8 mac_addr[ETH_ALEN];
__be32 ip_addr;
@@ -5689,10 +5770,18 @@ static inline bool rtw89_vif_assign_link_is_valid(struct rtw89_vif_link **rtwvif
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id))
+enum rtw89_sta_flags {
+ RTW89_REMOTE_STA_IN_PS,
+
+ NUM_OF_RTW89_STA_FLAGS,
+};
+
struct rtw89_sta {
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
+ DECLARE_BITMAP(flags, NUM_OF_RTW89_STA_FLAGS);
+
bool disassoc;
struct sk_buff_head roc_queue;
@@ -5700,6 +5789,8 @@ struct rtw89_sta {
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
+ DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5770,6 +5861,31 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
return rtwsta_link - rtwsta->links_inst;
}
+static inline void rtw89_assoc_link_set(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ rtwsta_link);
+}
+
+static inline void rtw89_assoc_link_clr(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ NULL);
+ synchronize_rcu();
+}
+
+static inline struct rtw89_sta_link *
+rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
+{
+ return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
+}
+
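These helpers follow the standard RCU publish/retire pattern: the set side publishes with rcu_assign_pointer(), and the clear side NULLs the slot and then blocks in synchronize_rcu() so no reader can still hold the retired link afterwards. A minimal reader sketch (use_link() stands in for whatever the caller does; the PWR_INT_NOTIFY C2H handler further below has the same shape):

	rcu_read_lock();
	rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
	if (rtwsta_link)
		use_link(rtwsta_link);	/* valid only until rcu_read_unlock() */
	rcu_read_unlock();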
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6194,9 +6310,19 @@ static inline struct ieee80211_bss_conf *
__rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_link_conf_container *snap;
struct ieee80211_bss_conf *bss_conf;
+ snap = rcu_dereference(rtwvif->snap_link_confs);
+ if (snap) {
+ bss_conf = snap->link_conf[rtwvif_link->link_id];
+ goto out;
+ }
+
bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]);
+
+out:
if (unlikely(!bss_conf)) {
*nolink = true;
return &vif->bss_conf;
@@ -6605,6 +6731,16 @@ static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
}
+static inline void rtw89_chip_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->phy_rpt_to_rssi)
+ chip->ops->phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6753,6 +6889,26 @@ bool rtw89_sta_has_beamformer_cap(struct ieee80211_link_sta *link_sta)
return false;
}
+static inline
+bool rtw89_sta_link_has_su_mu_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
+static inline
+bool rtw89_sta_link_has_er_su_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
enum rtw89_fw_type type)
{
@@ -6893,7 +7049,8 @@ int rtw89_core_register(struct rtw89_dev *rtwdev);
void rtw89_core_unregister(struct rtw89_dev *rtwdev);
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
- const struct rtw89_chip_info *chip);
+ const struct rtw89_chip_info *chip,
+ const struct rtw89_chip_variant *variant);
void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev);
void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id);
@@ -6908,6 +7065,8 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
unsigned int link_id);
void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 6abd88fa80ba..09fa977a6e6d 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
@@ -811,6 +812,9 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
case_REGD(MEXICO);
case_REGD(UKRAINE);
case_REGD(CN);
+ case_REGD(QATAR);
+ case_REGD(UK);
+ case_REGD(THAILAND);
}
}
@@ -882,6 +886,9 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
seq_puts(m, "[TAS]\n");
rtw89_print_tas(m, rtwdev);
+ seq_puts(m, "[DAG]\n");
+ rtw89_print_ant_gain(m, rtwdev, chan);
+
tbl = dbgfs_txpwr_tables[chip_gen];
if (!tbl) {
ret = -EOPNOTSUPP;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 2191c037d72e..5d4ad23cc3bd 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -709,11 +709,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@@ -727,7 +729,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -799,6 +806,27 @@ out:
return firmware;
}
+static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_variant *variant = rtwdev->variant;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+
+ if (!variant)
+ return 0;
+
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
+
+ if (variant->fw_min_ver_code > suit_ver_code) {
+ rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
+ variant->fw_min_ver_code);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
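A chip variant that actually exercises this check would pin fw_min_ver_code with the driver's existing RTW89_FW_VER_CODE() helper; a hypothetical sketch (name and version number illustrative, not from this patch):

static const struct rtw89_chip_variant example_chip_variant = {
	.no_mcs_12_13 = true,
	.fw_min_ver_code = RTW89_FW_VER_CODE(0, 35, 54, 0),
};

rtw89_fw_validate_ver_required() would then reject any recognized normal firmware older than 0.35.54.0 with -ENOENT, while plain (variant-less) chips skip the check entirely.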
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -815,6 +843,10 @@ int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
return ret;
normal_done:
+ ret = rtw89_fw_validate_ver_required(rtwdev);
+ if (ret)
+ return ret;
+
	/* It still works if the wowlan firmware doesn't exist. */
__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
@@ -2414,6 +2446,7 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@@ -2430,10 +2463,10 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
skb = ieee80211_proberesp_get(rtwdev->hw, vif);
break;
case RTW89_PKT_OFLD_TYPE_NULL_DATA:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
break;
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
break;
case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
skb = rtw89_eapol_get(rtwdev, rtwvif_link);
@@ -2589,14 +2622,17 @@ fail:
return ret;
}
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif_link->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ struct rtw89_vif_link *rtwvif_link;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
+ unsigned int link_id;
struct sk_buff *skb;
+ bool no_chan = true;
+ u8 phy_idx;
u32 done;
int ret;
@@ -2611,11 +2647,27 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
- h2c->info[0].central_ch = chan->channel;
- h2c->info[0].pri_ch = chan->primary_channel;
- h2c->info[0].band = chan->band_type;
- h2c->info[0].bw = chan->band_width;
- h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ phy_idx = rtwvif_link->phy_idx;
+ if (phy_idx >= ARRAY_SIZE(h2c->info))
+ continue;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ no_chan = false;
+
+ h2c->info[phy_idx].central_ch = chan->channel;
+ h2c->info[phy_idx].pri_ch = chan->primary_channel;
+ h2c->info[phy_idx].band = chan->band_type;
+ h2c->info[phy_idx].bw = chan->band_width;
+ }
+
+ if (no_chan) {
+ rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
@@ -2640,6 +2692,87 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ml_cmn_info *h2c;
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ u8 bw_idx = RTW89_BB_BW_20_40;
+ u32 len = sizeof(*h2c);
+ unsigned int link_id;
+ struct sk_buff *skb;
+ u8 gain_band;
+ u32 done;
+ u8 path;
+ int ret;
+ int i;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
+
+ h2c->fmt_id = 0x1;
+
+ h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+
+ h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
+ h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
+ h2c->band[rtwvif_link->phy_idx] = chan->band_type;
+ h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
+ if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
+ else
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
+
+ /* Fill BW20 RX gain table for beacon mode */
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ h2c->tia_gain[rtwvif_link->phy_idx][i] =
+ cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
+ }
+ memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
+ gain->lna_gain[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
+
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
@@ -4954,13 +5087,14 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem_be *elem;
struct rtw89_mac_chinfo_be *ch_info;
- struct rtw89_h2c_chinfo *h2c;
+ struct rtw89_h2c_chinfo_be *h2c;
struct sk_buff *skb;
unsigned int cond;
+ u8 ver = U8_MAX;
int skb_len;
int ret;
- static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
@@ -4969,8 +5103,11 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return -ENOMEM;
}
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
skb_put(skb, sizeof(*h2c));
- h2c = (struct rtw89_h2c_chinfo *)skb->data;
+ h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
h2c->ch_num = ch_num;
h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
@@ -4980,8 +5117,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
list_for_each_entry(ch_info, chan_list, list) {
elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
- elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
- le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
le32_encode_bits(ch_info->central_ch,
RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
@@ -5028,6 +5164,12 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
le32_encode_bits(ch_info->fw_probe0_bssids,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
+ if (ver == 0)
+ elem->w0 |=
+ le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
+ else
+ elem->w7 = le32_encode_bits(ch_info->period,
+ RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
}
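The point of moving the period: RTW89_H2C_CHINFO_BE_W0_PERIOD is an 8-bit field (GENMASK(7, 0), see the fw.h hunk below), capping a channel's dwell period at 255 units, whereas the new 16-bit RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 raises the ceiling to 65535. Firmware still reporting CH_INFO_BE_V0 expects the old layout, hence the ver == 0 branch.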
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -5171,6 +5313,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 probe_id[NUM_NL80211_BANDS];
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
+ u8 ver = U8_MAX;
void *ptr;
int ret;
u32 len;
@@ -5191,6 +5334,9 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
if (!wowlan) {
list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
if (pkt_info->wildcard_6ghz) {
@@ -5286,9 +5432,7 @@ flex_member:
le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
- opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
- RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
- le32_encode_bits(op->band_type,
+ opch->w1 = le32_encode_bits(op->band_type,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
le32_encode_bits(op->band_width,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
@@ -5314,6 +5458,13 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+
+ if (ver == 0)
+ opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
+ else
+ opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
ptr += sizeof(*opch);
}
@@ -5416,7 +5567,9 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_common *common;
struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
+ struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
u32 len = sizeof(*h2c);
@@ -5426,7 +5579,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
u32 val32;
int ret;
- if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
+ len = sizeof(*h2c_v1);
+ ver = 1;
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
len = sizeof(*h2c_v0);
ver = 0;
}
@@ -5438,17 +5594,18 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
}
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+ common = &h2c->base_v1.common;
- h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->common.dbcc.ch[path][tbl] =
+ common->dbcc.ch[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
- h2c->common.dbcc.band[path][tbl] =
+ common->dbcc.band[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].band[tbl]);
}
}
@@ -5456,13 +5613,19 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
tbl_sel[path] = rfk_mcc->data[path].table_idx;
- h2c->common.tbl.cur_ch[path] =
+ common->tbl.cur_ch[path] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
- h2c->common.tbl.cur_band[path] =
+ common->tbl.cur_band[path] =
cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
+
+ if (ver <= 1)
+ continue;
+
+ h2c->cur_bandwidth[path] =
+ cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
}
- h2c->common.phy_idx = cpu_to_le32(phy_idx);
+ common->phy_idx = cpu_to_le32(phy_idx);
if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
@@ -5488,8 +5651,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
goto done;
}
- if (rtw89_is_mlo_1_1(rtwdev))
- h2c->mlo_1_1 = cpu_to_le32(1);
+ if (rtw89_is_mlo_1_1(rtwdev)) {
+ h2c_v1 = &h2c->base_v1;
+ h2c_v1->mlo_1_1 = cpu_to_le32(1);
+ }
done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
@@ -6496,7 +6661,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6547,7 +6712,7 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6624,7 +6789,7 @@ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6679,7 +6844,7 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6780,22 +6945,25 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
}
-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- bool aborted)
+struct rtw89_hw_scan_complete_cb_data {
+ struct rtw89_vif_link *rtwvif_link;
+ bool aborted;
+};
+
+static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_hw_scan_complete_cb_data *cb_data = data;
+ struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
struct cfg80211_scan_info info = {
- .aborted = aborted,
+ .aborted = cb_data->aborted,
};
struct rtw89_vif *rtwvif;
u32 reg;
if (!rtwvif_link)
- return;
-
- rtw89_chanctx_proceed(rtwdev);
+ return -EINVAL;
rtwvif = rtwvif_link->rtwvif;
@@ -6814,6 +6982,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
scan_info->abort = false;
+
+ return 0;
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ bool aborted)
+{
+ struct rtw89_hw_scan_complete_cb_data cb_data = {
+ .rtwvif_link = rtwvif_link,
+ .aborted = aborted,
+ };
+ const struct rtw89_chanctx_cb_parm cb_parm = {
+ .cb = rtw89_hw_scan_complete_cb,
+ .data = &cb_data,
+ .caller = __func__,
+ };
+
+	/* The things here need to be done after setting the channel (for coex)
+	 * and before proceeding with the entity mode (for MCC). So, pass them
+	 * in a callback to keep the right sequence instead of doing them
+	 * directly.
+	 */
+ rtw89_chanctx_proceed(rtwdev, &cb_parm);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
@@ -8164,6 +8355,71 @@ int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
return 0;
}
+static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
+{
+ struct rtw89_h2c_ap_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ap_info *)skb->data;
+
+ h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_AP,
+ H2C_FUNC_AP_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
+{
+ int ret;
+
+ if (en) {
+ if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
+ return 0;
+ } else {
+ if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
+ return 0;
+ }
+
+ ret = rtw89_fw_h2c_ap_info(rtwdev, en);
+ if (ret) {
+ if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return ret;
+
+		/* During recovery, neither the driver nor the stack has full
+		 * error handling, so show a warning, but return 0 with the
+		 * refcount increased normally. This avoids a refcount
+		 * underflow when it is later called with @en == false.
+		 */
+ rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
+ }
+
+ if (en)
+ refcount_set(&rtwdev->refcount_ap_info, 1);
+
+ return 0;
+}
+
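The shape of this helper follows from refcount_t semantics: refcount_inc_not_zero() deliberately refuses the 0 -> 1 transition, so the first enabler falls through, sends the H2C, and publishes the count with refcount_set(..., 1); later enablers only bump the count, and only the last disabler (refcount_dec_and_test()) turns the firmware notification off again. With two AP links, for example: start#1 sends H2C(en), start#2 just increments, stop#1 just decrements, and stop#2 sends H2C(dis).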
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index efa63d444821..2026bc2fd2ac 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -47,6 +47,19 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_PHYCAP_W2_HW_TYPE GENMASK(7, 0)
#define RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM GENMASK(15, 8)
#define RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_W3_BAND_SEL GENMASK(31, 24)
+
+#define RTW89_C2HREG_PHYCAP_P1_W0_B1_RX_NSS GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_P1_W0_B1_BW GENMASK(31, 24)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_TX_NSS GENMASK(7, 0)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_ANT_TX_NUM GENMASK(15, 8)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_PHYCAP_P1_W1_B1_BAND_SEL GENMASK(31, 24)
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM GENMASK(7, 0)
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_256 0x1
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024 0x2
+#define RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096 0x3
+#define RTW89_C2HREG_PHYCAP_P1_W2_B1_QAM GENMASK(15, 8)
#define RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX GENMASK(23, 16)
#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_0 GENMASK(7, 0)
@@ -92,6 +105,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN GENMASK(23, 16)
+#define RTW89_H2CREG_GET_FEATURE_PART_NUM GENMASK(23, 16)
+
#define RTW89_H2CREG_MAX 4
#define RTW89_C2HREG_MAX 4
#define RTW89_C2HREG_HDR_LEN 2
@@ -138,6 +153,7 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
+ RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1 = 0xC,
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
@@ -310,9 +326,12 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
+#define RTW89_MAC_CHINFO_SIZE_BE 32
#define RTW89_SCAN_LIST_GUARD 4
-#define RTW89_SCAN_LIST_LIMIT \
- ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT(size) \
+ ((RTW89_H2C_MAX_SIZE / (size)) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
+#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
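Assuming the existing RTW89_H2C_MAX_SIZE of 2048 bytes, the split works out to 2048 / 28 - 4 = 69 channel entries per H2C on AX and 2048 / 32 - 4 = 60 on BE, whose per-channel element grew to 32 bytes with the new w7.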
#define RTW89_BCN_LOSS_CNT 10
@@ -1780,6 +1799,21 @@ struct rtw89_h2c_lps_ch_info {
__le32 mlo_dbcc_mode_lps;
} __packed;
+struct rtw89_h2c_lps_ml_cmn_info {
+ u8 fmt_id;
+ u8 rsvd0[3];
+ __le32 mlo_dbcc_mode;
+ u8 central_ch[RTW89_PHY_MAX];
+ u8 pri_ch[RTW89_PHY_MAX];
+ u8 bw[RTW89_PHY_MAX];
+ u8 band[RTW89_PHY_MAX];
+ u8 bcn_rate_type[RTW89_PHY_MAX];
+ u8 rsvd1[2];
+ __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
+ u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
+ u8 rsvd2[2];
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2647,6 +2681,7 @@ struct rtw89_h2c_chinfo_elem_be {
__le32 w4;
__le32 w5;
__le32 w6;
+ __le32 w7;
} __packed;
#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
@@ -2678,6 +2713,7 @@ struct rtw89_h2c_chinfo_elem_be {
#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 GENMASK(15, 0)
struct rtw89_h2c_chinfo {
u8 ch_num;
@@ -2687,6 +2723,14 @@ struct rtw89_h2c_chinfo {
struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
} __packed;
+struct rtw89_h2c_chinfo_be {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem_be elem[] __counted_by(ch_num);
+} __packed;
+
#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
@@ -2733,6 +2777,7 @@ struct rtw89_h2c_scanofld_be_opch {
__le32 w1;
__le32 w2;
__le32 w3;
+ __le32 w4;
} __packed;
#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
@@ -2754,6 +2799,7 @@ struct rtw89_h2c_scanofld_be_opch {
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1 GENMASK(15, 0)
struct rtw89_h2c_scanofld_be {
__le32 w0;
@@ -3466,6 +3512,12 @@ struct rtw89_h2c_wow_aoac {
__le32 w0;
} __packed;
+struct rtw89_h2c_ap_info {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_AP_INFO_W0_PWR_INT_EN BIT(0)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3590,6 +3642,7 @@ struct rtw89_c2h_scanofld {
__le32 w5;
__le32 w6;
__le32 w7;
+ __le32 w8;
} __packed;
#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
@@ -3604,6 +3657,8 @@ struct rtw89_c2h_scanofld {
#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
+#define RTW89_C2H_SCANOFLD_W8_PERIOD_V1 GENMASK(15, 0)
+#define RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1 GENMASK(31, 16)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3725,6 +3780,14 @@ struct rtw89_c2h_wow_aoac_report {
#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
+struct rtw89_c2h_pwr_int_notify {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_MACID GENMASK(15, 0)
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS BIT(16)
+
struct rtw89_h2c_tx_duty {
__le32 w0;
__le32 w1;
@@ -4168,6 +4231,10 @@ enum rtw89_mrc_h2c_func {
#define RTW89_MRC_WAIT_COND_REQ_TSF \
RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+/* CLASS 36 - AP */
+#define H2C_CL_AP 0x24
+#define H2C_FUNC_AP_INFO 0x0
+
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
@@ -4175,6 +4242,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_DM 0x2
#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+#define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
@@ -4241,11 +4309,16 @@ struct rtw89_fw_h2c_rfk_pre_info_v0 {
} __packed mlo;
} __packed;
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_v1 {
struct rtw89_fw_h2c_rfk_pre_info_common common;
__le32 mlo_1_1;
} __packed;
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1;
+ __le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH];
+} __packed;
+
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4602,8 +4675,9 @@ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
@@ -4697,6 +4771,7 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_sync_arg *arg);
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 7907b84d204b..a37c6d525d6f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2898,22 +2898,42 @@ static int cmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
}
static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
- struct rtw89_mac_c2h_info *c2h_info)
+ struct rtw89_mac_c2h_info *c2h_info, u8 part_num)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_mac_h2c_info h2c_info = {0};
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_h2c_info h2c_info = {};
+ enum rtw89_mac_c2h_type c2h_type;
+ u8 content_len;
u32 ret;
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ content_len = 0;
+ else
+ content_len = 2;
+
+ switch (part_num) {
+ case 0:
+ c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP;
+ break;
+ case 1:
+ c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
mac->cnv_efuse_state(rtwdev, false);
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE;
- h2c_info.content_len = 0;
+ h2c_info.content_len = content_len;
+ h2c_info.u.hdr.w0 = u32_encode_bits(part_num, RTW89_H2CREG_GET_FEATURE_PART_NUM);
ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info);
if (ret)
goto out;
- if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP)
+ if (c2h_info->id != c2h_type)
ret = -EINVAL;
out:
@@ -2922,20 +2942,20 @@ out:
return ret;
}
-int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
+static int rtw89_mac_setup_phycap_part0(struct rtw89_dev *rtwdev)
{
- struct rtw89_efuse *efuse = &rtwdev->efuse;
- struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_mac_c2h_info c2h_info = {0};
const struct rtw89_c2hreg_phycap *phycap;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 tx_nss;
u8 rx_nss;
u8 tx_ant;
u8 rx_ant;
- u32 ret;
+ int ret;
- ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
+ ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 0);
if (ret)
return ret;
@@ -2979,6 +2999,60 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_setup_phycap_part1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_variant *variant = rtwdev->variant;
+ const struct rtw89_c2hreg_phycap *phycap;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 qam_raw, qam;
+ int ret;
+
+ ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 1);
+ if (ret)
+ return ret;
+
+ phycap = &c2h_info.u.phycap;
+
+ qam_raw = u32_get_bits(phycap->w2, RTW89_C2HREG_PHYCAP_P1_W2_QAM);
+
+ switch (qam_raw) {
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_256:
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024:
+ case RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096:
+ qam = qam_raw;
+ break;
+ default:
+ qam = RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096;
+ break;
+ }
+
+ if ((variant && variant->no_mcs_12_13) ||
+ qam <= RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024)
+ hal->no_mcs_12_13 = true;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "phycap qam=%d/%d no_mcs_12_13=%d\n",
+ qam_raw, qam, hal->no_mcs_12_13);
+
+ return 0;
+}
+
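For example, firmware reporting qam_raw = 0x2 (a 1024-QAM ceiling) falls into the qam <= RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024 branch, so hal->no_mcs_12_13 is set and rtw89_init_eht_cap() (core.c hunk above) zeroes the rx_tx_mcs13_max_nss fields. Only a 4096-QAM report, or an unrecognized value (which defaults to 4096-QAM here), keeps EHT MCS 12-13 advertised, and a chip variant can still force them off regardless.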
+int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ ret = rtw89_mac_setup_phycap_part0(rtwdev);
+ if (ret)
+ return ret;
+
+ if (chip->chip_gen == RTW89_CHIP_AX ||
+ RTW89_CHK_FW_FEATURE(NO_PHYCAP_P1, &rtwdev->fw))
+ return 0;
+
+ return rtw89_mac_setup_phycap_part1(rtwdev);
+}
+
static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
u16 tx_en_u16, u16 mask_u16)
{
@@ -4788,9 +4862,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period, expect_period;
u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u16 actual_period, expect_period;
+ u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
+ u8 ver = U8_MAX;
u16 chan;
int ret;
@@ -4799,6 +4875,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtwvif = rtwvif_link->rtwvif;
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
@@ -4811,21 +4890,28 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- mac_idx, band, chan, reason, status, tx_fail, actual_period);
-
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
- expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+ if (ver == 0) {
+ expect_period =
+ le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ } else {
+ actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1);
+ expect_period =
+ le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
sw_def, fw_def, report_tsf, expect_period);
}
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
switch (reason) {
case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@@ -5364,6 +5450,39 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(wait, cond, &data);
}
+static void
+rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
+{
+ const struct rtw89_c2h_pwr_int_notify *c2h;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u16 macid;
+ bool ps;
+
+ c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data;
+ macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID);
+ ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS);
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (unlikely(!rtwsta_link))
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (ps)
+ set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+ else
+ clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+
+ sta = rtwsta_to_sta(rtwsta);
+ ieee80211_sta_ps_transition(sta, ps);
+
+out:
+ rcu_read_unlock();
+}
+
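This is the firmware half of the AP_LINK_PS contract: once that hw flag is set, mac80211 no longer infers peer powersave from the PM bit on received frames and instead expects ieee80211_sta_ps_transition() calls from the driver. The PWR_INT_NOTIFY C2H delivers those edges per MACID, and the RTW89_REMOTE_STA_IN_PS bit mirrors the latest state for the rest of the driver.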
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5409,6 +5528,12 @@ void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
};
+static
+void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify,
+};
+
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -5463,6 +5588,13 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_WOW:
return true;
+ case RTW89_MAC_C2H_CLASS_AP:
+ switch (func) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY:
+ return true;
+ }
}
}
@@ -5493,14 +5625,18 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
handler = rtw89_mac_c2h_wow_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_AP:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP)
+ handler = rtw89_mac_c2h_ap_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
func);
return;
}
@@ -6674,6 +6810,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
+ .cfg_phy_rpt = NULL,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 18579c020548..8edea96d037f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,20 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_phy_rpt_size {
+ MAC_AX_PHY_RPT_SIZE_0 = 0,
+ MAC_AX_PHY_RPT_SIZE_8 = 1,
+ MAC_AX_PHY_RPT_SIZE_16 = 2,
+ MAC_AX_PHY_RPT_SIZE_24 = 3,
+};
+
+enum rtw89_mac_hdr_cnv_size {
+ MAC_AX_HDR_CNV_SIZE_0 = 0,
+ MAC_AX_HDR_CNV_SIZE_32 = 1,
+ MAC_AX_HDR_CNV_SIZE_64 = 2,
+ MAC_AX_HDR_CNV_SIZE_96 = 3,
+};
+
enum rtw89_mac_wow_fw_status {
WOWLAN_NOT_READY = 0x00,
WOWLAN_SLEEP_READY = 0x01,
@@ -426,6 +440,12 @@ enum rtw89_mac_c2h_wow_func {
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
+enum rtw89_mac_c2h_ap_func {
+ RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY = 0,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_AP,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
@@ -434,6 +454,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
+ RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -961,6 +982,7 @@ struct rtw89_mac_gen_def {
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+ void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -1216,6 +1238,27 @@ int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+
+static inline
+void rtw89_mac_cfg_phy_rpt(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (mac->cfg_phy_rpt)
+ mac->cfg_phy_rpt(rtwdev, mac_idx, enable);
+}
+
+static inline
+void rtw89_mac_cfg_phy_rpt_bands(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_0, enable);
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_1, enable);
+}
static inline
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 619d2d3771d5..b3669e0074df 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -189,10 +189,10 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_core_txq_init(rtwdev, vif->txq);
- if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
+ if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
-
- INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ }
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -202,7 +202,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
- rtwvif_link = rtw89_vif_set_link(rtwvif, 0);
+ rtwvif_link = rtw89_vif_set_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
if (!rtwvif_link) {
ret = -EINVAL;
goto release_port;
@@ -218,7 +218,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
return 0;
unset_link:
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
release_port:
list_del_init(&rtwvif->list);
rtw89_core_release_bit_map(rtwdev->hw_port, port);
@@ -246,17 +246,17 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtwvif_link = rtwvif->links[0];
+ rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
- __func__, 0);
+ __func__, RTW89_VIF_IDLE_LINK_ID);
goto bottom;
}
__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
bottom:
list_del_init(&rtwvif->list);
@@ -509,6 +509,7 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
rtw89_core_txq_init(rtwdev, sta->txq[i]);
skb_queue_head_init(&rtwsta->roc_queue);
+ bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id);
if (!rtwsta_link) {
@@ -775,6 +776,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
+ int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -783,6 +785,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
+ ret = -ENOLINK;
goto out;
}
@@ -804,12 +807,18 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
+ ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
+ if (ret)
+ goto out;
+ }
+
rtw89_queue_chanctx_work(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
- return 0;
+ return ret;
}
static
@@ -830,6 +839,9 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
+
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
@@ -1273,11 +1285,11 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return;
- if (!rtwdev->scanning)
- return;
-
mutex_lock(&rtwdev->mutex);
+ if (!rtwdev->scanning)
+ goto out;
+
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
@@ -1295,10 +1307,15 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct ieee80211_sta *sta = link_sta->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(link_sta->sta);
struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta_link *rtwsta_link;
+
+ rtwsta_link = rtwsta->links[link_sta->link_id];
+ if (unlikely(!rtwsta_link))
+ return;
- rtw89_phy_ra_update_sta(rtwdev, sta, changed);
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@@ -1473,6 +1490,259 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw89_can_work_on_links(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, u16 links)
+{
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ u8 w = hweight16(links);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ w > RTW89_MLD_NON_STA_LINK_NUM)
+ return false;
+
+ return w <= rtwvif->links_inst_valid_num;
+}
+
+static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ return rtw89_can_work_on_links(rtwdev, vif, active_links);
+}
+
+static void __rtw89_ops_clr_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtwvif->links[link_id];
+ if (unlikely(!rtwvif_link))
+ continue;
+
+ __rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
+
+ rtw89_vif_unset_link(rtwvif, link_id);
+ }
+}
+
+static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
+ if (!rtwvif_link)
+ return -EINVAL;
+
+ ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add iface (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ bool removing_links = !old_links || clr_links;
+ struct rtw89_link_conf_container *snap;
+ int ret = 0;
+ int i;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ if (removing_links) {
+ snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+ if (!snap)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(snap->link_conf); i++)
+ snap->link_conf[i] = old[i];
+
+ rcu_assign_pointer(rtwvif->snap_link_confs, snap);
+ }
+
+ /* might depend on @snap; don't change order */
+ rtw89_leave_ips_by_hwflags(rtwdev);
+
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+ if (!old_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ else if (clr_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, clr_links);
+
+ if (removing_links) {
+ /* @snap is required only while links are being removed, and that
+ * work is finished by this point. So, clean up @snap immediately.
+ */
+ rcu_assign_pointer(rtwvif->snap_link_confs, NULL);
+
+ /* The pointers in @old will be freed after this function returns,
+ * so synchronously wait for all readers of @snap to finish.
+ */
+ synchronize_rcu();
+ kfree(snap);
+ }
+
+ if (set_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, set_links);
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, set_links);
+ } else if (!new_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ }
+
+ rtw89_enter_ips_by_hwflags(rtwdev);
+ return ret;
+}
+
+static void __rtw89_ops_clr_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtwsta->links[link_id];
+ if (unlikely(!rtwsta_link))
+ continue;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link);
+
+ rtw89_sta_unset_link(rtwsta, link_id);
+ }
+}
+
+static int __rtw89_ops_set_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+ u8 sec_cam_idx;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
+ if (!rtwsta_link)
+ return -EINVAL;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ rtw89_vif_type_mapping(rtwvif_link, true);
+
+ ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to assoc sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ __rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link);
+
+ for_each_set_bit(sec_cam_idx, rtwsta->pairwise_sec_cam_map,
+ RTW89_MAX_SEC_CAM_NUM) {
+ ret = rtw89_cam_attach_link_sec_cam(rtwdev,
+ rtwvif_link,
+ rtwsta_link,
+ sec_cam_idx);
+ if (ret) {
+ rtw89_err(rtwdev,
+ "%s: failed to apply pairwise key (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ int ret = 0;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ if (clr_links)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, clr_links);
+
+ if (set_links) {
+ ret = __rtw89_ops_set_sta_links(rtwdev, rtwsta, set_links);
+ if (ret)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, set_links);
+ }
+
+ return ret;
+}
+
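
The two change-links callbacks above derive the removed and added link sets purely from the old/new bitmaps before touching any hardware state. A minimal user-space sketch of that delta computation (standalone C; names are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the link-delta logic in rtw89_ops_change_vif_links() and
 * rtw89_ops_change_sta_links(): links leaving the set are cleared first,
 * links entering the set are added afterwards.
 */
static void link_delta(uint16_t old_links, uint16_t new_links)
{
	uint16_t clr_links = old_links & ~new_links; /* links to remove */
	uint16_t set_links = new_links & ~old_links; /* links to add */

	printf("clr=0x%04x set=0x%04x active=%d\n",
	       clr_links, set_links, __builtin_popcount(new_links));
}

int main(void)
{
	link_delta(0x0003, 0x0005); /* drop link 1, add link 2, keep link 0 */
	return 0;
}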
#ifdef CONFIG_PM
static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
@@ -1600,6 +1870,9 @@ const struct ieee80211_ops rtw89_ops = {
.set_sar_specs = rtw89_ops_set_sar_specs,
.link_sta_rc_update = rtw89_ops_sta_rc_update,
.set_tid_config = rtw89_ops_set_tid_config,
+ .can_activate_links = rtw89_ops_can_activate_links,
+ .change_vif_links = rtw89_ops_change_vif_links,
+ .change_sta_links = rtw89_ops_change_sta_links,
#ifdef CONFIG_PM
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f7a396c8a3cd..2dbdeae904ad 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -1988,6 +1988,20 @@ int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg, val;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx);
+ val = enable ? MAC_AX_PHY_RPT_SIZE_8 : MAC_AX_PHY_RPT_SIZE_0;
+ rtw89_write32_mask(rtwdev, reg, B_BE_PHY_RPT_SZ_MASK, val);
+ rtw89_write32_mask(rtwdev, reg, B_BE_HDR_CNV_SZ_MASK, MAC_AX_HDR_CNV_SIZE_0);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DRV_INFO_OPTION, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_DRV_INFO_PHYRPT_EN, enable);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be);
+
static
int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -2583,6 +2597,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
+ .cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index f923bec03d41..c2fe5a898dc7 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -321,10 +321,11 @@ static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
- struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- struct rtw89_pci_rx_info *rx_info;
struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct sk_buff *new = rx_ring->diliver_skb;
+ struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
u32 skb_idx;
@@ -344,9 +345,14 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
}
rx_info = RTW89_PCI_RX_SKB_CB(skb);
- fs = rx_info->fs;
+ fs = info->no_rxbd_fs ? !new : rx_info->fs;
ls = rx_info->ls;
+ if (unlikely(!fs || !ls))
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
+ fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);
+
if (fs) {
if (new) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
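
The no_rxbd_fs handling above infers the first-segment flag from driver state instead of the RX buffer descriptor: if no delivery skb is pending, this buffer must start a new PPDU. A minimal sketch of that inference (standalone C; names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of the FS inference: chips whose RX BD info carries no
 * first-segment bit (no_rxbd_fs) treat "no delivery skb pending yet"
 * as the start of a new PPDU; other chips trust the hardware bit.
 */
static bool infer_fs(bool no_rxbd_fs, bool hw_fs_bit, const void *pending_skb)
{
	return no_rxbd_fs ? !pending_skb : hw_fs_bit;
}

int main(void)
{
	int skb; /* stands in for a pending delivery skb */

	printf("%d\n", infer_fs(true, false, NULL)); /* 1: new PPDU starts */
	printf("%d\n", infer_fs(true, false, &skb)); /* 0: continuation */
	printf("%d\n", infer_fs(false, true, &skb)); /* 1: hardware bit wins */
	return 0;
}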
@@ -2516,7 +2522,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
}
-static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
{
if (pwr_up)
rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
@@ -2825,6 +2831,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ rtw89_pci_power_wake(rtwdev, false);
+
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
@@ -2867,7 +2875,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_pci_power_wake(rtwdev, true);
+ rtw89_pci_power_wake_ax(rtwdev, true);
rtw89_pci_autoload_hang(rtwdev);
rtw89_pci_l12_vmain(rtwdev);
rtw89_pci_gen2_force_ib(rtwdev);
@@ -2912,6 +2920,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
+{
+ rtw89_pci_power_wake_ax(rtwdev, false);
+
+ return 0;
+}
+
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
@@ -4069,6 +4084,15 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
+static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
+}
+
static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
{
int ret = 0;
@@ -4282,6 +4306,7 @@ void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
+ rtw89_pci_cpl_timeout_cfg(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
}
@@ -4325,7 +4350,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
B_AX_RDU_INT},
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
- .mac_pre_deinit = NULL,
+ .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
.clr_idx_all = rtw89_pci_clr_idx_all_ax,
@@ -4343,6 +4368,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.l1ss_set = rtw89_pci_l1ss_set_ax,
.disable_eq = rtw89_pci_disable_eq_ax,
+ .power_wake = rtw89_pci_power_wake_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
@@ -4400,7 +4426,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
sizeof(struct rtw89_pci),
- info->chip);
+ info->chip, info->variant);
if (!rtwdev) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index b68e2d82eea9..4d11c3dd60a5 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -1051,7 +1051,8 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
+/* +40 for rtw89_rxdesc_long_v2; +4 for rtw89_pci_rxbd_info */
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40 + 4)
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
@@ -1290,6 +1291,7 @@ struct rtw89_pci_gen_def {
void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
void (*disable_eq)(struct rtw89_dev *rtwdev);
+ void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
};
#define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \
@@ -1323,6 +1325,7 @@ struct rtw89_pci_info {
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
bool check_rx_tag;
+ bool no_rxbd_fs;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1805,4 +1808,12 @@ static inline void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
gen_def->disable_eq(rtwdev);
}
+static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ gen_def->power_wake(rtwdev, pwr_up);
+}
+
#endif
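
The new power_wake hook follows the same per-generation dispatch pattern as the other rtw89_pci_gen_def callbacks: a function pointer in the generation table, called through a thin inline wrapper. A self-contained sketch of the pattern (hypothetical names, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the generation-table dispatch behind rtw89_pci_power_wake():
 * each hardware generation fills in its own callback and the wrapper
 * simply forwards, so callers stay generation-agnostic.
 */
struct gen_def {
	void (*power_wake)(bool pwr_up);
};

static void power_wake_ax(bool pwr_up)
{
	printf("AX power wake: %s\n", pwr_up ? "up" : "down");
}

static const struct gen_def gen_ax = {
	.power_wake = power_wake_ax,
};

static inline void power_wake(const struct gen_def *gen, bool pwr_up)
{
	gen->power_wake(pwr_up); /* no NULL check: every generation sets one */
}

int main(void)
{
	power_wake(&gen_ax, false); /* mirrors the new deinit path above */
	return 0;
}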
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 34154506f5d4..cd39eebe8186 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -691,5 +691,6 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.l1ss_set = rtw89_pci_l1ss_set_be,
.disable_eq = rtw89_pci_disable_eq_be,
+ .power_wake = _patch_pcie_power_wake_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f24aca663cf0..c7c05f7fda1d 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
@@ -260,19 +261,32 @@ rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
+static const u64
+rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
+ RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
+ struct ieee80211_link_sta *link_sta,
const struct rtw89_chan *chan,
bool *fix_giltf_en, u8 *fix_giltf)
{
struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
- u8 he_gi = mask->control[nl_band].he_gi;
u8 he_ltf = mask->control[nl_band].he_ltf;
+ u8 he_gi = mask->control[nl_band].he_gi;
- if (!rtwsta_link->use_cfg_mask)
+ *fix_giltf_en = true;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta))
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ else
+ *fix_giltf = RTW89_GILTF_2XHE08;
+
+ if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
return;
if (he_ltf == 2 && he_gi == 2) {
@@ -287,12 +301,7 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
*fix_giltf = RTW89_GILTF_1XHE16;
} else if (he_ltf == 0 && he_gi == 0) {
*fix_giltf = RTW89_GILTF_1XHE08;
- } else {
- *fix_giltf_en = false;
- return;
}
-
- *fix_giltf_en = true;
}
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
@@ -324,7 +333,14 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->eht_cap.has_eht) {
mode |= RTW89_RA_MODE_EHT;
ra_mask |= get_eht_ra_mask(link_sta);
- high_rate_masks = rtw89_ra_mask_eht_rates;
+
+ if (rtwdev->hal.no_mcs_12_13)
+ high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
+ else
+ high_rate_masks = rtw89_ra_mask_eht_rates;
+
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
@@ -336,7 +352,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
- rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
@@ -466,11 +483,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct rtw89_sta_link *rtwsta_link,
- u32 changed)
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed)
{
+ struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_ra_info *ra = &rtwsta_link->ra;
struct ieee80211_link_sta *link_sta;
@@ -503,14 +520,11 @@ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
u32 changed)
{
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
- struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
- rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
- rtwvif_link = rtwsta_link->rtwvif_link;
- __rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
- }
+ rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
@@ -1854,6 +1868,228 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
+static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
+{
+ switch (ant_gain_regd) {
+ case RTW89_ANT_GAIN_ETSI:
+ return RTW89_ETSI;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown antenna gain domain: %d\n",
+ ant_gain_regd);
+ return RTW89_REGD_NUM;
+ }
+}
+
+/* antenna gain in units of 0.25 dBm */
+#define RTW89_ANT_GAIN_2GHZ_MIN -8
+#define RTW89_ANT_GAIN_2GHZ_MAX 14
+#define RTW89_ANT_GAIN_5GHZ_MIN -8
+#define RTW89_ANT_GAIN_5GHZ_MAX 20
+#define RTW89_ANT_GAIN_6GHZ_MIN -8
+#define RTW89_ANT_GAIN_6GHZ_MAX 20
+
+#define RTW89_ANT_GAIN_REF_2GHZ 14
+#define RTW89_ANT_GAIN_REF_5GHZ 20
+#define RTW89_ANT_GAIN_REF_6GHZ 20
+
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_acpi_rtag_result res = {};
+ u32 domain;
+ int ret;
+ u8 i, j;
+ u8 regd;
+ u8 val;
+
+ if (!chip->support_ant_gain)
+ return;
+
+ ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "acpi: cannot eval rtag: %d\n", ret);
+ return;
+ }
+
+ if (res.revision != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown rtag revision: %d\n", res.revision);
+ return;
+ }
+
+ domain = get_unaligned_le32(&res.domain);
+
+ for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
+ if (!(domain & BIT(i)))
+ continue;
+
+ regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
+ if (regd >= RTW89_REGD_NUM)
+ continue;
+ ant_gain->regd_enabled |= BIT(regd);
+ }
+
+ for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
+ for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
+ val = res.ant_gain_table[i][j];
+ switch (j) {
+ default:
+ case RTW89_ANT_GAIN_2GHZ_SUBBAND:
+ val = RTW89_ANT_GAIN_REF_2GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_2GHZ_MIN,
+ RTW89_ANT_GAIN_2GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
+ val = RTW89_ANT_GAIN_REF_5GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_5GHZ_MIN,
+ RTW89_ANT_GAIN_5GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
+ val = RTW89_ANT_GAIN_REF_6GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_6GHZ_MIN,
+ RTW89_ANT_GAIN_6GHZ_MAX);
+ }
+ ant_gain->offset[i][j] = val;
+ }
+ }
+}
+
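
rtw89_phy_ant_gain_init() above turns each ACPI RTAG entry into an offset below the per-band reference, clamping first to the allowed range, all in 0.25 dB steps. A worked sketch of that clamp-and-subtract (standalone C; constants copied from the 2 GHz case above):

#include <stdio.h>

/* Sketch of the RTAG conversion: offset = REF - clamp(val, MIN, MAX),
 * with every value in 0.25 dB units (so 14 means 3.5 dB).
 */
#define ANT_GAIN_2GHZ_MIN (-8)
#define ANT_GAIN_2GHZ_MAX 14
#define ANT_GAIN_REF_2GHZ 14

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int vals[] = { -12, 0, 14, 20 }; /* raw ACPI table entries */

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int ofs = ANT_GAIN_REF_2GHZ -
			  clamp_int(vals[i], ANT_GAIN_2GHZ_MIN, ANT_GAIN_2GHZ_MAX);

		printf("raw %3d -> offset %2d (%.2f dB)\n",
		       vals[i], ofs, ofs * 0.25);
	}
	return 0;
}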
+static
+enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center freq: %u to antenna gain subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_ANT_GAIN_2GHZ_SUBBAND;
+ case 5180 ... 5240:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
+ case 5250 ... 5320:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
+ case 5500 ... 5720:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
+ case 5745 ... 5885:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
+ case 5955 ... 6155:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
+ * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so it is described directly by
+ * struct rtw89_6ghz_span.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
+ }
+}
+
+static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ enum rtw89_ant_gain_subband subband_l, subband_h;
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
+
+ if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
+ subband_l = span->ant_gain_subband_low;
+ subband_h = span->ant_gain_subband_high;
+ } else {
+ subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center_freq %u: antenna gain subband {%u, %u}\n",
+ center_freq, subband_l, subband_h);
+
+ return min(ant_gain->offset[path][subband_l],
+ ant_gain->offset[path][subband_h]);
+}
+
+static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain)
+ return 0;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
+
+ return max(offset_patha, offset_pathb);
+}
+
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
+}
+EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
+
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
+ seq_puts(m, "no DAG is applied\n");
+ return;
+ }
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
+ seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
+}
+
static const u8 rtw89_rs_idx_num_ax[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
@@ -1917,20 +2153,6 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
-static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
-}
-
-static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
-}
-
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
const u8 tssi_deviation_point = 0;
@@ -2027,7 +2249,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt = 0, sar;
+ s8 lmt = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2059,7 +2281,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -2286,7 +2509,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt_ru = 0, sar;
+ s8 lmt_ru = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2318,7 +2541,8 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -3228,10 +3452,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
(int)(len - sizeof(report->hdr)), &report->state);
}
+static void
+rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
+ [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@@ -3285,11 +3515,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
fallthrough;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
func);
return;
}
@@ -4058,7 +4288,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
if (!force && cfo->crystal_cap == crystal_cap)
return;
- crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
@@ -4181,7 +4410,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
s32 curr_cfo)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- s8 crystal_cap = cfo->crystal_cap;
+ int crystal_cap = cfo->crystal_cap;
s32 cfo_abs = abs(curr_cfo);
int sign;
@@ -4202,15 +4431,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
}
sign = curr_cfo > 0 ? 1 : -1;
if (cfo_abs > CFO_TRK_STOP_TH_4)
- crystal_cap += 7 * sign;
+ crystal_cap += 3 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_3)
- crystal_cap += 5 * sign;
- else if (cfo_abs > CFO_TRK_STOP_TH_2)
crystal_cap += 3 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ crystal_cap += 1 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_1)
crystal_cap += 1 * sign;
else
return;
+
+ crystal_cap = clamp(crystal_cap, 0, 127);
rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
rtw89_debug(rtwdev, RTW89_DBG_CFO,
"X_cap{Curr,Default}={0x%x,0x%x}\n",
@@ -6310,6 +6541,12 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_cfg_txrx_path(rtwdev);
}
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_physts_parsing_init(rtwdev);
+}
+
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index c683f4d7d29b..08b635c93ac3 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -51,13 +51,17 @@
#define RA_MASK_EHT_2SS_RATES GENMASK_ULL(43, 28)
#define RA_MASK_EHT_3SS_RATES GENMASK_ULL(59, 44)
#define RA_MASK_EHT_4SS_RATES GENMASK_ULL(62, 60)
+#define RA_MASK_EHT_1SS_MCS0_11 GENMASK_ULL(23, 12)
+#define RA_MASK_EHT_2SS_MCS0_11 GENMASK_ULL(39, 28)
+#define RA_MASK_EHT_3SS_MCS0_11 GENMASK_ULL(55, 44)
+#define RA_MASK_EHT_4SS_MCS0_11 GENMASK_ULL(62, 60)
#define RA_MASK_EHT_RATES GENMASK_ULL(62, 12)
#define CFO_TRK_ENABLE_TH (2 << 2)
#define CFO_TRK_STOP_TH_4 (30 << 2)
#define CFO_TRK_STOP_TH_3 (20 << 2)
#define CFO_TRK_STOP_TH_2 (10 << 2)
-#define CFO_TRK_STOP_TH_1 (00 << 2)
+#define CFO_TRK_STOP_TH_1 (03 << 2)
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
@@ -151,6 +155,7 @@ enum rtw89_phy_c2h_rfk_log_func {
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
+ RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@@ -813,6 +818,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@@ -826,6 +832,11 @@ s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
const struct rtw89_rate_desc *desc);
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc);
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@@ -896,10 +907,34 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
+static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
+}
+
+static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
+}
+
+static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
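
These helpers convert between the MAC, RF, and BB fixed-point power units by shifting with the per-chip factors; the chip tables in this patch set txpwr_factor_bb=3, txpwr_factor_rf=2, txpwr_factor_mac=1, i.e. 0.125, 0.25, and 0.5 dB steps. A worked sketch (standalone C, those factors hard-coded):

#include <stdio.h>

/* Sketch of the fixed-point power conversions: bb=3 (0.125 dB units),
 * rf=2 (0.25 dB units), mac=1 (0.5 dB units), matching the chip tables
 * in this patch.
 */
#define FACTOR_BB 3
#define FACTOR_RF 2
#define FACTOR_MAC 1

static int rf_to_bb(int txpwr_rf)
{
	return txpwr_rf << (FACTOR_BB - FACTOR_RF); /* 0.25 dB -> 0.125 dB */
}

static int rf_to_mac(int txpwr_rf)
{
	return txpwr_rf >> (FACTOR_RF - FACTOR_MAC); /* 0.25 dB -> 0.5 dB */
}

int main(void)
{
	int rf = 6; /* 1.5 dB expressed in 0.25 dB units */

	printf("rf=%d -> bb=%d mac=%d (all 1.5 dB)\n",
	       rf, rf_to_bb(rf), rf_to_mac(rf));
	return 0;
}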
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index c1c12abc2ea9..96ea04d90cd3 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -8,6 +8,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -62,11 +63,8 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
{
- if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
if (!rtwdev->ps_mode)
return;
@@ -85,8 +83,8 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_lps_parm lps_param = {
.macid = rtwvif_link->mac_id,
@@ -96,7 +94,6 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
- rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
@@ -121,17 +118,32 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode)
{
+ struct rtw89_vif_link *rtwvif_link;
+ bool can_ps_mode = true;
+ unsigned int link_id;
+
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif_link);
- if (ps_mode)
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ __rtw89_enter_lps_link(rtwdev, rtwvif_link);
+
+ if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ can_ps_mode = false;
+ }
+
+ if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
+ else
+ rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif);
+
+ if (ps_mode && can_ps_mode)
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev,
@@ -157,6 +169,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
+ rtw89_phy_dm_reinit(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_leave_lps_vif(rtwdev, rtwvif_link);
@@ -282,12 +296,6 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
enum rtw89_entity_mode mode;
int count = 0;
- /* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode()
- * to take MLO cases into account before doing the following.
- */
- if (rtwdev->support_mlo)
- goto disable_lps;
-
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
goto disable_lps;
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index cdd712966b09..2b88f254a32d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,11 +5,11 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 18ec7c0252fb..10d0efa7a58e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7447,6 +7447,10 @@
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_BE_DRV_INFO_OPTION 0x11470
+#define R_BE_DRV_INFO_OPTION_C1 0x15470
+#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
+
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index cad5189708e7..80b2f74589eb 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -17,7 +17,7 @@ static const struct rtw89_regd rtw89_ww_regd =
static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
@@ -35,7 +35,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -72,7 +72,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -82,13 +82,13 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -101,7 +101,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
@@ -110,12 +110,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
- COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND),
+ COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND),
COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
@@ -158,9 +158,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -176,12 +176,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -194,19 +194,19 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -216,15 +216,15 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
@@ -237,9 +237,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -247,13 +247,16 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA),
+ COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 68c67a763f4d..c56f70267882 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2298,7 +2298,8 @@ static void rtw8851b_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
@@ -2391,6 +2392,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
@@ -2464,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.nctl_post_table = &rtw8851b_nctl_post_defs_tbl,
.dflt_parms = &rtw89_8851b_dflt_parms,
.rfe_parms_conf = rtw89_8851b_rfe_parms_conf,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2479,6 +2482,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 364e36354225..f72b3ac6f149 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -2199,7 +2199,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (dgain > 0x5fc || dgain < 0x556) {
_dpk_one_shot(rtwdev, phy, path, D_SYNC);
- dgain = _dpk_dgain_read(rtwdev);
+ _dpk_dgain_read(rtwdev);
}
if (agc_cnt == 0) {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 651cbce1dd7e..5810af825242 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -66,6 +67,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
static const struct rtw89_driver_info rtw89_8851be_info = {
.chip = &rtw8851b_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8851b_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index e647759ebd69..9bd2842c27d5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2068,7 +2068,9 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2116,6 +2118,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
@@ -2181,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852a_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
@@ -2196,6 +2200,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 701187d69e14..2037713e3952 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -64,6 +65,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
static const struct rtw89_driver_info rtw89_8852ae_info = {
.chip = &rtw8852a_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852a_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 49a319128316..dfb2bf61b0b8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -745,6 +745,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -819,6 +820,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852b_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -834,6 +836,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index f4aa4437fb75..0e094ce9c9b0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1206,24 +1206,25 @@ void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_cha
}
static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
const u16 tssi_16dbm_cw = 0x12c;
const u8 base_cw_0db = 0x27;
- const s8 ofst_int = 0;
s16 pwr_s10_3;
s16 rf_pwr_cw;
u16 bb_pwr_cw;
u32 pwr_cw;
u32 tssi_ofst_cw;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0));
rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3));
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
@@ -1234,10 +1235,11 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800};
const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ u16 ofst_dec[RF_PATH_NUM_8852BX];
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
const s16 ref_ofdm = 0;
@@ -1250,19 +1252,20 @@ static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
B_AX_PWR_REF, 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
@@ -1333,6 +1336,16 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
+static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -1342,12 +1355,13 @@ static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852bx_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852bx_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static
@@ -1936,7 +1950,9 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
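
For reference, the codeword math in rtw8852bx_bb_cal_txpwr_ref() above works in s10.3 fixed point: the scaled reference is offset by the 0 dB base codeword and then split into a 6-bit RF codeword (clamped to 15..63) and a 3-bit BB codeword. The sketch below is an illustrative user-space model only, not driver code; it mirrors the arithmetic of the patch, including how the signed antenna-gain offset is turned into a per-path decrease (positive offsets decrease path B, negative offsets decrease path A).

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the s10.3 txpwr-reference math; constants
 * mirror the driver, the function itself is not part of the patch.
 */
static void model_cal_txpwr_ref(int16_t ref, uint16_t ofst_dec)
{
	const uint16_t tssi_16dbm_cw = 0x12c;
	const uint8_t base_cw_0db = 0x27;
	int16_t pwr_s10_3, rf_pwr_cw;
	uint16_t bb_pwr_cw;
	uint32_t tssi_ofst_cw;

	pwr_s10_3 = (ref << 1) + (int16_t)(base_cw_0db << 3) - ofst_dec;
	bb_pwr_cw = pwr_s10_3 & 0x7;		/* GENMASK(2, 0) */
	rf_pwr_cw = (pwr_s10_3 >> 3) & 0x3f;	/* GENMASK(8, 3) */
	rf_pwr_cw = rf_pwr_cw < 15 ? 15 : rf_pwr_cw > 63 ? 63 : rf_pwr_cw;
	tssi_ofst_cw = (uint32_t)((int16_t)tssi_16dbm_cw + (ref << 1) -
				  (16 << 3)) - ofst_dec;

	printf("tssi_ofst_cw=%u pwr_cw=0x%x\n",
	       tssi_ofst_cw, ((uint16_t)rf_pwr_cw << 3) | bb_pwr_cw);
}

int main(void)
{
	int16_t pwr_ofst = -4;	/* example antenna-gain offset */

	/* same split as the ofst_dec[] fill in the patch */
	model_cal_txpwr_ref(0, pwr_ofst > 0 ? 0 : -pwr_ofst);	/* path A */
	model_cal_txpwr_ref(0, pwr_ofst > 0 ? pwr_ofst : 0);	/* path B */
	return 0;
}
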
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index a13ea1cce4a7..abdeafc14b0b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -27,6 +27,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -66,6 +67,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
static const struct rtw89_driver_info rtw89_8852be_info = {
.chip = &rtw8852b_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852b_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 876725133228..bde3e1fb7ca6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -679,6 +679,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -752,6 +753,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -767,6 +769,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
index e4f40c2e287d..b69fa17beb33 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -9,6 +9,12 @@
#include "reg.h"
#include "rtw8852bt.h"
+static const struct rtw89_pci_ssid_quirk rtw8852bt_pci_ssid_quirks[] = {
+ {RTW89_PCI_SSID(PCI_VENDOR_ID_REALTEK, 0xB520, 0x103C, 0x88E9, HP),
+ .bitmap = BIT(RTW89_QUIRK_THERMAL_PROT_110C)},
+ {},
+};
+
static const struct rtw89_pci_info rtw8852bt_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
@@ -27,6 +33,7 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -61,11 +68,12 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.disable_intr = rtw89_pci_disable_intr,
.recognize_intrs = rtw89_pci_recognize_intrs,
- .ssid_quirks = NULL,
+ .ssid_quirks = rtw8852bt_pci_ssid_quirks,
};
static const struct rtw89_driver_info rtw89_8852bte_info = {
.chip = &rtw8852bt_chip_info,
+ .variant = NULL,
.quirks = NULL,
.bus = {
.pci = &rtw8852bt_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index cde34f8e1e67..bc84b15e7826 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1882,9 +1882,9 @@ static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
}
static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
- s8 ofst_int = 0;
u8 base_cw_0db = 0x27;
u16 tssi_16dbm_cw = 0x12c;
s16 pwr_s10_3 = 0;
@@ -1893,13 +1893,14 @@ static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
u32 pwr_cw = 0;
u32 tssi_ofst_cw = 0;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
@@ -1943,9 +1944,10 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800};
+ u16 ofst_dec[RF_PATH_NUM_8852C];
const u32 mask = 0x7FFFFFF;
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
@@ -1959,19 +1961,20 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
GENMASK(27, 10), 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
@@ -2052,6 +2055,16 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
B_P1_DAC_COMP_POST_DPD_EN);
}
+static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2061,12 +2074,13 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static void
@@ -2793,7 +2807,10 @@ static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2893,6 +2910,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
@@ -2959,6 +2977,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dflt_parms = &rtw89_8852c_dflt_parms,
.rfe_parms_conf = NULL,
.chanctx_listener = &rtw8852c_chanctx_listener,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2976,6 +2995,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.hw_sec_hdr = true,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index bd17c0a1c684..b92e2ce4f4ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -1769,10 +1769,10 @@ u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
target_ch = chan->channel - 33;
}
} else if (chan->band_type == RTW89_BAND_6G) {
- if (chan->channel >= 1 && chan->channel <= 125)
- target_ch = chan->channel + 32;
- else
+ if (chan->channel > 125)
target_ch = chan->channel - 32;
+ else
+ target_ch = chan->channel + 32;
} else {
target_ch = chan->channel;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 1a46878be96b..5d864fd5974e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -36,6 +36,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
+ .no_rxbd_fs = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@@ -95,6 +96,7 @@ static const struct dmi_system_id rtw8852c_pci_quirks[] = {
static const struct rtw89_driver_info rtw89_8852ce_info = {
.chip = &rtw8852c_chip_info,
+ .variant = NULL,
.quirks = rtw8852c_pci_quirks,
.bus = {
.pci = &rtw8852c_pci_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 9a4db04a1967..11d66bfceb15 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -14,7 +14,7 @@
#include "rtw8922a_rfk.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 2
+#define RTW8922A_FW_FORMAT_MAX 3
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
@@ -2565,8 +2565,10 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal =
- RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2607,6 +2609,16 @@ static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
}
+static void rtw8922a_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (desc_info->rssi <= 0x1 || (desc_info->rssi >> 2) > MAX_RSSI)
+ return;
+
+ rx_status->signal = (desc_info->rssi >> 2) - MAX_RSSI;
+}
+
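
The new phy_rpt_to_rssi callback lets the BE-generation RX path derive the signal level straight from the 12-bit BE_RXD_PHY_RSSI descriptor field (added in txrx.h later in this section), presumably carried in quarter units given the >> 2; the `if (!status->signal)` guards added to the query_ppdu handlers then preserve that per-packet value instead of overwriting it with the PPDU maximum. A minimal model, with MAX_RSSI taken as 110 purely for illustration:

/* Illustrative only; EX_MAX_RSSI stands in for the driver's MAX_RSSI. */
#define EX_MAX_RSSI 110

static int ex_phy_rpt_to_signal(unsigned int rssi_raw, int cur_signal)
{
	/* mirror the validity check in rtw8922a_phy_rpt_to_rssi() */
	if (rssi_raw <= 0x1 || (rssi_raw >> 2) > EX_MAX_RSSI)
		return cur_signal;	/* report unusable: keep old value */

	return (int)(rssi_raw >> 2) - EX_MAX_RSSI;	/* 0x140 -> -30 dBm */
}
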
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2665,6 +2677,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
.convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = rtw8922a_phy_rpt_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
@@ -2729,6 +2742,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2746,6 +2760,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = true,
@@ -2823,6 +2838,12 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
};
EXPORT_SYMBOL(rtw8922a_chip_info);
+const struct rtw89_chip_variant rtw8922ae_vs_variant = {
+ .no_mcs_12_13 = true,
+ .fw_min_ver_code = RTW89_FW_VER_CODE(0, 35, 54, 0),
+};
+EXPORT_SYMBOL(rtw8922ae_vs_variant);
+
MODULE_FIRMWARE(RTW8922A_MODULE_FIRMWARE);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11be wireless 8922A driver");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.h b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
index 597317ab6af7..a29cfa5b4291 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
@@ -69,5 +69,6 @@ struct rtw8922a_efuse {
} __packed;
extern const struct rtw89_chip_info rtw8922a_chip_info;
+extern const struct rtw89_chip_variant rtw8922ae_vs_variant;
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index edfb1f220af0..0ea8d5281c10 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -33,6 +33,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
.check_rx_tag = true,
+ .no_rxbd_fs = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -70,6 +71,16 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
static const struct rtw89_driver_info rtw89_8922ae_info = {
.chip = &rtw8922a_chip_info,
+ .variant = NULL,
+ .quirks = NULL,
+ .bus = {
+ .pci = &rtw8922a_pci_info,
+ },
+};
+
+static const struct rtw89_driver_info rtw89_8922ae_vs_info = {
+ .chip = &rtw8922a_chip_info,
+ .variant = &rtw8922ae_vs_variant,
.quirks = NULL,
.bus = {
.pci = &rtw8922a_pci_info,
@@ -81,6 +92,10 @@ static const struct pci_device_id rtw89_8922ae_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8922),
.driver_data = (kernel_ulong_t)&rtw89_8922ae_info,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x892B),
+ .driver_data = (kernel_ulong_t)&rtw89_8922ae_vs_info,
+ },
{},
};
MODULE_DEVICE_TABLE(pci, rtw89_8922ae_id_table);
@@ -95,5 +110,5 @@ static struct pci_driver rtw89_8922ae_driver = {
module_pci_driver(rtw89_8922ae_driver);
MODULE_AUTHOR("Realtek Corporation");
-MODULE_DESCRIPTION("Realtek 802.11be wireless 8922AE driver");
+MODULE_DESCRIPTION("Realtek 802.11be wireless 8922AE/8922AE-VS driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index bcc287771b2a..871f45a6508c 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -42,7 +42,7 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
/* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H
* and RTW89_SAR_6GHZ_SUBBAND_8, so directly describe it with
- * struct rtw89_sar_span in the following.
+ * struct rtw89_6ghz_span.
*/
case 6895 ... 7115:
@@ -50,63 +50,18 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
}
}
-struct rtw89_sar_span {
- enum rtw89_sar_subband subband_low;
- enum rtw89_sar_subband subband_high;
-};
-
-#define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high)
-
-#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
-#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
- ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)
-
-#define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \
- [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \
- .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
- .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
- }
-
-/* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR
- * subbands. In the following, we describe each of them with rtw89_sar_span.
- */
-static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
- RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
-};
-
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
u32 center_freq, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- const struct rtw89_sar_span *span = NULL;
enum rtw89_sar_subband subband_l, subband_h;
- int idx;
-
- if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) {
- idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq);
- /* To decrease size of rtw89_sar_overlapping_6ghz[],
- * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs
- * to make first span as index 0 of the table. So, if center
- * frequency is less than the first one, it will get negative.
- */
- if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz))
- span = &rtw89_sar_overlapping_6ghz[idx];
- }
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
if (span && RTW89_SAR_SPAN_VALID(span)) {
- subband_l = span->subband_low;
- subband_h = span->subband_high;
+ subband_l = span->sar_subband_low;
+ subband_h = span->sar_subband_high;
} else {
subband_l = rtw89_sar_get_subband(rtwdev, center_freq);
subband_h = subband_l;
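
The removed table and macros are consolidated into a shared rtw89_get_6ghz_span() helper, so the SAR code and other regulatory users can draw on one span table. The old index math simply mapped a 6 GHz center frequency onto a compact array whose first overlapping span sits at 6145 MHz, with indices advancing every 10 MHz. A sketch of that mapping, kept here for reference only:

/* Reference model of the removed RTW89_SAR_6GHZ_SPAN_IDX() math. */
#define SPAN_HEAD 6145

static int span_idx(int center_freq)
{
	/* e.g. 6505 -> (6505 - 6145) / 5 / 2 = 36 */
	return ((center_freq - SPAN_HEAD) / 5) / 2;
}
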
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 7b203bb7f151..26a944d3b672 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -365,6 +365,7 @@ static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
ser_reset_vif(rtwdev, rtwvif);
rtwdev->total_sta_assoc = 0;
+ refcount_set(&rtwdev->refcount_ap_info, 0);
}
/* hal function */
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index b2e47829983f..70fe7cebc9d5 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -560,6 +560,9 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
+/* BE RXD - PHY RPT dword0 */
+#define BE_RXD_PHY_RSSI GENMASK(11, 0)
+
struct rtw89_phy_sts_ie00 {
__le32 w0;
__le32 w1;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 3e81fd974ec1..01754d031bb4 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -620,7 +620,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
* need to unlock mutex
*/
mutex_unlock(&rtwdev->mutex);
- key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ if (ieee80211_vif_is_mld(wow_vif))
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
+ else
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
mutex_lock(&rtwdev->mutex);
kfree(rekey_conf);
@@ -691,9 +694,7 @@ static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
-
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
@@ -701,7 +702,7 @@ static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
if (rtw89_wow_mgd_linked(rtwdev))
- rtw89_enter_lps(rtwdev, rtwvif_link, false);
+ rtw89_enter_lps(rtwdev, rtwvif_link->rtwvif, false);
else if (rtw89_wow_no_link(rtwdev))
rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, true);
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 986b07bfa0ee..8fb58a5d911c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2612,24 +2612,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
ret = -EBUSY;
- goto out;
+ goto out_unlock;
}
ret = wl12xx_init_vif_data(wl, vif);
if (ret < 0)
- goto out;
+ goto out_unlock;
wlvif->wl = wl;
role_type = wl12xx_get_role_type(wl, wlvif);
if (role_type == WL12XX_INVALID_ROLE_TYPE) {
ret = -EINVAL;
- goto out;
+ goto out_unlock;
}
ret = wlcore_allocate_hw_queue_base(wl, wlvif);
if (ret < 0)
- goto out;
+ goto out_unlock;
/*
* TODO: after the nvs issue will be solved, move this block
@@ -2644,7 +2644,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
ret = wl12xx_init_fw(wl);
if (ret < 0)
- goto out;
+ goto out_unlock;
}
/*
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index c07acfcbbd9c..7c57d4c8744a 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -88,7 +88,7 @@ static ssize_t hw_pg_ver_show(struct device *dev,
static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -121,7 +121,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
static const struct bin_attribute fwlog_attr = {
.attr = { .name = "fwlog", .mode = 0400 },
- .read = wl1271_sysfs_read_fwlog,
+ .read_new = wl1271_sysfs_read_fwlog,
};
int wlcore_sysfs_init(struct wl1271 *wl)
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 3f338b8096c7..fc8ea58bc165 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -45,7 +45,7 @@ enum wl1271_tm_attrs {
};
#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
-static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
+static const struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
[WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
[WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
[WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 347a15544afe..fb187a9e984e 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -5048,6 +5048,45 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ,
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /* Since B1 and B2 are set in the supported
+ * channel width set field of the HE PHY
+ * capabilities information field, and 320MHz
+ * in 6GHz is supported, include all the
+ * following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#endif
};
@@ -5509,10 +5548,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
- hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->link_data[i].beacon_timer.function =
- mac80211_hwsim_beacon;
+ hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
data->link_data[i].link_id = i;
}
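
Here and in the iosm and ntb_pingpong hunks below, the two-step hrtimer_init() plus open-coded .function assignment is replaced by hrtimer_setup(), which wires the callback, clock base and mode together in one call. A minimal sketch, with my_ctx and my_timer_cb as hypothetical names:

#include <linux/hrtimer.h>

struct my_ctx {			/* hypothetical container */
	struct hrtimer timer;
};

static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
{
	/* ... do work, optionally forward and restart the timer ... */
	return HRTIMER_NORESTART;
}

static void my_init(struct my_ctx *ctx)
{
	hrtimer_setup(&ctx->timer, my_timer_cb, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}
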
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 829515a601b3..530a3ea47a1a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -1381,24 +1381,20 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
/* The phase is set to power off. */
ipc_imem->phase = IPC_P_OFF;
- hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+ hrtimer_setup(&ipc_imem->startup_timer, ipc_imem_startup_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+ hrtimer_setup(&ipc_imem->tdupdate_timer, ipc_imem_td_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+ hrtimer_setup(&ipc_imem->fast_update_timer, ipc_imem_fast_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_setup(&ipc_imem->td_alloc_timer, ipc_imem_td_alloc_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+ hrtimer_setup(&ipc_imem->adb_timer, ipc_imem_adb_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 04517bd3325a..a066977af0be 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
+#include <linux/suspend.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2");
/* WWAN GUID */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+static bool pci_registered;
static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
@@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = {
},
.id_table = iosm_ipc_ids,
};
-module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
@@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
+
+static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
+ if (pci_registered) {
+ pci_unregister_driver(&iosm_ipc_driver);
+ pci_registered = false;
+ }
+ } else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
+ if (!pci_registered) {
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret) {
+ pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
+ ret);
+ } else {
+ pci_registered = true;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = pm_notify,
+};
+
+static int __init iosm_ipc_driver_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret)
+ return ret;
+
+ pci_registered = true;
+
+ register_pm_notifier(&pm_notifier);
+
+ return 0;
+}
+module_init(iosm_ipc_driver_init);
+
+static void __exit iosm_ipc_driver_exit(void)
+{
+ unregister_pm_notifier(&pm_notifier);
+
+ if (pci_registered)
+ pci_unregister_driver(&iosm_ipc_driver);
+}
+module_exit(iosm_ipc_driver_exit);
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index d5a9360323d2..8755c5e6a65b 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -220,7 +220,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s
if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
(mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
!(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
- net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n",
mbim->rx_seq, le16_to_cpu(nth16->wSequence));
}
mbim->rx_seq = le16_to_cpu(nth16->wSequence);
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 8381b0dc7acb..02f2ec7cf4ce 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -43,6 +43,8 @@
#include "t7xx_state_monitor.h"
#include "t7xx_port_proxy.h"
+#define DRIVER_NAME "mtk_t7xx"
+
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
@@ -833,6 +835,7 @@ static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct t7xx_pci_dev *t7xx_dev;
+ void __iomem *iomem;
int ret;
t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
@@ -848,12 +851,21 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
- pci_name(pdev));
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_IREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret);
+ return -ENOMEM;
+ }
+ IREG_BASE(t7xx_dev) = iomem;
+
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_EREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
if (ret) {
- dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
+ dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret);
return -ENOMEM;
}
+ t7xx_dev->base_addr.pcie_ext_reg_base = iomem;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -867,9 +879,6 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
- t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
-
ret = t7xx_pci_pm_init(t7xx_dev);
if (ret)
return ret;
@@ -937,7 +946,7 @@ static const struct pci_device_id t7xx_pci_table[] = {
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
static struct pci_driver t7xx_pci_driver = {
- .name = "mtk_t7xx",
+ .name = DRIVER_NAME,
.id_table = t7xx_pci_table,
.probe = t7xx_pci_probe,
.remove = t7xx_pci_remove,
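
The probe conversion above moves from the bitmask-based pcim_iomap_regions()/pcim_iomap_table() pair to pcim_iomap_region(), which requests and maps a single BAR and hands back the mapping (or an ERR_PTR) directly, so each BAR failure can be reported separately. The shape of the new pattern, with the BAR number and name string illustrative:

#include <linux/pci.h>

static int my_probe_map(struct pci_dev *pdev, void __iomem **base)
{
	void __iomem *iomem;

	iomem = pcim_iomap_region(pdev, 0, "my_driver");
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);	/* request or map failed */

	*base = iomem;	/* valid until unbind; devres unmaps it */
	return 0;
}
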
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 956ae92f7573..2037cd6d4f4f 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -5,11 +5,16 @@
* Copyright (C) 2015, Marvell International Ltd.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_gpio.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
+
#include "nfcmrvl.h"
static unsigned int hci_muxed;
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 1ec651e31064..3425b68f0ddc 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -116,18 +116,16 @@ static void st21nfca_tx_work(struct work_struct *work)
struct nfc_dev *dev;
struct sk_buff *skb;
- if (info) {
- dev = info->hdev->ndev;
- skb = info->dep_info.tx_pending;
+ dev = info->hdev->ndev;
+ skb = info->dep_info.tx_pending;
- device_lock(&dev->dev);
+ device_lock(&dev->dev);
- nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
- ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
- info->async_cb, info);
- device_unlock(&dev->dev);
- kfree_skb(skb);
- }
+ nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
+ info->async_cb, info);
+ device_unlock(&dev->dev);
+ kfree_skb(skb);
}
static void st21nfca_im_send_pdu(struct st21nfca_hci_info *info,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 02c3d11a19c4..6d7861383806 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -11,7 +11,6 @@
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 6295e55ef85e..368f6d894bba 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(&ntb->pdev->dev);
- desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
- addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
- msi_unlock_descs(&ntb->pdev->dev);
+ scoped_guard(msi_descs_lock, &ntb->pdev->dev) {
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
+ addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ }
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -307,17 +307,11 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- goto unlock;
+ return ret;
}
-
- ret = entry->irq;
- goto unlock;
+ return entry->irq;
}
- ret = -ENODEV;
-
-unlock:
- msi_unlock_descs(dev);
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
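
The ntb/msi.c rework relies on the <linux/cleanup.h> guards: scoped_guard() holds the MSI descriptor lock for one block, while guard() holds it until the enclosing function returns, which is what lets the early return statements replace the goto-unlock dance above. A minimal sketch using the same guard class as the patch; my_condition() is a hypothetical helper:

#include <linux/cleanup.h>
#include <linux/msi.h>

bool my_condition(struct device *dev);	/* hypothetical check */

static int my_find(struct device *dev)
{
	guard(msi_descs_lock)(dev);	/* dropped automatically on return */

	if (!my_condition(dev))
		return -ENODEV;		/* early return needs no unlock */
	return 0;
}
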
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 8aeca7914050..1c1c74f4ff2d 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -284,8 +284,7 @@ static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
pp->ntb = ntb;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->lock);
- hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pp->timer.function = pp_timer_func;
+ hrtimer_setup(&pp->timer, pp_timer_func, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return pp;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2237715e42eb..0ccf4a9e523a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1212,7 +1212,7 @@ enum nd_ioctl_mode {
DIMM_IOCTL,
};
-static int match_dimm(struct device *dev, void *data)
+static int match_dimm(struct device *dev, const void *data)
{
long id = (long) data;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 030dbde6b088..9e84ab411564 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -67,13 +67,6 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
return claimed;
}
-static int namespace_match(struct device *dev, void *data)
-{
- char *name = data;
-
- return strcmp(name, dev_name(dev)) == 0;
-}
-
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -168,7 +161,7 @@ ssize_t nd_namespace_store(struct device *dev,
goto out;
}
- found = device_find_child(dev->parent, name, namespace_match);
+ found = device_find_child_by_name(dev->parent, name);
if (!found) {
dev_dbg(dev, "'%s' not found under %s\n", name,
dev_name(dev->parent));
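
nd_namespace_store() now uses device_find_child_by_name() instead of the open-coded strcmp matcher; like device_find_child(), the helper returns the matching child with its reference count raised, so the caller's existing put_device() pairing still applies. A sketch, with the child name illustrative:

#include <linux/device.h>

static int my_lookup(struct device *parent)
{
	struct device *child;

	child = device_find_child_by_name(parent, "namespace0.0");
	if (!child)
		return -ENOENT;
	/* ... use child ... */
	put_device(child);	/* drop the reference taken by the lookup */
	return 0;
}
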
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 4319ab50c10d..8971aca41e63 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
}
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
apple_nvme_complete_batch))
apple_nvme_complete_rq(req);
}
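
The batch-completion change above (mirrored in nvme/host/pci.c at the end of this section) passes an explicit error boolean instead of the raw NVMe status word, on the understanding that blk_mq_add_to_batch() treats its third argument as a plain is-error indication:

/* explicit error flag rather than the raw 16-bit status value */
bool is_error = nvme_req(req)->status != NVME_SC_SUCCESS;
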
@@ -1011,25 +1012,37 @@ static void apple_nvme_reset_work(struct work_struct *work)
ret = apple_rtkit_shutdown(anv->rtk);
if (ret)
goto out;
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ /*
+ * Only do the soft-reset if the CPU is not running, which means either we
+ * or the previous stage shut it down cleanly.
+ */
+ if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
+ APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {
- ret = reset_control_assert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
- ret = apple_rtkit_reinit(anv->rtk);
- if (ret)
- goto out;
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
- ret = reset_control_deassert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = apple_rtkit_boot(anv->rtk);
+ } else {
+ ret = apple_rtkit_wake(anv->rtk);
+ }
- writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
- anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
- ret = apple_rtkit_boot(anv->rtk);
if (ret) {
dev_err(anv->dev, "ANS did not boot");
goto out;
@@ -1251,7 +1264,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
anv->admin_tagset.numa_node = NUMA_NO_NODE;
anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
anv->admin_tagset.driver_data = &anv->adminq;
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
@@ -1275,7 +1287,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
anv->tagset.driver_data = &anv->ioq;
ret = blk_mq_alloc_tag_set(&anv->tagset);
@@ -1518,6 +1529,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
return anv;
put_dev:
+ apple_nvme_detach_genpd(anv);
put_device(anv->dev);
return ERR_PTR(ret);
}
@@ -1551,6 +1563,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
nvme_put_ctrl(&anv->ctrl);
+ apple_nvme_detach_genpd(anv);
return ret;
}
@@ -1565,9 +1578,12 @@ static void apple_nvme_remove(struct platform_device *pdev)
apple_nvme_disable(anv, true);
nvme_uninit_ctrl(&anv->ctrl);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
+
apple_nvme_detach_genpd(anv);
}
@@ -1576,8 +1592,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
struct apple_nvme *anv = platform_get_drvdata(pdev);
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
}
static int apple_nvme_resume(struct device *dev)
@@ -1594,10 +1613,11 @@ static int apple_nvme_suspend(struct device *dev)
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
ret = apple_rtkit_shutdown(anv->rtk);
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
return ret;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a970168a3014..8359d0aa0e44 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
static inline void __nvme_end_req(struct request *req)
{
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
- if (blk_rq_is_passthrough(req))
- nvme_log_err_passthru(req);
- else
- nvme_log_error(req);
- }
__nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -564,8 +564,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
fallthrough;
@@ -885,6 +883,12 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
+static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
+{
+ cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
+ cmnd->rw.lbatm = cpu_to_le16(0xffff);
+}
+
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
struct request *req)
{
@@ -1017,18 +1021,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_REF;
if (op == nvme_cmd_zone_append)
control |= NVME_RW_APPEND_PIREMAP;
nvme_set_ref_tag(ns, cmnd, req);
- break;
+ }
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_APP;
+ nvme_set_app_tag(req, cmnd);
}
}
@@ -1695,7 +1698,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
- if (status < 0)
+
+ /*
+ * It's either a kernel error or the host observed a connection
+ * loss. In either case it's not possible to communicate with the
+ * controller, so enter the error code path.
+ */
+ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
return status;
/*
@@ -2002,6 +2011,7 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
lim->atomic_write_hw_boundary = boundary;
lim->atomic_write_hw_unit_min = bs;
lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
@@ -2126,14 +2136,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
struct queue_limits lim;
+ unsigned int memflags;
int ret;
- blk_mq_freeze_queue(ns->disk->queue);
lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
+
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
/* Hide the block-interface for these devices */
if (!ret)
@@ -2148,6 +2160,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
+ unsigned int memflags;
sector_t capacity;
unsigned lbaf;
int ret;
@@ -2177,12 +2190,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
- blk_mq_freeze_queue(ns->disk->queue);
+ lim = queue_limits_start_update(ns->disk->queue);
+
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-
- lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
@@ -2212,7 +2225,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
goto out;
}
@@ -2228,7 +2241,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
if (blk_queue_is_zoned(ns->queue)) {
ret = blk_revalidate_disk_zones(ns->disk);
@@ -2284,8 +2297,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
if (!ret && nvme_ns_head_multipath(ns->head)) {
struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
+ unsigned int memflags;
- blk_mq_freeze_queue(ns->head->disk->queue);
+ lim = queue_limits_start_update(ns->head->disk->queue);
+ memflags = blk_mq_freeze_queue(ns->head->disk->queue);
/*
* queue_limits mixes values that are the hardware limitations
* for bio splitting with what is the device configuration.
@@ -2301,7 +2316,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
* the splitting limits in to make sure we still obey possibly
* lower limitations of other controllers.
*/
- lim = queue_limits_start_update(ns->head->disk->queue);
lim.logical_block_size = ns_lim->logical_block_size;
lim.physical_block_size = ns_lim->physical_block_size;
lim.io_min = ns_lim->io_min;
@@ -2318,7 +2332,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
+ blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
}
return ret;
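
Throughout these nvme hunks, blk_mq_freeze_queue() now returns memalloc scope flags that must be handed back to blk_mq_unfreeze_queue(), so allocations inside the frozen window cannot recurse into the frozen queue (the multipath hunk below uses the _nomemrestore variant where no saved flags are available). The updated pairing:

unsigned int memflags;

memflags = blk_mq_freeze_queue(q);	/* saves the allocation scope */
/* ... queue frozen: commit limits, flip flags, etc. ... */
blk_mq_unfreeze_queue(q, memflags);	/* restores the saved scope */
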
@@ -3092,7 +3106,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+ struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -3109,7 +3123,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
return ret;
}
- xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(cel);
+ return xa_err(old);
+ }
out:
*log = cel;
return 0;
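
The reworked nvme_get_effects_log() checks the return of xa_store(), which yields the previous entry at that index or an error encoded as a special pointer; testing with xa_is_err() and decoding with xa_err() lets the function free the just-allocated log instead of leaking it when the store fails. The general pattern, with xa, index and new as placeholders for the caller's state:

/* Store-with-error-check for an xarray. */
void *old;

old = xa_store(&xa, index, new, GFP_KERNEL);
if (xa_is_err(old)) {
	kfree(new);		/* store failed: don't leak the new entry */
	return xa_err(old);	/* decode the errno from the error entry */
}
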
@@ -3171,6 +3189,25 @@ free_data:
return ret;
}
+static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
+ u8 csi, struct nvme_effects_log **log)
+{
+ struct nvme_effects_log *effects, *old;
+
+ effects = kzalloc(sizeof(*effects), GFP_KERNEL);
+ if (!effects)
+ return -ENOMEM;
+
+ old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(effects);
+ return xa_err(old);
+ }
+
+ *log = effects;
+ return 0;
+}
+
static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
struct nvme_effects_log *log = ctrl->effects;
@@ -3217,10 +3254,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (!ctrl->effects) {
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
- if (!ctrl->effects)
- return -ENOMEM;
- xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
}
nvme_init_known_nvm_effects(ctrl);
@@ -4564,7 +4600,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect and keep alive */
set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
@@ -4639,7 +4674,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect */
set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b81af7919e94..b9929a5a7f4e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,7 +16,6 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -786,49 +785,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (nvme_ctrl_state(&ctrl->ctrl)) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- /*
- * Schedule a controller reset. The reset will terminate the
- * association and schedule the reconnect timer. Reconnects
- * will be attempted until either the ctlr_loss_tmo
- * (max_retries * connect_delay) expires or the remoteport's
- * dev_loss_tmo expires.
- */
- if (nvme_reset_ctrl(&ctrl->ctrl)) {
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Couldn't schedule reset.\n",
- ctrl->cnum);
- nvme_delete_ctrl(&ctrl->ctrl);
- }
- break;
-
- case NVME_CTRL_CONNECTING:
- /*
- * The association has already been terminated and the
- * controller is attempting reconnects. No need to do anything
- * further. Reconnects will be attempted until either the
- * ctlr_loss_tmo (max_retries * connect_delay) expires or the
- * remoteport's dev_loss_tmo expires.
- */
- break;
-
- case NVME_CTRL_RESETTING:
- /*
- * Controller is already in the process of terminating the
- * association. No need to do anything further. The reconnect
- * step will kick in naturally after the association is
- * terminated.
- */
- break;
-
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- default:
- /* no action to take - let it delete */
- break;
- }
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ nvme_reset_ctrl(&ctrl->ctrl);
}
/**
@@ -2080,7 +2038,8 @@ done:
nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ if (terminate_assoc &&
+ nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
@@ -2534,6 +2493,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/*
* if an error (io timeout, etc) while (re)connecting, the remote
* port requested terminating of the association (disconnect_ls)
@@ -2541,9 +2502,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ if (state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
- set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
@@ -2551,7 +2511,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ if (state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3062,7 +3022,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
int ret;
- bool changed;
++ctrl->ctrl.nr_reconnects;
@@ -3173,12 +3132,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_term_aen_ops;
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) {
+ ret = -EIO;
+ goto out_term_aen_ops;
+ }
ctrl->ctrl.nr_reconnects = 0;
-
- if (changed)
- nvme_start_ctrl(&ctrl->ctrl);
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
@@ -3579,8 +3539,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
goto fail_ctrl;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index e8930146847a..24e2c702da7a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata)
- return -EINVAL;
+ if (!supports_metadata) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
- return -EINVAL;
+ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd);
if (ret < 0)
@@ -283,8 +287,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
{
if (ns && nsid != ns->head->ns_id) {
dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u)"
- "of namespace\n",
+ "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, nsid, ns->head->ns_id);
return false;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a85d190942bd..2a7635565083 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -60,7 +60,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry)
if (h->disk)
- blk_mq_unfreeze_queue(h->disk->queue);
+ blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
}
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c4bb8dfe1a45..7be92d07430e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1187,43 +1187,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const char *nvme_get_error_status_str(u16 status);
-const char *nvme_get_opcode_str(u8 opcode);
-const char *nvme_get_admin_opcode_str(u8 opcode);
-const char *nvme_get_fabrics_opcode_str(u8 opcode);
-#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const char *nvme_get_error_status_str(u16 status)
-{
- return "I/O Error";
-}
-static inline const char *nvme_get_opcode_str(u8 opcode)
-{
- return "I/O Cmd";
-}
-static inline const char *nvme_get_admin_opcode_str(u8 opcode)
-{
- return "Admin Cmd";
-}
-
-static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
-{
- return "Fabrics Cmd";
-}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-
-static inline const char *nvme_opcode_str(int qid, u8 opcode)
-{
- return qid ? nvme_get_opcode_str(opcode) :
- nvme_get_admin_opcode_str(opcode);
-}
-
-static inline const char *nvme_fabrics_opcode_str(
- int qid, const struct nvme_command *cmd)
-{
- if (nvme_is_fabrics(cmd))
- return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
-
- return nvme_opcode_str(qid, cmd->common.opcode);
-}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e2634f437f33..3ad7f197c808 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -8,7 +8,6 @@
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -373,7 +372,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
/*
* Ensure that the doorbell is updated before reading the event
* index from memory. The controller needs to provide similar
- * ordering to ensure the envent index is updated before reading
+ * ordering to ensure the event index is updated before reading
* the doorbell.
*/
mb();
@@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL && offset)
- blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ blk_mq_map_hw_queues(map, dev->dev, offset);
else
blk_mq_map_queues(map);
qoff += map->nr_queues;
@@ -1131,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
- nvme_pci_complete_batch))
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1148,13 +1148,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
- struct io_comp_batch *iob)
+static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
- int found = 0;
+ bool found = false;
while (nvme_cqe_pending(nvmeq)) {
- found++;
+ found = true;
/*
* load-load control dependency between phase and the rest of
* the cqe requires a full read memory barrier
@@ -1412,9 +1412,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ /*
+ * Shut down the device immediately if we see it is disconnected. This
+ * unblocks PCIe error handling if the nvme driver is waiting in
+ * error_resume for a device that has been removed. We can't unbind the
+ * driver while the driver's error callback is waiting to complete, so
+ * we're relying on a timeout to break that deadlock if a removal
+ * occurs while reset work is running.
+ */
+ if (pci_dev_is_disconnected(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
if (nvme_state_terminal(&dev->ctrl))
goto disable;
@@ -1422,7 +1433,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* the recovery mechanism will surely fail.
*/
mb();
- if (pci_channel_offline(to_pci_dev(dev->dev)))
+ if (pci_channel_offline(pdev))
return BLK_EH_RESET_TIMER;
/*
@@ -1984,6 +1995,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Controllers may support a CMB size larger than their BAR, for
+ * example, due to being behind a bridge. Reduce the CMB to the
+ * reported size of the BAR
+ */
+ size = min(size, bar_size - offset);
+
+ if (!IS_ALIGNED(size, memremap_compat_align()) ||
+ !IS_ALIGNED(pci_resource_start(pdev, bar),
+ memremap_compat_align()))
+ return;
+
+ /*
* Tell the controller about the host side address mapping the CMB,
* and enable CMB decoding for the NVMe 1.4+ scheme:
*/
@@ -1993,17 +2016,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
dev->bar + NVME_REG_CMBMSC);
}
- /*
- * Controllers may support a CMB size larger than their BAR,
- * for example, due to being behind a bridge. Reduce the CMB to
- * the reported size of the BAR
- */
- if (size > bar_size - offset)
- size = bar_size - offset;
-
if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
dev_warn(dev->ctrl.device,
"failed to register the CMB\n");
+ hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
return;
}
@@ -2086,8 +2102,8 @@ static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
GFP_KERNEL);
if (!dev->host_mem_descs) {
- dma_free_noncontiguous(dev->dev, dev->host_mem_size,
- dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
+ DMA_BIDIRECTIONAL);
dev->hmb_sgt = NULL;
return -ENOMEM;
}
@@ -2154,14 +2170,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
return 0;
out_free_bufs:
- while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
- dma_free_attrs(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr),
- DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
- }
-
kfree(bufs);
out_free_descs:
dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -3148,7 +3156,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
*/
- if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3713,6 +3723,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index b68a9e5f1ea3..3a41b9ab0f13 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b127d41dbbfe..327f3f2f5399 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
+static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
+ NVME_TCP_Q_IO_CPU_SET = 3,
};
enum nvme_tcp_recv_state {
@@ -214,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+ switch (type) {
+ case nvme_tcp_c2h_term:
+ case nvme_tcp_c2h_data:
+ case nvme_tcp_r2t:
+ case nvme_tcp_rsp:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Check if the queue is TLS encrypted
*/
@@ -760,6 +776,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return 0;
}
+static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_term_pdu *pdu)
+{
+ u16 fes;
+ const char *msg;
+ u32 plen = le32_to_cpu(pdu->hdr.plen);
+
+ static const char * const msg_table[] = {
+ [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
+ [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
+ [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
+ [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
+ [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
+ [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
+ };
+
+ if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
+ plen > NVME_TCP_MAX_C2HTERM_PLEN) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Received a malformed C2HTermReq PDU (plen = %u)\n",
+ plen);
+ return;
+ }
+
+ fes = le16_to_cpu(pdu->fes);
+ if (fes && fes < ARRAY_SIZE(msg_table))
+ msg = msg_table[fes];
+ else
+ msg = "Unknown";
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Received C2HTermReq (FES = %s)\n", msg);
+}
+
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
@@ -781,6 +831,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+ if (!nvme_tcp_recv_pdu_supported(hdr->type))
+ goto unsupported_pdu;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "pdu type %d has unexpected header length (%d)\n",
+ hdr->type, hdr->hlen);
+ return -EPROTO;
+ }
+
+ if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
+ /*
+ * C2HTermReq never includes Header or Data digests.
+ * Skip the checks.
+ */
+ nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
+ return -EINVAL;
+ }
+
if (queue->hdr_digest) {
ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
if (unlikely(ret))
@@ -804,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_init_recv_ctx(queue);
return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
+ goto unsupported_pdu;
}
+
+unsupported_pdu:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -1446,8 +1518,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
+ msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
+ if (ret >= 0 && ret < sizeof(*icresp))
+ ret = -ECONNRESET;
if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
@@ -1562,23 +1637,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/*
+ * Track the number of queues assigned to each cpu using a global per-cpu
+ * counter and select the least used cpu from the mq_map. Our goal is to spread
+ * different controllers' I/O threads across different cpu cores.
+ *
+ * Note that the accounting is not 100% perfect, but it doesn't need to be;
+ * we simply make a best effort to select the best candidate cpu core that
+ * we find at any given point.
+ */
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int qid = nvme_tcp_queue_id(queue) - 1;
+ unsigned int *mq_map = NULL;
+ int cpu, min_queues = INT_MAX, io_cpu;
+
+ if (wq_unbound)
+ goto out;
if (nvme_tcp_default_queue(queue))
- n = qid - 1;
+ mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ mq_map = set->map[HCTX_TYPE_READ].mq_map;
else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- if (wq_unbound)
- queue->io_cpu = WORK_CPU_UNBOUND;
- else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ mq_map = set->map[HCTX_TYPE_POLL].mq_map;
+
+ if (WARN_ON(!mq_map))
+ goto out;
+
+ /* Search for the least used cpu from the mq_map */
+ io_cpu = WORK_CPU_UNBOUND;
+ for_each_online_cpu(cpu) {
+ int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
+
+ if (mq_map[cpu] != qid)
+ continue;
+ if (num_queues < min_queues) {
+ io_cpu = cpu;
+ min_queues = num_queues;
+ }
+ }
+ if (io_cpu != WORK_CPU_UNBOUND) {
+ queue->io_cpu = io_cpu;
+ atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
+ set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
+ }
+out:
+ dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
+ qid, queue->io_cpu);
}
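The selection loop above is a straightforward least-loaded pick over the online cpus that map to this hardware queue. A minimal standalone sketch of the same idea, assuming a plain array in place of the driver's per-cpu atomic counters and blk-mq map (all names here are illustrative, not the driver's):

	#include <limits.h>

	#define NCPUS 8				/* stand-in for NR_CPUS */
	static int cpu_queue_count[NCPUS];	/* like nvme_tcp_cpu_queues */

	/*
	 * Return the cpu mapped to @qid with the fewest queues already
	 * assigned, or -1 if no cpu maps to @qid (the real driver falls
	 * back to WORK_CPU_UNBOUND in that case).
	 */
	static int pick_least_used_cpu(const unsigned int *mq_map, unsigned int qid)
	{
		int cpu, io_cpu = -1, min_queues = INT_MAX;

		for (cpu = 0; cpu < NCPUS; cpu++) {
			if (mq_map[cpu] != qid)
				continue;
			if (cpu_queue_count[cpu] < min_queues) {
				io_cpu = cpu;
				min_queues = cpu_queue_count[cpu];
			}
		}
		if (io_cpu >= 0)
			cpu_queue_count[io_cpu]++;	/* mirrors atomic_inc() */
		return io_cpu;
	}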
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -1722,7 +1830,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue->sock->sk->sk_use_task_frag = false;
- nvme_tcp_set_queue_io_cpu(queue);
+ queue->io_cpu = WORK_CPU_UNBOUND;
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1844,6 +1952,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
+ if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
+ atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1878,9 +1989,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
nvme_tcp_init_recv_ctx(queue);
nvme_tcp_setup_sock_ops(queue);
- if (idx)
+ if (idx) {
+ nvme_tcp_set_queue_io_cpu(queue);
ret = nvmf_connect_io_queue(nctrl, idx);
- else
+ } else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
@@ -2613,6 +2725,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
@@ -2620,9 +2733,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
- nvme_tcp_try_recv(queue);
+ ret = nvme_tcp_try_recv(queue);
clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
- return queue->nr_cqe;
+ return ret < 0 ? ret : queue->nr_cqe;
}
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
@@ -2845,6 +2958,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+ int cpu;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
@@ -2862,6 +2976,9 @@ static int __init nvme_tcp_init_module(void)
if (!nvme_tcp_wq)
return -ENOMEM;
+ for_each_possible_cpu(cpu)
+ atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
+
nvmf_register_transport(&nvme_tcp_transport);
return 0;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 46be031f91b4..fb7446d6d682 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -115,3 +115,14 @@ config NVME_TARGET_AUTH
target side.
If unsure, say N.
+
+config NVME_TARGET_PCI_EPF
+ tristate "NVMe PCI Endpoint Function target support"
+ depends on NVME_TARGET && PCI_ENDPOINT
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables the NVMe PCI Endpoint Function target driver support,
+ which allows creating an NVMe PCI controller using an endpoint mode
+ capable PCI controller.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index f2b025bbe10c..ed8522911d1f 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
+obj-$(CONFIG_NVME_TARGET_PCI_EPF) += nvmet-pci-epf.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
@@ -20,4 +21,5 @@ nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
nvmet-tcp-y += tcp.o
+nvmet-pci-epf-y += pci-epf.o
nvmet-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index fa89b0549c36..acc138bbf8f2 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -12,6 +12,142 @@
#include <linux/unaligned.h>
#include "nvmet.h"
+static void nvmet_execute_delete_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, false);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_sq(ctrl, sqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
+ u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
+ u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
+ u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ /*
+ * Note: The NVMe specification allows multiple SQs to use the same CQ.
+ * However, the target code does not really support that. So for now,
+ * prevent this and fail the command if sqid and cqid are different.
+ */
+ if (!cqid || cqid != sqid) {
+ pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
+ status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_delete_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_cq(ctrl, cqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
+ u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
+ u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
+ u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
+ prp1, irq_vector);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -230,8 +366,18 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
+ struct nvme_effects_log *log)
{
+ /*
+ * For a PCI target controller, advertise support for the SQ/CQ
+ * creation and deletion admin commands.
+ */
+ if (nvmet_is_pci_ctrl(ctrl)) {
+ log->acs[nvme_admin_delete_sq] =
+ log->acs[nvme_admin_create_sq] =
+ log->acs[nvme_admin_delete_cq] =
+ log->acs[nvme_admin_create_cq] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ }
+
log->acs[nvme_admin_get_log_page] =
log->acs[nvme_admin_identify] =
log->acs[nvme_admin_abort_cmd] =
@@ -240,7 +386,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
log->acs[nvme_admin_async_event] =
log->acs[nvme_admin_keep_alive] =
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+}
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+{
log->iocs[nvme_cmd_read] =
log->iocs[nvme_cmd_flush] =
log->iocs[nvme_cmd_dsm] =
@@ -265,6 +414,7 @@ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_effects_log *log;
u16 status = NVME_SC_SUCCESS;
@@ -276,6 +426,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
switch (req->cmd->get_log_page.csi) {
case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
break;
case NVME_CSI_ZNS:
@@ -283,6 +434,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
status = NVME_SC_INVALID_IO_CMD_SET;
goto free;
}
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
nvmet_get_cmd_effects_zns(log);
break;
@@ -508,7 +660,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys *subsys = ctrl->subsys;
struct nvme_id_ctrl *id;
- u32 cmd_capsule_size;
+ u32 cmd_capsule_size, ctratt;
u16 status = 0;
if (!subsys->subsys_discovered) {
@@ -523,9 +675,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
goto out;
}
- /* XXX: figure out how to assign real vendors IDs. */
- id->vid = 0;
- id->ssvid = 0;
+ id->vid = cpu_to_le16(subsys->vendor_id);
+ id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
@@ -557,8 +708,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
- id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
- NVME_CTRL_ATTR_TBKAS);
+ ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
+ if (nvmet_is_pci_ctrl(ctrl))
+ ctratt |= NVME_CTRL_ATTR_RHII;
+ id->ctratt = cpu_to_le32(ctratt);
id->oacs = 0;
@@ -915,6 +1068,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
goto out;
}
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
out:
nvmet_req_complete(req, status);
}
@@ -1105,6 +1259,92 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
return 0;
}
+static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ if (!nvmet_is_pci_ctrl(ctrl))
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+
+ /*
+ * The NVMe base specification v2.1 recommends supporting 128-bit host
+ * IDs (section 5.1.25.1.28.1). However, that same section also says
+ * that "The controller may support a 64-bit Host Identifier and/or an
+ * extended 128-bit Host Identifier". So simplify this support and do
+ * not support 64-bit host IDs, to avoid needing to check that all
+ * controllers associated with the same subsystem use the same host
+ * ID size.
+ */
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
+ sizeof(req->sq->ctrl->hostid));
+}
+
+static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_coalesce irqc = {
+ .time = (cdw11 >> 8) & 0xff,
+ .thr = cdw11 & 0xff,
+ };
+
+ /*
+ * This feature is not supported for fabrics controllers and is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+}
+
+static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_config irqcfg = {
+ .iv = cdw11 & 0xffff,
+ .cd = (cdw11 >> 16) & 0x1,
+ };
+
+ /*
+ * This feature is not supported for fabrics controllers and is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+}
+
+static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_arbitration arb = {
+ .hpw = (cdw11 >> 24) & 0xff,
+ .mpw = (cdw11 >> 16) & 0xff,
+ .lpw = (cdw11 >> 8) & 0xff,
+ .ab = cdw11 & 0x3,
+ };
+
+ if (!ctrl->ops->set_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+}
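The arbitration helpers above pack four fields into a single dword: HPW in bits 31:24, MPW in 23:16, LPW in 15:8, and AB in the low bits. A small round-trip sketch of that encoding (standalone C; the struct merely mirrors nvmet_feat_arbitration):

	#include <assert.h>
	#include <stdint.h>

	struct arb { uint8_t hpw, mpw, lpw, ab; };

	static uint32_t arb_pack(struct arb a)
	{
		return ((uint32_t)a.hpw << 24) | ((uint32_t)a.mpw << 16) |
		       ((uint32_t)a.lpw << 8) | (a.ab & 0x3);
	}

	static struct arb arb_unpack(uint32_t cdw11)
	{
		return (struct arb){
			.hpw = (cdw11 >> 24) & 0xff,
			.mpw = (cdw11 >> 16) & 0xff,
			.lpw = (cdw11 >> 8) & 0xff,
			.ab  = cdw11 & 0x3,
		};
	}

	int main(void)
	{
		struct arb a = { .hpw = 64, .mpw = 32, .lpw = 8, .ab = 3 };

		assert(arb_unpack(arb_pack(a)).mpw == 32);	/* round-trips */
		return 0;
	}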
+
void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
@@ -1118,6 +1358,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
return;
switch (cdw10 & 0xff) {
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_set_feat_arbitration(req);
+ break;
case NVME_FEAT_NUM_QUEUES:
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
@@ -1128,6 +1371,12 @@ void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
+ case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_set_feat_irq_coalesce(req);
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_set_feat_irq_config(req);
+ break;
case NVME_FEAT_KATO:
status = nvmet_set_feat_kato(req);
break;
@@ -1135,7 +1384,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
- status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+ status = nvmet_set_feat_host_id(req);
break;
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
@@ -1172,6 +1421,79 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_irq_coalesce irqc = { };
+ u16 status;
+
+ /*
+ * This feature is not supported for fabrics controllers and is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
+ struct nvmet_feat_irq_config irqcfg = { .iv = iv };
+ u16 status;
+
+ /*
+ * This feature is not supported for fabrics controllers and is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_arbitration arb = { };
+ u16 status;
+
+ if (!ctrl->ops->get_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req,
+ ((u32)arb.hpw << 24) |
+ ((u32)arb.mpw << 16) |
+ ((u32)arb.lpw << 8) |
+ (arb.ab & 0x3));
+
+ return NVME_SC_SUCCESS;
+}
+
void nvmet_get_feat_kato(struct nvmet_req *req)
{
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
@@ -1198,21 +1520,24 @@ void nvmet_execute_get_features(struct nvmet_req *req)
* need to come up with some fake values for these.
*/
#if 0
- case NVME_FEAT_ARBITRATION:
- break;
case NVME_FEAT_POWER_MGMT:
break;
case NVME_FEAT_TEMP_THRESH:
break;
case NVME_FEAT_ERR_RECOVERY:
break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+#endif
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_get_feat_arbitration(req);
+ break;
case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_get_feat_irq_coalesce(req);
break;
case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_get_feat_irq_config(req);
break;
- case NVME_FEAT_WRITE_ATOMIC:
- break;
-#endif
case NVME_FEAT_ASYNC_EVENT:
nvmet_get_feat_async_event(req);
break;
@@ -1293,6 +1618,27 @@ out:
nvmet_req_complete(req, status);
}
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_admin_cmd_data_len(req);
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
+ return nvmet_discovery_cmd_data_len(req);
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ case nvme_admin_get_features:
+ return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
+ default:
+ return 0;
+ }
+}
+
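Of the lengths returned above, the Get Log Page case follows the NVMe NUMD convention: a 0's-based dword count split across the NUMDU/NUMDL fields. A standalone sketch of that conversion, assuming the standard encoding (the function name is illustrative):

	#include <stdint.h>

	/* Bytes requested by a Get Log Page command, from NUMDU/NUMDL. */
	static uint32_t log_page_len_bytes(uint16_t numdu, uint16_t numdl)
	{
		uint32_t numd = ((uint32_t)numdu << 16) | numdl;

		return (numd + 1) * sizeof(uint32_t);	/* NUMD is 0's based */
	}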
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1307,13 +1653,30 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
+ /* For PCI controllers, admin commands shall not use SGL. */
+ if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
+ cmd->common.flags & NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
+ case nvme_admin_delete_sq:
+ req->execute = nvmet_execute_delete_sq;
+ return 0;
+ case nvme_admin_create_sq:
+ req->execute = nvmet_execute_create_sq;
+ return 0;
case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page;
return 0;
+ case nvme_admin_delete_cq:
+ req->execute = nvmet_execute_delete_cq;
+ return 0;
+ case nvme_admin_create_cq:
+ req->execute = nvmet_execute_create_cq;
+ return 0;
case nvme_admin_identify:
req->execute = nvmet_execute_identify;
return 0;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2b030f0efc38..e44ef69dffc2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,7 @@ static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
+ { NVMF_TRTYPE_PCI, "pci" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
@@ -46,6 +47,7 @@ static const struct nvmet_type_name_map nvmet_addr_family[] = {
{ NVMF_ADDR_FAMILY_IP6, "ipv6" },
{ NVMF_ADDR_FAMILY_IB, "ib" },
{ NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_PCI, "pci" },
{ NVMF_ADDR_FAMILY_LOOP, "loop" },
};
@@ -1400,6 +1402,49 @@ out_unlock:
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 vid;
+
+ if (kstrtou16(page, 0, &vid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->vendor_id = vid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id);
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n",
+ to_subsys(item)->subsys_vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 ssvid;
+
+ if (kstrtou16(page, 0, &ssvid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->subsys_vendor_id = ssvid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id);
+
static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
@@ -1628,6 +1673,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_serial,
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_vendor_id,
+ &nvmet_subsys_attr_attr_subsys_vendor_id,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_qid_max,
&nvmet_subsys_attr_attr_ieee_oui,
@@ -1782,6 +1829,7 @@ static struct config_group *nvmet_referral_make(
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&port->entry);
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
config_group_init_type_name(&port->group, name, &nvmet_referral_type);
return &port->group;
@@ -2007,6 +2055,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index fde6c555af61..2e741696f371 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -606,6 +606,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_dev_put;
}
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_pr_exit;
+
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
@@ -613,6 +616,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+out_pr_exit:
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -638,6 +644,19 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
mutex_unlock(&subsys->lock);
+ /*
+ * Now that we removed the namespaces from the lookup list, we
+ * can kill the per_cpu ref and wait for any remaining references
+ * to be dropped, as well as a RCU grace period for anyone only
+ * using the namepace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
if (ns->pr.enable)
nvmet_pr_exit_ns(ns);
@@ -660,22 +679,6 @@ void nvmet_ns_free(struct nvmet_ns *ns)
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
- mutex_unlock(&subsys->lock);
-
- /*
- * Now that we removed the namespaces from the lookup list, we
- * can kill the per_cpu ref and wait for any remaining references
- * to be dropped, as well as a RCU grace period for anyone only
- * using the namepace under rcu_read_lock(). Note that we can't
- * use call_rcu here as we need to ensure the namespaces have
- * been fully destroyed before unloading the module.
- */
- percpu_ref_kill(&ns->ref);
- synchronize_rcu();
- wait_for_completion(&ns->disable_done);
- percpu_ref_exit(&ns->ref);
-
- mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
mutex_unlock(&subsys->lock);
@@ -705,9 +708,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
- if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
- goto out_free;
-
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = nsid;
@@ -730,8 +730,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
return ns;
out_exit:
subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
-out_free:
kfree(ns);
out_unlock:
mutex_unlock(&subsys->lock);
@@ -836,6 +834,89 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
complete(&sq->confirm_done);
}
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (cqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ /*
+ * Note: For PCI controllers, the NVMe specification allows multiple
+ * SQs to share a single CQ. However, we do not support this yet, so
+ * check that there is no SQ defined for a CQ. If one exists, then the
+ * CQ ID is invalid for creation as well as when the CQ is being
+ * deleted (as that would mean that the SQ was not deleted before the
+ * CQ).
+ */
+ if (ctrl->sqs[cqid])
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ u16 status;
+
+ status = nvmet_check_cqid(ctrl, qid);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_cq_setup(ctrl, cq, qid, size);
+
+ return NVME_SC_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_create);
+
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid,
+ bool create)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (sqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if ((create && ctrl->sqs[sqid]) ||
+ (!create && !ctrl->sqs[sqid]))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 sqid, u16 size)
+{
+ u16 status;
+ int ret;
+
+ if (!kref_get_unless_zero(&ctrl->ref))
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ goto ctrl_put;
+
+ ret = nvmet_sq_init(sq);
+ if (ret) {
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto ctrl_put;
+ }
+
+ nvmet_sq_setup(ctrl, sq, sqid, size);
+ sq->ctrl = ctrl;
+
+ return NVME_SC_SUCCESS;
+
+ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_create);
+
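Given the pairing rule enforced by nvmet_check_cqid() above (an SQ must use the CQ with the same ID until shared CQs are supported), a PCI transport backend would presumably bring a queue pair up in CQ-then-SQ order. A hedged sketch of that call sequence; my_create_queue_pair and its arguments are hypothetical, only nvmet_cq_create()/nvmet_sq_create() come from this patch:

	/* Hypothetical transport-side queue-pair bring-up (sketch only). */
	static u16 my_create_queue_pair(struct nvmet_ctrl *ctrl,
					struct nvmet_cq *cq, struct nvmet_sq *sq,
					u16 qid, u16 qsize)
	{
		u16 status;

		/* The CQ must exist before the SQ that completes into it. */
		status = nvmet_cq_create(ctrl, cq, qid, qsize);
		if (status != NVME_SC_SUCCESS)
			return status;

		/* Same qid on both: the core rejects sqid != cqid for now. */
		return nvmet_sq_create(ctrl, sq, qid, qsize);
	}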
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
struct nvmet_ctrl *ctrl = sq->ctrl;
@@ -929,6 +1010,33 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
return 0;
}
+static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u32 metadata_len = 0;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_io_cmd_data_len(req);
+
+ if (!req->ns)
+ return 0;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_zone_append:
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ metadata_len = nvmet_rw_metadata_len(req);
+ return nvmet_rw_data_len(req) + metadata_len;
+ case nvme_cmd_dsm:
+ return nvmet_dsm_len(req);
+ case nvme_cmd_zone_mgmt_recv:
+ return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ default:
+ return 0;
+ }
+}
+
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1030,12 +1138,15 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
/*
* For fabrics, PSDT field shall describe metadata pointer (MPTR) that
* contains an address of a single contiguous physical buffer that is
- * byte aligned.
+ * byte aligned. For PCI controllers, this is optional, so it is not enforced.
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
- req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
- goto fail;
+ if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, flags);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto fail;
+ }
}
if (unlikely(!req->sq->ctrl))
@@ -1077,11 +1188,27 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+size_t nvmet_req_transfer_len(struct nvmet_req *req)
+{
+ if (likely(req->sq->qid != 0))
+ return nvmet_io_cmd_transfer_len(req);
+ if (unlikely(!req->sq->ctrl))
+ return nvmet_connect_cmd_data_len(req);
+ return nvmet_admin_cmd_data_len(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_transfer_len);
+
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
if (unlikely(len != req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1092,8 +1219,14 @@ EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
if (unlikely(data_len > req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1184,41 +1317,6 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
-static inline bool nvmet_cc_en(u32 cc)
-{
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
-}
-
-static inline u8 nvmet_cc_css(u32 cc)
-{
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_mps(u32 cc)
-{
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_ams(u32 cc)
-{
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_shn(u32 cc)
-{
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
-}
-
-static inline u8 nvmet_cc_iosqes(u32 cc)
-{
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_iocqes(u32 cc)
-{
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
-}
-
static inline bool nvmet_css_supported(u8 cc_css)
{
switch (cc_css << NVME_CC_CSS_SHIFT) {
@@ -1295,6 +1393,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
mutex_unlock(&ctrl->lock);
}
+EXPORT_SYMBOL_GPL(nvmet_update_cc);
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
@@ -1402,15 +1501,15 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
* Note: ctrl->subsys->lock should be held when calling this function
*/
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req)
+ struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
- if (!req->p2p_client)
+ if (!p2p_client)
return;
- ctrl->p2p_client = get_device(req->p2p_client);
+ ctrl->p2p_client = get_device(p2p_client);
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -1439,45 +1538,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
ctrl->ops->delete_ctrl(ctrl);
}
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid)
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
{
struct nvmet_subsys *subsys;
struct nvmet_ctrl *ctrl;
+ u32 kato = args->kato;
+ u8 dhchap_status;
int ret;
- u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
- subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- req->error_loc = offsetof(struct nvme_common_command, dptr);
- goto out;
+ args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NULL;
}
down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(subsys, hostnqn)) {
+ if (!nvmet_host_allowed(subsys, args->hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
- hostnqn, subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ args->hostnqn, args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
- req->error_loc = offsetof(struct nvme_common_command, dptr);
+ args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
- status = NVME_SC_INTERNAL;
+ args->status = NVME_SC_INTERNAL;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- ctrl->port = req->port;
- ctrl->ops = req->ops;
+ ctrl->port = args->port;
+ ctrl->ops = args->ops;
#ifdef CONFIG_NVME_TARGET_PASSTHRU
/* Set loop targets to clear IDS by default */
@@ -1491,8 +1589,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
- memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
@@ -1515,12 +1613,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+ args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
goto out_free_sqs;
}
ctrl->cntlid = ret;
- uuid_copy(&ctrl->hostid, hostid);
+ uuid_copy(&ctrl->hostid, args->hostid);
/*
* Discovery controllers may use some arbitrary high value
@@ -1542,12 +1640,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (ret)
goto init_pr_fail;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- nvmet_setup_p2p_ns_map(ctrl, req);
+ nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
nvmet_debugfs_ctrl_setup(ctrl);
mutex_unlock(&subsys->lock);
- *ctrlp = ctrl;
- return 0;
+ if (args->hostid)
+ uuid_copy(&ctrl->hostid, args->hostid);
+
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("Failed to setup authentication, dhchap status %u\n",
+ dhchap_status);
+ nvmet_ctrl_put(ctrl);
+ if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+ args->status =
+ NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ else
+ args->status = NVME_SC_INTERNAL;
+ return NULL;
+ }
+
+ args->status = NVME_SC_SUCCESS;
+
+ pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+
+ return ctrl;
init_pr_fail:
mutex_unlock(&subsys->lock);
@@ -1561,9 +1682,9 @@ out_free_ctrl:
kfree(ctrl);
out_put_subsystem:
nvmet_subsys_put(subsys);
-out:
- return status;
+ return NULL;
}
+EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
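With this refactor, a transport no longer hands nvmet_alloc_ctrl() a request; it fills a nvmet_alloc_ctrl_args and reads status/result/error_loc back from it on failure. A hedged sketch of the new calling convention (the field names come from this patch; the surrounding transport function is hypothetical):

	static struct nvmet_ctrl *my_connect(struct nvmet_port *port,
					     const struct nvmet_fabrics_ops *ops,
					     struct nvmf_connect_data *d, u32 kato)
	{
		struct nvmet_alloc_ctrl_args args = {
			.port		= port,
			.ops		= ops,
			.subsysnqn	= d->subsysnqn,
			.hostnqn	= d->hostnqn,
			.hostid		= &d->hostid,
			.kato		= kato,
		};
		struct nvmet_ctrl *ctrl;

		ctrl = nvmet_alloc_ctrl(&args);
		if (!ctrl)	/* args.status was set by the core */
			pr_err("connect failed, status 0x%x\n", args.status);
		return ctrl;
	}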
static void nvmet_ctrl_free(struct kref *ref)
{
@@ -1599,6 +1720,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 28843df5fa7c..df7207640506 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -224,6 +224,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
}
list_for_each_entry(r, &req->port->referrals, entry) {
+ if (r->disc_addr.trtype == NVMF_TRTYPE_PCI)
+ continue;
+
nvmet_format_discovery_entry(hdr, r,
NVME_DISC_SUBSYS_NAME,
r->disc_addr.traddr,
@@ -352,6 +355,20 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
nvmet_req_complete(req, stat);
}
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(req->cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 3f2857c17d95..2022757f08dc 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -179,6 +179,11 @@ static u8 nvmet_auth_failure2(void *d)
return data->rescode_exp;
}
+u32 nvmet_auth_send_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_send.tl);
+}
+
void nvmet_execute_auth_send(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -206,7 +211,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
offsetof(struct nvmf_auth_send_command, spsp1);
goto done;
}
- tl = le32_to_cpu(req->cmd->auth_send.tl);
+ tl = nvmet_auth_send_data_len(req);
if (!tl) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
@@ -429,6 +434,11 @@ static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
data->rescode_exp = req->sq->dhchap_status;
}
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_receive.al);
+}
+
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -454,7 +464,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
offsetof(struct nvmf_auth_receive_command, spsp1);
goto done;
}
- al = le32_to_cpu(req->cmd->auth_receive.al);
+ al = nvmet_auth_receive_data_len(req);
if (!al) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c49904ebb6c2..eb406c90c167 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -85,6 +85,22 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -114,6 +130,22 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
return 0;
}
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -213,73 +245,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status;
- u8 dhchap_status;
+ struct nvmet_alloc_ctrl_args args = {
+ .port = req->port,
+ .ops = req->ops,
+ .p2p_client = req->p2p_client,
+ .kato = le32_to_cpu(c->kato),
+ };
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
- status = NVME_SC_INTERNAL;
+ args.status = NVME_SC_INTERNAL;
goto complete;
}
- status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
- if (status)
+ args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (args.status)
goto out;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
- req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
- status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
+ args.error_loc = offsetof(struct nvmf_connect_command, recfmt);
+ args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ args.result = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
- status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl, &d->hostid);
- if (status)
- goto out;
- dhchap_status = nvmet_setup_auth(ctrl);
- if (dhchap_status) {
- pr_err("Failed to setup authentication, dhchap status %u\n",
- dhchap_status);
- nvmet_ctrl_put(ctrl);
- if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
- status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
- else
- status = NVME_SC_INTERNAL;
+ args.subsysnqn = d->subsysnqn;
+ args.hostnqn = d->hostnqn;
+ args.hostid = &d->hostid;
+ args.kato = le32_to_cpu(c->kato);
+
+ ctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl)
goto out;
- }
- status = nvmet_install_queue(ctrl, req);
- if (status) {
+ args.status = nvmet_install_queue(ctrl, req);
+ if (args.status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
- nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "",
- nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+ args.result = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
- nvmet_req_complete(req, status);
+ req->error_loc = args.error_loc;
+ req->cqe->result.u32 = args.result;
+ nvmet_req_complete(req, args.status);
}
static void nvmet_execute_io_connect(struct nvmet_req *req)
@@ -343,6 +369,17 @@ out_ctrl_put:
goto out;
}
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (!nvme_is_fabrics(cmd) ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect)
+ return 0;
+
+ return sizeof(struct nvmf_connect_data);
+}
+
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index eaf31c823cbe..83be0657e6df 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -272,6 +272,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
+ opf |= REQ_FAILFAST_DEV;
+
if (is_pci_p2pdma_page(sg_page(req->sg)))
opf |= REQ_NOMERGE;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7233549f7c8a..fcf4f460dc9a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -245,6 +245,8 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
+ void *drvdata;
+
bool reset_tbkas;
struct mutex lock;
@@ -331,6 +333,8 @@ struct nvmet_subsys {
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+ u16 vendor_id;
+ u16 subsys_vendor_id;
char *model_number;
u32 ieee_oui;
char *firmware_rev;
@@ -411,6 +415,18 @@ struct nvmet_fabrics_ops {
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+
+ /* Operations mandatory for PCI target controllers */
+ u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
+ u16 qsize, u64 prp1);
+ u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
+ u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
+ u16 qsize, u64 prp1, u16 irq_vector);
+ u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
+ u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
+ u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -520,18 +536,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
+size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
@@ -542,19 +564,37 @@ void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
u16 size);
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+ struct nvmet_port *port;
+ char *subsysnqn;
+ char *hostnqn;
+ uuid_t *hostid;
+ const struct nvmet_fabrics_ops *ops;
+ struct device *p2p_client;
+ u32 kato;
+ __le32 result;
+ u16 error_loc;
+ u16 status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
const char *hostnqn, u16 cntlid,
struct nvmet_req *req);
@@ -607,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
@@ -696,6 +735,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
return subsys->type != NVME_NQN_NVME;
}
+static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -737,6 +781,41 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
+}
+
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
@@ -773,7 +852,9 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
}
#ifdef CONFIG_NVME_TARGET_AUTH
+u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
@@ -831,4 +912,26 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
percpu_ref_put(&pc_ref->ref);
}
+
+/*
+ * Data for the get_feature() and set_feature() operations of PCI target
+ * controllers.
+ */
+struct nvmet_feat_irq_coalesce {
+ u8 thr;
+ u8 time;
+};
+
+struct nvmet_feat_irq_config {
+ u16 iv;
+ bool cd;
+};
+
+struct nvmet_feat_arbitration {
+ u8 hpw;
+ u8 mpw;
+ u8 lpw;
+ u8 ab;
+};
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 30b21936b0c6..26e2907ce8bb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -261,6 +261,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
struct scatterlist *sg;
struct bio *bio;
+ int ret = -EINVAL;
int i;
if (req->sg_cnt > BIO_MAX_VECS)
@@ -277,16 +278,19 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
}
for_each_sg(req->sg, sg, req->sg_cnt, i) {
- if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
- sg->offset) < sg->length) {
- nvmet_req_bio_put(req, bio);
- return -EINVAL;
- }
+ if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) <
+ sg->length)
+ goto out_bio_put;
}
- blk_rq_bio_prep(rq, bio, req->sg_cnt);
-
+ ret = blk_rq_append_bio(rq, bio);
+ if (ret)
+ goto out_bio_put;
return 0;
+
+out_bio_put:
+ nvmet_req_bio_put(req, bio);
+ return ret;
}
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
new file mode 100644
index 000000000000..b1e31483f157
--- /dev/null
+++ b/drivers/nvme/target/pci-epf.c
@@ -0,0 +1,2610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe PCI Endpoint Function target driver.
+ *
+ * Copyright (c) 2024, Western Digital Corporation or its affiliates.
+ * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
+ * REDS Institute, HEIG-VD, HES-SO, Switzerland
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvme.h>
+#include <linux/pci_ids.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+
+#include "nvmet.h"
+
+static LIST_HEAD(nvmet_pci_epf_ports);
+static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
+
+/*
+ * Default and maximum allowed data transfer size. For the default,
+ * allow up to 128 page-sized segments. For the maximum allowed,
+ * use 4 times the default (which is completely arbitrary).
+ */
+#define NVMET_PCI_EPF_MAX_SEGS 128
+#define NVMET_PCI_EPF_MDTS_KB \
+ (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
+#define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
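+
+/*
+ * For example, assuming 4 KiB pages, NVMET_PCI_EPF_MDTS_KB is
+ * 128 << (12 - 10) = 512 (KiB) and NVMET_PCI_EPF_MAX_MDTS_KB is 2048 (KiB).
+ */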
+
+/*
+ * IRQ vector coalescing threshold: by default, post 8 CQEs before raising
+ * an interrupt to the host. This default of 8 is completely arbitrary and
+ * can be changed by the host with an nvme_set_features command.
+ */
+#define NVMET_PCI_EPF_IV_THRESHOLD 8
+
+/*
+ * BAR CC register and SQ polling intervals.
+ */
+#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10)
+#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
+
+/*
+ * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
+ */
+#define NVMET_PCI_EPF_SQ_AB 8
+
+/*
+ * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
+ * is full, in which case we retry the CQ processing after this interval.
+ */
+#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
+
+enum nvmet_pci_epf_queue_flags {
+ NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */
+ NVMET_PCI_EPF_Q_LIVE, /* The queue is live */
+ NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
+};
+
+/*
+ * IRQ vector descriptor.
+ */
+struct nvmet_pci_epf_irq_vector {
+ unsigned int vector;
+ unsigned int ref;
+ bool cd;
+ int nr_irqs;
+};
+
+struct nvmet_pci_epf_queue {
+ union {
+ struct nvmet_sq nvme_sq;
+ struct nvmet_cq nvme_cq;
+ };
+ struct nvmet_pci_epf_ctrl *ctrl;
+ unsigned long flags;
+
+ u64 pci_addr;
+ size_t pci_size;
+ struct pci_epc_map pci_map;
+
+ u16 qid;
+ u16 depth;
+ u16 vector;
+ u16 head;
+ u16 tail;
+ u16 phase;
+ u32 db;
+
+ size_t qes;
+
+ struct nvmet_pci_epf_irq_vector *iv;
+ struct workqueue_struct *iod_wq;
+ struct delayed_work work;
+ spinlock_t lock;
+ struct list_head list;
+};
+
+/*
+ * PCI Root Complex (RC) address data segment for mapping an admin or
+ * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
+ */
+struct nvmet_pci_epf_segment {
+ void *buf;
+ u64 pci_addr;
+ u32 length;
+};
+
+/*
+ * Command descriptors.
+ */
+struct nvmet_pci_epf_iod {
+ struct list_head link;
+
+ struct nvmet_req req;
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ unsigned int status;
+
+ struct nvmet_pci_epf_ctrl *ctrl;
+
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+
+ /* Data transfer size and direction for the command. */
+ size_t data_len;
+ enum dma_data_direction dma_dir;
+
+ /*
+ * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
+ * use only @data_seg. Otherwise, the array of segments @data_segs is
+ * allocated to manage multiple PCI address data segments. @data_sgl and
+ * @data_sgt are used to setup the command request for execution by the
+ * target core.
+ */
+ unsigned int nr_data_segs;
+ struct nvmet_pci_epf_segment data_seg;
+ struct nvmet_pci_epf_segment *data_segs;
+ struct scatterlist data_sgl;
+ struct sg_table data_sgt;
+
+ struct work_struct work;
+ struct completion done;
+};
+
+/*
+ * PCI target controller private data.
+ */
+struct nvmet_pci_epf_ctrl {
+ struct nvmet_pci_epf *nvme_epf;
+ struct nvmet_port *port;
+ struct nvmet_ctrl *tctrl;
+ struct device *dev;
+
+ unsigned int nr_queues;
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+ unsigned int sq_ab;
+
+ mempool_t iod_pool;
+ void *bar;
+ u64 cap;
+ u32 cc;
+ u32 csts;
+
+ size_t io_sqes;
+ size_t io_cqes;
+
+ size_t mps_shift;
+ size_t mps;
+ size_t mps_mask;
+
+ unsigned int mdts;
+
+ struct delayed_work poll_cc;
+ struct delayed_work poll_sqs;
+
+ struct mutex irq_lock;
+ struct nvmet_pci_epf_irq_vector *irq_vectors;
+ unsigned int irq_vector_threshold;
+
+ bool link_up;
+ bool enabled;
+};
+
+/*
+ * PCI EPF driver private data.
+ */
+struct nvmet_pci_epf {
+ struct pci_epf *epf;
+
+ const struct pci_epc_features *epc_features;
+
+ void *reg_bar;
+ size_t msix_table_offset;
+
+ unsigned int irq_type;
+ unsigned int nr_vectors;
+
+ struct nvmet_pci_epf_ctrl ctrl;
+
+ bool dma_enabled;
+ struct dma_chan *dma_tx_chan;
+ struct mutex dma_tx_lock;
+ struct dma_chan *dma_rx_chan;
+ struct mutex dma_rx_lock;
+
+ struct mutex mmio_lock;
+
+ /* PCI endpoint function configfs attributes. */
+ struct config_group group;
+ __le16 portid;
+ char subsysnqn[NVMF_NQN_SIZE];
+ unsigned int mdts_kb;
+};
+
+static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ return le32_to_cpu(READ_ONCE(*bar_reg));
+}
+
+static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u32 val)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ WRITE_ONCE(*bar_reg, cpu_to_le32(val));
+}
+
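+/*
+ * 64-bit BAR registers are accessed as two 32-bit words: the low word at
+ * @off and the high word at @off + 4.
+ */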
+static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
+ ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
+}
+
+static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u64 val)
+{
+ nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
+ nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
+}
+
+static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
+ u64 pci_addr, size_t size, struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ pci_addr, size, map);
+}
+
+static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
+ struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
+}
+
+struct nvmet_pci_epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
+{
+ struct nvmet_pci_epf_dma_filter *filter = arg;
+ struct dma_slave_caps caps;
+
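+ /*
+ * Only accept channels on the endpoint controller parent device that
+ * support the requested transfer direction.
+ */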
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev &&
+ (filter->dma_mask & caps.directions);
+}
+
+static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct device *dev = &epf->dev;
+ struct nvmet_pci_epf_dma_filter filter;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ mutex_init(&nvme_epf->dma_rx_lock);
+ mutex_init(&nvme_epf->dma_tx_lock);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_rx;
+
+ nvme_epf->dma_rx_chan = chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_tx;
+
+ nvme_epf->dma_tx_chan = chan;
+
+ nvme_epf->dma_enabled = true;
+
+ dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
+ dma_chan_name(chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+
+ dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
+ dma_chan_name(chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+
+ return;
+
+out_dma_no_tx:
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+
+out_dma_no_rx:
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+
+ dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
+}
+
+static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ if (!nvme_epf->dma_enabled)
+ return;
+
+ dma_release_channel(nvme_epf->dma_tx_chan);
+ nvme_epf->dma_tx_chan = NULL;
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+}
+
+static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config sconf = {};
+ struct device *dev = &epf->dev;
+ struct device *dma_dev;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ dma_addr_t dma_addr;
+ struct mutex *lock;
+ int ret;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ lock = &nvme_epf->dma_rx_lock;
+ chan = nvme_epf->dma_rx_chan;
+ sconf.direction = DMA_DEV_TO_MEM;
+ sconf.src_addr = seg->pci_addr;
+ break;
+ case DMA_TO_DEVICE:
+ lock = &nvme_epf->dma_tx_lock;
+ chan = nvme_epf->dma_tx_chan;
+ sconf.direction = DMA_MEM_TO_DEV;
+ sconf.dst_addr = seg->pci_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(lock);
+
+ dma_dev = dmaengine_get_dma_device(chan);
+ dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
+ ret = dma_mapping_error(dma_dev, dma_addr);
+ if (ret)
+ goto unlock;
+
+ ret = dmaengine_slave_config(chan, &sconf);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto unmap;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
+ sconf.direction, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto unmap;
+ }
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
+ goto unmap;
+ }
+
+ if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
+ dev_err(dev, "DMA transfer failed\n");
+ ret = -EIO;
+ }
+
+ dmaengine_terminate_sync(chan);
+
+unmap:
+ dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ u64 pci_addr = seg->pci_addr;
+ u32 length = seg->length;
+ void *buf = seg->buf;
+ struct pci_epc_map map;
+ int ret = -EINVAL;
+
+ /*
+ * Note: MMIO transfers do not need serialization but this is a
+ * simple way to avoid using too many mapping windows.
+ */
+ mutex_lock(&nvme_epf->mmio_lock);
+
+ while (length) {
+ ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
+ if (ret)
+ break;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ memcpy_fromio(buf, map.virt_addr, map.pci_size);
+ break;
+ case DMA_TO_DEVICE:
+ memcpy_toio(map.virt_addr, buf, map.pci_size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ pci_addr += map.pci_size;
+ buf += map.pci_size;
+ length -= map.pci_size;
+
+ nvmet_pci_epf_mem_unmap(nvme_epf, &map);
+ }
+
+unlock:
+ mutex_unlock(&nvme_epf->mmio_lock);
+
+ return ret;
+}
+
+static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ if (nvme_epf->dma_enabled)
+ return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
+
+ return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
+}
+
+static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
+ void *buf, u64 pci_addr, u32 length,
+ enum dma_data_direction dir)
+{
+ struct nvmet_pci_epf_segment seg = {
+ .buf = buf,
+ .pci_addr = pci_addr,
+ .length = length,
+ };
+
+ return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
+}
+
+static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_irq_vector),
+ GFP_KERNEL);
+ if (!ctrl->irq_vectors)
+ return -ENOMEM;
+
+ mutex_init(&ctrl->irq_lock);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (ctrl->irq_vectors) {
+ mutex_destroy(&ctrl->irq_lock);
+ kfree(ctrl->irq_vectors);
+ ctrl->irq_vectors = NULL;
+ }
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ lockdep_assert_held(&ctrl->irq_lock);
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (iv->ref && iv->vector == vector)
+ return iv;
+ }
+
+ return NULL;
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref++;
+ goto unlock;
+ }
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (!iv->ref)
+ break;
+ }
+
+ if (WARN_ON_ONCE(!iv))
+ goto unlock;
+
+ iv->ref = 1;
+ iv->vector = vector;
+ iv->nr_irqs = 0;
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+
+ return iv;
+}
+
+static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
+ u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref--;
+ if (!iv->ref) {
+ iv->vector = 0;
+ iv->nr_irqs = 0;
+ }
+ }
+
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf_irq_vector *iv = cq->iv;
+ bool ret;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ return false;
+
+ /* IRQ coalescing for the admin queue is not allowed. */
+ if (!cq->qid)
+ return true;
+
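+ /* The host disabled IRQ coalescing for this vector. */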
+ if (iv->cd)
+ return true;
+
+ if (force) {
+ ret = iv->nr_irqs > 0;
+ } else {
+ iv->nr_irqs++;
+ ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
+ }
+ if (ret)
+ iv->nr_irqs = 0;
+
+ return ret;
+}
+
+static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret = 0;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
+ goto unlock;
+
+ switch (nvme_epf->irq_type) {
+ case PCI_IRQ_MSIX:
+ case PCI_IRQ_MSI:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ nvme_epf->irq_type, cq->vector + 1);
+ if (!ret)
+ break;
+ /*
+ * If we got an error, it is likely because the host is using
+ * legacy INTX IRQs (e.g. firmware such as the BIOS or GRUB).
+ */
+ fallthrough;
+ case PCI_IRQ_INTX:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ PCI_IRQ_INTX, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
+{
+ return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
+
+static struct nvmet_pci_epf_iod *
+nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
+ struct nvmet_pci_epf_iod *iod;
+
+ iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
+ if (unlikely(!iod))
+ return NULL;
+
+ memset(iod, 0, sizeof(*iod));
+ iod->req.cmd = &iod->cmd;
+ iod->req.cqe = &iod->cqe;
+ iod->req.port = ctrl->port;
+ iod->ctrl = ctrl;
+ iod->sq = sq;
+ iod->cq = &ctrl->cq[sq->qid];
+ INIT_LIST_HEAD(&iod->link);
+ iod->dma_dir = DMA_NONE;
+ INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
+ init_completion(&iod->done);
+
+ return iod;
+}
+
+/*
+ * Allocate or grow a command table of PCI segments.
+ */
+static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
+ int nsegs)
+{
+ struct nvmet_pci_epf_segment *segs;
+ int nr_segs = iod->nr_data_segs + nsegs;
+
+ segs = krealloc(iod->data_segs,
+ nr_segs * sizeof(struct nvmet_pci_epf_segment),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!segs)
+ return -ENOMEM;
+
+ iod->nr_data_segs = nr_segs;
+ iod->data_segs = segs;
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
+{
+ int i;
+
+ if (iod->data_segs) {
+ for (i = 0; i < iod->nr_data_segs; i++)
+ kfree(iod->data_segs[i].buf);
+ if (iod->data_segs != &iod->data_seg)
+ kfree(iod->data_segs);
+ }
+ if (iod->data_sgt.nents > 1)
+ sg_free_table(&iod->data_sgt);
+ mempool_free(iod, &iod->ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
+ struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
+ int i, ret;
+
+ /* Split the data transfer according to the PCI segments. */
+ for (i = 0; i < iod->nr_data_segs; i++, seg++) {
+ ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
+ if (ret) {
+ iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return prp & ctrl->mps_mask;
+}
+
+static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
+}
+
+/*
+ * Transfer a PRP list from the host and return the number of prps.
+ */
+static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
+ size_t xfer_len, __le64 *prps)
+{
+ size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
+ u32 length;
+ int ret;
+
+ /*
+ * Compute the number of PRPs required for the number of bytes to
+ * transfer (xfer_len). If this number overflows the memory page size
+ * with the PRP list pointer specified, only return the space available
+ * in the memory page, the last PRP in there will be a PRP list pointer
+ * to the remaining PRPs.
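+ * For example, assuming a 4 KiB memory page size, a page holds at most
+ * 512 PRP entries of 8 B each, hence the nr_prps << 3 byte length below.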
+ */
+ length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
+ ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ return length >> 3;
+}
+
+static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvmet_pci_epf_segment *seg;
+ size_t size = 0, ofst, prp_size, xfer_len;
+ size_t transfer_len = iod->data_len;
+ int nr_segs, nr_prps = 0;
+ u64 pci_addr, prp;
+ int i = 0, ret;
+ __le64 *prps;
+
+ prps = kzalloc(ctrl->mps, GFP_KERNEL);
+ if (!prps)
+ goto err_internal;
+
+ /*
+ * Allocate PCI segments for the command: this considers the worst case
+ * scenario where all prps are discontiguous, so get as many segments
+ * as we can have prps. In practice, most of the time, we will have
+ * far fewer PCI segments than prps.
+ */
+ prp = le64_to_cpu(cmd->common.dptr.prp1);
+ if (!prp)
+ goto err_invalid_field;
+
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
+ nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret)
+ goto err_internal;
+
+ /* Set the first segment using prp1. */
+ seg = &iod->data_segs[0];
+ seg->pci_addr = prp;
+ seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
+
+ size = seg->length;
+ pci_addr = prp + size;
+ nr_segs = 1;
+
+ /*
+ * Now build the PCI address segments using the PRP lists, starting
+ * from prp2.
+ */
+ prp = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp)
+ goto err_invalid_field;
+
+ while (size < transfer_len) {
+ xfer_len = transfer_len - size;
+
+ if (!nr_prps) {
+ nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
+ xfer_len, prps);
+ if (nr_prps < 0)
+ goto err_internal;
+
+ i = 0;
+ ofst = 0;
+ }
+
+ /* Current entry */
+ prp = le64_to_cpu(prps[i]);
+ if (!prp)
+ goto err_invalid_field;
+
+ /* Did we reach the last PRP entry of the list? */
+ if (xfer_len > ctrl->mps && i == nr_prps - 1) {
+ /* We need more PRPs: PRP is a list pointer. */
+ nr_prps = 0;
+ continue;
+ }
+
+ /* Only the first PRP is allowed to have an offset. */
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp))
+ goto err_invalid_offset;
+
+ if (prp != pci_addr) {
+ /* Discontiguous prp: new segment. */
+ nr_segs++;
+ if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
+ goto err_internal;
+
+ seg++;
+ seg->pci_addr = prp;
+ seg->length = 0;
+ pci_addr = prp;
+ }
+
+ prp_size = min_t(size_t, ctrl->mps, xfer_len);
+ seg->length += prp_size;
+ pci_addr += prp_size;
+ size += prp_size;
+
+ i++;
+ }
+
+ iod->nr_data_segs = nr_segs;
+
+ if (size != transfer_len) {
+ dev_err(ctrl->dev,
+ "PRPs transfer length mismatch: got %zu B, need %zu B\n",
+ size, transfer_len);
+ goto err_internal;
+ }
+
+ kfree(prps);
+
+ return 0;
+
+err_invalid_offset:
+ dev_err(ctrl->dev, "PRPs list invalid offset\n");
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ goto err;
+
+err_invalid_field:
+ dev_err(ctrl->dev, "PRPs list invalid field\n");
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto err;
+
+err_internal:
+ dev_err(ctrl->dev, "PRPs list internal error\n");
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+err:
+ kfree(prps);
+ return -EINVAL;
+}
+
+static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ size_t transfer_len = iod->data_len;
+ int ret, nr_segs = 1;
+ u64 prp1, prp2 = 0;
+ size_t prp1_size;
+
+ prp1 = le64_to_cpu(cmd->common.dptr.prp1);
+ prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
+
+ /* For commands crossing a page boundary, we should have prp2. */
+ if (transfer_len > prp1_size) {
+ prp2 = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp2) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
+ iod->status =
+ NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (prp2 != prp1 + prp1_size)
+ nr_segs = 2;
+ }
+
+ if (nr_segs == 1) {
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = transfer_len;
+ return 0;
+ }
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return ret;
+ }
+
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = prp1_size;
+ iod->data_segs[1].pci_addr = prp2;
+ iod->data_segs[1].length = transfer_len - prp1_size;
+
+ return 0;
+}
+
+static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
+ size_t ofst;
+
+ /* Get the PCI address segments for the command using its PRPs. */
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
+ if (ofst & 0x3) {
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
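+ /*
+ * A transfer fitting within at most two memory pages can use prp1 and
+ * prp2 as direct data pointers; anything larger needs a PRP list.
+ */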
+ if (iod->data_len + ofst <= ctrl->mps * 2)
+ return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
+
+ return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
+}
+
+/*
+ * Transfer an SGL segment from the host and return the number of data
+ * descriptors and the next segment descriptor, if any.
+ */
+static struct nvme_sgl_desc *
+nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
+{
+ struct nvme_sgl_desc *sgls;
+ u32 length = le32_to_cpu(desc->length);
+ int nr_descs, ret;
+ void *buf;
+
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+
+ sgls = buf;
+ nr_descs = length / sizeof(struct nvme_sgl_desc);
+ if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
+ sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ /*
+ * We have another SGL segment following this one: do not count
+ * it as a regular data SGL descriptor and return it to the
+ * caller.
+ */
+ *desc = sgls[nr_descs - 1];
+ nr_descs--;
+ } else {
+ /* We do not have another SGL segment after this one. */
+ desc->length = 0;
+ }
+
+ *nr_sgls = nr_descs;
+
+ return sgls;
+}
+
+static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
+ struct nvme_sgl_desc *sgls = NULL;
+ int n = 0, i, nr_sgls;
+ int ret;
+
+ /*
+ * We do not support inline data nor keyed SGLs, so we should be seeing
+ * only segment descriptors.
+ */
+ if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
+ seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ while (seg.length) {
+ sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
+ if (!sgls) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ /* Grow the PCI segment table as needed. */
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ /*
+ * Parse the SGL descriptors to build the PCI segment table,
+ * checking the descriptor type as we go.
+ */
+ for (i = 0; i < nr_sgls; i++) {
+ if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE |
+ NVME_STATUS_DNR;
+ goto out;
+ }
+ iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
+ iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
+ n++;
+ }
+
+ kfree(sgls);
+ }
+
+ out:
+ if (iod->status != NVME_SC_SUCCESS) {
+ kfree(sgls);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
+
+ if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ /* Single data descriptor case. */
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
+ iod->data_seg.length = le32_to_cpu(sgl->length);
+ return 0;
+ }
+
+ return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
+}
+
+static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvmet_req *req = &iod->req;
+ struct nvmet_pci_epf_segment *seg;
+ struct scatterlist *sg;
+ int ret, i;
+
+ if (iod->data_len > ctrl->mdts) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
+ /*
+ * Get the PCI address segments for the command data buffer using either
+ * its SGLs or PRPs.
+ */
+ if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
+ ret = nvmet_pci_epf_iod_parse_sgls(iod);
+ else
+ ret = nvmet_pci_epf_iod_parse_prps(iod);
+ if (ret)
+ return ret;
+
+ /* Get a command buffer using SGLs matching the PCI segments. */
+ if (iod->nr_data_segs == 1) {
+ sg_init_table(&iod->data_sgl, 1);
+ iod->data_sgt.sgl = &iod->data_sgl;
+ iod->data_sgt.nents = 1;
+ iod->data_sgt.orig_nents = 1;
+ } else {
+ ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
+ GFP_KERNEL);
+ if (ret)
+ goto err_nomem;
+ }
+
+ for_each_sgtable_sg(&iod->data_sgt, sg, i) {
+ seg = &iod->data_segs[i];
+ seg->buf = kmalloc(seg->length, GFP_KERNEL);
+ if (!seg->buf)
+ goto err_nomem;
+ sg_set_buf(sg, seg->buf, seg->length);
+ }
+
+ req->transfer_len = iod->data_len;
+ req->sg = iod->data_sgt.sgl;
+ req->sg_cnt = iod->data_sgt.nents;
+
+ return 0;
+
+err_nomem:
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -ENOMEM;
+}
+
+static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_queue *cq = iod->cq;
+ unsigned long flags;
+
+ /* Print an error message for failed commands, except AENs. */
+ iod->status = le16_to_cpu(iod->cqe.status) >> 1;
+ if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
+ dev_err(iod->ctrl->dev,
+ "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
+ iod->sq->qid, nvmet_pci_epf_iod_name(iod),
+ iod->cmd.common.opcode, iod->status);
+
+ /*
+ * Add the command to the list of completed commands and schedule the
+ * CQ work.
+ */
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&iod->link, &cq->list);
+ queue_delayed_work(system_highpri_wq, &cq->work, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ while (!list_empty(&queue->list)) {
+ iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
+ link);
+ list_del_init(&iod->link);
+ nvmet_pci_epf_free_iod(iod);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static int nvmet_pci_epf_add_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_add_tail(&port->entry, &nvmet_pci_epf_ports);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+ return 0;
+}
+
+static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+}
+
+static struct nvmet_port *
+nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
+{
+ struct nvmet_port *p, *port = NULL;
+
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
+ if (p->disc_addr.portid == portid) {
+ port = p;
+ break;
+ }
+ }
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+
+ return port;
+}
+
+static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(req, struct nvmet_pci_epf_iod, req);
+
+ iod->status = le16_to_cpu(req->cqe->status) >> 1;
+
+ /* If we have no data to transfer, directly complete the command. */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+ nvmet_pci_epf_complete_iod(iod);
+ return;
+ }
+
+ complete(&iod->done);
+}
+
+static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
+
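+ /* MDTS is expressed as a power of two of the CAP.MPSMIN page size. */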
+ return ilog2(ctrl->mdts) - page_shift;
+}
+
+static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
+ u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+ u16 status;
+
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if (!(flags & NVME_QUEUE_PHYS_CONTIG))
+ return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+
+ cq->pci_addr = pci_addr;
+ cq->qid = cqid;
+ cq->depth = qsize + 1;
+ cq->vector = vector;
+ cq->head = 0;
+ cq->tail = 0;
+ cq->phase = 1;
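+ /* With a 4 B doorbell stride, the CQ head doorbell is at DBS + (2 * cqid + 1) * 4. */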
+ cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
+
+ if (!cqid)
+ cq->qes = sizeof(struct nvme_completion);
+ else
+ cq->qes = ctrl->io_cqes;
+ cq->pci_size = cq->qes * cq->depth;
+
+ if (flags & NVME_CQ_IRQ_ENABLED) {
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+ }
+
+ status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
+ if (status != NVME_SC_SUCCESS)
+ goto err;
+
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
+ dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
+ cqid, qsize, cq->qes, cq->vector);
+
+ return NVME_SC_SUCCESS;
+
+err:
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ return status;
+}
+
+static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ cancel_delayed_work_sync(&cq->work);
+ nvmet_pci_epf_drain_queue(cq);
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
+ u16 sqid, u16 flags, u16 qsize, u64 pci_addr)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+ u16 status;
+
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if (!(flags & NVME_QUEUE_PHYS_CONTIG))
+ return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+
+ sq->pci_addr = pci_addr;
+ sq->qid = sqid;
+ sq->depth = qsize + 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 0;
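+ /* With a 4 B doorbell stride, the SQ tail doorbell is at DBS + 2 * sqid * 4. */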
+ sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
+ if (!sqid)
+ sq->qes = 1UL << NVME_ADM_SQES;
+ else
+ sq->qes = ctrl->io_sqes;
+ sq->pci_size = sq->qes * sq->depth;
+
+ status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
+ min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
+ if (!sq->iod_wq) {
+ dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out_destroy_sq;
+ }
+
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
+ dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
+ sqid, qsize, sq->qes);
+
+ return NVME_SC_SUCCESS;
+
+out_destroy_sq:
+ nvmet_sq_destroy(&sq->nvme_sq);
+ return status;
+}
+
+static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ flush_workqueue(sq->iod_wq);
+ destroy_workqueue(sq->iod_wq);
+ sq->iod_wq = NULL;
+
+ nvmet_pci_epf_drain_queue(sq);
+
+ if (sq->nvme_sq.ctrl)
+ nvmet_sq_destroy(&sq->nvme_sq);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
+ if (!ctrl->sq_ab)
+ arb->ab = 0x7;
+ else
+ arb->ab = ilog2(ctrl->sq_ab);
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ irqc = data;
+ irqc->thr = ctrl->irq_vector_threshold;
+ irqc->time = 0;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ irqcfg->cd = iv->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
+ if (arb->ab == 0x7)
+ ctrl->sq_ab = 0;
+ else
+ ctrl->sq_ab = 1 << arb->ab;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ /*
+ * Since we do not implement precise IRQ coalescing timing,
+ * ignore the time field.
+ */
+ irqc = data;
+ ctrl->irq_vector_threshold = irqc->thr + 1;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ iv->cd = irqcfg->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_PCI,
+ .add_port = nvmet_pci_epf_add_port,
+ .remove_port = nvmet_pci_epf_remove_port,
+ .queue_response = nvmet_pci_epf_queue_response,
+ .get_mdts = nvmet_pci_epf_get_mdts,
+ .create_cq = nvmet_pci_epf_create_cq,
+ .delete_cq = nvmet_pci_epf_delete_cq,
+ .create_sq = nvmet_pci_epf_create_sq,
+ .delete_sq = nvmet_pci_epf_delete_sq,
+ .get_feature = nvmet_pci_epf_get_feat,
+ .set_feature = nvmet_pci_epf_set_feat,
+};
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work);
+
+static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ unsigned int qid, bool sq)
+{
+ struct nvmet_pci_epf_queue *queue;
+
+ if (sq) {
+ queue = &ctrl->sq[qid];
+ set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
+ } else {
+ queue = &ctrl->cq[qid];
+ INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
+ }
+ queue->ctrl = ctrl;
+ queue->qid = qid;
+ spin_lock_init(&queue->lock);
+ INIT_LIST_HEAD(&queue->list);
+}
+
+static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ unsigned int qid;
+
+ ctrl->sq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->sq)
+ return -ENOMEM;
+
+ ctrl->cq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->cq) {
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ return -ENOMEM;
+ }
+
+ for (qid = 0; qid < ctrl->nr_queues; qid++) {
+ nvmet_pci_epf_init_queue(ctrl, qid, true);
+ nvmet_pci_epf_init_queue(ctrl, qid, false);
+ }
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ kfree(ctrl->cq);
+ ctrl->cq = NULL;
+}
+
+static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ int ret;
+
+ ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
+ queue->pci_size, &queue->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
+ queue->qid, ret);
+ return ret;
+ }
+
+ if (queue->pci_map.pci_size < queue->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ queue->qid);
+ nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(work, struct nvmet_pci_epf_iod, work);
+ struct nvmet_req *req = &iod->req;
+ int ret;
+
+ if (!iod->ctrl->link_up) {
+ nvmet_pci_epf_free_iod(iod);
+ return;
+ }
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
+ iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
+ &nvmet_pci_epf_fabrics_ops))
+ goto complete;
+
+ iod->data_len = nvmet_req_transfer_len(req);
+ if (iod->data_len) {
+ /*
+ * Get the data DMA transfer direction. Here "device" means the
+ * PCI root-complex host.
+ */
+ if (nvme_is_write(&iod->cmd))
+ iod->dma_dir = DMA_FROM_DEVICE;
+ else
+ iod->dma_dir = DMA_TO_DEVICE;
+
+ /*
+ * Setup the command data buffer and get the command data from
+ * the host if needed.
+ */
+ ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
+ if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
+ ret = nvmet_pci_epf_transfer_iod_data(iod);
+ if (ret) {
+ nvmet_req_uninit(req);
+ goto complete;
+ }
+ }
+
+ req->execute(req);
+
+ /*
+ * If we do not have data to transfer after the command execution
+ * finishes, nvmet_pci_epf_queue_response() will complete the command
+ * directly. No need to wait for the completion in this case.
+ */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
+ return;
+
+ wait_for_completion(&iod->done);
+
+ if (iod->status == NVME_SC_SUCCESS) {
+ WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+ nvmet_pci_epf_transfer_iod_data(iod);
+ }
+
+complete:
+ nvmet_pci_epf_complete_iod(iod);
+}
+
+static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_iod *iod;
+ int ret, n = 0;
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
+ while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
+ iod = nvmet_pci_epf_alloc_iod(sq);
+ if (!iod)
+ break;
+
+ /* Get the NVMe command submitted by the host. */
+ ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
+ sq->pci_addr + sq->head * sq->qes,
+ sq->qes, DMA_FROM_DEVICE);
+ if (ret) {
+ /* Not much we can do... */
+ nvmet_pci_epf_free_iod(iod);
+ break;
+ }
+
+ dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
+ sq->qid, sq->head, sq->tail,
+ nvmet_pci_epf_iod_name(iod));
+
+ sq->head++;
+ if (sq->head == sq->depth)
+ sq->head = 0;
+ n++;
+
+ queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
+ }
+
+ return n;
+}
+
+static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
+ struct nvmet_pci_epf_queue *sq;
+ unsigned long limit = jiffies;
+ unsigned long last = 0;
+ int i, nr_sqs;
+
+ while (ctrl->link_up && ctrl->enabled) {
+ nr_sqs = 0;
+ /* Do round-robin arbitration. */
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ sq = &ctrl->sq[i];
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ continue;
+ if (nvmet_pci_epf_process_sq(ctrl, sq))
+ nr_sqs++;
+ }
+
+ /*
+ * If we have been running for a while, reschedule to let other
+ * tasks run and to avoid RCU stalls.
+ */
+ if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
+ cond_resched();
+ limit = jiffies;
+ continue;
+ }
+
+ if (nr_sqs) {
+ last = jiffies;
+ continue;
+ }
+
+ /*
+ * If we have not received any command on any queue for more
+ * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
+ * reschedule. This avoids "burning" a CPU when the controller
+ * is idle for a long time.
+ */
+ if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
+ break;
+
+ cpu_relax();
+ }
+
+ schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_queue *cq =
+ container_of(work, struct nvmet_pci_epf_queue, work.work);
+ struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
+ struct nvme_completion *cqe;
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+ int ret, n = 0;
+
+ ret = nvmet_pci_epf_map_queue(ctrl, cq);
+ if (ret)
+ goto again;
+
+ while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
+
+ /* Check that the CQ is not full. */
+ cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
+ if (cq->head == (cq->tail + 1) % cq->depth) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+ iod = list_first_entry_or_null(&cq->list,
+ struct nvmet_pci_epf_iod, link);
+ if (iod)
+ list_del_init(&iod->link);
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (!iod)
+ break;
+
+ /* Post the IOD completion entry. */
+ cqe = &iod->cqe;
+ cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
+
+ dev_dbg(ctrl->dev,
+ "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
+ cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
+ le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
+ cq->phase);
+
+ memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
+ cqe, cq->qes);
+
+ cq->tail++;
+ if (cq->tail >= cq->depth) {
+ cq->tail = 0;
+ cq->phase ^= 1;
+ }
+
+ nvmet_pci_epf_free_iod(iod);
+
+ /* Signal the host. */
+ nvmet_pci_epf_raise_irq(ctrl, cq, false);
+ n++;
+ }
+
+ nvmet_pci_epf_unmap_queue(ctrl, cq);
+
+ /*
+ * We do not support precise IRQ coalescing timing (100 ns units as per
+ * the NVMe specification), so if we posted completion entries without
+ * reaching the interrupt coalescing threshold, force an interrupt.
+ */
+ if (n)
+ nvmet_pci_epf_raise_irq(ctrl, cq, true);
+
+again:
+ if (ret < 0)
+ queue_delayed_work(system_highpri_wq, &cq->work,
+ NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
+}
+
+static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ u64 pci_addr, asq, acq;
+ u32 aqa;
+ u16 status, qsize;
+
+ if (ctrl->enabled)
+ return 0;
+
+ dev_info(ctrl->dev, "Enabling controller\n");
+
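+ /* CC.MPS encodes the host memory page size as 2^(12 + MPS). */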
+ ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
+ ctrl->mps = 1UL << ctrl->mps_shift;
+ ctrl->mps_mask = ctrl->mps - 1;
+
+ ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
+ if (ctrl->io_sqes < sizeof(struct nvme_command)) {
+ dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
+ ctrl->io_sqes, sizeof(struct nvme_command));
+ goto err;
+ }
+
+ ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
+ if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
+ dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
+ ctrl->io_cqes, sizeof(struct nvme_completion));
+ goto err;
+ }
+
+ /* Create the admin queue. */
+ aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
+ asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
+ acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
+
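+ /* AQA bits 27:16 (ACQS) give the 0's based admin CQ size. */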
+ qsize = (aqa & 0x0fff0000) >> 16;
+ pci_addr = acq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
+ NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr, 0);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin completion queue\n");
+ goto err;
+ }
+
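+ /* AQA bits 11:0 (ASQS) give the 0's based admin SQ size. */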
+ qsize = aqa & 0x00000fff;
+ pci_addr = asq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin submission queue\n");
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+ goto err;
+ }
+
+ ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
+ ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
+ ctrl->enabled = true;
+ ctrl->csts = NVME_CSTS_RDY;
+
+ /* Start polling the controller SQs. */
+ schedule_delayed_work(&ctrl->poll_sqs, 0);
+
+ return 0;
+
+err:
+ ctrl->csts = 0;
+ return -EINVAL;
+}
+
+static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ int qid;
+
+ if (!ctrl->enabled)
+ return;
+
+ dev_info(ctrl->dev, "Disabling controller\n");
+
+ ctrl->enabled = false;
+ cancel_delayed_work_sync(&ctrl->poll_sqs);
+
+ /* Delete all I/O queues first. */
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
+
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
+
+ /* Delete the admin queue last. */
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+
+ ctrl->csts &= ~NVME_CSTS_RDY;
+}
+
+static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
+ u32 old_cc, new_cc;
+ int ret;
+
+ if (!ctrl->tctrl)
+ return;
+
+ old_cc = ctrl->cc;
+ new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
+ if (new_cc == old_cc)
+ goto reschedule_work;
+
+ ctrl->cc = new_cc;
+
+ if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
+ ret = nvmet_pci_epf_enable_ctrl(ctrl);
+ if (ret)
+ goto reschedule_work;
+ }
+
+ if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
+ nvmet_pci_epf_disable_ctrl(ctrl);
+
+ if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
+ nvmet_pci_epf_disable_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+
+ if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+
+ nvmet_update_cc(ctrl->tctrl, ctrl->cc);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+
+reschedule_work:
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ struct nvmet_ctrl *tctrl = ctrl->tctrl;
+
+ ctrl->bar = ctrl->nvme_epf->reg_bar;
+
+ /* Copy the target controller capabilities as a base. */
+ ctrl->cap = tctrl->cap;
+
+ /* Contiguous Queues Required (CQR). */
+ ctrl->cap |= 0x1ULL << 16;
+
+ /* Set Doorbell stride to 4B (DSTRD). */
+ ctrl->cap &= ~GENMASK_ULL(35, 32);
+
+ /* Clear NVM Subsystem Reset Supported (NSSRS). */
+ ctrl->cap &= ~(0x1ULL << 36);
+
+ /* Clear Boot Partition Support (BPS). */
+ ctrl->cap &= ~(0x1ULL << 45);
+
+ /* Clear Persistent Memory Region Supported (PMRS). */
+ ctrl->cap &= ~(0x1ULL << 56);
+
+ /* Clear Controller Memory Buffer Supported (CMBS). */
+ ctrl->cap &= ~(0x1ULL << 57);
+
+ /* Controller configuration. */
+ ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
+
+ /* Controller status. */
+ ctrl->csts = ctrl->tctrl->csts;
+
+ nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+}
+
+static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
+ unsigned int max_nr_queues)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ struct nvmet_alloc_ctrl_args args = {};
+ char hostnqn[NVMF_NQN_SIZE];
+ uuid_t id;
+ int ret;
+
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->dev = &nvme_epf->epf->dev;
+ mutex_init(&ctrl->irq_lock);
+ ctrl->nvme_epf = nvme_epf;
+ ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
+ INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
+ INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
+
+ ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
+ max_nr_queues * NVMET_MAX_QUEUE_SIZE,
+ sizeof(struct nvmet_pci_epf_iod));
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
+ return ret;
+ }
+
+ ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
+ if (!ctrl->port) {
+ dev_err(ctrl->dev, "Port not found\n");
+ ret = -EINVAL;
+ goto out_mempool_exit;
+ }
+
+ /* Create the target controller. */
+ uuid_gen(&id);
+ snprintf(hostnqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+ args.port = ctrl->port;
+ args.subsysnqn = nvme_epf->subsysnqn;
+ memset(&id, 0, sizeof(uuid_t));
+ args.hostid = &id;
+ args.hostnqn = hostnqn;
+ args.ops = &nvmet_pci_epf_fabrics_ops;
+
+ ctrl->tctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl->tctrl) {
+ dev_err(ctrl->dev, "Failed to create target controller\n");
+ ret = -ENOMEM;
+ goto out_mempool_exit;
+ }
+ ctrl->tctrl->drvdata = ctrl;
+
+ /* We do not support protection information for now. */
+ if (ctrl->tctrl->pi_support) {
+ dev_err(ctrl->dev,
+ "Protection information (PI) is not supported\n");
+ ret = -ENOTSUPP;
+ goto out_put_ctrl;
+ }
+
+ /* Allocate our queues, up to the maximum number. */
+ ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
+ ret = nvmet_pci_epf_alloc_queues(ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
+ /*
+ * Allocate the IRQ vector descriptors. We cannot have more vectors
+ * than the maximum number of queues.
+ */
+ ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ dev_info(ctrl->dev,
+ "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
+ ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
+ ctrl->mdts);
+
+ /* Initialize BAR 0 using the target controller CAP. */
+ nvmet_pci_epf_init_bar(ctrl);
+
+ return 0;
+
+out_free_queues:
+ nvmet_pci_epf_free_queues(ctrl);
+out_put_ctrl:
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+out_mempool_exit:
+ mempool_exit(&ctrl->iod_pool);
+ return ret;
+}
+
+static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ cancel_delayed_work_sync(&ctrl->poll_cc);
+
+ nvmet_pci_epf_disable_ctrl(ctrl);
+}
+
+static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (!ctrl->tctrl)
+ return;
+
+ dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
+ ctrl->tctrl->subsys->subsysnqn);
+
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ nvmet_pci_epf_free_queues(ctrl);
+ nvmet_pci_epf_free_irq_vectors(ctrl);
+
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+
+ mempool_exit(&ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ size_t reg_size, reg_bar_size;
+ size_t msix_table_size = 0;
+
+ /*
+ * The first free BAR will be our register BAR and, per the NVMe
+ * specification, it must be BAR 0.
+ */
+ if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
+ dev_err(&epf->dev, "BAR 0 is not free\n");
+ return -ENODEV;
+ }
+
+ if (epc_features->bar[BAR_0].only_64bit)
+ epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ /*
+ * Calculate the size of the register BAR: NVMe registers first with
+ * enough space for the doorbells, followed by the MSI-X table
+ * if supported.
+ */
+ reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
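+ /* Align to 8 bytes: the MSI-X table, if it follows, must be QWORD aligned. */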
+ reg_size = ALIGN(reg_size, 8);
+
+ if (epc_features->msix_capable) {
+ size_t pba_size;
+
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ nvme_epf->msix_table_offset = reg_size;
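+ /*
+ * The PBA holds one pending bit per vector, rounded up to bytes and
+ * aligned to a QWORD boundary.
+ */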
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+
+ reg_size += msix_table_size + pba_size;
+ }
+
+ if (epc_features->bar[BAR_0].type == BAR_FIXED) {
+ if (reg_size > epc_features->bar[BAR_0].fixed_size) {
+ dev_err(&epf->dev,
+ "BAR 0 size %llu B too small, need %zu B\n",
+ epc_features->bar[BAR_0].fixed_size,
+ reg_size);
+ return -ENOMEM;
+ }
+ reg_bar_size = epc_features->bar[BAR_0].fixed_size;
+ } else {
+ reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
+ }
+
+ nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
+ epc_features, PRIMARY_INTERFACE);
+ if (!nvme_epf->reg_bar) {
+ dev_err(&epf->dev, "Failed to allocate BAR 0\n");
+ return -ENOMEM;
+ }
+ memset(nvme_epf->reg_bar, 0, reg_bar_size);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ if (!nvme_epf->reg_bar)
+ return;
+
+ pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
+ nvme_epf->reg_bar = NULL;
+}
+
+static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+}
+
+static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
+{
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret;
+
+ /* Enable MSI-X if supported; otherwise, use MSI. */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts, BAR_0,
+ nvme_epf->msix_table_offset);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI-X\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msix_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSIX;
+
+ return 0;
+ }
+
+ if (epc_features->msi_capable && epf->msi_interrupts) {
+ ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msi_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSI;
+
+ return 0;
+ }
+
+ /* MSI and MSI-X are not supported: fall back to INTx. */
+ nvme_epf->nr_vectors = 1;
+ nvme_epf->irq_type = PCI_IRQ_INTX;
+
+ return 0;
+}
+
+static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ unsigned int max_nr_queues = NVMET_NR_QUEUES;
+ int ret;
+
+ /* For now, do not support virtual functions. */
+ if (epf->vfunc_no > 0) {
+ dev_err(&epf->dev, "Virtual functions are not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cap the maximum number of queues we can support on the controller
+ * with the number of IRQs we can use.
+ */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI-X, %u vectors\n",
+ epf->msix_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
+ } else if (epc_features->msi_capable && epf->msi_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI, %u vectors\n",
+ epf->msi_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
+ }
+
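+ /* We need at least two queues: the admin queue plus one I/O queue. */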
+ if (max_nr_queues < 2) {
+ dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
+ max_nr_queues);
+ return -EINVAL;
+ }
+
+ /* Create the target controller. */
+ ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to create NVMe PCI target controller (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Set device ID, class, etc. */
+ epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
+ epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
+ ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->header);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to write configuration header (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ /*
+ * Enable interrupts and, if there is no link up notifier, start
+ * polling the controller BAR right away.
+ */
+ ret = nvmet_pci_epf_init_irq(nvme_epf);
+ if (ret)
+ goto out_clear_bar;
+
+ if (!epc_features->linkup_notifier) {
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
+ }
+
+ return 0;
+
+out_clear_bar:
+ nvmet_pci_epf_clear_bar(nvme_epf);
+out_destroy_ctrl:
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+ return ret;
+}
+
+static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_destroy_ctrl(ctrl);
+
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+}
+
+static int nvmet_pci_epf_link_up(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(ctrl);
+
+ return 0;
+}
+
+static int nvmet_pci_epf_link_down(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ return 0;
+}
+
+static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
+ .epc_init = nvmet_pci_epf_epc_init,
+ .epc_deinit = nvmet_pci_epf_epc_deinit,
+ .link_up = nvmet_pci_epf_link_up,
+ .link_down = nvmet_pci_epf_link_down,
+};
+
+static int nvmet_pci_epf_bind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
+ }
+ nvme_epf->epc_features = epc_features;
+
+ ret = nvmet_pci_epf_configure_bar(nvme_epf);
+ if (ret)
+ return ret;
+
+ nvmet_pci_epf_init_dma(nvme_epf);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_unbind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+
+ if (epc->init_complete) {
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+ }
+
+ nvmet_pci_epf_free_bar(nvme_epf);
+}
+
+static struct pci_epf_header nvme_epf_pci_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .progif_code = 0x02, /* NVM Express */
+ .baseclass_code = PCI_BASE_CLASS_STORAGE,
+ .subclass_code = 0x08, /* Non-Volatile Memory controller */
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static int nvmet_pci_epf_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct nvmet_pci_epf *nvme_epf;
+ int ret;
+
+ nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
+ if (!nvme_epf)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
+ if (ret)
+ return ret;
+
+ nvme_epf->epf = epf;
+ nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+
+ epf->event_ops = &nvmet_pci_epf_event_ops;
+ epf->header = &nvme_epf_pci_header;
+ epf_set_drvdata(epf, nvme_epf);
+
+ return 0;
+}
+
+#define to_nvme_epf(epf_group) \
+ container_of(epf_group, struct nvmet_pci_epf, group)
+
+static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
+}
+
+static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ u16 portid;
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ if (kstrtou16(page, 0, &portid))
+ return -EINVAL;
+
+ nvme_epf->portid = cpu_to_le16(portid);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, portid);
+
+static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
+}
+
+static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ strscpy(nvme_epf->subsysnqn, page, len);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
+
+static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
+}
+
+static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ unsigned long mdts_kb;
+ int ret;
+
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ ret = kstrtoul(page, 0, &mdts_kb);
+ if (ret)
+ return ret;
+ if (!mdts_kb)
+ mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+ else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
+ mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
+
+ if (!is_power_of_2(mdts_kb))
+ return -EINVAL;
+
+ nvme_epf->mdts_kb = mdts_kb;
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
+
+static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
+ &nvmet_pci_epf_attr_portid,
+ &nvmet_pci_epf_attr_subsysnqn,
+ &nvmet_pci_epf_attr_mdts_kb,
+ NULL,
+};
+
+static const struct config_item_type nvmet_pci_epf_group_type = {
+ .ct_attrs = nvmet_pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+
+ config_group_init_type_name(&nvme_epf->group, "nvme",
+ &nvmet_pci_epf_group_type);
+
+ return &nvme_epf->group;
+}
+
+static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
+ { .name = "nvmet_pci_epf" },
+ {},
+};
+
+static struct pci_epf_ops nvmet_pci_epf_ops = {
+ .bind = nvmet_pci_epf_bind,
+ .unbind = nvmet_pci_epf_unbind,
+ .add_cfs = nvmet_pci_epf_add_cfs,
+};
+
+static struct pci_epf_driver nvmet_pci_epf_driver = {
+ .driver.name = "nvmet_pci_epf",
+ .probe = nvmet_pci_epf_probe,
+ .id_table = nvmet_pci_epf_ids,
+ .ops = &nvmet_pci_epf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init nvmet_pci_epf_init_module(void)
+{
+ int ret;
+
+ ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
+ if (ret)
+ return ret;
+
+ ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
+ if (ret) {
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit nvmet_pci_epf_cleanup_module(void)
+{
+ nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+}
+
+module_init(nvmet_pci_epf_init_module);
+module_exit(nvmet_pci_epf_cleanup_module);
+
+MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
+MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1afd93026f9b..2a4536ef6184 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -996,6 +996,27 @@ out_err:
nvmet_req_complete(&cmd->req, status);
}
+static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ /*
+ * Recheck under the state lock that the queue is not live, to prevent
+ * a race with the RDMA_CM_EVENT_ESTABLISHED handler.
+ */
+ if (queue->state == NVMET_RDMA_Q_LIVE)
+ ret = false;
+ else if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return ret;
+}
+
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_cmd *cmd =
@@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->n_rdma = 0;
rsp->invalidate_rkey = 0;
- if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
- unsigned long flags;
-
- spin_lock_irqsave(&queue->state_lock, flags);
- if (queue->state == NVMET_RDMA_Q_CONNECTING)
- list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
- else
- nvmet_rdma_put_rsp(rsp);
- spin_unlock_irqrestore(&queue->state_lock, flags);
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) &&
+ nvmet_rdma_recv_not_live(queue, rsp))
return;
- }
nvmet_rdma_handle_command(queue, rsp);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c51c2a8c109..4f9cac8a5abe 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ enum nvmet_tcp_recv_state queue_state;
+ struct nvmet_tcp_cmd *queue_cmd;
struct nvme_sgl_desc *sgl;
u32 len;
- if (unlikely(cmd == queue->cmd)) {
+ /* Pairs with smp_store_release() in nvmet_prepare_receive_pdu() */
+ queue_state = smp_load_acquire(&queue->rcv_state);
+ queue_cmd = READ_ONCE(queue->cmd);
+
+ if (unlikely(cmd == queue_cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
- if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ if (queue_state == NVMET_TCP_RECV_PDU &&
len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
queue->offset = 0;
queue->left = sizeof(struct nvme_tcp_hdr);
- queue->cmd = NULL;
- queue->rcv_state = NVMET_TCP_RECV_PDU;
+ WRITE_ONCE(queue->cmd, NULL);
+ /* Ensure rcv_state is visible only after queue->cmd is set */
+ smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3aef35b05111..29a60fabfcc8 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -586,8 +586,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
unsigned int len = sg->length;
- if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
- sg_page(sg), len, sg->offset) != len) {
+ if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL;
goto out_put_bio;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d6494dfc20a7..fff85bbf0ecd 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -213,7 +213,7 @@ static struct attribute *nvmem_attrs[] = {
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -246,7 +246,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -340,7 +340,7 @@ static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct nvmem_cell_entry *entry;
@@ -374,22 +374,22 @@ destroy_cell:
}
/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
+static const struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
.mode = 0644,
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
-static struct bin_attribute *nvmem_bin_attributes[] = {
+static const struct bin_attribute *const nvmem_bin_attributes[] = {
&bin_attr_rw_nvmem,
NULL,
};
static const struct attribute_group nvmem_bin_group = {
- .bin_attrs = nvmem_bin_attributes,
+ .bin_attrs_new = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
.bin_size = nvmem_bin_attr_size,
@@ -401,12 +401,12 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
+static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
/*
@@ -461,6 +461,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
.name = "cells",
};
struct nvmem_cell_entry *entry;
+ const struct bin_attribute **pattrs;
struct bin_attribute *attrs;
unsigned int ncells = 0, i = 0;
int ret = 0;
@@ -472,9 +473,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
- group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
- sizeof(struct bin_attribute *), GFP_KERNEL);
- if (!group.bin_attrs) {
+ pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!pattrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
@@ -494,17 +495,19 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
entry->bit_offset);
attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
- attrs[i].read = &nvmem_cell_attr_read;
+ attrs[i].read_new = &nvmem_cell_attr_read;
attrs[i].private = entry;
if (!attrs[i].attr.name) {
ret = -ENOMEM;
goto unlock_mutex;
}
- group.bin_attrs[i] = &attrs[i];
+ pattrs[i] = &attrs[i];
i++;
}
+ group.bin_attrs_new = pattrs;
+
ret = device_add_group(&nvmem->dev, &group);
if (ret)
goto unlock_mutex;
@@ -1790,6 +1793,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si
return -EINVAL;
if (cell->bit_offset || cell->nbits) {
+ if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
+ return -EINVAL;
buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index 1ba494497698..ca6dd71d8a2e 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -71,13 +71,15 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
u32 *buf;
void *p;
int i;
+ u8 skipbytes;
- index = offset;
- num_bytes = round_up(bytes, 4);
- count = num_bytes >> 2;
+ if (offset + bytes > priv->data->size)
+ bytes = priv->data->size - offset;
- if (count > ((priv->data->size >> 2) - index))
- count = (priv->data->size >> 2) - index;
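+ /*
+ * Fuses are read one 32-bit word at a time: convert the byte offset
+ * to a word index and remember how many leading bytes to skip.
+ */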
+ index = offset >> 2;
+ skipbytes = offset - (index << 2);
+ num_bytes = round_up(bytes + skipbytes, 4);
+ count = num_bytes >> 2;
p = kzalloc(num_bytes, GFP_KERNEL);
if (!p)
@@ -100,7 +102,7 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
*buf++ = readl_relaxed(reg + (i << 2));
}
- memcpy(val, (u8 *)p, bytes);
+ memcpy(val, ((u8 *)p) + skipbytes, bytes);
mutex_unlock(&priv->lock);
@@ -109,6 +111,26 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
return 0;
};
+static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ unsigned int offset, void *data, size_t bytes)
+{
+ u8 *buf = data;
+ int i;
+
+ /* Deal with post-processing of nvmem cell data. */
+ if (id && !strcmp(id, "mac-address"))
+ for (i = 0; i < bytes / 2; i++)
+ swap(buf[i], buf[bytes - i - 1]);
+
+ return 0;
+}
+
+static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell)
+{
+ cell->read_post_process = imx_ocotp_cell_pp;
+}
+
static int imx_ele_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -131,10 +153,12 @@ static int imx_ele_ocotp_probe(struct platform_device *pdev)
priv->config.owner = THIS_MODULE;
priv->config.size = priv->data->size;
priv->config.reg_read = priv->data->reg_read;
- priv->config.word_size = 4;
+ priv->config.word_size = 1;
priv->config.stride = 1;
priv->config.priv = priv;
priv->config.read_only = true;
+ priv->config.add_legacy_fixed_of_cells = true;
+ priv->config.fixup_dt_cell_info = imx_ocotp_fixup_dt_cell_info;
mutex_init(&priv->lock);
nvmem = devm_nvmem_register(dev, &priv->config);
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 9aa8f42faa4c..4f1cca6eab71 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -144,6 +144,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.owner = THIS_MODULE;
sdam->sdam_config.add_legacy_fixed_of_cells = true;
sdam->sdam_config.stride = 1;
+ sdam->sdam_config.size = sdam->size;
sdam->sdam_config.word_size = 1;
sdam->sdam_config.reg_read = sdam_read;
sdam->sdam_config.reg_write = sdam_write;
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
index 7f907c5a445e..b39d628cb60a 100644
--- a/drivers/nvmem/rmem.c
+++ b/drivers/nvmem/rmem.c
@@ -3,28 +3,40 @@
* Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
*/
+#include <linux/crc32.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
struct rmem {
struct device *dev;
struct nvmem_device *nvmem;
struct reserved_mem *mem;
+};
+
+struct rmem_match_data {
+ int (*checksum)(struct rmem *priv);
+};
- phys_addr_t size;
+struct __packed rmem_eyeq5_header {
+ u32 magic;
+ u32 version;
+ u32 size;
};
+#define RMEM_EYEQ5_MAGIC ((u32)0xDABBAD00)
+
static int rmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rmem *priv = context;
- size_t available = priv->mem->size;
- loff_t off = offset;
void *addr;
- int count;
+
+ if ((phys_addr_t)offset + bytes > priv->mem->size)
+ return -EIO;
/*
* Only map the reserved memory at this point to avoid potential rogue
@@ -36,26 +48,79 @@ static int rmem_read(void *context, unsigned int offset,
* An alternative would be setting the memory as RO, set_memory_ro(),
* but as of Dec 2020 this isn't possible on arm64.
*/
- addr = memremap(priv->mem->base, available, MEMREMAP_WB);
+ addr = memremap(priv->mem->base, priv->mem->size, MEMREMAP_WB);
if (!addr) {
dev_err(priv->dev, "Failed to remap memory region\n");
return -ENOMEM;
}
- count = memory_read_from_buffer(val, bytes, &off, addr, available);
+ memcpy(val, addr + offset, bytes);
memunmap(addr);
- if (count < 0)
- return count;
+ return 0;
+}
+
+static int rmem_eyeq5_checksum(struct rmem *priv)
+{
+ void *buf __free(kfree) = NULL;
+ struct rmem_eyeq5_header header;
+ u32 computed_crc, *target_crc;
+ size_t data_size;
+ int ret;
+
+ ret = rmem_read(priv, 0, &header, sizeof(header));
+ if (ret)
+ return ret;
+
+ if (header.magic != RMEM_EYEQ5_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Avoid a massive kmalloc() if the header size is invalid;
+ * the check would be done by the next rmem_read() anyway.
+ */
+ if (header.size > priv->mem->size)
+ return -EINVAL;
+
+ /*
+ * 0 +-------------------+
+ * | Header (12 bytes) | \
+ * +-------------------+ |
+ * | | | data to be CRCed
+ * | ... | |
+ * | | /
+ * data_size +-------------------+
+ * | CRC (4 bytes) |
+ * header.size +-------------------+
+ */
+
+ buf = kmalloc(header.size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rmem_read(priv, 0, buf, header.size);
+ if (ret)
+ return ret;
- return count == bytes ? 0 : -EIO;
+ data_size = header.size - sizeof(*target_crc);
+ target_crc = buf + data_size;
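+ /* crc32() seeded with ~0 and inverted yields the standard IEEE CRC32. */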
+ computed_crc = crc32(U32_MAX, buf, data_size) ^ U32_MAX;
+
+ if (computed_crc == *target_crc)
+ return 0;
+
+ dev_err(priv->dev,
+ "checksum failed: computed %#x, expected %#x, header (%#x, %#x, %#x)\n",
+ computed_crc, *target_crc, header.magic, header.version, header.size);
+ return -EINVAL;
}
static int rmem_probe(struct platform_device *pdev)
{
struct nvmem_config config = { };
struct device *dev = &pdev->dev;
+ const struct rmem_match_data *match_data = device_get_match_data(dev);
struct reserved_mem *mem;
struct rmem *priv;
@@ -78,10 +143,22 @@ static int rmem_probe(struct platform_device *pdev)
config.size = mem->size;
config.reg_read = rmem_read;
+ if (match_data && match_data->checksum) {
+ int ret = match_data->checksum(priv);
+
+ if (ret)
+ return ret;
+ }
+
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
+static const struct rmem_match_data rmem_eyeq5_match_data = {
+ .checksum = rmem_eyeq5_checksum,
+};
+
static const struct of_device_id rmem_match[] = {
+ { .compatible = "mobileye,eyeq5-bootloader-config", .data = &rmem_eyeq5_match_data },
{ .compatible = "nvmem-rmem", },
{ /* sentinel */ },
};
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c1f1c810e810..d177a2b9edaf 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -16,25 +16,12 @@
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
-#include "of_private.h"
+#include <kunit/visibility.h>
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
-#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
-/* Debug utility */
-#ifdef DEBUG
-static void of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while (na--)
- pr_cont(" %08x", be32_to_cpu(*(addr++)));
- pr_cont("\n");
-}
-#else
-static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
@@ -198,22 +185,21 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
#endif /* CONFIG_PCI */
-static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+VISIBLE_IF_KUNIT int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
{
- u64 end = start;
-
if (overflows_type(start, r->start))
return -EOVERFLOW;
- if (size && check_add_overflow(end, size - 1, &end))
- return -EOVERFLOW;
- if (overflows_type(end, r->end))
- return -EOVERFLOW;
r->start = start;
- r->end = end;
+
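+ /*
+ * An empty resource is encoded with end = start - 1 so that
+ * resource_size() returns 0; wrapping_sub() keeps start == 0 defined.
+ */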
+ if (!size)
+ r->end = wrapping_sub(typeof(r->end), r->start, 1);
+ else if (size && check_add_overflow(r->start, size - 1, &r->end))
+ return -EOVERFLOW;
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(__of_address_resource_bounds);
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
@@ -340,6 +326,15 @@ static int of_bus_default_flags_match(struct device_node *np)
return of_property_present(np, "#address-cells") && (of_bus_n_addr_cells(np) == 3);
}
+static int of_bus_default_match(struct device_node *np)
+{
+ /*
+ * Check for presence first since of_bus_n_addr_cells() will warn when
+ * walking parent nodes.
+ */
+ return of_property_present(np, "#address-cells");
+}
+
/*
* Array of bus specific translators
*/
@@ -384,7 +379,7 @@ static const struct of_bus of_busses[] = {
{
.name = "default",
.addresses = "reg",
- .match = NULL,
+ .match = of_bus_default_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
@@ -399,7 +394,6 @@ static const struct of_bus *of_match_bus(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
- BUG();
return NULL;
}
@@ -521,6 +515,8 @@ static u64 __of_translate_address(struct device_node *node,
if (parent == NULL)
return OF_BAD_ADDR;
bus = of_match_bus(parent);
+ if (!bus)
+ return OF_BAD_ADDR;
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
@@ -564,6 +560,8 @@ static u64 __of_translate_address(struct device_node *node,
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
+ if (!pbus)
+ return OF_BAD_ADDR;
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
@@ -703,7 +701,7 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
/* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0))
+ if (!bus || (strcmp(bus->name, "pci") && (bar_no >= 0)))
return NULL;
/* Get "reg" or "assigned-addresses" property */
@@ -816,6 +814,8 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
else
range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
+
+ range->parent_bus_addr = of_read_number(parser->range + na, parser->pna);
range->size = of_read_number(parser->range + parser->pna + na, ns);
parser->range += np;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6f5abea2462a..af6c68bbb427 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -894,10 +894,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
/* The path could begin with an alias */
if (*path != '/') {
int len;
- const char *p = separator;
+ const char *p = strchrnul(path, '/');
- if (!p)
- p = strchrnul(path, '/');
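+ /*
+ * The alias name ends at the first '/' or at the ':' option
+ * separator, whichever comes first.
+ */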
+ if (separator && separator < p)
+ p = separator;
len = p - path;
/* of_aliases must not be NULL */
@@ -1027,19 +1027,15 @@ struct device_node *of_find_node_with_property(struct device_node *from,
const char *prop_name)
{
struct device_node *np;
- const struct property *pp;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np) {
- for (pp = np->properties; pp; pp = pp->next) {
- if (of_prop_cmp(pp->name, prop_name) == 0) {
- of_node_get(np);
- goto out;
- }
+ if (__of_find_property(np, prop_name, NULL)) {
+ of_node_get(np);
+ break;
}
}
-out:
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -1453,8 +1449,8 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
- static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
- static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
+ static const __be32 dummy_mask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
+ static const __be32 dummy_pass[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(0) };
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
int i, ret, map_len, match;
@@ -1546,7 +1542,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
* specifier into the out_args structure, keeping the
* bits specified in <list>-map-pass-thru.
*/
- match_array = map - new_size;
for (i = 0; i < new_size; i++) {
__be32 val = *(map - new_size + i);
@@ -1555,6 +1550,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
val |= cpu_to_be32(out_args->args[i]) & pass[i];
}
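+ /* Rebuild the match array with the mapped values for the next lookup. */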
+ initial_match_array[i] = val;
out_args->args[i] = be32_to_cpu(val);
}
out_args->args_count = list_size = new_size;
@@ -1822,8 +1818,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
* for storing the resulting tree
*
* The function scans all the properties of the 'aliases' node and populates
- * the global lookup table with the properties. It returns the
- * number of alias properties found, or an error code in case of failure.
+ * the global lookup table with the properties.
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0121100372b4..aedd0e2dcd89 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) "OF: fdt: " fmt
-#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
@@ -497,6 +496,7 @@ static void __init fdt_reserve_elfcorehdr(void)
void __init early_init_fdt_scan_reserved_mem(void)
{
int n;
+ int res;
u64 base, size;
if (!initial_boot_params)
@@ -507,7 +507,11 @@ void __init early_init_fdt_scan_reserved_mem(void)
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
- fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ res = fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ if (res) {
+ pr_err("Invalid memory reservation block index %d\n", n);
+ break;
+ }
if (!size)
break;
memblock_reserve(base, size);
@@ -1126,13 +1130,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- void *ptr = memblock_alloc(size, align);
-
- if (!ptr)
- panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
- __func__, size, align);
-
- return ptr;
+ return memblock_alloc_or_panic(size, align);
}
bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
@@ -1215,14 +1213,7 @@ void __init unflatten_device_tree(void)
/* Save the statically-placed regions in the reserved_mem array */
fdt_scan_reserved_mem_reg_nodes();
- /* Don't use the bootloader provided DTB if ACPI is enabled */
- if (!acpi_disabled)
- fdt = NULL;
-
- /*
- * Populate an empty root node when ACPI is enabled or bootloader
- * doesn't provide one.
- */
+ /* Populate an empty root node when bootloader doesn't provide one */
if (!fdt) {
fdt = (void *) __dtb_empty_root_begin;
/* fdt_totalsize() will be used for copy size */
@@ -1264,18 +1255,9 @@ void __init unflatten_and_copy_device_tree(void)
}
#ifdef CONFIG_SYSFS
-static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- memcpy(buf, initial_boot_params + off, count);
- return count;
-}
-
static int __init of_fdt_raw_init(void)
{
- static struct bin_attribute of_fdt_raw_attr =
- __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
+ static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(fdt);
if (!initial_boot_params)
return 0;
@@ -1285,8 +1267,9 @@ static int __init of_fdt_raw_init(void)
pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
- return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
+ bin_attr_fdt.private = initial_boot_params;
+ bin_attr_fdt.size = fdt_totalsize(initial_boot_params);
+ return sysfs_create_bin_file(firmware_kobj, &bin_attr_fdt);
}
late_initcall(of_fdt_raw_init);
#endif
diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c
index 9804d7f06705..f358d2c80754 100644
--- a/drivers/of/fdt_address.c
+++ b/drivers/of/fdt_address.c
@@ -17,23 +17,10 @@
#include <linux/of_fdt.h>
#include <linux/sizes.h>
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
- (ns) > 0)
-
-/* Debug utility */
-#ifdef DEBUG
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while(na--)
- pr_cont(" %08x", *(addr++));
- pr_cont("\n");
-}
-#else
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
+
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 98b1cf78ecac..6c843d54ebb1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -171,7 +171,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
struct device_node *ipar, *tnode, *old = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ const __be32 *tmp, dummy_imask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
u32 intsize = 1, addrsize;
int i, rc = -EINVAL;
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index cab9b169dc67..aa887166f0d2 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -29,7 +29,7 @@ const struct kobj_type of_node_ktype = {
};
static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct property *pp = container_of(bin_attr, struct property, attr);
@@ -77,7 +77,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
pp->attr.attr.name = safe_name(&np->kobj, pp->name);
pp->attr.attr.mode = secure ? 0400 : 0444;
pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
+ pp->attr.read_new = of_node_property_read;
rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index ea5a0951ec5e..1bdc7ceef3c5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -119,6 +119,8 @@ extern void *__unflatten_device_tree(const void *blob,
void *(*dt_alloc)(u64 size, u64 align),
bool detached);
+void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
+
/**
* General utilities for working with live trees.
*
@@ -188,4 +190,26 @@ void __init fdt_scan_reserved_mem_reg_nodes(void);
bool of_fdt_device_is_available(const void *blob, unsigned long node);
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
+#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+
+/* Debug utility */
+#ifdef DEBUG
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na)
+{
+ pr_debug("%s", s);
+ while (na--)
+ pr_cont(" %08x", be32_to_cpu(*(addr++)));
+ pr_cont("\n");
+}
+#else
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
+#endif
+
+#if IS_ENABLED(CONFIG_KUNIT)
+int __of_address_resource_bounds(struct resource *r, u64 start, u64 size);
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 45517b9e57b1..ee2e31522d7e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -52,7 +52,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
memblock_phys_free(base, size);
}
- kmemleak_ignore_phys(base);
+ if (!err)
+ kmemleak_ignore_phys(base);
return err;
}
@@ -262,6 +263,11 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
uname);
continue;
}
+
+ if (len > t_len)
+ pr_warn("%s() ignores %d regions in node '%s'\n",
+ __func__, len / t_len - 1, uname);
+
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
@@ -435,13 +441,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
return -EINVAL;
}
- base = 0;
-
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
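+ /* Reset base so a stale address from a previous range is never reused. */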
+ base = 0;
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
index b0557ded838f..8bba5a72c9c7 100644
--- a/drivers/of/of_test.c
+++ b/drivers/of/of_test.c
@@ -2,6 +2,7 @@
/*
* KUnit tests for OF APIs
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -54,8 +55,124 @@ static struct kunit_suite of_dtb_suite = {
.init = of_dtb_test_init,
};
+struct of_address_resource_bounds_case {
+ u64 start;
+ u64 size;
+ int ret;
+
+ u64 res_start;
+ u64 res_end;
+};
+
+static void of_address_resource_bounds_case_desc(const struct of_address_resource_bounds_case *p,
+ char *name)
+{
+ snprintf(name, KUNIT_PARAM_DESC_SIZE, "start=0x%016llx,size=0x%016llx", p->start, p->size);
+}
+
+static const struct of_address_resource_bounds_case of_address_resource_bounds_cases[] = {
+ {
+ .start = 0,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = -1,
+ },
+ {
+ .start = 0,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0x1fff,
+ },
+ {
+ .start = 1,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = 0,
+ .res_start = 1,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 1,
+ .ret = 0,
+ .res_start = RESOURCE_SIZE_MAX,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = 2,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 2,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = ULL(0x100000000),
+ .size = 1,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = ULL(0x100000000),
+ .res_end = ULL(0x100000000),
+ },
+ {
+ .start = 0x1000,
+ .size = 0xffffffff,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = 0x1000,
+ .res_end = ULL(0x100000ffe),
+ },
+};
+
+KUNIT_ARRAY_PARAM(of_address_resource_bounds,
+ of_address_resource_bounds_cases, of_address_resource_bounds_case_desc);
+
+static void of_address_resource_bounds(struct kunit *test)
+{
+ const struct of_address_resource_bounds_case *param = test->param_value;
+ struct resource r; /* Intentionally uninitialized */
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_ADDRESS))
+ kunit_skip(test, "CONFIG_OF_ADDRESS not enabled\n");
+
+ ret = __of_address_resource_bounds(&r, param->start, param->size);
+ KUNIT_EXPECT_EQ(test, param->ret, ret);
+ if (ret == 0) {
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_start, r.start);
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_end, r.end);
+ KUNIT_EXPECT_EQ(test, param->size, resource_size(&r));
+ }
+}
+
+static struct kunit_case of_address_test_cases[] = {
+ KUNIT_CASE_PARAM(of_address_resource_bounds, of_address_resource_bounds_gen_params),
+ {}
+};
+
+static struct kunit_suite of_address_suite = {
+ .name = "of_address",
+ .test_cases = of_address_test_cases,
+};
+
kunit_test_suites(
- &of_dtb_suite,
+ &of_dtb_suite, &of_address_suite,
);
MODULE_DESCRIPTION("KUnit tests for OF APIs");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
MODULE_LICENSE("GPL");
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 7eda43c66c91..cb0cb374b21f 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -19,6 +19,8 @@
#include <linux/of.h>
#include <linux/of_pdt.h>
+#include "of_private.h"
+
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
#if defined(CONFIG_SPARC)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9bafcff3e628..c6d8afb284e8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -24,16 +24,6 @@
#include "of_private.h"
-const struct of_device_id of_default_bus_match_table[] = {
- { .compatible = "simple-bus", },
- { .compatible = "simple-mfd", },
- { .compatible = "isa", },
-#ifdef CONFIG_ARM_AMBA
- { .compatible = "arm,amba-bus", },
-#endif /* CONFIG_ARM_AMBA */
- {} /* Empty terminated list */
-};
-
/**
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
@@ -484,8 +474,17 @@ int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
- return of_platform_populate(root, of_default_bus_match_table, lookup,
- parent);
+ static const struct of_device_id match_table[] = {
+ { .compatible = "simple-bus", },
+ { .compatible = "simple-mfd", },
+ { .compatible = "isa", },
+#ifdef CONFIG_ARM_AMBA
+ { .compatible = "arm,amba-bus", },
+#endif /* CONFIG_ARM_AMBA */
+ {} /* Empty terminated list */
+ };
+
+ return of_platform_populate(root, match_table, lookup, parent);
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index cfc8aea002e4..208d922cc24c 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -32,6 +32,32 @@
#include "of_private.h"
/**
+ * of_property_read_bool - Find a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
+ *
+ * Return: true if the property exists, false otherwise.
+ */
+bool of_property_read_bool(const struct device_node *np, const char *propname)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ /*
+ * Boolean properties should not have a value. Testing for property
+ * presence should either use of_property_present() or just read the
+ * property value and check the returned error code.
+ */
+ if (prop && prop->length)
+ pr_warn("%pOF: Read of boolean property '%s' with a value.\n", np, propname);
+
+ return prop ? true : false;
+}
+EXPORT_SYMBOL(of_property_read_bool);
+
+/**
* of_graph_is_present() - check graph's presence
* @node: pointer to device_node containing graph port
*
@@ -966,6 +992,12 @@ of_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
static bool of_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
+ return of_property_present(to_of_node(fwnode), propname);
+}
+
+static bool of_fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
return of_property_read_bool(to_of_node(fwnode), propname);
}
@@ -1390,9 +1422,9 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
addrcells = of_bus_n_addr_cells(np);
imap = of_get_property(np, "interrupt-map", &imaplen);
- imaplen /= sizeof(*imap);
if (!imap)
return NULL;
+ imaplen /= sizeof(*imap);
imap_end = imap + imaplen;
@@ -1560,6 +1592,7 @@ const struct fwnode_operations of_fwnode_ops = {
.device_dma_supported = of_fwnode_device_dma_supported,
.device_get_dma_attr = of_fwnode_device_get_dma_attr,
.property_present = of_fwnode_property_present,
+ .property_read_bool = of_fwnode_property_read_bool,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
.get_name = of_fwnode_get_name,
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index fa39611071b3..4171f43cf01c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -33,6 +33,24 @@
reg = <0x100>;
};
};
+
+ test-device@2 {
+ compatible = "test,rust-device";
+ reg = <0x2>;
+ };
+ };
+
+ platform-tests-2 {
+ // No #address-cells or #size-cells
+ node {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ test-device@100 {
+ compatible = "test-sub-device";
+ reg = <0x100 1>;
+ };
+ };
};
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 438fd70fa995..f88ddb1cf5d7 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -161,6 +161,15 @@ static void __init of_unittest_find_node_by_name(void)
"option alias path test, subcase #1 failed\n");
of_node_put(np);
+ np = of_find_node_opts_by_path("testcase-alias/phandle-tests/consumer-a:testaliasoption",
+ &options);
+ name = kasprintf(GFP_KERNEL, "%pOF", np);
+ unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name) &&
+ !strcmp("testaliasoption", options),
+ "option alias path test, subcase #2 failed\n");
+ of_node_put(np);
+ kfree(name);
+
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
@@ -1380,6 +1389,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
static void __init of_unittest_reg(void)
{
struct device_node *np;
+ struct resource res;
int ret;
u64 addr, size;
@@ -1396,6 +1406,19 @@ static void __init of_unittest_reg(void)
np, addr);
of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
+ np);
+
+ of_node_put(np);
+
}
struct of_unittest_expected_res {
@@ -3666,13 +3689,7 @@ static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
- void *ptr = memblock_alloc(size, align);
-
- if (!ptr)
- panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
- __func__, size, align);
-
- return ptr;
+ return memblock_alloc_or_panic(size, align);
}
/*
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0311b18319a4..73e9a3b2f29b 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -101,11 +101,55 @@ struct opp_table *_find_opp_table(struct device *dev)
* representation in the OPP table and manage the clock configuration themselves
* in a platform-specific way.
*/
-static bool assert_single_clk(struct opp_table *opp_table)
+static bool assert_single_clk(struct opp_table *opp_table,
+ unsigned int __always_unused index)
{
return !WARN_ON(opp_table->clk_count > 1);
}
+/*
+ * Returns true if the clock table is large enough to contain the clock index.
+ */
+static bool assert_clk_index(struct opp_table *opp_table,
+ unsigned int index)
+{
+ return opp_table->clk_count > index;
+}
+
+/*
+ * Returns true if the bandwidth table is large enough to contain the bandwidth index.
+ */
+static bool assert_bandwidth_index(struct opp_table *opp_table,
+ unsigned int index)
+{
+ return opp_table->path_count > index;
+}
+
+/**
+ * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
+ * @opp: opp for which the bandwidth has to be returned
+ * @peak: select peak or average bandwidth
+ * @index: bandwidth index
+ *
+ * Return: bandwidth in kBps, or 0 on error
+ */
+unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
+{
+ if (IS_ERR_OR_NULL(opp)) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ if (index >= opp->opp_table->path_count)
+ return 0;
+
+ if (!opp->bandwidth)
+ return 0;
+
+ return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);
+
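
A minimal consumer-side sketch of the new helper, assuming the OPP came from a standard lookup; interconnect path index 0 is illustrative:

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long example_peak_bw(struct device *dev, unsigned long *freq)
{
        struct dev_pm_opp *opp;
        unsigned long bw;

        opp = dev_pm_opp_find_freq_ceil(dev, freq);
        if (IS_ERR(opp))
                return 0;

        /* Peak bandwidth of path 0 for this OPP, in kBps */
        bw = dev_pm_opp_get_bw(opp, true, 0);
        dev_pm_opp_put(opp);

        return bw;
}
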
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -499,12 +543,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
/* Assert that the requirement is met */
- if (assert && !assert(opp_table))
+ if (assert && !assert(opp_table, index))
return ERR_PTR(-EINVAL);
mutex_lock(&opp_table->lock);
@@ -532,7 +576,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
@@ -555,7 +599,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
static struct dev_pm_opp *_find_key_exact(struct device *dev,
unsigned long key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
/*
* The value of key will be updated here, but will be ignored as the
@@ -568,7 +612,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev,
static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _opp_table_find_key(opp_table, key, index, available, read,
_compare_ceil, assert);
@@ -577,7 +621,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_ceil,
assert);
@@ -586,7 +630,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
static struct dev_pm_opp *_find_key_floor(struct device *dev,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
- bool (*assert)(struct opp_table *opp_table))
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_floor,
assert);
@@ -647,7 +691,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
{
- return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
+ return _find_key_exact(dev, freq, index, available, _read_freq,
+ assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
@@ -707,7 +752,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
- return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
+ return _find_key_ceil(dev, freq, index, true, _read_freq,
+ assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
@@ -760,7 +806,7 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
- return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
+ return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
@@ -878,7 +924,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
- opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
+ assert_bandwidth_index);
*bw = temp;
return opp;
}
@@ -909,7 +956,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
- opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw,
+ assert_bandwidth_index);
*bw = temp;
return opp;
}
@@ -1480,11 +1528,6 @@ err:
return ERR_PTR(ret);
}
-void _get_opp_table_kref(struct opp_table *opp_table)
-{
- kref_get(&opp_table->kref);
-}
-
static struct opp_table *_update_opp_table_clk(struct device *dev,
struct opp_table *opp_table,
bool getclk)
@@ -1645,6 +1688,17 @@ static void _opp_table_kref_release(struct kref *kref)
kfree(opp_table);
}
+void _get_opp_table_kref(struct opp_table *opp_table)
+{
+ kref_get(&opp_table->kref);
+}
+
+void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ _get_opp_table_kref(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);
+
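
A minimal sketch of the ref pairing the new export enables for modules, assuming the table pointer came from an earlier lookup that returned it with a reference held:

#include <linux/pm_opp.h>

static void example_borrow_table(struct opp_table *table)
{
        dev_pm_opp_get_opp_table_ref(table);    /* take an extra reference */

        /* ... safely dereference table here ... */

        dev_pm_opp_put_opp_table(table);        /* drop it when done */
}
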
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1679,6 +1733,7 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get);
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
@@ -1702,7 +1757,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
- if (!assert_single_clk(opp_table))
+ if (!assert_single_clk(opp_table, 0))
goto put_table;
mutex_lock(&opp_table->lock);
@@ -2054,7 +2109,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol, u_volt = data->u_volt;
int ret;
- if (!assert_single_clk(opp_table))
+ if (!assert_single_clk(opp_table, 0))
return -EINVAL;
new_opp = _opp_allocate(opp_table);
@@ -2810,7 +2865,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
- if (!assert_single_clk(opp_table)) {
+ if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}
@@ -2886,7 +2941,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
- if (!assert_single_clk(opp_table)) {
+ if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 105de7c3274a..8fc6238b1728 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -217,7 +217,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
{
struct opp_device *new_dev = NULL, *iter;
const struct device *dev;
- struct dentry *dentry;
+ int err;
/* Look for next opp-dev */
list_for_each_entry(iter, &opp_table->dev_list, node)
@@ -234,16 +234,14 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
opp_set_dev_name(dev, opp_table->dentry_name);
- dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
- opp_table->dentry_name);
- if (IS_ERR(dentry)) {
+ err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name);
+ if (err) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
__func__, dev_name(opp_dev->dev), dev_name(dev));
return;
}
- new_dev->dentry = dentry;
- opp_table->dentry = dentry;
+ new_dev->dentry = opp_table->dentry = opp_dev->dentry;
}
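
debugfs_change_name() renames in place and reports an error code instead of handing back a new dentry, which is why the existing dentry can simply be reused above. A minimal sketch with a hypothetical directory dentry:

#include <linux/debugfs.h>
#include <linux/printk.h>

static int example_rename_dir(struct dentry *dir, const char *new_name)
{
        int err;

        err = debugfs_change_name(dir, "%s", new_name);
        if (err)
                pr_err("debugfs rename to %s failed: %d\n", new_name, err);

        return err;
}
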
/**
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index fd5ed2858258..a24f76f5fd01 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -926,7 +926,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
ret = _of_opp_alloc_required_opps(opp_table, new_opp);
if (ret)
- goto free_opp;
+ goto put_node;
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
@@ -976,6 +976,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
free_required_opps:
_of_opp_free_required_opps(opp_table, new_opp);
+put_node:
+ of_node_put(np);
free_opp:
_opp_free(new_opp);
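
The leak being fixed is the reference previously taken on np: once _of_opp_alloc_required_opps() can fail, every later error path must drop it. A self-contained sketch of the same rule in an OF iteration, under illustrative names:

#include <linux/of.h>

static int example_check(struct device_node *np)
{
        /* Stand-in for real per-node work */
        return of_property_present(np, "reg") ? 0 : -EINVAL;
}

static int example_parse(struct device_node *parent)
{
        struct device_node *np;
        int ret;

        for_each_child_of_node(parent, np) {
                ret = example_check(np);
                if (ret) {
                        /* The iterator holds a reference on np; drop it
                         * when leaving the loop early. */
                        of_node_put(np);
                        return ret;
                }
        }

        return 0;
}
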
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 430651e7424a..5c7c81190e41 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -250,7 +250,6 @@ struct opp_table {
};
/* Routines internal to opp core */
-void dev_pm_opp_get(struct dev_pm_opp *opp);
bool _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 3644997a8342..24d4f3a3ec3d 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -266,10 +266,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
/* WCH CARDS */
- { 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
- { 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
- { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
- { 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_1S1P,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p },
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1P,
+ 0x4348, 0x3253, 0, 0, wch_ch353_2s1p },
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_0S1P,
+ 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p },
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S1P,
+ 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p },
/* BrainBoxes PX272/PX306 MIO card */
{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..5c3054aaec8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -203,6 +203,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+	  regions. For example, on a 46-bit system, the entropy goes down
+	  from 16 bits to 15 bits. The actual reduction in entropy depends
+	  on the number of physical address bits, processor features, the
+	  kernel config (5-level page tables) and the physical memory present.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6afff1f1b143..c6b266c772c8 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
+ if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 5c62e1a3ba52..33d6bf460ffe 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -635,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
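
syscon_regmap_lookup_by_phandle_args() collapses the phandle lookup and the fixed-args parse into one call, as used above. A minimal sketch with an invented property name; the two cells are assumed to carry a register offset and a mask:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_set_syscon_bits(struct device_node *np)
{
        unsigned int args[2];   /* args[0]: reg offset, args[1]: mask */
        struct regmap *map;

        map = syscon_regmap_lookup_by_phandle_args(np, "vendor,example-syscon",
                                                   2, args);
        if (IS_ERR(map))
                return PTR_ERR(map);

        return regmap_update_bits(map, args[0], args[1], args[1]);
}
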
@@ -671,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 6a830166d37f..ace736b025b1 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -300,7 +300,7 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ret = devm_clk_bulk_get_all_enable(dev, &ep->clks);
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index c8d5c90aa4d4..90ace941090f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -33,6 +33,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -55,6 +56,22 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -70,6 +87,7 @@ enum imx_pcie_variants {
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
IMX95_EP,
};
@@ -87,6 +105,7 @@ enum imx_pcie_variants {
* workaround suspend resume on some devices which are affected by this errata.
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -103,6 +122,7 @@ struct imx_pcie_drvdata {
const char *gpr;
const char * const *clk_names;
const u32 clks_cnt;
+ const u32 clks_optional_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
@@ -111,19 +131,18 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ const struct dw_pcie_host_ops *ops;
};
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
- bool link_is_up;
struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -139,6 +158,9 @@ struct imx_pcie {
struct device *pd_pcie_phy;
struct phy *phy;
const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -234,11 +256,11 @@ static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
id = imx_pcie->controller_id;
- /* If mode_mask is 0, then generic PHY driver is used to set the mode */
+	/* If mode_mask is 0, the generic PHY driver is used to set the mode */
if (!drvdata->mode_mask[0])
return;
- /* If mode_mask[id] is zero, means each controller have its individual gpr */
+	/* If mode_mask[id] is 0, each controller has its own GPR */
if (!drvdata->mode_mask[id])
id = 0;
@@ -375,14 +397,15 @@ static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- /* TODO: Currently this code assumes external oscillator is being used */
+	/* TODO: This code assumes an external oscillator is being used */
regmap_update_bits(imx_pcie->iomuxc_gpr,
imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
- * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
- * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+	 * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+	 * PCIE_VPH is supplied with 3.3V, the VREG_BYPASS should be cleared
+	 * to zero.
*/
if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
regmap_update_bits(imx_pcie->iomuxc_gpr,
@@ -393,13 +416,6 @@ static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
return 0;
}
-static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
-{
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-
- return 0;
-}
-
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -576,7 +592,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
@@ -589,7 +605,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
@@ -598,10 +614,9 @@ static int imx_pcie_attach_pd(struct device *dev)
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (enable)
- regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
return 0;
}
@@ -611,10 +626,10 @@ static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
/* power up core phy and enable ref clock */
regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+	 * The async reset input needs the ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
@@ -630,19 +645,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
int offset = imx_pcie_grp_offset(imx_pcie);
- if (enable) {
- regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
- regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- }
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
return 0;
}
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (!enable)
- regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
return 0;
}
@@ -775,6 +791,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_deassert(imx_pcie->pciephy_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, false);
@@ -884,6 +901,7 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
if (imx_pcie->drvdata->flags &
IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
+
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -892,7 +910,6 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
* which will cause the following code to report false
* failure.
*/
-
ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
@@ -908,13 +925,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
@@ -930,6 +945,184 @@ static void imx_pcie_stop_link(struct dw_pcie *pci)
imx_pcie_ltssm_disable(dev);
}
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
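
Both LUT helpers rely on guard(mutex) from <linux/cleanup.h>, which releases the lock automatically on every return path, including the early ones. A minimal self-contained sketch of the pattern, with illustrative state:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct example_state {
        struct mutex lock;
        int users;
};

static int example_get(struct example_state *st)
{
        guard(mutex)(&st->lock);

        if (st->users < 0)
                return -EINVAL; /* lock is dropped here too */

        st->users++;
        return 0;               /* ... and here */
}
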
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device_node *target;
+ struct device *dev;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ dev = imx_pcie->pci->dev;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * Use 1:1 map RID to streamID. Hardware can't support this
+ * because the streamID is only 6 bits
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+	 *  0      NULL       RID out of range, 1:1 RID-to-streamID
+	 *                    map; current hardware can't support
+	 *                    it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+		of_node_put(target); /* Found a streamID map entry for this RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+		 * The MSI glue layer automatically prepends a 2-bit controller
+		 * ID to the streamID, so mask off these 2 bits to get the
+		 * streamID. The IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
+}
+
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -946,6 +1139,11 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
}
}
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
imx_pcie_assert_core_reset(imx_pcie);
if (imx_pcie->drvdata->init_phy)
@@ -966,7 +1164,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_clk_disable;
}
- ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
if (ret) {
dev_err(dev, "unable to set PCIe PHY mode\n");
goto err_phy_exit;
@@ -1033,9 +1233,31 @@ static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
return cpu_addr - entry->offset;
}
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in the iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
+}
+
static const struct dw_pcie_host_ops imx_pcie_host_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1082,16 +1304,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.align = SZ_64K,
};
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
/*
- * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
- * ================================================================================================
- * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
- * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
- * BAR1 should be disabled if BAR0 is 64bit.
- * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
- * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
- * BAR4 | Enable | 32-bit | 1M | Programmable Size
- * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
*/
static const struct pci_epc_features imx95_pcie_epc_features = {
.msi_capable = true,
@@ -1118,7 +1351,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
@@ -1128,28 +1360,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
-
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
-
- /*
- * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
- * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
- * core code can fetch that from DT. But once all platform DTs were fixed, this and the
- * above "dbi_base2" setting should be removed.
- */
- if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
- pci->dbi_base2 = NULL;
-
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
@@ -1176,43 +1386,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
return 0;
}
-static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
-{
- struct device *dev = imx_pcie->pci->dev;
-
- /* Some variants have a turnoff reset in DT */
- if (imx_pcie->turnoff_reset) {
- reset_control_assert(imx_pcie->turnoff_reset);
- reset_control_deassert(imx_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
- }
-
- /* Others poke directly at IOMUXC registers */
- switch (imx_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
-
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
-}
-
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
@@ -1236,7 +1409,6 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1251,9 +1423,7 @@ static int imx_pcie_suspend_noirq(struct device *dev)
imx_pcie_assert_core_reset(imx_pcie);
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
} else {
- imx_pcie_pm_turnoff(imx_pcie);
- imx_pcie_stop_link(imx_pcie->pci);
- imx_pcie_host_exit(pp);
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
}
return 0;
@@ -1263,7 +1433,6 @@ static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1275,6 +1444,7 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret)
return ret;
+
/*
* Using PCIE_TEST_PD seems to disable MSI and powers down the
* root complex. This is why we have to setup the rc again and
@@ -1283,17 +1453,12 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
} else {
- ret = imx_pcie_host_init(pp);
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
- dw_pcie_setup_rc(pp);
-
- if (imx_pcie->link_is_up)
- imx_pcie_start_link(imx_pcie->pci);
}
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
@@ -1311,9 +1476,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int i, ret, req_cnt;
u16 val;
- int i;
imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
if (!imx_pcie)
@@ -1325,11 +1489,17 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx_pcie_host_ops;
imx_pcie->pci = pci;
imx_pcie->drvdata = of_device_get_match_data(dev);
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
+
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
if (np) {
@@ -1363,9 +1533,13 @@ static int imx_pcie_probe(struct platform_device *pdev)
imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt;
+ ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks);
if (ret)
return ret;
+ imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(imx_pcie->clks[req_cnt].clk))
+ return PTR_ERR(imx_pcie->clks[req_cnt].clk);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
@@ -1391,7 +1565,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx_pcie->controller_id = 1;
break;
@@ -1399,13 +1572,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
break;
}
- /* Grab turnoff reset */
- imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx_pcie->turnoff_reset);
- }
-
if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
imx_pcie->iomuxc_gpr =
@@ -1484,6 +1650,7 @@ static int imx_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
} else {
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1513,6 +1680,7 @@ static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
+static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};
static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
@@ -1548,6 +1716,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx6sx_pcie_init_phy,
.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
.core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1565,6 +1734,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx_pcie_init_phy,
.enable_ref_clk = imx6q_pcie_enable_ref_clk,
.core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
@@ -1576,14 +1746,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx7d_pcie_init_phy,
.enable_ref_clk = imx7d_pcie_enable_ref_clk,
.core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
- IMX_PCIE_FLAG_HAS_PHY_RESET,
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
@@ -1621,15 +1791,19 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX8Q] = {
.variant = IMX8Q,
.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
- IMX_PCIE_FLAG_CPU_ADDR_FIXUP,
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.clk_names = imx8q_clks,
.clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX_PCIE_FLAG_HAS_SERDES,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .clk_names = imx95_clks,
+ .clks_cnt = ARRAY_SIZE(imx95_clks),
+ .clks_optional_cnt = 1,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1678,6 +1852,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.epc_features = &imx8m_pcie_epc_features,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .clk_names = imx8q_clks,
+ .clks_cnt = ARRAY_SIZE(imx8q_clks),
+ },
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
@@ -1707,6 +1889,7 @@ static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ee6f52568133..239a05b36e8e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct ls_pcie *pcie;
struct resource *dbi_base;
u32 index[2];
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
if (pcie->drvdata->scfg_support) {
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 2,
+ index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(pcie->scfg);
}
- ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
-
pcie->index = index[1];
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index f8e7283dacd4..234c8cbcae3a 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f3ac7d46a855..8e07d432e74f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -128,7 +128,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t cpu_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -145,7 +146,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ cpu_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -222,19 +223,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
return -EINVAL;
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- type = PCIE_ATU_TYPE_MEM;
- else
- type = PCIE_ATU_TYPE_IO;
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
- if (ret)
- return ret;
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
- if (ep->epf_bar[bar])
- return 0;
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
@@ -246,9 +258,21 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
- ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
return 0;
}
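
From the endpoint-function side, the behaviour permitted above means set_bar() may be called a second time with only the physical address changed. A hypothetical sketch; the vfunc number 0 and the field usage reflect the constraints in the code (same barno, size and flags):

#include <linux/pci-epc.h>

static int example_move_bar(struct pci_epc *epc, u8 func_no,
                            struct pci_epf_bar *bar, dma_addr_t new_addr)
{
        /* Only the backing address changes; size/flags/barno must match */
        bar->phys_addr = new_addr;

        return pci_epc_set_bar(epc, func_no, 0, bar);
}
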
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d2291c3ceb8b..ffaded8f2df7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -436,18 +436,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -530,8 +530,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
@@ -918,7 +924,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- int ret = 0;
+ int ret;
/*
* If L1SS is supported, then do not put the link into L2 as some
@@ -927,25 +933,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (pci->pp.ops->pme_turn_off)
+ if (pci->pp.ops->pme_turn_off) {
pci->pp.ops->pme_turn_off(&pci->pp);
- else
+ } else {
ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- if (ret)
- return ret;
-
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+		/* Only reached when the LTSSM is neither in L2 nor back in DETECT/POLL */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);
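
The L2 wait above uses read_poll_timeout() from <linux/iopoll.h>, which re-reads through the supplied accessor until the condition holds or the timeout expires. A minimal sketch with an illustrative MMIO status read:

#include <linux/io.h>
#include <linux/iopoll.h>

static u32 example_read_status(void __iomem *base)
{
        return readl(base);     /* stand-in for a real status register */
}

static int example_wait_idle(void __iomem *base)
{
        u32 val;

        /* Poll every 100us, time out after 10ms; val keeps the last read */
        return read_poll_timeout(example_read_status, val, val == 0,
                                 100, 10000, false, base);
}
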
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d6cbc8b5b2c..145e7f579072 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -597,11 +597,12 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 cpu_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(cpu_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
@@ -970,7 +971,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
if (pci->edma.nr_irqs == 1)
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 347ab74ac35a..501d9ddfea16 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -330,6 +330,7 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
DW_PCIE_LTSSM_L0 = 0x11,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
@@ -379,6 +380,7 @@ struct dw_pcie_rp {
bool use_atu_msg;
int msg_atu_index;
struct resource *msg_res;
+ bool use_linkup_irq;
};
struct dw_pcie_ep_ops {
@@ -491,16 +493,13 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 cpu_addr, u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-int dw_pcie_suspend_noirq(struct dw_pcie *pci);
-int dw_pcie_resume_noirq(struct dw_pcie *pci);
-
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
dw_pcie_write_dbi(pci, reg, 0x4, val);
@@ -678,6 +677,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
}
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
@@ -686,6 +687,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 1170e1107508..93698abff4d9 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -389,6 +389,34 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
{
struct rockchip_pcie *rockchip = arg;
@@ -418,14 +446,29 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
{
+ struct device *dev = &pdev->dev;
struct dw_pcie_rp *pp;
+ int irq, ret;
u32 val;
if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
return -ENODEV;
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
/* LTSSM enable control mode */
val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -435,8 +478,19 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
- return dw_pcie_host_init(pp);
+ return ret;
}
static int rockchip_pcie_configure_ep(struct platform_device *pdev,
@@ -450,14 +504,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
return -ENODEV;
irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL,
rockchip_pcie_ep_sys_irq_thread,
- IRQF_ONESHOT, "pcie-sys", rockchip);
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
if (ret) {
dev_err(dev, "failed to request PCIe sys IRQ\n");
return ret;
@@ -491,7 +543,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
pci_epc_init_notify(rockchip->pci.ep.epc);
/* unmask DLL up/down indicator and hot reset/link-down reset */
- rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC);
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
}
@@ -553,7 +606,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
switch (data->mode) {
case DW_PCIE_RC_TYPE:
- ret = rockchip_pcie_configure_rc(rockchip);
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
if (ret)
goto deinit_clk;
break;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc102d8bd58c..e4d3366ead1f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
pci_lock_rescan_remove();
pci_rescan_bus(pp->bridge->bus);
pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
} else {
dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
status);
@@ -1703,6 +1705,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
@@ -1716,7 +1722,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_host_deinit;
}
- irq = platform_get_irq_byname_optional(pdev, "global");
if (irq > 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
qcom_pcie_global_irq_thread,
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index cf5f59a745b3..f441bfd6f96a 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -75,6 +75,8 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
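
A hypothetical ECAM host driver can now supply per-device hooks through its struct pci_ecam_ops, which the common probe wires into the bridge as shown above; all names here are illustrative:

#include <linux/pci-ecam.h>

static int example_enable_device(struct pci_host_bridge *bridge,
                                 struct pci_dev *pdev)
{
        /* e.g. program a per-RID stream ID before the device is used */
        return 0;
}

static const struct pci_ecam_ops example_ecam_ops = {
        .enable_device  = example_enable_device,
        .pci_ops        = {
                .map_bus        = pci_ecam_map_bus,
                .read           = pci_generic_config_read,
                .write          = pci_generic_config_write,
        }
};
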
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index cdd5be16021d..178da6b9fc33 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
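
cpumask_next_wrap() in its new two-argument form keeps the round-robin selection but drops the explicit wrap bookkeeping. A minimal sketch, with the cursor as illustrative caller state:

#include <linux/cpumask.h>

static int example_next_cpu(int last_cpu)
{
        /* Walks cpu_online_mask and wraps past the end back to the start */
        return cpumask_next_wrap(last_cpu, cpu_online_mask);
}
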
@@ -2053,6 +2052,7 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+ .flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
#endif
@@ -3975,24 +3975,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
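guard(msi_descs_lock) comes from the <linux/cleanup.h> machinery: it takes the MSI descriptor lock and releases it automatically when the scope is left, so the early return above needs no explicit unlock and the ret bookkeeping disappears. The shape of the pattern, in a hypothetical helper:

  #include <linux/cleanup.h>

  static int walk_msi_descs(struct device *dev)
  {
  	struct msi_desc *entry;

  	guard(msi_descs_lock)(dev);	/* dropped on every return path */

  	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
  		if (!entry->irq)
  			return -EINVAL;	/* lock released automatically */
  	}
  	return 0;
  }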
/*
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 46d3afe1d308..665f35f9d826 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index fefab2758a06..a7e51bc1c2fe 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -26,7 +26,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
@@ -667,12 +666,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,12 +701,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
+ struct apple_pcie_port *port;
u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
@@ -721,45 +728,6 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
-
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
-};
-
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -799,23 +767,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
return 0;
}
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
- if (ret)
- return ret;
-
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
-}
-
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
.init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -830,7 +785,7 @@ static const struct of_device_id apple_pcie_of_match[] = {
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = apple_pcie_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index be52e3a123ab..aa24ac9aaecc 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -125,6 +125,8 @@
#define MAX_NUM_PHY_RESETS 3
+#define PCIE_MTK_RESET_TIME_US 10
+
/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS 100
@@ -133,10 +135,18 @@ struct mtk_gen3_pcie;
#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
/**
* struct mtk_gen3_pcie_pdata - differentiate between host generations
* @power_up: pcie power_up callback
* @phy_resets: phy reset lines SoC data.
+ * @flags: pcie device flags.
*/
struct mtk_gen3_pcie_pdata {
int (*power_up)(struct mtk_gen3_pcie *pcie);
@@ -144,6 +154,7 @@ struct mtk_gen3_pcie_pdata {
const char *id[MAX_NUM_PHY_RESETS];
int num_resets;
} phy_resets;
+ u32 flags;
};
/**
@@ -438,22 +449,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug when asserting/releasing the PCIE_PE_RSTB
+ * signal, causing occasional PCIe link down events. In order to
+ * overcome the issue, PCIE_RSTB signals are not asserted/released at
+ * this stage and the PCIe block is reset using en7523_reset_assert()
+ * and en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -913,11 +935,20 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
u32 val;
/*
- * Wait for the time needed to complete the bulk assert in
- * mtk_pcie_setup for EN7581 SoC.
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ /* Wait for the time needed to complete the assertion of the reset lines. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
err = phy_init(pcie->phy);
if (err) {
dev_err(dev, "failed to initialize PHY\n");
@@ -940,17 +971,11 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
* Wait for the time needed to complete the bulk de-assert above.
 * This time is specific to the EN7581 SoC.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ msleep(PCIE_EN7581_RESET_TIME_MS);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
- if (err) {
- dev_err(dev, "failed to prepare clock\n");
- goto err_clk_prepare;
- }
-
val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
@@ -963,17 +988,22 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
- err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
if (err) {
dev_err(dev, "failed to prepare clock\n");
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
}
+ /*
+ * Airoha EN7581 performs the PCIe reset via clk callbacks since it has
+ * a hw issue with the PCIE_PE_RSTB signal. Wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
return 0;
-err_clk_enable:
- clk_bulk_unprepare(pcie->num_clks, pcie->clks);
-err_clk_prepare:
+err_clk_prepare_enable:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
@@ -990,6 +1020,15 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
if (err) {
@@ -1074,14 +1113,6 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
* counter since the bulk is shared.
*/
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
- /*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
- */
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie);
@@ -1231,10 +1262,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 state successfully");
@@ -1285,6 +1318,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
.id[2] = "phy-lane2",
.num_resets = 3,
},
+ .flags = SKIP_PCIE_RSTB,
};
static const struct of_device_id mtk_pcie_of_match[] = {
@@ -1301,6 +1335,7 @@ static struct platform_driver mtk_pcie_driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 047e2cef5afc..c5e0d025bc43 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 1064b7b06cef..85ea36df2f59 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -40,6 +40,10 @@
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/INTX IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -784,6 +788,7 @@ static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
goto err_epc_mem_exit;
}
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index b9ade7632e11..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -69,55 +69,23 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
if (rockchip->is_rc)
rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
@@ -129,29 +97,10 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
"failed to get PERST# GPIO\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
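The conversion replaces seven hand-rolled reset/clock lookups with the bulk APIs: an array of reset_control_bulk_data is seeded with the line names and fetched in one devm call, while devm_clk_bulk_get_all() discovers every clock the node provides. Condensed, and with the names from the hunk above, the pattern is:

  static const char * const ids[] = { "pm", "pclk", "aclk" };
  struct reset_control_bulk_data rsts[ARRAY_SIZE(ids)];
  struct clk_bulk_data *clks;
  int i, err, num_clks;

  for (i = 0; i < ARRAY_SIZE(ids); i++)
  	rsts[i].id = ids[i];

  err = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(ids), rsts);
  if (err)
  	return dev_err_probe(dev, err, "Cannot get the PM resets\n");

  num_clks = devm_clk_bulk_get_all(dev, &clks);	/* count or -errno */
  if (num_clks < 0)
  	return dev_err_probe(dev, num_clks, "failed to get clocks\n");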
@@ -169,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -195,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -275,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -375,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
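Because rockchip_pcie_disable_clocks() now takes a typed pointer, it can no longer be passed to devm_add_action_or_reset() directly; callers that want managed teardown need a small void * adapter, e.g. (wrapper name illustrative):

  static void rockchip_pcie_disable_clocks_action(void *data)
  {
  	struct rockchip_pcie *rockchip = data;

  	rockchip_pcie_disable_clocks(rockchip);
  }

  /* in probe: */
  err = devm_add_action_or_reset(dev, rockchip_pcie_disable_clocks_action,
  			       rockchip);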
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index a51b087ce878..11def598534b 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,9 +11,11 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -309,22 +311,31 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+static const char * const rockchip_pci_core_rsts[] = {
+ "mgmt-sticky",
+ "core",
+ "mgmt",
+ "pipe",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
@@ -358,7 +369,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index a0f5e1d67b04..81e8bfae53d0 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -30,11 +30,14 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
@@ -80,14 +83,21 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: Bitmask written to the miscellaneous interrupt enable register
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -269,6 +279,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -279,11 +290,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -465,6 +476,8 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -483,15 +496,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -609,10 +622,21 @@ err_parse_dt:
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +648,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
{}
};
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 6630cacef301..3fdfffdf0270 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -7,20 +7,27 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/wordpart.h>
#include "../../pci.h"
#include "pcie-plda.h"
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u
@@ -607,6 +614,91 @@ static void mc_disable_interrupts(struct mc_pcie *port)
writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
}
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This cfg needs two windows because of how the
+ * MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * Always need the same two tables in this case; two tables are
+ * required due to hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
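The SRCADDR_PARAM word packs the 4 KB-aligned low source address together with a size exponent and an enable bit; a window spans 2^(atr_sz + 1) bytes, which is why atr_sz is ilog2(size) - 1. Worked out for the first non-coherent window above:

  /* window 0: PCIe 0x8000_0000 -> AXI 0x0, size SZ_1G (2^30 bytes) */
  atr_sz = ilog2(SZ_1G) - 1;			/* 30 - 1 = 29 */
  val  = ALIGN_DOWN(0x80000000, SZ_4K);		/* 0x8000_0000 */
  val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);	/* GENMASK(6, 1): 29 << 1 */
  val |= ATR_IMPL_ENABLE;			/* BIT(0) */
  /* val == 0x8000003b */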
+
static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -627,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
if (ret)
return ret;
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
port->plda.event_ops = &mc_event_ops;
port->plda.event_irq_chip = &mc_event_irq_chip;
port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 8533dc618d45..4153214ca410 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -8,11 +8,14 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
#include "pcie-plda.h"
@@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_PARAM);
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_SRCADDR_PARAM);
@@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
val = upper_32_bits(pci_addr);
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
-EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port)
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
index 0e7dc0d8e5ba..61ece26065ea 100644
--- a/drivers/pci/controller/plda/pcie-plda.h
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -89,14 +89,15 @@
/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
@@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev,
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
phys_addr_t axi_addr, phys_addr_t pci_addr,
size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port);
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 3b59a86a764b..3431a7df3e0d 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -101,7 +101,7 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
* @bar: BAR the range is within
* @offset: offset from the BAR's start address
* @maxlen: length in bytes, beginning at @offset
- * @name: name associated with the request
+ * @name: name of the driver requesting the resource
* @req_flags: flags for the request, e.g., for kernel-exclusive requests
*
* Returns: 0 on success, a negative error code on failure.
@@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar)
return mask & BIT(bar);
}
-/*
- * This is a copy of pci_intx() used to bypass the problem of recursive
- * function calls due to the hybrid nature of pci_intx().
- */
-static void __pcim_intx(struct pci_dev *pdev, int enable)
-{
- u16 pci_command, new;
-
- pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
-
- if (enable)
- new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
- else
- new = pci_command | PCI_COMMAND_INTX_DISABLE;
-
- if (new != pci_command)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-}
-
static void pcim_intx_restore(struct device *dev, void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcim_intx_devres *res = data;
- __pcim_intx(pdev, res->orig_intx);
+ pci_intx(pdev, res->orig_intx);
}
-static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
+static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
{
- struct pcim_intx_devres *res;
-
- res = devres_find(dev, pcim_intx_restore, NULL, NULL);
- if (res)
- return res;
+ u16 pci_command;
- res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
- if (res)
- devres_add(dev, res);
-
- return res;
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
}
/**
@@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
int pcim_intx(struct pci_dev *pdev, int enable)
{
struct pcim_intx_devres *res;
+ struct device *dev = &pdev->dev;
- res = get_or_create_intx_devres(&pdev->dev);
- if (!res)
- return -ENOMEM;
+ /*
+ * pcim_intx() must only restore the INTx value that existed before the
+ * driver was loaded, i.e., before it called pcim_intx() for the
+ * first time.
+ */
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (!res) {
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ save_orig_intx(pdev, res);
+ devres_add(dev, res);
+ }
- res->orig_intx = !enable;
- __pcim_intx(pdev, enable);
+ pci_intx(pdev, enable);
return 0;
}
+EXPORT_SYMBOL_GPL(pcim_intx);
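Exporting pcim_intx() lets managed drivers toggle INTx and rely on devres to restore the pre-probe state at unbind. A minimal, hypothetical probe using it:

  static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
  	int ret;

  	ret = pcim_enable_device(pdev);
  	if (ret)
  		return ret;

  	/* disable INTx while MSI is in use; restored automatically on unbind */
  	return pcim_intx(pdev, 0);
  }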
static void pcim_disable_device(void *pdev_raw)
{
@@ -723,7 +709,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* pcim_iomap_region - Request and iomap a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of a BAR to map
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
*
@@ -790,7 +776,7 @@ EXPORT_SYMBOL(pcim_iounmap_region);
* pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
- * @name: Name associated with the requests
+ * @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -855,9 +841,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
/**
* pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -874,9 +860,9 @@ EXPORT_SYMBOL(pcim_request_region);
/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -932,7 +918,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev)
/**
* pcim_request_all_regions - Request all regions
* @pdev: PCI device to map IO resources for
- * @name: name associated with the request
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index ef6677f34116..b94e205ae10b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,6 +44,8 @@
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -74,6 +76,7 @@ struct pci_epf_test_reg {
u32 irq_type;
u32 irq_number;
u32 flags;
+ u32 caps;
} __packed;
static struct pci_epf_header test_header = {
@@ -251,7 +254,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -328,8 +331,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
void *copy_buf = NULL, *buf;
if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
ret = -EINVAL;
goto set_status;
}
@@ -739,6 +742,20 @@ static void pci_epf_test_clear_bar(struct pci_epf *epf)
}
}
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
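The host-side test driver can now read this register instead of guessing endpoint abilities; a sketch of the consumer, assuming an illustrative offset for the caps field inside the test BAR:

  #define PCI_ENDPOINT_TEST_CAPS	0x30	/* hypothetical BAR offset */

  u32 caps = readl(test->base + PCI_ENDPOINT_TEST_CAPS);

  if (caps & CAP_UNALIGNED_ACCESS)
  	test->unaligned_ok = true;	/* EPC can map unaligned addresses */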
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -763,6 +780,8 @@ static int pci_epf_test_epc_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index bed7c7d1fe3c..9e9ca5f8e8f8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -60,26 +60,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
int ret = -EINVAL;
struct pci_epc *epc;
struct device *dev;
- struct class_dev_iter iter;
- class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (strcmp(epc_name, dev_name(dev)))
- continue;
+ dev = class_find_device_by_name(&pci_epc_class, epc_name);
+ if (!dev)
+ goto err;
- epc = to_pci_epc(dev);
- if (!try_module_get(epc->ops->owner)) {
- ret = -EINVAL;
- goto err;
- }
-
- class_dev_iter_exit(&iter);
- get_device(&epc->dev);
+ epc = to_pci_epc(dev);
+ if (try_module_get(epc->ops->owner))
return epc;
- }
err:
- class_dev_iter_exit(&iter);
+ put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
@@ -609,10 +600,20 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
+ int ret;
- if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+
+ if (epc_features->bar[bar].type == BAR_FIXED &&
+ (epc_features->bar[bar].fixed_size != epf_bar->size))
+ return -EINVAL;
+
+ if (!is_power_of_2(epf_bar->size))
return -EINVAL;
if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
@@ -942,7 +943,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
int r;
- r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
epc);
dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8fa2797d4169..50bc2892a36c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
mutex_lock(&epf_pf->lock);
clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->epf_pf = NULL;
list_del(&epf_vf->list);
mutex_unlock(&epf_pf->lock);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8f3a0a33f362..b3aa34e3a4a2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
@@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
.name = "apci_table",
.mode = S_IRUGO,
},
- .read = ibm_read_apci_table,
+ .read_new = ibm_read_apci_table,
.write = NULL,
};
static struct acpiphp_attention_info ibm_attention_info =
@@ -353,7 +353,7 @@ read_table_done:
* our solution is to only allow reading the table in all at once.
*/
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4be402fe9ab9..9e4770cdd4d5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -747,6 +747,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
const char *res_name;
struct pci_dev *pdev;
+ u32 sriovbars[PCI_SRIOV_NUM_BARS];
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
@@ -783,6 +784,10 @@ found:
if (!iov)
return -ENOMEM;
+ /* Sizing SR-IOV BARs with VF Enable cleared - no decode */
+ __pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
+ pos + PCI_SRIOV_BAR, sriovbars);
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
@@ -796,7 +801,8 @@ found:
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ pos + PCI_SRIOV_BAR + i * 4,
+ &sriovbars[i]);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b956ce591f96..d89f491afdf0 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..dc78d9d402c3 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -336,41 +336,11 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
struct msi_desc *entry, desc;
- int ret;
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
-
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
-
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
goto fail;
@@ -399,19 +369,48 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+ dev->msi_enabled = 1;
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
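Two scope-based helpers carry the cleanup here: the masks pointer is tagged __free(kfree), so the affinity masks are released on every exit path, and guard(msi_descs_lock) holds the lock for the rest of the function. The __free idiom in isolation (demo() is illustrative):

  #include <linux/cleanup.h>
  #include <linux/slab.h>

  static int demo(size_t n)
  {
  	/* freed automatically when 'buf' goes out of scope */
  	char *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

  	if (!buf)
  		return -ENOMEM;

  	return 0;	/* kfree(buf) runs here too */
  }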
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -666,40 +665,41 @@ static void msix_mask_all(void __iomem *base, int tsize)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
+static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
- int ret;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ goto fail;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ goto fail;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ goto fail;
msix_update_entries(dev, entries);
- goto out_unlock;
+ return 0;
-out_free:
+fail:
pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -871,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -916,6 +916,53 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+ /*
+ * This is a horrible hack, but short of implementing a
+ * PCI-specific interrupt chip callback and a huge pile of
+ * infrastructure, it is the lesser nuisance. It protects against
+ * concurrent operations on this entry and keeps the control word
+ * cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
+
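A TPH-aware caller obtains a steering tag for the target CPU and memory type and pushes it into the vector's control word through this helper; a hypothetical call site (tag value illustrative):

  u16 tag = 0x12;	/* steering tag for the target, e.g. from an ACPI _DSM */
  int ret = pci_msix_write_tph_tag(pdev, 3, tag);	/* MSI-X vector 3 */

  if (ret)
  	dev_warn(&pdev->dev, "TPH tag update failed: %d\n", ret);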
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 52f770bcc481..7a806f5c0d20 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -190,7 +190,8 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn);
*
* Returns 0 on success or a negative error-code on failure.
*/
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+static int of_pci_parse_bus_range(struct device_node *node,
+ struct resource *res)
{
u32 bus_range[2];
int error;
@@ -207,7 +208,6 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
return 0;
}
-EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
@@ -302,8 +302,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
- * @busno: bus number associated with the bridge root bus
- * @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
@@ -319,7 +317,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
@@ -343,14 +340,15 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
- bus_range->start = busno;
- bus_range->end = bus_max;
+ bus_range->start = 0;
+ bus_range->end = 0xff;
bus_range->flags = IORESOURCE_BUS;
- dev_info(dev, " No bus range found for %pOF, using %pR\n",
- dev_node, bus_range);
} else {
- if (bus_range->end > bus_range->start + bus_max)
- bus_range->end = bus_range->start + bus_max;
+ if (bus_range->end > 0xff) {
+ dev_warn(dev, " Invalid end bus number in %pR, defaulting to 0xff\n",
+ bus_range);
+ bus_range->end = 0xff;
+ }
}
pci_add_resource(resources, bus_range);
@@ -597,7 +595,7 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ err = devm_of_pci_get_host_bridge_resources(dev, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 886c236e5de6..58fbafac7c6a 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -26,7 +26,7 @@ struct of_pci_addr_pair {
* side and the child address is the corresponding address on the secondary
* side.
*/
-struct of_pci_range {
+struct of_pci_range_entry {
u32 child_addr[OF_PCI_ADDRESS_CELLS];
u32 parent_addr[OF_PCI_ADDRESS_CELLS];
u32 size[OF_PCI_SIZE_CELLS];
@@ -101,7 +101,7 @@ static int of_pci_prop_bus_range(struct pci_dev *pdev,
static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
- struct of_pci_range *rp;
+ struct of_pci_range_entry *rp;
struct resource *res;
int i, j, ret;
u32 flags, num;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 7abd4f546d3c..0cb7e0aaba0e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -161,7 +161,7 @@ out:
return ret;
}
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
.attr = { .name = "allocate", .mode = 0660 },
.mmap = p2pmem_alloc_mmap,
/*
@@ -180,14 +180,14 @@ static struct attribute *p2pmem_attrs[] = {
NULL,
};
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
&p2pmem_alloc_attr,
NULL,
};
static const struct attribute_group p2pmem_group = {
.attrs = p2pmem_attrs,
- .bin_attrs = p2pmem_bin_attrs,
+ .bin_attrs_new = p2pmem_bin_attrs,
.name = "p2pmem",
};
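A hedged sketch with a hypothetical "example" attribute, showing the
constification pattern this series applies throughout: a read callback
taking a const bin_attribute, a const attribute declaration, and
registration through the transitional .bin_attrs_new member of
struct attribute_group:

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr, char *buf,
			    loff_t off, size_t count)
{
	return 0; /* nothing to read in this sketch */
}

static const BIN_ATTR(example, 0444, example_read, NULL, 0);

static const struct bin_attribute *const example_bin_attrs[] = {
	&bin_attr_example,
	NULL,
};

static const struct attribute_group example_group = {
	.bin_attrs_new = example_bin_attrs,
};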
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 35270172c833..f57ea36d125d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
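A hedged sketch of a consumer (helper name illustrative, not from this
patch): bus-agnostic code can now resolve per-vector affinity through
the new irq_get_affinity bus callback instead of calling PCI-specific
APIs directly:

static const struct cpumask *
example_dev_irq_affinity(struct device *dev, unsigned int irq_vec)
{
	if (dev->bus && dev->bus->irq_get_affinity)
		return dev->bus->irq_get_affinity(dev, irq_vec);

	return NULL;
}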
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7679d75d71e5..b46ce1a2c554 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -694,7 +695,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(boot_vga);
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -769,7 +770,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -837,9 +838,9 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
&bin_attr_config,
NULL,
};
@@ -856,7 +857,7 @@ static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_config_attr_group = {
- .bin_attrs = pci_dev_config_attrs,
+ .bin_attrs_new = pci_dev_config_attrs,
.bin_size = pci_dev_config_attr_bin_size,
};
@@ -887,8 +888,8 @@ pci_llseek_resource(struct file *filep,
* callback routine (pci_legacy_read).
*/
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -912,8 +913,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* callback routine (pci_legacy_write).
*/
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -1003,8 +1004,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.name = "legacy_io";
b->legacy_io->size = 0xffff;
b->legacy_io->attr.mode = 0600;
- b->legacy_io->read = pci_read_legacy_io;
- b->legacy_io->write = pci_write_legacy_io;
+ b->legacy_io->read_new = pci_read_legacy_io;
+ b->legacy_io->write_new = pci_write_legacy_io;
/* See pci_create_attr() for motivation */
b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1099,7 +1100,7 @@ static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
@@ -1142,14 +1143,14 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
int ret;
@@ -1210,8 +1211,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
sprintf(res_attr_name, "resource%d", num);
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ res_attr->read_new = pci_read_resource_io;
+ res_attr->write_new = pci_write_resource_io;
if (arch_can_pci_mmap_io())
res_attr->mmap = pci_mmap_resource_uc;
} else {
@@ -1292,7 +1293,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1318,7 +1319,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
* device corresponding to @kobj.
*/
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1344,9 +1345,9 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
&bin_attr_rom,
NULL,
};
@@ -1372,7 +1373,7 @@ static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_rom_attr_group = {
- .bin_attrs = pci_dev_rom_attrs,
+ .bin_attrs_new = pci_dev_rom_attrs,
.is_bin_visible = pci_dev_rom_attr_is_visible,
.bin_size = pci_dev_rom_attr_bin_size,
};
@@ -1421,6 +1422,113 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *tmp_options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ tmp_options = options;
+ while ((name = strsep(&tmp_options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ /* Leave previous methods unchanged if input is invalid */
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ return -EINVAL;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ return count;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
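For orientation, a hedged usage sketch of the reset_method attribute
relocated above (device path and available methods are illustrative):

# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
flr bus
# echo bus > /sys/bus/pci/devices/0000:01:00.0/reset_method
# echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method

Writing an empty string disables all reset methods for the device, and
"default" re-probes the full list via pci_init_reset_methods().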
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 661f98c6c63a..869d204a70a3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1100,34 +1099,6 @@ static void pci_enable_acs(struct pci_dev *dev)
}
/**
- * pcie_read_tlp_log - read TLP Header Log
- * @dev: PCIe device
- * @where: PCI Config offset of TLP Header Log
- * @tlp_log: TLP Log structure to fill
- *
- * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
- *
- * Return: 0 on success and filled TLP Log structure, <0 on error.
- */
-int pcie_read_tlp_log(struct pci_dev *dev, int where,
- struct pcie_tlp_log *tlp_log)
-{
- int i, ret;
-
- memset(tlp_log, 0, sizeof(*tlp_log));
-
- for (i = 0; i < 4; i++) {
- ret = pci_read_config_dword(dev, where + i * 4,
- &tlp_log->dw[i]);
- if (ret)
- return pcibios_err_to_errno(ret);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
-
-/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -2059,6 +2030,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
return pci_enable_resources(dev, bars);
}
+static int pci_host_bridge_enable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+ int err;
+
+ if (host_bridge && host_bridge->enable_device) {
+ err = host_bridge->enable_device(host_bridge, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void pci_host_bridge_disable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge && host_bridge->disable_device)
+ host_bridge->disable_device(host_bridge, dev);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -2074,9 +2067,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (bridge)
pcie_aspm_powersave_config_link(bridge);
+ err = pci_host_bridge_enable_device(dev);
+ if (err)
+ return err;
+
err = pcibios_enable_device(dev, bars);
if (err < 0)
- return err;
+ goto err_enable;
pci_fixup_device(pci_fixup_enable, dev);
if (dev->msi_enabled || dev->msix_enabled)
@@ -2091,6 +2088,12 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
}
return 0;
+
+err_enable:
+ pci_host_bridge_disable_device(dev);
+
+ return err;
+
}
/**
@@ -2274,6 +2277,8 @@ void pci_disable_device(struct pci_dev *dev)
if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
+ pci_host_bridge_disable_device(dev);
+
do_pci_disable_device(dev);
dev->is_busmaster = 0;
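A hedged sketch (hypothetical controller driver) of wiring up the new
host bridge hooks that do_pci_enable_device() and pci_disable_device()
now invoke:

static int example_hb_enable_device(struct pci_host_bridge *bridge,
				    struct pci_dev *pdev)
{
	/* e.g., claim a translation or filter entry for pdev's RID */
	return 0;
}

static void example_hb_disable_device(struct pci_host_bridge *bridge,
				      struct pci_dev *pdev)
{
	/* release whatever example_hb_enable_device() claimed */
}

static void example_hb_setup(struct pci_host_bridge *bridge)
{
	bridge->enable_device = example_hb_enable_device;
	bridge->disable_device = example_hb_disable_device;
}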
@@ -3941,15 +3946,14 @@ EXPORT_SYMBOL(pci_release_region);
* __pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resource
* @exclusive: whether the region access is exclusive or not
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
@@ -3959,13 +3963,13 @@ EXPORT_SYMBOL(pci_release_region);
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
- const char *res_name, int exclusive)
+ const char *name, int exclusive)
{
if (pci_is_managed(pdev)) {
if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, res_name);
+ return pcim_request_region_exclusive(pdev, bar, name);
- return pcim_request_region(pdev, bar, res_name);
+ return pcim_request_region(pdev, bar, name);
}
if (pci_resource_len(pdev, bar) == 0)
@@ -3973,11 +3977,11 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name))
+ pci_resource_len(pdev, bar), name))
goto err_out;
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name,
+ pci_resource_len(pdev, bar), name,
exclusive))
goto err_out;
}
@@ -3994,14 +3998,13 @@ err_out:
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resource
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4011,9 +4014,9 @@ err_out:
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
- return __pci_request_region(pdev, bar, res_name, 0);
+ return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);
@@ -4036,13 +4039,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *name, int excl)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
- if (__pci_request_region(pdev, i, res_name, excl))
+ if (__pci_request_region(pdev, i, name, excl))
goto err_out;
return 0;
@@ -4059,7 +4062,7 @@ err_out:
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
+ * @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4069,9 +4072,9 @@ err_out:
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name, 0);
+ return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
@@ -4079,7 +4082,7 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* pci_request_selected_regions_exclusive - Request regions exclusively
* @pdev: PCI device to request regions from
* @bars: bit mask of BARs to request
- * @res_name: name to be associated with the requests
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4089,9 +4092,9 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name,
+ return __pci_request_selected_regions(pdev, bars, name,
IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@@ -4114,12 +4117,11 @@ EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved by
+ * owner @name. Do not access any address inside the PCI regions unless this
+ * call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4129,22 +4131,22 @@ EXPORT_SYMBOL(pci_release_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
* Mark all PCI regions associated with PCI device @pdev as being reserved
- * by owner @res_name. Do not access any address inside the PCI regions
+ * by owner @name. Do not access any address inside the PCI regions
* unless this call returns successfully.
*
* pci_request_regions_exclusive() will mark the region so that /dev/mem
@@ -4158,10 +4160,10 @@ EXPORT_SYMBOL(pci_request_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4488,11 +4490,6 @@ void pci_disable_parity(struct pci_dev *dev)
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device @pdev
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
@@ -4505,15 +4502,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- /* Preserve the "hybrid" behavior for backwards compatibility */
- if (pci_is_managed(pdev)) {
- WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
- return;
- }
+ if (new == pci_command)
+ return;
- pci_write_config_word(pdev, PCI_COMMAND, new);
- }
+ pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx);
@@ -5204,7 +5196,7 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
-static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ },
{ pci_dev_specific_reset, .name = "device_specific" },
{ pci_dev_acpi_reset, .name = "acpi" },
@@ -5215,129 +5207,6 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ cxl_reset_bus_function, .name = "cxl_bus" },
};
-static ssize_t reset_method_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len = 0;
- int i, m;
-
- for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
- m = pdev->reset_methods[i];
- if (!m)
- break;
-
- len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
- pci_reset_fn_methods[m].name);
- }
-
- if (len)
- len += sysfs_emit_at(buf, len, "\n");
-
- return len;
-}
-
-static int reset_method_lookup(const char *name)
-{
- int m;
-
- for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
- if (sysfs_streq(name, pci_reset_fn_methods[m].name))
- return m;
- }
-
- return 0; /* not found */
-}
-
-static ssize_t reset_method_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- char *options, *tmp_options, *name;
- int m, n;
- u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
-
- if (sysfs_streq(buf, "")) {
- pdev->reset_methods[0] = 0;
- pci_warn(pdev, "All device reset methods disabled by user");
- return count;
- }
-
- if (sysfs_streq(buf, "default")) {
- pci_init_reset_methods(pdev);
- return count;
- }
-
- options = kstrndup(buf, count, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- n = 0;
- tmp_options = options;
- while ((name = strsep(&tmp_options, " ")) != NULL) {
- if (sysfs_streq(name, ""))
- continue;
-
- name = strim(name);
-
- m = reset_method_lookup(name);
- if (!m) {
- pci_err(pdev, "Invalid reset method '%s'", name);
- goto error;
- }
-
- if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
- pci_err(pdev, "Unsupported reset method '%s'", name);
- goto error;
- }
-
- if (n == PCI_NUM_RESET_METHODS - 1) {
- pci_err(pdev, "Too many reset methods\n");
- goto error;
- }
-
- reset_methods[n++] = m;
- }
-
- reset_methods[n] = 0;
-
- /* Warn if dev-specific supported but not highest priority */
- if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
- reset_methods[0] != 1)
- pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
- memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
- kfree(options);
- return count;
-
-error:
- /* Leave previous methods unchanged */
- kfree(options);
- return -EINVAL;
-}
-static DEVICE_ATTR_RW(reset_method);
-
-static struct attribute *pci_dev_reset_method_attrs[] = {
- &dev_attr_reset_method.attr,
- NULL,
-};
-
-static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-
- if (!pci_reset_supported(pdev))
- return 0;
-
- return a->mode;
-}
-
-const struct attribute_group pci_dev_reset_method_attr_group = {
- .attrs = pci_dev_reset_method_attrs,
- .is_visible = pci_dev_reset_method_attr_is_visible,
-};
-
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2e40fc63ba31..2e9cf26a9ee9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#include <linux/pci.h>
+struct pcie_tlp_log;
+
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
@@ -315,8 +317,10 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
+ struct resource *res, unsigned int reg, u32 *sizes);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -547,6 +551,12 @@ struct aer_err_info {
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log);
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -565,6 +575,7 @@ void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
+unsigned int dpc_tlp_log_len(struct pci_dev *dev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) { }
static inline void pci_restore_dpc_state(struct pci_dev *dev) { }
@@ -766,6 +777,7 @@ struct pci_reset_fn_method {
int (*reset_fn)(struct pci_dev *pdev, bool probe);
char *name;
};
+extern const struct pci_reset_fn_method pci_reset_fn_methods[];
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
@@ -797,7 +809,6 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
#ifdef CONFIG_OF
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
@@ -814,12 +825,6 @@ bool of_pci_supply_present(struct device_node *np);
#else
static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
of_get_pci_domain_nr(struct device_node *node)
{
return -1;
@@ -960,8 +965,6 @@ static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
-extern const struct attribute_group pci_dev_reset_method_attr_group;
-
#ifdef CONFIG_X86_INTEL_MID
bool pci_use_mid_pm(void);
int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
@@ -986,6 +989,15 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
const char *name);
void pcim_release_region(struct pci_dev *pdev, int bar);
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
+
/*
* Config Address for PCI Configuration Mechanism #1
*
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 53ccab62314d..173829aa02e6 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -7,7 +7,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o
obj-y += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o err.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o tlp.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 34ce9f834d0c..508474e17183 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -665,12 +665,6 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
-{
- pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
-}
-
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -725,7 +719,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- __print_tlp_header(dev, &info->tlp);
+ pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
@@ -797,7 +791,7 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
aer->uncor_severity);
if (tlp_header_valid)
- __print_tlp_header(dev, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
@@ -1248,7 +1242,10 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
+ aer + PCI_ERR_PREFIX_LOG,
+ aer_tlp_log_len(dev, aercc),
+ &info->tlp);
}
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 28567d457613..da3e7edcf49d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -81,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
+ struct pci_dev *parent = pdev->bus->self;
struct pci_cap_saved_state *save_state;
- u16 l1ss = pdev->l1ss;
u32 *cap;
/*
+ * If this is a Downstream Port, we never restore the L1SS state
+ * directly; we only restore it when we restore the state of the
+ * Upstream Port below it.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ /*
* Save L1 substate configuration. The ASPM L0s/L1 configuration
* in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
*/
- if (!l1ss)
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
return;
- save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
+
+ /*
+ * Save parent's L1 substate configuration so we have it for
+ * pci_restore_aspm_l1ss_state(pdev) to restore.
+ */
+ save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 2b6ef7efa3c1..242cabd5eeeb 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,7 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, log, prefix;
+ u32 status, mask, sev, syserr, exc, log;
struct pcie_tlp_log tlp_log;
int i;
@@ -215,22 +215,18 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
first_error == i ? " (First)" : "");
}
- if (pdev->dpc_rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
goto clear_status;
- pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
- pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
+ dpc_tlp_log_len(pdev), &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
- if (pdev->dpc_rp_log_size < 5)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
- pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
- pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
- }
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
@@ -404,7 +400,9 @@ void pci_dpc_init(struct pci_dev *pdev)
if (!pdev->dpc_rp_log_size) {
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
+ pdev->dpc_rp_log_size > PCIE_STD_NUM_TLP_HEADERLOG + 1 +
+ PCIE_STD_MAX_TLP_PREFIXLOG) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
new file mode 100644
index 000000000000..0860b5da837f
--- /dev/null
+++ b/drivers/pci/pcie/tlp.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe TLP Log handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/aer.h>
+#include <linux/array_size.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "../pci.h"
+
+/**
+ * aer_tlp_log_len - Calculate AER Capability TLP Header/Prefix Log length
+ * @dev: PCIe device
+ * @aercc: AER Capabilities and Control register value
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
+{
+ return PCIE_STD_NUM_TLP_HEADERLOG +
+ ((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
+ dev->eetlp_prefix_max : 0);
+}
+
+#ifdef CONFIG_PCIE_DPC
+/**
+ * dpc_tlp_log_len - Calculate DPC RP PIO TLP Header/Prefix Log length
+ * @dev: PCIe device
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int dpc_tlp_log_len(struct pci_dev *dev)
+{
+ /* Remove ImpSpec Log register from the count */
+ if (dev->dpc_rp_log_size >= PCIE_STD_NUM_TLP_HEADERLOG + 1)
+ return dev->dpc_rp_log_size - 1;
+
+ return dev->dpc_rp_log_size;
+}
+#endif
+
+/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @where2: PCI Config offset of TLP Prefix Log
+ * @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @log: TLP Log structure to fill
+ *
+ * Fill @log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log)
+{
+ unsigned int i;
+ int off, ret;
+ u32 *to;
+
+ memset(log, 0, sizeof(*log));
+
+ for (i = 0; i < tlp_len; i++) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG) {
+ off = where + i * 4;
+ to = &log->dw[i];
+ } else {
+ off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
+ to = &log->prefix[i - PCIE_STD_NUM_TLP_HEADERLOG];
+ }
+
+ ret = pci_read_config_dword(dev, off, to);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+
+#define EE_PREFIX_STR " E-E Prefixes:"
+
+/**
+ * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
+ * @dev: PCIe device
+ * @log: TLP Log structure
+ * @pfx: String prefix
+ *
+ * Prints TLP Header and Prefix Log information held by @log.
+ */
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx)
+{
+ char buf[11 * (PCIE_STD_NUM_TLP_HEADERLOG + ARRAY_SIZE(log->prefix)) +
+ sizeof(EE_PREFIX_STR)];
+ unsigned int i;
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
+ log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
+
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len, EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
+
+ pci_err(dev, "%sTLP Header: %s\n", pfx, buf);
+}
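A hedged sketch mirroring the aer.c hunk above: a single call now reads
the Header Log plus any E-E Prefix Log dwords. For a device with
eetlp_prefix_max = 2 and the prefix-present bit set in the Capabilities
and Control register, aer_tlp_log_len() returns 4 + 2 = 6 dwords; on the
DPC side, a dpc_rp_log_size of 9 yields 8 dwords since the ImpSpec Log
register is excluded. Here "aer" is the AER capability offset and
"aercc" the Capabilities and Control register value:

static void example_dump_aer_tlp(struct pci_dev *dev, u16 aer, u32 aercc)
{
	struct pcie_tlp_log log;

	if (pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
			      aer + PCI_ERR_PREFIX_LOG,
			      aer_tlp_log_len(dev, aercc), &log))
		return;

	pcie_print_tlp_log(dev, &log, "");
}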
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2e81ab0f5a25..246744d8d268 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,40 +165,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
+ *
+ * Provided @sizes array must be sufficiently sized to store results for
+ * @count u32 BARs. Caller is responsible for disabling decode to specified
+ * BAR range around calling this function. This function is intended to avoid
+ * disabling decode around sizing each BAR individually, which can result in
+ * non-trivial overhead in virtualized environments with very large PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes, bool rom)
+{
+ u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+ int i;
+
+ for (i = 0; i < count; i++, pos += 4, sizes++) {
+ pci_read_config_dword(dev, pos, &orig);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, sizes);
+ pci_write_config_dword(dev, pos, orig);
+ }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
* __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos, u32 *sizes)
{
- u32 l = 0, sz = 0, mask;
+ u32 l = 0, sz;
u64 l64, sz64, mask64;
- u16 orig_cmd;
struct pci_bus_region region, inverted_region;
const char *res_name = pci_resource_name(dev, res - dev->resource);
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
- /* No printks while decoding is disabled! */
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
- }
- }
-
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, l | mask);
- pci_read_config_dword(dev, pos, &sz);
- pci_write_config_dword(dev, pos, l);
+ sz = sizes[0];
/*
* All bits set in sz means the device isn't working properly.
@@ -238,18 +264,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (res->flags & IORESOURCE_MEM_64) {
pci_read_config_dword(dev, pos + 4, &l);
- pci_write_config_dword(dev, pos + 4, ~0);
- pci_read_config_dword(dev, pos + 4, &sz);
- pci_write_config_dword(dev, pos + 4, l);
+ sz = sizes[1];
l64 |= ((u64)l << 32);
sz64 |= ((u64)sz << 32);
mask64 |= ((u64)~0 << 32);
}
- if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
if (!sz64)
goto fail;
@@ -318,9 +339,14 @@ out:
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
+ u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
+ u16 orig_cmd;
+
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
@@ -329,10 +355,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->is_virtfn)
return;
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+ if (rom)
+ __pci_size_rom(dev, rom, &rombar);
+
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
- pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ pos += __pci_read_base(dev, pci_bar_unknown,
+ res, reg, &stdbars[pos]);
}
if (rom) {
@@ -340,7 +384,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
- __pci_read_base(dev, pci_bar_mem32, res, rom);
+ __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
}
}
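A hedged sketch (helper not in this patch): converting a mask value
returned by __pci_size_stdbars() into a 32-bit memory BAR size. For a
well-formed, contiguous mask this matches the lowest-set-bit computation
the core performs, e.g. a readback of 0xfff00000 decodes to 1 MB:

static u32 example_bar_mask_to_size(u32 sz)
{
	sz &= PCI_BASE_ADDRESS_MEM_MASK;	/* strip the low flag bits */

	return sz ? (~sz + 1) : 0;		/* two's-complement size */
}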
@@ -2251,8 +2295,8 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
-#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ unsigned int eetlp_max;
int pcie_type;
u32 cap;
@@ -2264,15 +2308,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
return;
pcie_type = pci_pcie_type(dev);
+
+ eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+ /* 00b means 4 */
+ eetlp_max = eetlp_max ?: 4;
+
if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pcie_type == PCI_EXP_TYPE_RC_END)
- dev->eetlp_prefix_path = 1;
+ dev->eetlp_prefix_max = eetlp_max;
else {
bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->eetlp_prefix_path)
- dev->eetlp_prefix_path = 1;
+ if (bridge && bridge->eetlp_prefix_max)
+ dev->eetlp_prefix_max = eetlp_max;
}
-#endif
}
static void pci_configure_serr(struct pci_dev *dev)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 76f4df75b08a..82b21e34c545 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,7 @@
* file, where their drivers can use them.
*/
+#include <linux/aer.h>
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/types.h>
@@ -5521,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5533,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
@@ -5984,6 +5986,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
+
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
* These IDs are used to forward responses to the originator on the other
@@ -6233,8 +6246,9 @@ static void dpc_log_size(struct pci_dev *dev)
return;
if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
- pci_info(dev, "Overriding RP PIO Log Size to 4\n");
- dev->dpc_rp_log_size = 4;
+ pci_info(dev, "Overriding RP PIO Log Size to %d\n",
+ PCIE_STD_NUM_TLP_HEADERLOG);
+ dev->dpc_rp_log_size = PCIE_STD_NUM_TLP_HEADERLOG;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
@@ -6253,6 +6267,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c7e1089ffdaf..b14dfab04d84 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }, \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }
+
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
@@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 1e604fbbda65..77fce5e1b830 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -204,48 +204,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
}
-/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
-static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
-{
-#ifdef CONFIG_PCI_MSI
- struct msi_desc *msi_desc = NULL;
- void __iomem *vec_ctrl;
- u32 val;
- int err = 0;
-
- msi_lock_descs(&pdev->dev);
-
- /* Find the msi_desc entry with matching msix_idx */
- msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
- if (msi_desc->msi_index == msix_idx)
- break;
- }
-
- if (!msi_desc) {
- err = -ENXIO;
- goto err_out;
- }
-
- /* Get the vector control register (offset 0xc) pointed by msix_idx */
- vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
- vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- val = readl(vec_ctrl);
- val &= ~PCI_MSIX_ENTRY_CTRL_ST;
- val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
- writel(val, vec_ctrl);
-
- /* Read back to flush the update */
- val = readl(vec_ctrl);
-
-err_out:
- msi_unlock_descs(&pdev->dev);
- return err;
-#else
- return -ENODEV;
-#endif
-}
-
/* Write tag to ST table - Return 0 if OK, otherwise -errno */
static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
{
@@ -346,7 +304,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
switch (loc) {
case PCI_TPH_LOC_MSIX:
- err = write_tag_to_msix(pdev, index, tag);
+ err = pci_msix_write_tph_tag(pdev, index, tag);
break;
case PCI_TPH_LOC_CAP:
err = write_tag_to_st_table(pdev, index, tag);
@@ -360,7 +318,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
return err;
}
- set_ctrl_reg_req_en(pdev, pdev->tph_mode);
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
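A hedged sketch (hypothetical caller): a driver steering completions for
MSI-X vector 0 through the now-centralized MSI core path:

static int example_set_steering_tag(struct pci_dev *pdev, u16 tag)
{
	int err;

	err = pcie_tph_set_st_entry(pdev, 0, tag);
	if (err)
		pci_warn(pdev, "failed to set steering tag: %d\n", err);

	return err;
}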
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a469bcbc0da7..3d29b2602d0f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -271,8 +271,8 @@ void pci_vpd_init(struct pci_dev *dev)
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -295,8 +295,8 @@ static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -317,9 +317,9 @@ static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
+static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-static struct bin_attribute *vpd_attrs[] = {
+static const struct bin_attribute *const vpd_attrs[] = {
&bin_attr_vpd,
NULL,
};
@@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
}
const struct attribute_group pci_dev_vpd_attr_group = {
- .bin_attrs = vpd_attrs,
+ .bin_attrs_new = vpd_attrs,
.is_bin_visible = vpd_attr_is_visible,
};
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 1d4d01e1275e..9a0d1420ac4f 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -120,6 +120,8 @@ enum m1_pmu_events {
*/
M1_PMU_CFG_COUNT_USER = BIT(8),
M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_HOST = BIT(10),
+ M1_PMU_CFG_COUNT_GUEST = BIT(11),
};
/*
@@ -168,6 +170,8 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
[PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
/* sysfs definitions */
@@ -325,11 +329,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel, bool host)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -344,19 +347,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;
if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;
- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+ if (host)
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+ else if (is_kernel_in_hyp_mode())
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
+
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -369,21 +380,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+ bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
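+ /* Program the host filter, then mirror the guest filter via EL12 */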
+ __m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+ __m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -394,11 +416,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
- m1_pmu_disable_counter_interrupt(event->hw.idx);
- m1_pmu_disable_counter(event->hw.idx);
- isb();
-
- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
@@ -556,7 +574,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
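+ /* Host/guest exclusion requires the kernel to run at EL2 (VHE) */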
- if (!attr->exclude_guest) {
+ if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
pr_debug("ARM performance counters do not support mode exclusion\n");
return -EOPNOTSUPP;
}
@@ -564,6 +582,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
+ if (!attr->exclude_host)
+ config_base |= M1_PMU_CFG_COUNT_HOST;
+ if (!attr->exclude_guest)
+ config_base |= M1_PMU_CFG_COUNT_GUEST;
event->config_base = config_base;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index d5fcea3d4328..1a0d0e1a2263 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1273,9 +1273,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
- hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ hrtimer_setup(&ccn->dt.hrtimer, arm_ccn_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
/* Pick one CPU which we will use to collect data from CCN... */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index b20fa600e510..d4fe30ff225b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -802,8 +802,6 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
-#define CMN_EVENT_CCLA_RNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
@@ -1713,8 +1711,8 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
goto done;
}
- for (i = 0; i < CMN_MAX_DTCS; i++)
- if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS)
+ for_each_hw_dtc_idx(hw, dtc, idx)
+ if (val->dtc_count[dtc] == CMN_DT_NUM_COUNTERS)
goto done;
for_each_hw_dn(hw, dn, i) {
@@ -1798,6 +1796,9 @@ static int arm_cmn_event_init(struct perf_event *event)
} else if (type == CMN_TYPE_XP &&
(cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
+ } else if (type == CMN_TYPE_RND) {
+ /* Secretly permit this as an alias for "rnid" events */
+ type = CMN_TYPE_RNI;
}
/* This is sufficiently annoying to recalculate, so cache it */
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
index f72f5689923c..b8ca69fd9d1d 100644
--- a/drivers/perf/arm_cspmu/ampere_cspmu.c
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -10,10 +10,10 @@
#include "arm_cspmu.h"
-#define PMAUXR0 0xD80
-#define PMAUXR1 0xD84
-#define PMAUXR2 0xD88
-#define PMAUXR3 0xD8C
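+/* Ampere's aux registers sit in the IMPDEF region, which starts at PMIMPDEF (0xD80) */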
+#define PMAUXR0 PMIMPDEF
+#define PMAUXR1 (PMIMPDEF + 0x4)
+#define PMAUXR2 (PMIMPDEF + 0x8)
+#define PMAUXR3 (PMIMPDEF + 0xC)
#define to_ampere_cspmu_ctx(cspmu) ((struct ampere_cspmu_ctx *)(cspmu->impl.ctx))
@@ -132,32 +132,20 @@ ampere_cspmu_get_name(const struct arm_cspmu *cspmu)
return ctx->name;
}
-static u32 ampere_cspmu_event_filter(const struct perf_event *event)
+static void ampere_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
/*
- * PMEVFILTR or PMCCFILTR aren't used in Ampere SoC PMU but are marked
- * as RES0. Make sure, PMCCFILTR is written zero.
+ * PMCCFILTR is RES0, so this is just a dummy callback to override
+ * the default implementation and avoid writing to it.
*/
- return 0;
}
static void ampere_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- struct perf_event *event;
- unsigned int idx;
u32 threshold, rank, bank;
- /*
- * At this point, all the events have the same filter settings.
- * Therefore, take the first event and use its configuration.
- */
- idx = find_first_bit(cspmu->hw_events.used_ctrs,
- cspmu->cycle_counter_logical_idx);
-
- event = cspmu->hw_events.events[idx];
-
threshold = get_threshold(event);
rank = get_rank(event);
bank = get_bank(event);
@@ -233,7 +221,7 @@ static int ampere_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
- impl_ops->event_filter = ampere_cspmu_event_filter;
+ impl_ops->set_cc_filter = ampere_cspmu_set_cc_filter;
impl_ops->set_ev_filter = ampere_cspmu_set_ev_filter;
impl_ops->validate_event = ampere_cspmu_validate_event;
impl_ops->get_name = ampere_cspmu_get_name;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 81e8b97e9353..efa9b229e701 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -40,51 +40,6 @@
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
-/*
- * CoreSight PMU Arch register offsets.
- */
-#define PMEVCNTR_LO 0x0
-#define PMEVCNTR_HI 0x4
-#define PMEVTYPER 0x400
-#define PMCCFILTR 0x47C
-#define PMEVFILTR 0xA00
-#define PMCNTENSET 0xC00
-#define PMCNTENCLR 0xC20
-#define PMINTENSET 0xC40
-#define PMINTENCLR 0xC60
-#define PMOVSCLR 0xC80
-#define PMOVSSET 0xCC0
-#define PMCFGR 0xE00
-#define PMCR 0xE04
-#define PMIIDR 0xE08
-
-/* PMCFGR register field */
-#define PMCFGR_NCG GENMASK(31, 28)
-#define PMCFGR_HDBG BIT(24)
-#define PMCFGR_TRO BIT(23)
-#define PMCFGR_SS BIT(22)
-#define PMCFGR_FZO BIT(21)
-#define PMCFGR_MSI BIT(20)
-#define PMCFGR_UEN BIT(19)
-#define PMCFGR_NA BIT(17)
-#define PMCFGR_EX BIT(16)
-#define PMCFGR_CCD BIT(15)
-#define PMCFGR_CC BIT(14)
-#define PMCFGR_SIZE GENMASK(13, 8)
-#define PMCFGR_N GENMASK(7, 0)
-
-/* PMCR register field */
-#define PMCR_TRO BIT(11)
-#define PMCR_HDBG BIT(10)
-#define PMCR_FZO BIT(9)
-#define PMCR_NA BIT(8)
-#define PMCR_DP BIT(5)
-#define PMCR_X BIT(4)
-#define PMCR_D BIT(3)
-#define PMCR_C BIT(2)
-#define PMCR_P BIT(1)
-#define PMCR_E BIT(0)
-
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
@@ -111,7 +66,9 @@ static unsigned long arm_cspmu_cpuhp_state;
static DEFINE_MUTEX(arm_cspmu_lock);
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
@@ -226,6 +183,7 @@ arm_cspmu_event_attr_is_visible(struct kobject *kobj,
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -250,11 +208,6 @@ static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
-static u32 arm_cspmu_event_filter(const struct perf_event *event)
-{
- return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
-}
-
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -416,7 +369,7 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
DEFAULT_IMPL_OP(get_name),
DEFAULT_IMPL_OP(is_cycle_counter_event),
DEFAULT_IMPL_OP(event_type),
- DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_cc_filter),
DEFAULT_IMPL_OP(set_ev_filter),
DEFAULT_IMPL_OP(event_attr_is_visible),
};
@@ -812,26 +765,28 @@ static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
}
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- u32 offset = PMEVFILTR + (4 * hwc->idx);
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
+ u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
+ u32 offset = 4 * event->hw.idx;
- writel(filter, cspmu->base0 + offset);
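+ /* PMEVFILTR and PMEVFILT2R each provide one 32-bit filter register per counter */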
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}
-static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMCCFILTR;
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
@@ -839,13 +794,11 @@ static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
arm_cspmu_set_event_period(event);
- filter = cspmu->impl.ops.event_filter(event);
-
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
- arm_cspmu_set_cc_filter(cspmu, filter);
+ cspmu->impl.ops.set_cc_filter(cspmu, event);
} else {
arm_cspmu_set_event(cspmu, hwc);
- cspmu->impl.ops.set_ev_filter(cspmu, hwc, filter);
+ cspmu->impl.ops.set_ev_filter(cspmu, event);
}
hwc->state = 0;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2621f3111148..19684b76bd96 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -47,6 +47,8 @@
/* Default filter format */
#define ARM_CSPMU_FORMAT_FILTER_ATTR \
ARM_CSPMU_FORMAT_ATTR(filter, "config1:0-31")
+#define ARM_CSPMU_FORMAT_FILTER2_ATTR \
+ ARM_CSPMU_FORMAT_ATTR(filter2, "config2:0-31")
/*
* This is the default event number for cycle count, if supported, since the
@@ -65,6 +67,53 @@
/* The cycle counter, if implemented, is located at counter[31]. */
#define ARM_CSPMU_CYCLE_CNTR_IDX 31
+/*
+ * CoreSight PMU Arch register offsets.
+ */
+#define PMEVCNTR_LO 0x0
+#define PMEVCNTR_HI 0x4
+#define PMEVTYPER 0x400
+#define PMCCFILTR 0x47C
+#define PMEVFILT2R 0x800
+#define PMEVFILTR 0xA00
+#define PMCNTENSET 0xC00
+#define PMCNTENCLR 0xC20
+#define PMINTENSET 0xC40
+#define PMINTENCLR 0xC60
+#define PMOVSCLR 0xC80
+#define PMOVSSET 0xCC0
+#define PMIMPDEF 0xD80
+#define PMCFGR 0xE00
+#define PMCR 0xE04
+#define PMIIDR 0xE08
+
+/* PMCFGR register field */
+#define PMCFGR_NCG GENMASK(31, 28)
+#define PMCFGR_HDBG BIT(24)
+#define PMCFGR_TRO BIT(23)
+#define PMCFGR_SS BIT(22)
+#define PMCFGR_FZO BIT(21)
+#define PMCFGR_MSI BIT(20)
+#define PMCFGR_UEN BIT(19)
+#define PMCFGR_NA BIT(17)
+#define PMCFGR_EX BIT(16)
+#define PMCFGR_CCD BIT(15)
+#define PMCFGR_CC BIT(14)
+#define PMCFGR_SIZE GENMASK(13, 8)
+#define PMCFGR_N GENMASK(7, 0)
+
+/* PMCR register field */
+#define PMCR_TRO BIT(11)
+#define PMCR_HDBG BIT(10)
+#define PMCR_FZO BIT(9)
+#define PMCR_NA BIT(8)
+#define PMCR_DP BIT(5)
+#define PMCR_X BIT(4)
+#define PMCR_D BIT(3)
+#define PMCR_C BIT(2)
+#define PMCR_P BIT(1)
+#define PMCR_E BIT(0)
+
/* PMIIDR register field */
#define ARM_CSPMU_PMIIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_CSPMU_PMIIDR_PRODUCTID GENMASK(31, 20)
@@ -103,11 +152,11 @@ struct arm_cspmu_impl_ops {
bool (*is_cycle_counter_event)(const struct perf_event *event);
/* Decode event type/id from configs */
u32 (*event_type)(const struct perf_event *event);
- /* Decode filter value from configs */
- u32 (*event_filter)(const struct perf_event *event);
- /* Set event filter */
+ /* Set event filters */
+ void (*set_cc_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
void (*set_ev_filter)(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
/* Implementation specific event validation */
int (*validate_event)(struct arm_cspmu *cspmu,
struct perf_event *event);
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index d0ef611240aa..dc6d4e3e2a1b 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -6,6 +6,7 @@
/* Support for NVIDIA specific attributes. */
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/topology.h>
@@ -54,65 +55,24 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),
NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),
NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
- NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
- NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),
NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),
ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
- ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
- ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),
NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),
-
ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
@@ -122,35 +82,12 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
- ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
- ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),
-
- ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),
ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),
-
ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
@@ -194,6 +131,7 @@ static struct attribute *pcie_pmu_format_attrs[] = {
static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
+ ARM_CSPMU_FORMAT_ATTR(port, "config1:0-1"),
NULL,
};
@@ -238,12 +176,32 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
const struct nv_cspmu_ctx *ctx =
to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
- if (ctx->filter_mask == 0)
+ const u32 filter_val = event->attr.config1 & ctx->filter_mask;
+
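+ /* An empty filter means "no restriction": fall back to the default value */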
+ if (filter_val == 0)
return ctx->filter_default_val;
- return event->attr.config1 & ctx->filter_mask;
+ return filter_val;
}
+static void nv_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+ u32 offset = PMEVFILTR + (4 * event->hw.idx);
+
+ writel(filter, cspmu->base0 + offset);
+}
+
+static void nv_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+
+ writel(filter, cspmu->base0 + PMCCFILTR);
+}
+
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
@@ -274,7 +232,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x104,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
@@ -284,7 +242,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x105,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
@@ -383,7 +341,8 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
- impl_ops->event_filter = nv_cspmu_event_filter;
+ impl_ops->set_cc_filter = nv_cspmu_set_cc_filter;
+ impl_ops->set_ev_filter = nv_cspmu_set_ev_filter;
impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 398cce3d76fc..2f33e69a8caf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
- /*
- * If there is an event in the counter we are going to use then make
- * sure it is disabled.
- */
+ /* The newly-allocated counter should be empty */
+ WARN_ON_ONCE(hw_events->events[idx]);
+
event->hw.idx = idx;
- armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index b5cc11abc962..e506d59654e7 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -795,11 +795,6 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
static void armv8pmu_enable_event(struct perf_event *event)
{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
@@ -825,10 +820,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-
- kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
@@ -1279,7 +1274,7 @@ static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int
return 0;
}
-static struct ctl_table armv8_pmu_sysctl_table[] = {
+static const struct ctl_table armv8_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
@@ -1369,6 +1364,7 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
+PMUV3_INIT_SIMPLE(armv8_rainier)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
@@ -1416,6 +1412,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
{.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
{.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
+ {.compatible = "arm,rainier-pmu", .data = armv8_rainier_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index fd5b78732603..f5e6878db9d6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -85,6 +85,7 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
+#define SPE_PMU_FEAT_DISCARD (1UL << 7)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
@@ -193,6 +194,9 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO 34
#define ATTR_CFG_FLD_store_filter_HI 34
+#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
+#define ATTR_CFG_FLD_discard_LO 35
+#define ATTR_CFG_FLD_discard_HI 35
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -216,6 +220,7 @@ GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
+GEN_PMU_FORMAT_ATTR(discard);
static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_ts_enable.attr,
@@ -228,6 +233,7 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
+ &format_attr_discard.attr,
NULL,
};
@@ -238,6 +244,9 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ if (attr == &format_attr_discard.attr && !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return 0;
+
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
@@ -502,6 +511,12 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
u64 base, limit;
struct arm_spe_pmu_buf *buf;
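+ /*
+ * Discard mode runs the profiling unit without a buffer: every sampled
+ * record is dropped, so no aux session needs to be started.
+ */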
+ if (ATTR_CFG_GET_FLD(&event->attr, discard)) {
+ limit = FIELD_PREP(PMBLIMITR_EL1_FM, PMBLIMITR_EL1_FM_DISCARD);
+ limit |= PMBLIMITR_EL1_E;
+ goto out_write_limit;
+ }
+
/* Start a new aux session */
buf = perf_aux_output_begin(handle, event);
if (!buf) {
@@ -743,6 +758,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return -EOPNOTSUPP;
+
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
@@ -1027,6 +1046,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
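+ /* FEAT_SPEv1p2 introduced the discard fill mode (PMBLIMITR_EL1.FM) */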
+ if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
+ spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 420cadd108e7..17831e1920bd 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -858,16 +858,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
}
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
@@ -875,14 +865,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /*
- * Enable interrupt for this counter
- */
armv7_pmnc_enable_intens(idx);
-
- /*
- * Enable counter
- */
armv7_pmnc_enable_counter(idx);
}
@@ -898,18 +881,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
return;
}
- /*
- * Disable counter and interrupt
- */
-
- /*
- * Disable counter
- */
armv7_pmnc_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
armv7_pmnc_disable_intens(idx);
}
@@ -1477,14 +1449,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We set the event for the cycle counter because we
* have the ability to perform event filtering.
@@ -1494,10 +1458,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
else
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
@@ -1798,14 +1759,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We don't set the event for the cycle counter because we
* don't have the ability to perform event filtering.
@@ -1815,10 +1768,7 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
else if (idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 9cbea9675e21..f851e070760c 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -20,7 +20,6 @@
#include <linux/sysfs.h>
#include <linux/types.h>
-#define DWC_PCIE_VSEC_RAS_DES_ID 0x02
#define DWC_PCIE_EVENT_CNT_CTL 0x8
/*
@@ -100,14 +99,23 @@ struct dwc_pcie_dev_info {
struct list_head dev_node;
};
-struct dwc_pcie_vendor_id {
- int vendor_id;
+struct dwc_pcie_pmu_vsec_id {
+ u16 vendor_id;
+ u16 vsec_id;
+ u8 vsec_rev;
};
-static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
- {.vendor_id = PCI_VENDOR_ID_ALIBABA },
- {.vendor_id = PCI_VENDOR_ID_AMPERE },
- {.vendor_id = PCI_VENDOR_ID_QCOM },
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_pmu_vsec_id dwc_pcie_pmu_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_ALIBABA,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_AMPERE,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
{} /* terminator */
};
@@ -199,8 +207,8 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x09),
/* Group #1 */
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
@@ -519,31 +527,28 @@ static void dwc_pcie_unregister_pmu(void *data)
perf_pmu_unregister(&pcie_pmu->pmu);
}
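+/* Return the config-space offset of the RAS DES VSEC, or 0 if absent */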
-static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
{
- const struct dwc_pcie_vendor_id *vid;
- u16 vsec = 0;
+ const struct dwc_pcie_pmu_vsec_id *vid;
+ u16 vsec;
u32 val;
if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
- return false;
+ return 0;
- for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+ for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
- DWC_PCIE_VSEC_RAS_DES_ID);
- if (vsec)
- break;
+ vid->vsec_id);
+ if (vsec) {
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
+ &val);
+ if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return vsec;
+ }
+ }
}
- if (!vsec)
- return false;
-
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
- if (PCI_VNDR_HEADER_REV(val) != 0x04)
- return false;
-
- pci_dbg(pdev,
- "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
- return true;
+ return 0;
}
static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
@@ -560,15 +565,15 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
u32 sbdf;
sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
- plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
- pdev, sizeof(*pdev));
-
+ plat_dev = platform_device_register_simple("dwc_pcie_pmu", sbdf, NULL, 0);
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info)
+ if (!dev_info) {
+ platform_device_unregister(plat_dev);
return -ENOMEM;
+ }
/* Cache platform device to handle pci device hotplug */
dev_info->plat_dev = plat_dev;
@@ -587,7 +592,7 @@ static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
return NOTIFY_DONE;
if (dwc_pcie_register_dev(pdev))
return NOTIFY_BAD;
@@ -609,17 +614,26 @@ static struct notifier_block dwc_pcie_pmu_nb = {
static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
- struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct pci_dev *pdev;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
- u32 sbdf, val;
+ u32 sbdf;
u16 vsec;
int ret;
- vsec = pci_find_vsec_capability(pdev, pdev->vendor,
- DWC_PCIE_VSEC_RAS_DES_ID);
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
sbdf = plat_dev->id;
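+ /* The platform device id encodes the PCI segment, bus and devfn (sbdf) */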
+ pdev = pci_get_domain_bus_and_slot(sbdf >> 16, PCI_BUS_NUM(sbdf & 0xffff),
+ sbdf & 0xff);
+ if (!pdev) {
+ pr_err("No pdev found for the sbdf 0x%x\n", sbdf);
+ return -ENODEV;
+ }
+
+ vsec = dwc_pcie_des_cap(pdev);
+ if (!vsec) {
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ pci_dev_put(pdev);
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
return -ENOMEM;
@@ -634,7 +648,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
pcie_pmu->on_cpu = -1;
pcie_pmu->pmu = (struct pmu){
.name = name,
- .parent = &pdev->dev,
+ .parent = &plat_dev->dev,
.module = THIS_MODULE,
.attr_groups = dwc_pcie_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
@@ -724,19 +738,28 @@ static struct platform_driver dwc_pcie_pmu_driver = {
.driver = {.name = "dwc_pcie_pmu",},
};
+static void dwc_pcie_cleanup_devices(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
+ dwc_pcie_unregister_dev(dev_info);
+ }
+}
+
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
int ret;
for_each_pci_dev(pdev) {
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
continue;
ret = dwc_pcie_register_dev(pdev);
if (ret) {
pci_dev_put(pdev);
- return ret;
+ goto err_cleanup;
}
}
@@ -745,35 +768,35 @@ static int __init dwc_pcie_pmu_init(void)
dwc_pcie_pmu_online_cpu,
dwc_pcie_pmu_offline_cpu);
if (ret < 0)
- return ret;
+ goto err_cleanup;
dwc_pcie_pmu_hp_state = ret;
ret = platform_driver_register(&dwc_pcie_pmu_driver);
if (ret)
- goto platform_driver_register_err;
+ goto err_remove_cpuhp;
ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
if (ret)
- goto platform_driver_register_err;
+ goto err_unregister_driver;
notify = true;
return 0;
-platform_driver_register_err:
+err_unregister_driver:
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+err_remove_cpuhp:
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
-
+err_cleanup:
+ dwc_pcie_cleanup_devices();
return ret;
}
static void __exit dwc_pcie_pmu_exit(void)
{
- struct dwc_pcie_dev_info *dev_info, *tmp;
-
if (notify)
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
- list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
- dwc_pcie_unregister_dev(dev_info);
+ dwc_pcie_cleanup_devices();
platform_driver_unregister(&dwc_pcie_pmu_driver);
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 3c856d9a4e97..843f163e6c33 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -63,8 +63,21 @@
static DEFINE_IDA(ddr_ida);
+/*
+ * V1 supports one read transaction, one write transaction and one read
+ * beats event, corresponding respectively to counters 2, 3 and 4.
+ */
+#define DDR_PERF_AXI_FILTER_V1 0x1
+
+/*
+ * V2 supports one read beats and three write beats events, corresponding
+ * respectively to counters 2-5.
+ */
+#define DDR_PERF_AXI_FILTER_V2 0x2
+
struct imx_ddr_devtype_data {
const char *identifier; /* system PMU identifier for userspace */
+ unsigned int filter_ver; /* AXI filter version */
};
struct ddr_pmu {
@@ -83,24 +96,27 @@ struct ddr_pmu {
static const struct imx_ddr_devtype_data imx91_devtype_data = {
.identifier = "imx91",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx95_devtype_data = {
.identifier = "imx95",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
};
-static inline bool is_imx93(struct ddr_pmu *pmu)
+static inline bool axi_filter_v1(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx93_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
}
-static inline bool is_imx95(struct ddr_pmu *pmu)
+static inline bool axi_filter_v2(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx95_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
}
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
@@ -155,7 +171,7 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
struct imx9_pmu_events_attr {
struct device_attribute attr;
u64 id;
- const void *devtype_data;
+ const struct imx_ddr_devtype_data *devtype_data;
};
static ssize_t ddr_pmu_event_show(struct device *dev,
@@ -307,7 +323,8 @@ ddr_perf_events_attrs_is_visible(struct kobject *kobj,
if (!eattr->devtype_data)
return attr->mode;
- if (eattr->devtype_data != ddr_pmu->devtype_data)
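+ /* Also match events from another part with the same AXI filter version */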
+ if (eattr->devtype_data != ddr_pmu->devtype_data &&
+ eattr->devtype_data->filter_ver != ddr_pmu->devtype_data->filter_ver)
return 0;
return attr->mode;
@@ -624,11 +641,11 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
- if (is_imx93(pmu))
+ if (axi_filter_v1(pmu))
/* read trans, write trans, read beat */
imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
- if (is_imx95(pmu))
+ if (axi_filter_v2(pmu))
/* write beat, read beat2, read beat1, read beat */
imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 3f3fb1de11f5..b879b81adfdd 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -180,20 +180,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &cpa_pmu->sicl_id)) {
+ hisi_uncore_pmu_init_topology(cpa_pmu, &pdev->dev);
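+ /* IDs the firmware does not provide are left negative by the helper */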
+
+ if (cpa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Can not read sicl-id\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &cpa_pmu->index_id)) {
+ if (cpa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id\n");
return -EINVAL;
}
- cpa_pmu->ccl_id = -1;
- cpa_pmu->sccl_id = -1;
cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cpa_pmu->base))
return PTR_ERR(cpa_pmu->base);
@@ -227,34 +225,11 @@ static const struct attribute_group hisi_cpa_pmu_events_group = {
.attrs = hisi_cpa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
- .attrs = hisi_cpa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_cpa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
- &hisi_cpa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_identifier_group = {
- .attrs = hisi_cpa_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
&hisi_cpa_pmu_format_group,
&hisi_cpa_pmu_events_group,
- &hisi_cpa_pmu_cpumask_attr_group,
- &hisi_cpa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -311,8 +286,8 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
- cpa_pmu->sicl_id, cpa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%d",
+ cpa_pmu->topo.sicl_id, cpa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -389,6 +364,7 @@ static void __exit hisi_cpa_pmu_module_exit(void)
}
module_exit(hisi_cpa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index a6ebf2ec99d3..7e490f8868f2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -111,14 +111,14 @@ static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
* so there is no need to write event type, while it is programmable counter in
* PMU v2.
*/
-static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
u32 type)
{
u32 offset;
- if (hha_pmu->identifier >= HISI_PMU_V2) {
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
offset = DDRC_V2_EVENT_TYPE + 4 * idx;
- writel(type, hha_pmu->base + offset);
+ writel(type, ddrc_pmu->base + offset);
}
}
@@ -297,23 +297,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
+ hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
- &ddrc_pmu->index_id)) {
+ &ddrc_pmu->topo.index_id)) {
dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &ddrc_pmu->sccl_id)) {
+ if (ddrc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
return -EINVAL;
}
- /* DDRC PMUs only share the same SCCL */
- ddrc_pmu->ccl_id = -1;
ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
@@ -323,8 +322,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &ddrc_pmu->sub_id)) {
+ if (ddrc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -382,42 +380,19 @@ static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
.attrs = hisi_ddrc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
- .attrs = hisi_ddrc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_ddrc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
- &hisi_ddrc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
- .attrs = hisi_ddrc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
&hisi_ddrc_pmu_v1_format_group,
&hisi_ddrc_pmu_v1_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
&hisi_ddrc_pmu_v2_format_group,
&hisi_ddrc_pmu_v2_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -501,13 +476,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u_%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id,
- ddrc_pmu->sub_id);
+ "hisi_sccl%d_ddrc%d_%d",
+ ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
+ ddrc_pmu->topo.sub_id);
else
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
- ddrc_pmu->index_id);
+ "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
+ ddrc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -575,10 +550,10 @@ static void __exit hisi_ddrc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_ddrc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
-
}
module_exit(hisi_ddrc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 32624872596f..ca609db86046 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -295,12 +295,13 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
+ hisi_uncore_pmu_init_topology(hha_pmu, &pdev->dev);
+
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &hha_pmu->sccl_id)) {
+ if (hha_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
@@ -309,8 +310,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
* Early BIOS versions exposed _UID by mistake, so prefer the
* "hisilicon,idx-id" property when it is available and fall back to _UID.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &hha_pmu->index_id)) {
+ if (hha_pmu->topo.index_id < 0) {
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
"_UID", NULL, &id);
if (ACPI_FAILURE(status)) {
@@ -318,10 +318,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- hha_pmu->index_id = id;
+ hha_pmu->topo.index_id = id;
}
- /* HHA PMUs only share the same SCCL */
- hha_pmu->ccl_id = -1;
hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
@@ -407,42 +405,19 @@ static const struct attribute_group hisi_hha_pmu_v2_events_group = {
.attrs = hisi_hha_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
- .attrs = hisi_hha_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_hha_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
- &hisi_hha_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_hha_pmu_identifier_group = {
- .attrs = hisi_hha_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
&hisi_hha_pmu_v1_format_group,
&hisi_hha_pmu_v1_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
&hisi_hha_pmu_v2_format_group,
&hisi_hha_pmu_v2_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -510,8 +485,8 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
- hha_pmu->sccl_id, hha_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_hha%d",
+ hha_pmu->topo.sccl_id, hha_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -581,6 +556,7 @@ static void __exit hisi_hha_pmu_module_exit(void)
}
module_exit(hisi_hha_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index c235b46ce873..412fc3a97963 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -355,18 +355,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ hisi_uncore_pmu_init_topology(l3c_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &l3c_pmu->sccl_id)) {
+ if (l3c_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &l3c_pmu->ccl_id)) {
+ if (l3c_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
return -EINVAL;
}
@@ -441,42 +441,19 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
- .attrs = hisi_l3c_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_l3c_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
- &hisi_l3c_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_l3c_pmu_identifier_group = {
- .attrs = hisi_l3c_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
&hisi_l3c_pmu_v2_format_group,
&hisi_l3c_pmu_v2_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -544,8 +521,8 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->ccl_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -615,6 +592,7 @@ static void __exit hisi_l3c_pmu_module_exit(void)
}
module_exit(hisi_l3c_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index c0f5d7c73e06..a0142684e379 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -269,25 +269,22 @@ static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
+ hisi_uncore_pmu_init_topology(pa_pmu, &pdev->dev);
+
/*
* As PA PMU is in a SICL, use the SICL_ID and the index ID
* to identify the PA PMU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sicl_id)) {
+ if (pa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &pa_pmu->index_id)) {
+ if (pa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- pa_pmu->ccl_id = -1;
- pa_pmu->sccl_id = -1;
-
pa_pmu->dev_info = device_get_match_data(&pdev->dev);
if (!pa_pmu->dev_info)
return -ENODEV;
@@ -356,29 +353,6 @@ static const struct attribute_group hisi_h60pa_pmu_events_group = {
.attrs = hisi_h60pa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
- .attrs = hisi_pa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_pa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
- &hisi_pa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_identifier_group = {
- .attrs = hisi_pa_pmu_identifier_attrs,
-};
-
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
.mask_offset = PA_INT_MASK,
.clear_offset = PA_INT_CLEAR,
@@ -388,8 +362,8 @@ static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -402,8 +376,8 @@ static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v3_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -422,8 +396,8 @@ static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_h60pa_pmu_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -488,9 +462,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
- pa_pmu->sicl_id, pa_pmu->dev_info->name,
- pa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%d",
+ pa_pmu->topo.sicl_id, pa_pmu->dev_info->name,
+ pa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -569,6 +543,7 @@ static void __exit hisi_pa_pmu_module_exit(void)
}
module_exit(hisi_pa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 918cdc31de57..ef058b1dd509 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <asm/cputype.h>
#include <asm/local64.h>
@@ -34,7 +35,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
-EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_event_sysfs_show, "HISI_PMU");
/*
* sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -46,7 +47,52 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
-EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_cpumask_sysfs_show, "HISI_PMU");
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
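+/* Unlike "cpumask" (the single event-reading CPU), this lists all associated CPUs */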
+static ssize_t hisi_associated_cpus_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &hisi_pmu->associated_cpus);
+}
+static DEVICE_ATTR(associated_cpus, 0444, hisi_associated_cpus_sysfs_show, NULL);
+
+static struct attribute *hisi_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ &dev_attr_associated_cpus.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_cpumask_attr_group = {
+ .attrs = hisi_pmu_cpumask_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_cpumask_attr_group, "HISI_PMU");
+
+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_identifier_attr_show, "HISI_PMU");
+
+static struct device_attribute hisi_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pmu_identifier_attrs[] = {
+ &hisi_pmu_identifier_attr.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_identifier_group = {
+ .attrs = hisi_pmu_identifier_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_identifier_group, "HISI_PMU");
static bool hisi_validate_event_group(struct perf_event *event)
{
@@ -96,17 +142,7 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
-
-ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
-
- return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_get_event_idx, "HISI_PMU");
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
@@ -165,7 +201,7 @@ int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_irq, "HISI_PMU");
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
@@ -219,7 +255,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_init, "HISI_PMU");
/*
* Set the counter to count the event that we're interested in,
@@ -273,7 +309,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event)
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_set_event_period, "HISI_PMU");
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
@@ -294,7 +330,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event)
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
local64_add(delta, &event->count);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_update, "HISI_PMU");
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
@@ -317,7 +353,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags)
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_start, "HISI_PMU");
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
@@ -334,7 +370,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_stop, "HISI_PMU");
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
@@ -357,7 +393,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_add, "HISI_PMU");
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
@@ -369,14 +405,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_del, "HISI_PMU");
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_read, "HISI_PMU");
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
@@ -389,7 +425,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu)
hisi_pmu->ops->start_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_enable, "HISI_PMU");
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
@@ -397,7 +433,7 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_disable, "HISI_PMU");
/*
@@ -444,22 +480,19 @@ static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
*/
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
int sccl_id, ccl_id;
- /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
- if (hisi_pmu->sccl_id == -1)
- return true;
-
- if (hisi_pmu->ccl_id == -1) {
+ if (topo->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
- return sccl_id == hisi_pmu->sccl_id;
+ return sccl_id == topo->sccl_id;
}
hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
- return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+ return sccl_id == topo->sccl_id && ccl_id == topo->ccl_id;
}
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
@@ -467,13 +500,25 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ /*
+ * If the CPU is not associated with the PMU, initialize hisi_pmu->on_cpu
+ * based on NUMA locality if it hasn't been initialized yet. For PMUs
+ * that do have associated CPUs, it'll be updated later.
+ */
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) {
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ hisi_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(hisi_pmu->dev));
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(hisi_pmu->on_cpu)));
return 0;
+ }
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
- /* If another CPU is already managing this PMU, simply return. */
- if (hisi_pmu->on_cpu != -1)
+ /* If another associated CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1 &&
+ cpumask_test_cpu(hisi_pmu->on_cpu, &hisi_pmu->associated_cpus))
return 0;
/* Use this CPU in cpumask for event counting */
@@ -484,7 +529,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_online_cpu, "HISI_PMU");
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
@@ -492,9 +537,6 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
node);
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
- return 0;
-
/* Nothing to do if this CPU doesn't own the PMU */
if (hisi_pmu->on_cpu != cpu)
return 0;
@@ -502,10 +544,17 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
- /* Choose a new CPU to migrate ownership of the PMU to */
+ /*
+ * Migrate ownership of the PMU to a new CPU chosen from the PMU's
+ * online associated CPUs if possible; if none is online, migrate to
+ * any other online CPU.
+ */
target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
@@ -515,7 +564,36 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_offline_cpu, "HISI_PMU");
+
+/*
+ * Retrieve the topology information from the firmware for the hisi_pmu device.
+ * A topology ID will be -1 if we cannot initialize it, either because the
+ * PMU is not located at that level of the topology or because the firmware
+ * needs to be fixed.
+ */
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev)
+{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
+
+ topo->sccl_id = -1;
+ topo->ccl_id = -1;
+ topo->index_id = -1;
+ topo->sub_id = -1;
+
+ if (device_property_read_u32(dev, "hisilicon,scl-id", &topo->sccl_id))
+ dev_dbg(dev, "no scl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,ccl-id", &topo->ccl_id))
+ dev_dbg(dev, "no ccl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,idx-id", &topo->index_id))
+ dev_dbg(dev, "no idx-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,sub-id", &topo->sub_id))
+ dev_dbg(dev, "no sub-id present\n");
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_topology, "HISI_PMU");
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
@@ -535,7 +613,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
-EXPORT_SYMBOL_GPL(hisi_pmu_init);
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_init, "HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b2d43b72bf..f4fed2544877 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -81,27 +81,55 @@ struct hisi_pmu_hwevents {
const struct attribute_group **attr_groups;
};
+/**
+ * struct hisi_pmu_topology - Describe the topology hierarchy on which the PMU
+ * is located.
+ * @sccl_id: ID of the SCCL on which the PMU is located.
+ * @sicl_id: ID of the SICL on which the PMU is located.
+ * @scl_id: ID used by code that is unaware of the SCCL/SICL distinction.
+ * @ccl_id: ID of the CCL (CPU cluster) on which the PMU is located.
+ * @index_id: ID of the PMU module if there are several PMUs at a
+ * particular location in the topology.
+ * @sub_id: submodule ID of the PMU. For example, we use this for DDRC PMU v2
+ * since each DDRC has more than one DMC.
+ *
+ * An ID will be -1 if the PMU isn't located at that level of the topology.
+ */
+struct hisi_pmu_topology {
+ /*
+ * SCCL (Super CPU Cluster) and SICL (Super I/O Cluster) are parallel,
+ * so a PMU cannot be located on both an SCCL and a SICL. If the SCCL/SICL
+ * distinction is not relevant, use scl_id instead.
+ */
+ union {
+ int sccl_id;
+ int sicl_id;
+ int scl_id;
+ };
+ int ccl_id;
+ int index_id;
+ int sub_id;
+};
+
/* Generic pmu struct for different pmu types */
struct hisi_pmu {
struct pmu pmu;
const struct hisi_uncore_ops *ops;
const struct hisi_pmu_dev_info *dev_info;
struct hisi_pmu_hwevents pmu_events;
- /* associated_cpus: All CPUs associated with the PMU */
+ struct hisi_pmu_topology topo;
+ /*
+ * CPUs associated with the PMU, which are preferred for counting.
+ * May be empty if the PMU has no association (e.g. a PMU on a SICL), in
+ * which case any online CPU will be used.
+ */
cpumask_t associated_cpus;
/* CPU used for counting */
int on_cpu;
int irq;
struct device *dev;
struct hlist_node node;
- int sccl_id;
- int sicl_id;
- int ccl_id;
void __iomem *base;
- /* the ID of the PMU modules */
- u32 index_id;
- /* For DDRC PMU v2: each DDRC has more than one DMC */
- u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -109,6 +137,10 @@ struct hisi_pmu {
u32 identifier;
};
+/* Generic implementation of cpumask/identifier group */
+extern const struct attribute_group hisi_pmu_cpumask_attr_group;
+extern const struct attribute_group hisi_pmu_identifier_group;
+
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -132,6 +164,7 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
char *page);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
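The anonymous union in struct hisi_pmu_topology is the load-bearing detail: a PMU sits in either an SCCL or a SICL, never both, so the two IDs can share storage, and scl_id reads the same slot when the distinction doesn't matter. An illustrative, invented type showing the aliasing:

struct demo_topo {
	union {
		int sccl_id;	/* valid for a PMU in a Super CPU cluster */
		int sicl_id;	/* valid for a PMU in a Super I/O cluster */
		int scl_id;	/* neutral alias over the same storage */
	};
};

/* -1 is the "not initialized / not applicable" sentinel for every ID */
static bool demo_topo_has_cluster(const struct demo_topo *t)
{
	return t->scl_id >= 0;
}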
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index c5f4764ee888..dbd079016fc4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -288,25 +288,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
+ hisi_uncore_pmu_init_topology(sllc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and the index ID to identify the SLLC PMU,
* while SCCL_ID is from MPIDR_EL1 by CPU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &sllc_pmu->sccl_id)) {
+ if (sllc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Cannot read sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &sllc_pmu->index_id)) {
+ if (sllc_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- /* SLLC PMUs only share the same SCCL */
- sllc_pmu->ccl_id = -1;
-
sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sllc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
@@ -347,34 +344,11 @@ static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
.attrs = hisi_sllc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
- .attrs = hisi_sllc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_sllc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
- &hisi_sllc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_identifier_group = {
- .attrs = hisi_sllc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
&hisi_sllc_pmu_v2_format_group,
&hisi_sllc_pmu_v2_events_group,
- &hisi_sllc_pmu_cpumask_attr_group,
- &hisi_sllc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -433,8 +407,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
- sllc_pmu->sccl_id, sllc_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_sllc%d",
+ sllc_pmu->topo.sccl_id, sllc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -507,6 +481,7 @@ static void __exit hisi_sllc_pmu_module_exit(void)
}
module_exit(hisi_sllc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 481dcc9e8fbf..03cb9b564b99 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
-#include <linux/property.h>
#include "hisi_uncore_pmu.h"
@@ -366,25 +365,24 @@ static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
+ hisi_uncore_pmu_init_topology(uc_pmu, &pdev->dev);
+
/*
* Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
* identify the topology information of UC PMU devices in the chip.
* There are several CCLs per SCCL, and 4 UC PMUs per CCL.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &uc_pmu->sccl_id)) {
+ if (uc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &uc_pmu->ccl_id)) {
+ if (uc_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &uc_pmu->sub_id)) {
+ if (uc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -439,34 +437,11 @@ static const struct attribute_group hisi_uc_pmu_events_group = {
.attrs = hisi_uc_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
- .attrs = hisi_uc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_uc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
- &hisi_uc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_uc_pmu_identifier_group = {
- .attrs = hisi_uc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
&hisi_uc_pmu_format_group,
&hisi_uc_pmu_events_group,
- &hisi_uc_pmu_cpumask_attr_group,
- &hisi_uc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -538,8 +513,9 @@ static int hisi_uc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
- uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%d",
+ uc_pmu->topo.sccl_id, uc_pmu->topo.ccl_id,
+ uc_pmu->topo.sub_id);
if (!name)
return -ENOMEM;
@@ -613,6 +589,7 @@ static void __exit hisi_uc_pmu_module_exit(void)
}
module_exit(hisi_uc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 8860d9f687ae..72ac17efd846 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+/*
+ * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
*
- * Copyright (C) 2021 Marvell.
+ * Copyright (C) 2021-2024 Marvell.
*/
#include <linux/init.h>
@@ -14,24 +15,29 @@
#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
-#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
-#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+#define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define ODY_DDRC_PERF_CNT_OP_MODE_CTRL 0x20020
+#define OP_MODE_CTRL_VAL_MANUAL 0x1
/* Performance Counters Start Operation Control Registers */
-#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define ODY_DDRC_PERF_CNT_START_OP_CTRL 0x200A0
#define START_OP_CTRL_VAL_START 0x1ULL
#define START_OP_CTRL_VAL_ACTIVE 0x2
/* Performance Counters End Operation Control Registers */
-#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define ODY_DDRC_PERF_CNT_END_OP_CTRL 0x200E0
#define END_OP_CTRL_VAL_END 0x1ULL
/* Performance Counters End Status Registers */
-#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038
+#define ODY_DDRC_PERF_CNT_END_STATUS 0x20120
#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
/* Performance Counters Configuration Registers */
-#define DDRC_PERF_CFG_BASE 0x8040
+#define CN10K_DDRC_PERF_CFG_BASE 0x8040
+#define ODY_DDRC_PERF_CFG_BASE 0x20160
/* 8 Generic event counter + 2 fixed event counters */
#define DDRC_PERF_NUM_GEN_COUNTERS 8
@@ -42,18 +48,28 @@
DDRC_PERF_NUM_FIX_COUNTERS)
/* Generic event counter registers */
-#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define DDRC_PERF_CFG(base, n) ((base) + 8 * (n))
#define EVENT_ENABLE BIT_ULL(63)
/* Two dedicated event counters for DDR reads and writes */
#define EVENT_DDR_READS 101
#define EVENT_DDR_WRITES 100
+#define DDRC_PERF_REG(base, n) ((base) + 8 * (n))
/*
* programmable events IDs in programmable event counters.
* DO NOT change these event-id numbers, they are used to
* program event bitmap in h/w.
*/
+#define EVENT_DFI_CMD_IS_RETRY 61
+#define EVENT_RD_UC_ECC_ERROR 60
+#define EVENT_RD_CRC_ERROR 59
+#define EVENT_CAPAR_ERROR 58
+#define EVENT_WR_CRC_ERROR 57
+#define EVENT_DFI_PARITY_POISON 56
+#define EVENT_RETRY_FIFO_FULL 46
+#define EVENT_DFI_CYCLES 45
+
#define EVENT_OP_IS_ZQLATCH 55
#define EVENT_OP_IS_ZQSTART 54
#define EVENT_OP_IS_TCR_MRR 53
@@ -102,28 +118,37 @@
#define EVENT_HIF_RD_OR_WR 1
/* Event counter value registers */
-#define DDRC_PERF_CNT_VALUE_BASE 0x8080
-#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+#define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define ODY_DDRC_PERF_CNT_VALUE_BASE 0x201C0
/* Fixed event counter enable/disable register */
-#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0
#define DDRC_PERF_FREERUN_WRITE_EN 0x1
#define DDRC_PERF_FREERUN_READ_EN 0x2
/* Fixed event counter control register */
-#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define ODY_DDRC_PERF_CNT_FREERUN_CTRL 0x20240
#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
#define DDRC_FREERUN_READ_CNT_CLR 0x2
-/* Fixed event counter value register */
-#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
-#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+/* Fixed event counter clear register, defined only for Odyssey */
+#define ODY_DDRC_PERF_CNT_FREERUN_CLR 0x20248
+
#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+/* Fixed event counter value register */
+#define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define ODY_DDRC_PERF_CNT_VALUE_WR_OP 0x20250
+#define ODY_DDRC_PERF_CNT_VALUE_RD_OP 0x20258
+
struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
+ const struct ddr_pmu_platform_data *p_data;
+ const struct ddr_pmu_ops *ops;
unsigned int cpu;
struct device *dev;
int active_events;
@@ -132,8 +157,36 @@ struct cn10k_ddr_pmu {
struct hlist_node node;
};
+struct ddr_pmu_ops {
+ void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
+};
+
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+struct ddr_pmu_platform_data {
+ u64 counter_overflow_val;
+ u64 counter_max_val;
+ u64 cnt_base;
+ u64 cfg_base;
+ u64 cnt_op_mode_ctrl;
+ u64 cnt_start_op_ctrl;
+ u64 cnt_end_op_ctrl;
+ u64 cnt_end_status;
+ u64 cnt_freerun_en;
+ u64 cnt_freerun_ctrl;
+ u64 cnt_freerun_clr;
+ u64 cnt_value_wr_op;
+ u64 cnt_value_rd_op;
+ bool is_cn10k;
+ bool is_ody;
+};
+
static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -209,6 +262,85 @@ static struct attribute *cn10k_ddr_perf_events_attrs[] = {
NULL
};
+static struct attribute *odyssey_ddr_perf_events_attrs[] = {
+ /* Programmable */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access,
+ EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access,
+ EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access,
+ EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access,
+ EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr,
+ EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown,
+ EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full,
+ EVENT_RETRY_FIFO_FULL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison,
+ EVENT_DFI_PARITY_POISON),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group odyssey_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = odyssey_ddr_perf_events_attrs,
+};
+
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
.name = "events",
.attrs = cn10k_ddr_perf_events_attrs,
@@ -254,6 +386,13 @@ static const struct attribute_group *cn10k_attr_groups[] = {
NULL,
};
+static const struct attribute_group *odyssey_attr_groups[] = {
+ &odyssey_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL
+};
+
/* Default poll timeout is 100 sec, which is ample for a
* 48 bit counter incrementing at a max of 5.6 GT/s, which may take many
* hours to overflow.
@@ -266,9 +405,18 @@ static ktime_t cn10k_ddr_pmu_timer_period(void)
return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
}
-static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
+ struct cn10k_ddr_pmu *ddr_pmu)
{
+ int err = 0;
+
switch (eventid) {
+ case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
+ if (!ddr_pmu->p_data->is_ody) {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
*event_bitmap = (1ULL << (eventid - 1));
@@ -279,11 +427,12 @@ static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
*event_bitmap = (0xFULL << (eventid - 1));
break;
default:
- pr_err("%s Invalid eventid %d\n", __func__, eventid);
- return -EINVAL;
+ err = -EINVAL;
}
- return 0;
+ if (err)
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return err;
}
static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
@@ -351,9 +500,33 @@ static int cn10k_ddr_perf_event_init(struct perf_event *event)
return 0;
}
+static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_start_op_ctrl;
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
+static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_end_op_ctrl;
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl;
+ const struct ddr_pmu_ops *ops = pmu->ops;
+ bool is_ody = pmu->p_data->is_ody;
u32 reg;
u64 val;
@@ -363,7 +536,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
}
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
- reg = DDRC_PERF_CFG(counter);
+ reg = DDRC_PERF_CFG(p_data->cfg_base, counter);
val = readq_relaxed(pmu->base + reg);
if (enable)
@@ -372,40 +545,52 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
val &= ~EVENT_ENABLE;
writeq_relaxed(val, pmu->base + reg);
- } else {
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
- if (enable) {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val |= DDRC_PERF_FREERUN_READ_EN;
- else
- val |= DDRC_PERF_FREERUN_WRITE_EN;
- } else {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val &= ~DDRC_PERF_FREERUN_READ_EN;
- else
- val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ if (is_ody) {
+ if (enable) {
+ /*
+ * Setup the PMU counter to work in
+ * manual mode
+ */
+ reg = DDRC_PERF_REG(ctrl_reg, counter);
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL,
+ pmu->base + reg);
+
+ cn10k_ddr_perf_counter_start(pmu, counter);
+ } else {
+ cn10k_ddr_perf_counter_stop(pmu, counter);
+ }
}
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ ops->enable_read_freerun_counter(pmu, enable);
+ else
+ ops->enable_write_freerun_counter(pmu, enable);
}
}
static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_rd_op);
if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_wr_op);
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ val = readq_relaxed(pmu->base +
+ DDRC_PERF_REG(p_data->cnt_base, counter));
return val;
}
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
struct hw_perf_event *hwc = &event->hw;
u64 prev_count, new_count, mask;
@@ -414,7 +599,7 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
- mask = DDRC_PERF_CNT_MAX_VALUE;
+ mask = p_data->counter_max_val;
local64_add((new_count - prev_count) & mask, &event->count);
}
@@ -435,6 +620,8 @@ static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
@@ -454,8 +641,8 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
/* Generic counters, configure event id */
- reg_offset = DDRC_PERF_CFG(counter);
- ret = ddr_perf_get_event_bitmap(config, &val);
+ reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter);
+ ret = ddr_perf_get_event_bitmap(config, &val, pmu);
if (ret)
return ret;
@@ -463,11 +650,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val = DDRC_FREERUN_READ_CNT_CLR;
+ ops->clear_read_freerun_counter(pmu);
else
- val = DDRC_FREERUN_WRITE_CNT_CLR;
-
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ ops->clear_write_freerun_counter(pmu);
}
hwc->state |= PERF_HES_STOPPED;
@@ -512,17 +697,19 @@ static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
- DDRC_PERF_CNT_START_OP_CTRL);
+ p_data->cnt_start_op_ctrl);
}
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
- DDRC_PERF_CNT_END_OP_CTRL);
+ p_data->cnt_end_op_ctrl);
}
static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
@@ -547,8 +734,123 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
}
}
+static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+}
+
+static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ /*
+ * On reaching its maximum value the counter freezes there. The event
+ * is updated, and the counter is stopped and restarted so that it
+ * starts counting from zero again.
+ */
+ cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
+ cn10k_ddr_perf_counter_stop(pmu, evt_idx);
+ cn10k_ddr_perf_counter_start(pmu, evt_idx);
+}
+
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
@@ -586,11 +888,9 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
continue;
value = cn10k_ddr_perf_read_counter(pmu, i);
- if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ if (value == p_data->counter_max_val) {
pr_info("Counter-(%d) reached max value\n", i);
- cn10k_ddr_perf_event_update_all(pmu);
- cn10k_ddr_perf_pmu_disable(&pmu->pmu);
- cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ ops->pmu_overflow_handler(pmu, i);
}
}
@@ -629,11 +929,68 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+static const struct ddr_pmu_ops ddr_pmu_ops = {
+ .enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_overflow_handler,
+};
+
+#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
+static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
+ .counter_overflow_val = BIT_ULL(48),
+ .counter_max_val = GENMASK_ULL(48, 0),
+ .cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = CN10K_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
+ .cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = 0,
+ .cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_cn10k = true,
+};
+#endif
+
+static const struct ddr_pmu_ops ddr_pmu_ody_ops = {
+ .enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_ody_overflow_handler,
+};
+
+#ifdef CONFIG_ACPI
+static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = {
+ .counter_overflow_val = 0,
+ .counter_max_val = GENMASK_ULL(63, 0),
+ .cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = ODY_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = 0,
+ .cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR,
+ .cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_ody = true,
+};
+#endif
+
static int cn10k_ddr_perf_probe(struct platform_device *pdev)
{
+ const struct ddr_pmu_platform_data *dev_data;
struct cn10k_ddr_pmu *ddr_pmu;
struct resource *res;
void __iomem *base;
+ bool is_cn10k;
+ bool is_ody;
char *name;
int ret;
@@ -644,30 +1001,60 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
ddr_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, ddr_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
ddr_pmu->base = base;
- /* Setup the PMU counter to work in manual mode */
- writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
- DDRC_PERF_CNT_OP_MODE_CTRL);
-
- ddr_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- .task_ctx_nr = perf_invalid_context,
- .attr_groups = cn10k_attr_groups,
- .event_init = cn10k_ddr_perf_event_init,
- .add = cn10k_ddr_perf_event_add,
- .del = cn10k_ddr_perf_event_del,
- .start = cn10k_ddr_perf_event_start,
- .stop = cn10k_ddr_perf_event_stop,
- .read = cn10k_ddr_perf_event_update,
- .pmu_enable = cn10k_ddr_perf_pmu_enable,
- .pmu_disable = cn10k_ddr_perf_pmu_disable,
- };
+ ddr_pmu->p_data = dev_data;
+ is_cn10k = ddr_pmu->p_data->is_cn10k;
+ is_ody = ddr_pmu->p_data->is_ody;
+
+ if (is_cn10k) {
+ ddr_pmu->ops = &ddr_pmu_ops;
+ /* Setup the PMU counter to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
+ ddr_pmu->p_data->cnt_op_mode_ctrl);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+ }
+
+ if (is_ody) {
+ ddr_pmu->ops = &ddr_pmu_ody_ops;
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = odyssey_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ };
+ }
/* Choose this cpu to collect perf data */
ddr_pmu->cpu = raw_smp_processor_id();
@@ -677,8 +1064,8 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+ hrtimer_setup(&ddr_pmu->hrtimer, cn10k_ddr_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
@@ -688,7 +1075,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (ret)
goto error;
- pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);
return 0;
error:
cpuhp_state_remove_instance_nocalls(
@@ -710,7 +1097,7 @@ static void cn10k_ddr_perf_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-ddr-pmu", },
+ { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
@@ -718,7 +1105,8 @@ MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
- {"MRVL000A", 0},
+ {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata },
+ {"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata},
{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
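Both match tables now carry a pointer to per-SoC platform data, so probe stays generic: device_get_match_data() hands back the same pointer whether the device matched by OF compatible or by ACPI HID, and the register offsets plus an ops vtable hang off it. A stripped-down sketch of the pattern; the demo types are invented:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>

struct demo_ops;

struct demo_pdata {
	const struct demo_ops *ops;	/* per-SoC behaviour */
	u64 cfg_base;			/* per-SoC register layout */
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_pdata *pdata = device_get_match_data(&pdev->dev);

	if (!pdata)
		return -ENODEV;

	/* From here on, register access needs no per-SoC if/else */
	platform_set_drvdata(pdev, (void *)pdata);
	return 0;
}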
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index cda55ee35eee..51ccb0befa05 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -37,6 +37,15 @@ struct tad_pmu {
DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};
+enum mrvl_tad_pmu_version {
+ TAD_PMU_V1 = 1,
+ TAD_PMU_V2,
+};
+
+struct tad_pmu_data {
+ int id;
+};
+
static int tad_pmu_cpuhp_state;
static void tad_pmu_event_counter_read(struct perf_event *event)
@@ -214,6 +223,24 @@ static const struct attribute_group tad_pmu_events_attr_group = {
.attrs = tad_pmu_event_attrs,
};
+static struct attribute *ody_tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_tot_cycle, 0xFF),
+ NULL
+};
+
+static const struct attribute_group ody_tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ody_tad_pmu_event_attrs,
+};
+
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *tad_pmu_format_attrs[] = {
@@ -252,8 +279,16 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ody_tad_pmu_attr_groups[] = {
+ &ody_tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL
+};
+
static int tad_pmu_probe(struct platform_device *pdev)
{
+ const struct tad_pmu_data *dev_data;
struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
@@ -261,6 +296,7 @@ static int tad_pmu_probe(struct platform_device *pdev)
u32 tad_pmu_page_size;
u32 tad_page_size;
u32 tad_cnt;
+ int version;
int i, ret;
char *name;
@@ -270,6 +306,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tad_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+ version = dev_data->id;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Mem resource not found\n");
@@ -319,7 +362,6 @@ static int tad_pmu_probe(struct platform_device *pdev)
tad_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
- .attr_groups = tad_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
PERF_PMU_CAP_NO_INTERRUPT,
.task_ctx_nr = perf_invalid_context,
@@ -332,6 +374,11 @@ static int tad_pmu_probe(struct platform_device *pdev)
.read = tad_pmu_event_counter_read,
};
+ if (version == TAD_PMU_V1)
+ tad_pmu->pmu.attr_groups = tad_pmu_attr_groups;
+ else
+ tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups;
+
tad_pmu->cpu = raw_smp_processor_id();
/* Register pmu instance for cpu hotplug */
@@ -360,16 +407,29 @@ static void tad_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
}
+#if defined(CONFIG_OF) || defined(CONFIG_ACPI)
+static const struct tad_pmu_data tad_pmu_data = {
+ .id = TAD_PMU_V1,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct tad_pmu_data tad_pmu_v2_data = {
+ .id = TAD_PMU_V2,
+};
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-tad-pmu", },
+ { .compatible = "marvell,cn10k-tad-pmu", .data = &tad_pmu_data },
{},
};
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
- {"MRVL000B", 0},
+ {"MRVL000B", (kernel_ulong_t)&tad_pmu_data},
+ {"MRVL000D", (kernel_ulong_t)&tad_pmu_v2_data},
{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
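The TAD side is the lighter version of the same match-data idea: the data is just a version tag, and the only divergence is which sysfs attribute groups get installed. A compact sketch reusing the identifiers above; the helper itself is invented:

static int demo_tad_pick_groups(struct platform_device *pdev, struct pmu *pmu)
{
	const struct tad_pmu_data *data = device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;

	/* v2 (Odyssey) swaps in its own event list; format/cpumask are shared */
	pmu->attr_groups = data->id == TAD_PMU_V1 ? tad_pmu_attr_groups
						  : ody_tad_pmu_attr_groups;
	return 0;
}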
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 194c153e5d71..698de8ddf895 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -1317,7 +1317,7 @@ static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
return 0;
}
-static struct ctl_table sbi_pmu_sysctl_table[] = {
+static const struct ctl_table sbi_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
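The one-word change above works because the sysctl registration paths in recent kernels accept const tables, so a table that is never modified at runtime can live in rodata. A minimal sketch with invented names (assuming a kernel new enough that no sentinel entry is required):

#include <linux/sysctl.h>

static int demo_value;

static const struct ctl_table demo_sysctl_table[] = {
	{
		.procname	= "demo_value",
		.data		= &demo_value,
		.maxlen		= sizeof(demo_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

/* registered as before, e.g. register_sysctl("kernel", demo_sysctl_table) */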
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index cadd60221b8f..6ed4707bd6bb 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -752,9 +752,8 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
- hrtimer_init(&tx2_pmu->hrtimer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ hrtimer_setup(&tx2_pmu->hrtimer, tx2_pmu->hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = tx2_uncore_pmu_register(tx2_pmu);
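hrtimer_setup() folds the old two-step initialization (hrtimer_init() plus a raw assignment to timer->function) into a single call, so the core can guarantee a callback is always set. A sketch of the converted pattern with an invented one-shot callback:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* one-shot: do not rearm */
	return HRTIMER_NORESTART;
}

static void demo_arm_timer(struct hrtimer *timer)
{
	hrtimer_setup(timer, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}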
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index cd159a71b23c..29b8fd4b9351 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index d3ccf547ba1c..e4c0a82d16d9 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -325,31 +325,26 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw)
return container_of(hw, struct fsl_samsung_hdmi_phy, hw);
}
-static void
+static int
fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
const struct phy_config *cfg)
{
u32 pclk = cfg->pixclk;
u32 fld_tg_code;
- u32 pclk_khz;
- u8 div = 1;
-
- switch (cfg->pixclk) {
- case 22250000 ... 47500000:
- div = 1;
- break;
- case 50349650 ... 99000000:
- div = 2;
- break;
- case 100699300 ... 198000000:
- div = 4;
- break;
- case 205000000 ... 297000000:
- div = 8;
- break;
+ u32 int_pllclk;
+ u8 div;
+
+ /* Find int_pllclk speed */
+ for (div = 0; div < 4; div++) {
+ int_pllclk = pclk / (1 << div);
+ if (int_pllclk < (50 * MHZ))
+ break;
}
- writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG(12));
+ if (unlikely(div == 4))
+ return -EINVAL;
+
+ writeb(FIELD_PREP(REG12_CK_DIV_MASK, div), phy->regs + PHY_REG(12));
/*
* Calculation for the frequency lock detector target code (fld_tg_code)
@@ -362,10 +357,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
* settings rounding up always too. TODO: Check if that is
* correct.
*/
- pclk /= div;
- pclk_khz = pclk / 1000;
- fld_tg_code = 256 * 1000 * 1000 / pclk_khz * 24;
- fld_tg_code = DIV_ROUND_UP(fld_tg_code, 1000);
+
+ fld_tg_code = DIV_ROUND_UP(24 * MHZ * 256, int_pllclk);
/* FLD_TOL and FLD_RP_CODE taken from downstream driver */
writeb(FIELD_PREP(REG13_TG_CODE_LOW_MASK, fld_tg_code),
@@ -374,6 +367,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
FIELD_PREP(REG14_RP_CODE_MASK, 2) |
FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8),
phy->regs + PHY_REG(14));
+
+ return 0;
}
static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s)
@@ -406,16 +401,15 @@ static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u1
continue;
/*
- * TODO: Ref Manual doesn't state the range of _m
- * so this should be further refined if possible.
- * This range was set based on the original values
- * in the lookup table
+ * The Ref manual doesn't explicitly state the range of M,
+ * but it does show it as an 8-bit value, so reject
+ * any value above 255.
*/
tmp = (u64)fout * (_p * _s);
do_div(tmp, 24 * MHZ);
- _m = tmp;
- if (_m < 0x30 || _m > 0x7b)
+ if (tmp > 255)
continue;
+ _m = tmp;
/*
* Rev 2 of the Ref Manual states the
@@ -440,9 +434,13 @@ static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u1
min_delta = delta;
best_freq = tmp;
}
+
+ /* If we have an exact match, stop looking for a better value */
+ if (!delta)
+ goto done;
}
}
-
+done:
if (best_freq) {
*p = best_p;
*m = best_m;
@@ -473,7 +471,11 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy,
writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK,
cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21));
- fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ ret = fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ if (ret) {
+ dev_err(phy->dev, "pixclock too large\n");
+ return ret;
+ }
writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33));
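The rewritten lock-detector setup replaces the hard-coded pixel-clock ranges with a derivation: halve pclk (div = 0..3, i.e. /1 to /8) until the internal PLL clock drops below 50 MHz, then compute the frequency-lock target code as 256 periods of the 24 MHz reference. A standalone restatement of that math; the MHZ definition is an assumption matching the driver's usage:

#include <linux/errno.h>
#include <linux/math.h>
#include <linux/types.h>

#define MHZ 1000000UL	/* assumed to match the driver's definition */

static int demo_lock_det(u32 pclk, u8 *div, u32 *fld_tg_code)
{
	u32 int_pllclk;

	for (*div = 0; *div < 4; (*div)++) {
		int_pllclk = pclk / (1 << *div);
		if (int_pllclk < 50 * MHZ)
			break;
	}
	if (*div == 4)
		return -EINVAL;	/* pclk too fast to bring below 50 MHz */

	/* 64-bit intermediate: 24 * MHZ * 256 overflows 32 bits */
	*fld_tg_code = DIV_ROUND_UP_ULL(24ULL * MHZ * 256, int_pllclk);
	return 0;
}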
diff --git a/drivers/phy/hisilicon/phy-hi3670-pcie.c b/drivers/phy/hisilicon/phy-hi3670-pcie.c
index 0ac9634b398d..dbc7dcce682b 100644
--- a/drivers/phy/hisilicon/phy-hi3670-pcie.c
+++ b/drivers/phy/hisilicon/phy-hi3670-pcie.c
@@ -16,15 +16,20 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#define AXI_CLK_FREQ 207500000
#define REF_CLK_FREQ 100000000
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index fefc02d921e6..71f9c14fb50d 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -422,7 +422,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
/* wait until clocks are ready */
mdelay(1);
- /* exlicitly disable 40B, the bits isn't clear on reset */
+ /* explicitly disable 40B, the bits isn't clear on reset */
regmap_read(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), &val);
val &= ~MVEBU_COMPHY_CONF6_40B;
regmap_write(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), val);
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
index bbfe11d6a69d..b38f3ae26b3f 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
@@ -9,6 +9,8 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/types.h>
#include <linux/units.h>
#include <linux/nvmem-consumer.h>
@@ -478,8 +480,50 @@ static int mtk_hdmi_phy_configure(struct phy *phy, union phy_configure_opts *opt
return ret;
}
+static int mtk_hdmi_phy_pwr5v_enable(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ mtk_phy_set_bits(hdmi_phy->regs + HDMI_CTL_1, RG_HDMITX_PWR5V_O);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_pwr5v_disable(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ mtk_phy_clear_bits(hdmi_phy->regs + HDMI_CTL_1, RG_HDMITX_PWR5V_O);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_pwr5v_is_enabled(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ return !!(readl(hdmi_phy->regs + HDMI_CTL_1) & RG_HDMITX_PWR5V_O);
+}
+
+static const struct regulator_ops mtk_hdmi_pwr5v_regulator_ops = {
+ .enable = mtk_hdmi_phy_pwr5v_enable,
+ .disable = mtk_hdmi_phy_pwr5v_disable,
+ .is_enabled = mtk_hdmi_phy_pwr5v_is_enabled
+};
+
+static const struct regulator_desc mtk_hdmi_phy_pwr5v_desc = {
+ .name = "hdmi-pwr5v",
+ .id = -1,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .ops = &mtk_hdmi_pwr5v_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8195_conf = {
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+ .hdmi_phy_regulator_desc = &mtk_hdmi_phy_pwr5v_desc,
.hdmi_phy_clk_ops = &mtk_hdmi_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
index 22a68dc9550c..e26caaf4d104 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
@@ -103,6 +103,9 @@
#define HDMI_ANA_CTL 0x7c
#define REG_ANA_HDMI20_FIFO_EN BIT(16)
+#define HDMI_CTL_1 0xc4
+#define RG_HDMITX_PWR5V_O BIT(9)
+
#define HDMI_CTL_3 0xcc
#define REG_HDMITXPLL_DIV GENMASK(4, 0)
#define REG_HDMITX_REF_XTAL_SEL BIT(7)
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index d2e824771f9d..52a7d525ff9b 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -75,6 +75,28 @@ static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
+static int mtk_hdmi_phy_register_regulators(struct mtk_hdmi_phy *hdmi_phy)
+{
+ const struct regulator_desc *vreg_desc = hdmi_phy->conf->hdmi_phy_regulator_desc;
+ const struct regulator_init_data vreg_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ }
+ };
+ struct regulator_config vreg_config = {
+ .dev = hdmi_phy->dev,
+ .driver_data = hdmi_phy,
+ .init_data = &vreg_init_data,
+ .of_node = hdmi_phy->dev->of_node
+ };
+
+ hdmi_phy->rdev = devm_regulator_register(hdmi_phy->dev, vreg_desc, &vreg_config);
+ if (IS_ERR(hdmi_phy->rdev))
+ return PTR_ERR(hdmi_phy->rdev);
+
+ return 0;
+}
+
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -150,6 +172,12 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
if (hdmi_phy->conf->pll_default_off)
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+ if (hdmi_phy->conf->hdmi_phy_regulator_desc) {
+ ret = mtk_hdmi_phy_register_regulators(hdmi_phy);
+ if (ret)
+ return ret;
+ }
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
hdmi_phy->pll);
}
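
With the registration above, the PHY exposes its PWR5V output as a one-voltage regulator, so the HDMI controller can switch the +5V hot-plug supply through the ordinary regulator API instead of reaching into PHY registers. A minimal consumer-side sketch, assuming a hypothetical "hdmi-pwr5v"-supply wired up in the consumer's device tree node:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int demo_enable_hdmi_5v(struct device *dev)
{
        /* "hdmi-pwr5v" is an illustrative supply name, not from the patch */
        struct regulator *pwr5v = devm_regulator_get(dev, "hdmi-pwr5v");

        if (IS_ERR(pwr5v))
                return PTR_ERR(pwr5v);

        /* ends up in mtk_hdmi_phy_pwr5v_enable(), setting RG_HDMITX_PWR5V_O */
        return regulator_enable(pwr5v);
}
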
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index 71c02d043485..99d917e0036a 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#include <linux/types.h>
struct mtk_hdmi_phy;
@@ -20,6 +22,7 @@ struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
unsigned long flags;
bool pll_default_off;
+ const struct regulator_desc *hdmi_phy_regulator_desc;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -32,6 +35,7 @@ struct mtk_hdmi_phy {
struct mtk_hdmi_phy_conf *conf;
struct clk *pll;
struct clk_hw pll_hw;
+ struct regulator_dev *rdev;
unsigned long pll_rate;
unsigned char drv_imp_clk;
unsigned char drv_imp_d2;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 3f7095ec5978..a496fbe3352b 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -381,17 +381,12 @@ static const char *const u3_phy_files[] = {
static int u2_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
- const char *fname = file_dentry(sf->file)->d_iname;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
u32 max = 0;
u32 tmp = 0;
u32 val = 0;
- int ret;
-
- ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
- if (ret < 0)
- return ret;
+ int ret = debugfs_get_aux_num(sf->file);
switch (ret) {
case U2P_EYE_VRT:
@@ -438,7 +433,7 @@ static int u2_phy_params_show(struct seq_file *sf, void *unused)
break;
}
- seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+ seq_printf(sf, "%s : %d [0, %d]\n", u2_phy_files[ret], val, max);
return 0;
}
@@ -451,23 +446,18 @@ static int u2_phy_params_open(struct inode *inode, struct file *file)
static ssize_t u2_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
ssize_t rc;
u32 val;
- int ret;
+ int ret = debugfs_get_aux_num(file);
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
- ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
- if (ret < 0)
- return (ssize_t)ret;
-
switch (ret) {
case U2P_EYE_VRT:
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, val);
@@ -516,23 +506,18 @@ static void u2_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
int i;
for (i = 0; i < count; i++)
- debugfs_create_file(u2_phy_files[i], 0644, inst->phy->debugfs,
- inst, &u2_phy_fops);
+ debugfs_create_file_aux_num(u2_phy_files[i], 0644, inst->phy->debugfs,
+ inst, i, &u2_phy_fops);
}
static int u3_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
- const char *fname = file_dentry(sf->file)->d_iname;
struct u3phy_banks *u3_banks = &inst->u3_banks;
u32 val = 0;
u32 max = 0;
u32 tmp;
- int ret;
-
- ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
- if (ret < 0)
- return ret;
+ int ret = debugfs_get_aux_num(sf->file);
switch (ret) {
case U3P_EFUSE_EN:
@@ -564,7 +549,7 @@ static int u3_phy_params_show(struct seq_file *sf, void *unused)
break;
}
- seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+ seq_printf(sf, "%s : %d [0, %d]\n", u3_phy_files[ret], val, max);
return 0;
}
@@ -577,23 +562,18 @@ static int u3_phy_params_open(struct inode *inode, struct file *file)
static ssize_t u3_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u3phy_banks *u3_banks = &inst->u3_banks;
void __iomem *phyd = u3_banks->phyd;
ssize_t rc;
u32 val;
- int ret;
+ int ret = debugfs_get_aux_num(sf->file);
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
- ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
- if (ret < 0)
- return (ssize_t)ret;
-
switch (ret) {
case U3P_EFUSE_EN:
mtk_phy_update_field(phyd + U3P_U3_PHYD_RSV,
@@ -636,8 +616,8 @@ static void u3_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
int i;
for (i = 0; i < count; i++)
- debugfs_create_file(u3_phy_files[i], 0644, inst->phy->debugfs,
- inst, &u3_phy_fops);
+ debugfs_create_file_aux_num(u3_phy_files[i], 0644, inst->phy->debugfs,
+ inst, i, &u3_phy_fops);
}
static int phy_type_show(struct seq_file *sf, void *unused)
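
The tphy hunks above drop the dentry-name matching in favour of the debugfs aux-number API: each file is created with debugfs_create_file_aux_num(), and the callbacks recover the index with debugfs_get_aux_num() instead of running match_string() on the file name. A minimal sketch of the same pattern, with illustrative "demo" names:

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char * const demo_files[] = { "alpha", "beta" };

static int demo_show(struct seq_file *sf, void *unused)
{
        /* index stashed at creation time, no name lookup needed */
        unsigned long idx = debugfs_get_aux_num(sf->file);

        seq_printf(sf, "%s : %lu\n", demo_files[idx], idx);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_files_create(struct dentry *parent, void *data)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_files); i++)
                debugfs_create_file_aux_num(demo_files[i], 0444, parent,
                                            data, i, &demo_fops);
}
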
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 413f76e2d174..8dfdce605a90 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -749,8 +749,8 @@ EXPORT_SYMBOL_GPL(devm_phy_put);
/**
* of_phy_simple_xlate() - returns the phy instance from phy provider
- * @dev: the PHY provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the PHY provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by phy provider for the common case where #phy-cells is
* 0. For other cases where #phy-cells is greater than '0', the phy provider
@@ -760,21 +760,14 @@ EXPORT_SYMBOL_GPL(devm_phy_put);
struct phy *of_phy_simple_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct phy *phy;
- struct class_dev_iter iter;
-
- class_dev_iter_init(&iter, &phy_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- phy = to_phy(dev);
- if (args->np != phy->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return phy;
- }
+ target_dev = class_find_device_by_of_node(&phy_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return to_phy(target_dev);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
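
The rework above replaces the open-coded class iterator with class_find_device_by_of_node(). That helper returns the matched device with a reference held, so the put_device() drops it straight away, matching the old iterator, which also returned the PHY without keeping an extra reference; the phy_get() path takes its own reference afterwards. A minimal sketch of the lookup contract, with illustrative "demo" types:

#include <linux/device.h>
#include <linux/of.h>

struct demo_dev {
        struct device dev;
};

static struct class demo_class = {
        .name = "demo",
};

static struct demo_dev *demo_find(struct device_node *np)
{
        /* the lookup takes a device reference on a match ... */
        struct device *dev = class_find_device_by_of_node(&demo_class, np);

        if (!dev)
                return NULL;

        put_device(dev);        /* ... which the caller must drop */
        return container_of(dev, struct demo_dev, dev);
}
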
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 3bae39381fd0..b09fa00e9fe7 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -400,6 +400,57 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sar2130p_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x2e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC, 0x0c),
+};
+
static const struct qmp_phy_init_tbl sm6350_usb3_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
@@ -1730,6 +1781,51 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
.dp_dp_phy = 0x2200,
};
+static const struct qmp_phy_cfg sar2130p_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = sar2130p_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sar2130p_usb3_serdes_tbl),
+ .tx_tbl = sm8550_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl),
+ .rx_tbl = sm8550_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl),
+ .pcs_tbl = sm8550_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v6_usb3phy_regs_layout,
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
.offsets = &qmp_combo_offsets_v3,
@@ -3768,6 +3864,10 @@ err_node_put:
static const struct of_device_id qmp_combo_of_match_table[] = {
{
+ .compatible = "qcom,sar2130p-qmp-usb3-dp-phy",
+ .data = &sar2130p_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
.data = &sc7180_usb3dpphy_cfg,
},
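
Each new compatible in these of_match tables carries its qmp_phy_cfg in .data, and probe resolves it with of_device_get_match_data(), so supporting a new SoC is purely additive: new init tables, a new cfg, one table entry. A minimal sketch of the resolution step, with an illustrative driver-local config type:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_cfg { int lanes; };        /* stand-in for the driver's cfg struct */

static int demo_probe(struct platform_device *pdev)
{
        /* returns .data of the matching of_device_id entry, or NULL */
        const struct demo_cfg *cfg = of_device_get_match_data(&pdev->dev);

        if (!cfg)
                return -EINVAL;

        return 0;
}
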
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 873f2f9844c6..018bbb300830 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -728,6 +728,83 @@ static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
};
+static const struct qmp_phy_init_tbl qcs615_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x9),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xd),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_SIGDET_CNTRL, 0x7),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe),
+};
+
static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -1773,7 +1850,7 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rc_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
};
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_ep_pcs_misc_tbl[] = {
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_ep_pcs_lane1_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
@@ -1907,6 +1984,9 @@ static const struct qmp_phy_init_tbl sdx65_qmp_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG2, 0x0d),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
+static const struct qmp_phy_init_tbl sdx65_qmp_pcie_pcs_lane1_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
@@ -2582,8 +2662,6 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = {
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
};
@@ -2724,10 +2802,106 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ep_pcs_alt_tbl[] =
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_INSIG_SW_CTRL7, 0x00),
};
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC_3, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_LANE1_INSIG_SW_CTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_LANE1_INSIG_MX_CTRL2, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_tx_tbl[] = {
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_TX_BIST_MODE_LANENO, 0x00, 2),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G3S2_PRE_GAIN, 0x2e),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1, 0x1e),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x14),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
u16 pcs_misc;
+ u16 pcs_lane1;
u16 tx;
u16 rx;
u16 tx2;
@@ -2752,6 +2926,8 @@ struct qmp_phy_cfg_tbls {
int pcs_num;
const struct qmp_phy_init_tbl *pcs_misc;
int pcs_misc_num;
+ const struct qmp_phy_init_tbl *pcs_lane1;
+ int pcs_lane1_num;
const struct qmp_phy_init_tbl *ln_shrd;
int ln_shrd_num;
};
@@ -2811,6 +2987,7 @@ struct qmp_pcie {
void __iomem *serdes;
void __iomem *pcs;
void __iomem *pcs_misc;
+ void __iomem *pcs_lane1;
void __iomem *tx;
void __iomem *rx;
void __iomem *tx2;
@@ -2927,6 +3104,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v4_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
.pcs_misc = 0x1600,
+ .pcs_lane1 = 0x1e00,
.tx = 0x0000,
.rx = 0x0200,
.tx2 = 0x0800,
@@ -2957,6 +3135,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
.pcs_misc = 0x1400,
+ .pcs_lane1 = 0x1e00,
.tx = 0x0000,
.rx = 0x0200,
.tx2 = 0x0800,
@@ -3132,6 +3311,31 @@ static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = {
.pipe_clock_rate = 250000000,
};
+static const struct qmp_phy_cfg qcs615_pciephy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_pcie_offsets_v2,
+
+ .tbls = {
+ .serdes = qcs615_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(qcs615_pcie_serdes_tbl),
+ .tx = qcs615_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(qcs615_pcie_tx_tbl),
+ .rx = qcs615_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(qcs615_pcie_rx_tbl),
+ .pcs = qcs615_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(qcs615_pcie_pcs_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v2_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
@@ -3283,6 +3487,49 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.skip_start_delay = true,
};
+static const struct qmp_phy_cfg sar2130p_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v5,
+
+ .tbls = {
+ .tx = sm8550_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8550_qmp_gen3x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_rx_tbl),
+ .pcs = sm8550_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_lane1 = sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl),
+ },
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl),
+ .tx = sar2130p_qmp_gen3x2_pcie_rc_tx_tbl,
+ .tx_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_tx_tbl),
+ .pcs = sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl),
+ .pcs_misc = sm8550_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .tbls_ep = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl),
+ .pcs = sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl),
+ .pcs_misc = sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.lanes = 2,
@@ -3440,8 +3687,8 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.tbls_ep = &(const struct qmp_phy_cfg_tbls) {
.serdes = sdx55_qmp_pcie_ep_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_serdes_tbl),
- .pcs_misc = sdx55_qmp_pcie_ep_pcs_misc_tbl,
- .pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_misc_tbl),
+ .pcs_lane1 = sdx55_qmp_pcie_ep_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_lane1_tbl),
},
.reset_list = sdm845_pciephy_reset_l,
@@ -3540,6 +3787,8 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_tbl),
.pcs_misc = sdx65_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_misc_tbl),
+ .pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl),
},
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
@@ -3739,6 +3988,8 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
.pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ .pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl),
},
.tbls_rc = &(const struct qmp_phy_cfg_tbls) {
@@ -3945,6 +4196,7 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
void __iomem *pcs_misc = qmp->pcs_misc;
+ void __iomem *pcs_lane1 = qmp->pcs_lane1;
void __iomem *ln_shrd = qmp->ln_shrd;
if (!tbls)
@@ -3969,6 +4221,7 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+ qmp_configure(qmp->dev, pcs_lane1, tbls->pcs_lane1, tbls->pcs_lane1_num);
if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) {
qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl,
@@ -4420,6 +4673,14 @@ static int qmp_pcie_parse_dt_legacy(struct qmp_pcie *qmp, struct device_node *np
}
}
+ /*
+ * For all platforms where legacy bindings existed, PCS_LANE1 was
+ * mapped as a part of the PCS_MISC region.
+ */
+ if (!IS_ERR(qmp->pcs_misc) && cfg->offsets->pcs_lane1 != 0)
+ qmp->pcs_lane1 = qmp->pcs_misc +
+ (cfg->offsets->pcs_lane1 - cfg->offsets->pcs_misc);
+
clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(clk)) {
return dev_err_probe(dev, PTR_ERR(clk),
@@ -4487,6 +4748,7 @@ static int qmp_pcie_parse_dt(struct qmp_pcie *qmp)
qmp->serdes = base + offs->serdes;
qmp->pcs = base + offs->pcs;
qmp->pcs_misc = base + offs->pcs_misc;
+ qmp->pcs_lane1 = base + offs->pcs_lane1;
qmp->tx = base + offs->tx;
qmp->rx = base + offs->rx;
@@ -4612,12 +4874,18 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
}, {
+ .compatible = "qcom,qcs615-qmp-gen3x1-pcie-phy",
+ .data = &qcs615_pciephy_cfg,
+ }, {
.compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy",
.data = &sa8775p_qmp_gen4x2_pciephy_cfg,
}, {
.compatible = "qcom,sa8775p-qmp-gen4x4-pcie-phy",
.data = &sa8775p_qmp_gen4x4_pciephy_cfg,
}, {
+ .compatible = "qcom,sar2130p-qmp-gen3x2-pcie-phy",
+ .data = &sar2130p_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sc8180x-qmp-pcie-phy",
.data = &sc8180x_pciephy_cfg,
}, {
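
The legacy-binding fallback above computes the PCS_LANE1 pointer as pcs_misc + (pcs_lane1 - pcs_misc), which only works because the header changes further down re-base the LANE1 defines by the same amount: for v4.20 the region moves from 0x1600 to 0x1e00 (+0x800) while INSIG_SW_CTRL2 shrinks from 0x824 to 0x024, and for v5.20 it is +0xa00 against 0xa24 becoming 0x024. A compile-time check of that invariant, as a sketch:

#include <linux/build_bug.h>

#define V4_20_PCS_MISC          0x1600  /* from qmp_pcie_offsets_v4_20 */
#define V4_20_PCS_LANE1         0x1e00
#define OLD_INSIG_SW_CTRL2      0x824   /* define before this patch */
#define NEW_INSIG_SW_CTRL2      0x024   /* re-based define after it */

static_assert(V4_20_PCS_MISC + OLD_INSIG_SW_CTRL2 ==
              V4_20_PCS_LANE1 + NEW_INSIG_SW_CTRL2);
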
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
index ac872a9eff9a..ab892d1067c2 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
@@ -13,7 +13,8 @@
#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
-#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
-#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x028
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
index cdf8c04ea078..283d63c81593 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -17,7 +17,8 @@
#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
-#define QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2 0xa24
-#define QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2 0xa28
+
+#define QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2 0x028
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
index 0ca79333d942..45397cb3c0c6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
@@ -14,4 +14,7 @@
#define QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
#define QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_PCIE_V6_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_PCIE_V6_PCS_LANE1_INSIG_MX_CTRL2 0x028
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
index bf36399d0057..1ecf4b5beba6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
@@ -34,6 +34,7 @@
#define QPHY_V2_PCS_USB_PCS_STATUS 0x17c /* USB */
#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1a8
#define QPHY_V2_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V2_PCS_SIGDET_CNTRL 0x1b0
#define QPHY_V2_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
index 08299d2b78f0..aa5afb921f12 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
@@ -17,6 +17,8 @@
#define QPHY_V6_PCS_LOCK_DETECT_CONFIG3 0x0cc
#define QPHY_V6_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V6_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V6_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V6_PCS_RX_SIGDET_LVL 0x188
#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
index 23ffcfae9efa..f47fdc9cecda 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -6,6 +6,7 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
+#define QSERDES_V6_TX_BIST_MODE_LANENO 0x00
#define QSERDES_V6_TX_CLKBUF_ENABLE 0x08
#define QSERDES_V6_TX_TX_EMP_POST1_LVL 0x0c
#define QSERDES_V6_TX_TX_DRV_LVL 0x14
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index c9c337840715..787721570457 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -2298,6 +2298,9 @@ err_node_put:
static const struct of_device_id qmp_usb_of_match_table[] = {
{
+ .compatible = "qcom,ipq5424-qmp-usb3-phy",
+ .data = &ipq9574_usb3phy_cfg,
+ }, {
.compatible = "qcom,ipq6018-qmp-usb3-phy",
.data = &ipq6018_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index c52655a383ce..1f5f7df14d5a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -151,6 +151,34 @@ static const struct qusb2_phy_init_tbl ipq6018_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9F),
};
+static const struct qusb2_phy_init_tbl ipq5424_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL, 0x14),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0x00),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x53),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xc3),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE5, 0x00),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TEST, 0x80),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+};
+
+static const struct qusb2_phy_init_tbl qcs615_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xc8),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0xb3),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE3, 0x83),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xc0),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+};
+
static const unsigned int ipq6018_regs_layout[] = {
[QUSB2PHY_PLL_STATUS] = 0x38,
[QUSB2PHY_PORT_TUNE1] = 0x80,
@@ -331,6 +359,27 @@ static const struct qusb2_phy_cfg ipq6018_phy_cfg = {
.autoresume_en = BIT(0),
};
+static const struct qusb2_phy_cfg ipq5424_phy_cfg = {
+ .tbl = ipq5424_init_tbl,
+ .tbl_num = ARRAY_SIZE(ipq5424_init_tbl),
+ .regs = ipq6018_regs_layout,
+
+ .disable_ctrl = POWER_DOWN,
+ .mask_core_ready = PLL_LOCKED,
+ .autoresume_en = BIT(0),
+};
+
+static const struct qusb2_phy_cfg qcs615_phy_cfg = {
+ .tbl = qcs615_init_tbl,
+ .tbl_num = ARRAY_SIZE(qcs615_init_tbl),
+ .regs = ipq6018_regs_layout,
+
+ .disable_ctrl = (CLAMP_N_EN | FREEZIO_N | POWER_DOWN),
+ .mask_core_ready = PLL_LOCKED,
+ /* autoresume not used */
+ .autoresume_en = BIT(0),
+};
+
static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = {
.tbl = qusb2_v2_init_tbl,
.tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl),
@@ -905,6 +954,9 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq5424-qusb2-phy",
+ .data = &ipq5424_phy_cfg,
+ }, {
.compatible = "qcom,ipq6018-qusb2-phy",
.data = &ipq6018_phy_cfg,
}, {
@@ -923,6 +975,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,msm8998-qusb2-phy",
.data = &msm8998_phy_cfg,
}, {
+ .compatible = "qcom,qcs615-qusb2-phy",
+ .data = &qcs615_phy_cfg,
+ }, {
.compatible = "qcom,qcm2290-qusb2-phy",
.data = &sm6115_phy_cfg,
}, {
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 2f7a05f21dc5..dcb8e1628632 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP
depends on ARCH_ROCKCHIP && OF
depends on TYPEC
select GENERIC_PHY
+ select USB_COMMON
help
Enable this to support the Rockchip USB3.0/DP combo PHY with
Samsung IP block. This is required for USB3 support on RK3588.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 2eb3329ca23f..8c3ce57f8915 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -37,6 +37,10 @@
#define PHYREG8 0x1C
#define PHYREG8_SSC_EN BIT(4)
+#define PHYREG10 0x24
+#define PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
+#define PHYREG10_SSC_PCM_3500PPM 7
+
#define PHYREG11 0x28
#define PHYREG11_SU_TRIM_0_7 0xF0
@@ -61,17 +65,26 @@
#define PHYREG16 0x3C
#define PHYREG16_SSC_CNT_VALUE 0x5f
+#define PHYREG17 0x40
+
#define PHYREG18 0x44
#define PHYREG18_PLL_LOOP 0x32
+#define PHYREG21 0x50
+#define PHYREG21_RX_SQUELCH_VAL 0x0D
+
#define PHYREG27 0x6C
#define PHYREG27_RX_TRIM_RK3588 0x4C
+#define PHYREG30 0x74
+
#define PHYREG32 0x7C
#define PHYREG32_SSC_MASK GENMASK(7, 4)
+#define PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
#define PHYREG32_SSC_DIR_SHIFT 4
#define PHYREG32_SSC_UPWARD 0
#define PHYREG32_SSC_DOWNWARD 1
+#define PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
#define PHYREG32_SSC_OFFSET_SHIFT 6
#define PHYREG32_SSC_OFFSET_500PPM 1
@@ -79,6 +92,7 @@
#define PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
#define PHYREG33_PLL_KVCO_SHIFT 2
#define PHYREG33_PLL_KVCO_VALUE 2
+#define PHYREG33_PLL_KVCO_VALUE_RK3576 4
struct rockchip_combphy_priv;
@@ -98,6 +112,7 @@ struct rockchip_combphy_grfcfg {
struct combphy_reg pipe_rxterm_set;
struct combphy_reg pipe_txelec_set;
struct combphy_reg pipe_txcomp_set;
+ struct combphy_reg pipe_clk_24m;
struct combphy_reg pipe_clk_25m;
struct combphy_reg pipe_clk_100m;
struct combphy_reg pipe_phymode_sel;
@@ -309,7 +324,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
- priv->phy_rst = devm_reset_control_get(dev, "phy");
+ priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
+ /* fall back to the old behaviour */
+ if (PTR_ERR(priv->phy_rst) == -ENOENT)
+ priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(priv->phy_rst))
return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
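
The parse_dt change above first asks for the named "phy" reset and only falls back to grabbing the whole reset array when the name is absent (-ENOENT), so device trees predating the named reset keep working while real errors, such as probe deferral, still propagate. The shape of that fallback, as a minimal sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static struct reset_control *demo_get_phy_reset(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_exclusive(dev, "phy");
        /* only a missing name triggers the legacy path */
        if (PTR_ERR(rst) == -ENOENT)
                rst = devm_reset_control_array_get_exclusive(dev);

        return rst;        /* caller handles IS_ERR() */
}
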
@@ -584,6 +602,266 @@ static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
.combphy_cfg = rk3568_combphy_cfg,
};
+static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
+{
+ const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
+ unsigned long rate;
+ u32 val;
+
+ switch (priv->type) {
+ case PHY_TYPE_PCIE:
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+ break;
+
+ case PHY_TYPE_USB3:
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ /* Enable adaptive CTLE for USB3.0 Rx */
+ val = readl(priv->mmio + PHYREG15);
+ val |= PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + PHYREG15);
+
+ /* Set PLL KVCO fine tuning signals */
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+
+ /* Set PLL LPF R1 to su_trim[10:7]=1001 */
+ writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+
+ /* Set PLL input clock divider 1/2 */
+ val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+
+ /* Set PLL loop divider */
+ writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+
+ /* Set PLL KVCO to min and set PLL charge pump current to max */
+ writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+
+ /* Set Rx squelch input filter bandwidth */
+ writel(PHYREG21_RX_SQUELCH_VAL, priv->mmio + PHYREG21);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ break;
+
+ case PHY_TYPE_SATA:
+ /* Enable adaptive CTLE for SATA Rx */
+ val = readl(priv->mmio + PHYREG15);
+ val |= PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + PHYREG15);
+
+ /* Set tx_rterm = 50 ohm and rx_rterm = 43.5 ohm */
+ val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
+ val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + PHYREG7);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_sata, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_con0_for_sata, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_con1_for_sata, true);
+ break;
+
+ default:
+ dev_err(priv->dev, "incompatible PHY type\n");
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(priv->refclk);
+
+ switch (rate) {
+ case REF_CLOCK_24MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
+ if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
+ /* Set ssc_cnt[9:0]=0101111101 & 31.5 kHz */
+ val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
+ val, PHYREG15);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ } else if (priv->type == PHY_TYPE_PCIE) {
+ /* PLL KVCO tuning fine */
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_pck invert and rx msb to disable */
+ writel(0x00, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b011
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x02, priv->mmio + PHYREG12);
+ writel(0x57, priv->mmio + PHYREG14);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ }
+ break;
+
+ case REF_CLOCK_25MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_25m, true);
+ break;
+
+ case REF_CLOCK_100MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
+ if (priv->type == PHY_TYPE_PCIE) {
+ /* gate_tx_pck_sel length select work for L1SS */
+ writel(0xc0, priv->mmio + PHYREG30);
+
+ /* PLL KVCO tuning fine */
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
+ writel(0x4c, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], bypass PLL loop divider code, and
+ * PLL LPF R1 adjust bits[9:7]=3'b101
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x43, priv->mmio + PHYREG12);
+ writel(0x88, priv->mmio + PHYREG13);
+ writel(0x56, priv->mmio + PHYREG14);
+ } else if (priv->type == PHY_TYPE_SATA) {
+ /* downward spread spectrum +500ppm */
+ val = FIELD_PREP(PHYREG32_SSC_DIR_MASK, PHYREG32_SSC_DOWNWARD);
+ val |= FIELD_PREP(PHYREG32_SSC_OFFSET_MASK, PHYREG32_SSC_OFFSET_500PPM);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ /* ssc ppm adjust to 3500ppm */
+ rockchip_combphy_updatel(priv, PHYREG10_SSC_PCM_MASK,
+ PHYREG10_SSC_PCM_3500PPM,
+ PHYREG10);
+ }
+ break;
+
+ default:
+ dev_err(priv->dev, "Unsupported rate: %lu\n", rate);
+ return -EINVAL;
+ }
+
+ if (priv->ext_refclk) {
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_trim: PLL LPF C1 85pf R1 2.5kohm */
+ writel(0x0c, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], bypass PLL loop divider code, and
+ * PLL LPF R1 adjust bits[9:7]=3'b101.
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x43, priv->mmio + PHYREG12);
+ writel(0x88, priv->mmio + PHYREG13);
+ writel(0x56, priv->mmio + PHYREG14);
+ }
+ }
+
+ if (priv->enable_ssc) {
+ val = readl(priv->mmio + PHYREG8);
+ val |= PHYREG8_SSC_EN;
+ writel(val, priv->mmio + PHYREG8);
+
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_24MHz) {
+ /* Set PLL loop divider */
+ writel(0x00, priv->mmio + PHYREG17);
+ writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+
+ /* Set up rx_pck invert and rx msb to disable */
+ writel(0x00, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b101
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x02, priv->mmio + PHYREG12);
+ writel(0x08, priv->mmio + PHYREG13);
+ writel(0x57, priv->mmio + PHYREG14);
+ writel(0x40, priv->mmio + PHYREG15);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ writel(val, priv->mmio + PHYREG33);
+ }
+ }
+
+ return 0;
+}
+
+static const struct rockchip_combphy_grfcfg rk3576_combphy_grfcfgs = {
+ /* pipe-phy-grf */
+ .pcie_mode_set = { 0x0000, 5, 0, 0x00, 0x11 },
+ .usb_mode_set = { 0x0000, 5, 0, 0x00, 0x04 },
+ .pipe_rxterm_set = { 0x0000, 12, 12, 0x00, 0x01 },
+ .pipe_txelec_set = { 0x0004, 1, 1, 0x00, 0x01 },
+ .pipe_txcomp_set = { 0x0004, 4, 4, 0x00, 0x01 },
+ .pipe_clk_24m = { 0x0004, 14, 13, 0x00, 0x00 },
+ .pipe_clk_25m = { 0x0004, 14, 13, 0x00, 0x01 },
+ .pipe_clk_100m = { 0x0004, 14, 13, 0x00, 0x02 },
+ .pipe_phymode_sel = { 0x0008, 1, 1, 0x00, 0x01 },
+ .pipe_rate_sel = { 0x0008, 2, 2, 0x00, 0x01 },
+ .pipe_rxterm_sel = { 0x0008, 8, 8, 0x00, 0x01 },
+ .pipe_txelec_sel = { 0x0008, 12, 12, 0x00, 0x01 },
+ .pipe_txcomp_sel = { 0x0008, 15, 15, 0x00, 0x01 },
+ .pipe_clk_ext = { 0x000c, 9, 8, 0x02, 0x01 },
+ .pipe_phy_status = { 0x0034, 6, 6, 0x01, 0x00 },
+ .con0_for_pcie = { 0x0000, 15, 0, 0x00, 0x1000 },
+ .con1_for_pcie = { 0x0004, 15, 0, 0x00, 0x0000 },
+ .con2_for_pcie = { 0x0008, 15, 0, 0x00, 0x0101 },
+ .con3_for_pcie = { 0x000c, 15, 0, 0x00, 0x0200 },
+ .con0_for_sata = { 0x0000, 15, 0, 0x00, 0x0129 },
+ .con1_for_sata = { 0x0004, 15, 0, 0x00, 0x0000 },
+ .con2_for_sata = { 0x0008, 15, 0, 0x00, 0x80c1 },
+ .con3_for_sata = { 0x000c, 15, 0, 0x00, 0x0407 },
+ /* php-grf */
+ .pipe_con0_for_sata = { 0x001C, 2, 0, 0x00, 0x2 },
+ .pipe_con1_for_sata = { 0x0020, 2, 0, 0x00, 0x2 },
+};
+
+static const struct rockchip_combphy_cfg rk3576_combphy_cfgs = {
+ .num_phys = 2,
+ .phy_ids = {
+ 0x2b050000,
+ 0x2b060000
+ },
+ .grfcfg = &rk3576_combphy_grfcfgs,
+ .combphy_cfg = rk3576_combphy_cfg,
+};
+
static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
{
const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
@@ -776,6 +1054,10 @@ static const struct of_device_id rockchip_combphy_of_match[] = {
.data = &rk3568_combphy_cfgs,
},
{
+ .compatible = "rockchip,rk3576-naneng-combphy",
+ .data = &rk3576_combphy_cfgs,
+ },
+ {
.compatible = "rockchip,rk3588-naneng-combphy",
.data = &rk3588_combphy_cfgs,
},
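
The rk3576 hook above configures everything through the GENMASK()/FIELD_PREP() idiom, mostly via rockchip_combphy_updatel(), which performs the same read-modify-write under the hood. A minimal sketch of that idiom, with illustrative register names:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_SSC_PCM_MASK       GENMASK(3, 0)   /* illustrative field */
#define DEMO_SSC_PCM_3500PPM    7

static void demo_set_ssc_ppm(void __iomem *reg)
{
        u32 val = readl(reg);

        /* clear the field, then shift the new value into place */
        val &= ~DEMO_SSC_PCM_MASK;
        val |= FIELD_PREP(DEMO_SSC_PCM_MASK, DEMO_SSC_PCM_3500PPM);
        writel(val, reg);
}
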
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 51cc5ece0e63..bd44af36c67a 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -124,7 +124,7 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
regmap_write(rk_phy->reg_base,
rk_phy->phy_data->pcie_laneoff,
@@ -132,27 +132,22 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
PHY_LANE_IDLE_MASK,
PHY_LANE_IDLE_A_SHIFT + inst->index));
- if (--rk_phy->pwr_cnt)
- goto err_out;
+ if (--rk_phy->pwr_cnt)
+ return 0;
err = reset_control_assert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
- goto err_restore;
+ rk_phy->pwr_cnt++;
+ regmap_write(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_laneoff,
+ HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+ PHY_LANE_IDLE_MASK,
+ PHY_LANE_IDLE_A_SHIFT + inst->index));
+ return err;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
-
-err_restore:
- rk_phy->pwr_cnt++;
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
@@ -162,17 +157,18 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
u32 status;
- unsigned long timeout;
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
- if (rk_phy->pwr_cnt++)
- goto err_out;
+ if (rk_phy->pwr_cnt++)
+ return 0;
err = reset_control_deassert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
- goto err_pwr_cnt;
+ rk_phy->pwr_cnt--;
+ return err;
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
@@ -191,21 +187,11 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
* so we make it large enough here. And we use loop-break
* method which should not be harmful.
*/
- timeout = jiffies + msecs_to_jiffies(1000);
-
- err = -EINVAL;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (status & PHY_PLL_LOCKED) {
- dev_dbg(&phy->dev, "pll locked!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
-
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ status & PHY_PLL_LOCKED,
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll lock timeout!\n");
goto err_pll_lock;
@@ -214,19 +200,11 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);
- err = -ETIMEDOUT;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (!(status & PHY_PLL_OUTPUT)) {
- dev_dbg(&phy->dev, "pll output enable done!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
-
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ !(status & PHY_PLL_OUTPUT),
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll output enable timeout!\n");
goto err_pll_lock;
@@ -236,33 +214,22 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
PHY_CFG_ADDR_MASK,
PHY_CFG_ADDR_SHIFT));
- err = -EINVAL;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (status & PHY_PLL_LOCKED) {
- dev_dbg(&phy->dev, "pll relocked!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ status & PHY_PLL_LOCKED,
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll relock timeout!\n");
goto err_pll_lock;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
+ return err;
err_pll_lock:
reset_control_assert(rk_phy->phy_rst);
-err_pwr_cnt:
rk_phy->pwr_cnt--;
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
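
The hunks in this file lean on the scope-based lock guard from <linux/cleanup.h>: guard(mutex)(&m) acquires the mutex and releases it automatically on every exit from the scope, which is what lets the unlock-only error labels disappear and early returns roll back the refcount directly. A minimal sketch, with an assumed demo_hw_power_on() helper:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_cnt;

static int demo_hw_power_on(void);        /* assumed hardware helper */

static int demo_power_on(void)
{
        int err;

        guard(mutex)(&demo_lock);        /* dropped on every return below */

        if (demo_cnt++)
                return 0;

        err = demo_hw_power_on();
        if (err)
                demo_cnt--;              /* roll back on failure */

        return err;
}
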
@@ -272,33 +239,19 @@ static int rockchip_pcie_phy_init(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
- mutex_lock(&rk_phy->pcie_mutex);
-
- if (rk_phy->init_cnt++)
- goto err_out;
+ guard(mutex)(&rk_phy->pcie_mutex);
- err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
- if (err) {
- dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
- goto err_refclk;
+ if (rk_phy->init_cnt++) {
+ return 0;
}
err = reset_control_assert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
- goto err_reset;
+ rk_phy->init_cnt--;
+ return err;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
-
-err_reset:
-
- clk_disable_unprepare(rk_phy->clk_pciephy_ref);
-err_refclk:
- rk_phy->init_cnt--;
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
@@ -307,15 +260,12 @@ static int rockchip_pcie_phy_exit(struct phy *phy)
struct phy_pcie_instance *inst = phy_get_drvdata(phy);
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
if (--rk_phy->init_cnt)
goto err_init_cnt;
- clk_disable_unprepare(rk_phy->clk_pciephy_ref);
-
err_init_cnt:
- mutex_unlock(&rk_phy->pcie_mutex);
return 0;
}
@@ -371,18 +321,14 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
mutex_init(&rk_phy->pcie_mutex);
rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
- if (IS_ERR(rk_phy->phy_rst)) {
- if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
- dev_err(dev,
- "missing phy property for reset controller\n");
- return PTR_ERR(rk_phy->phy_rst);
- }
-
- rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
- if (IS_ERR(rk_phy->clk_pciephy_ref)) {
- dev_err(dev, "refclk not found.\n");
- return PTR_ERR(rk_phy->clk_pciephy_ref);
- }
+ if (IS_ERR(rk_phy->phy_rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rk_phy->phy_rst),
+ "missing phy property for reset controller\n");
+
+ rk_phy->clk_pciephy_ref = devm_clk_get_enabled(dev, "refclk");
+ if (IS_ERR(rk_phy->clk_pciephy_ref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rk_phy->clk_pciephy_ref),
+				     "failed to get refclk\n");
/* parse #phy-cells to see if it's legacy PHY model */
if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
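
The probe cleanup pairs dev_err_probe(), which logs real errors but records -EPROBE_DEFER silently, with devm_clk_get_enabled(), which gets, prepares and enables the clock and undoes all of that on unbind; that is why the explicit clk_prepare_enable()/clk_disable_unprepare() calls disappear from init/exit above. A minimal probe sketch with hypothetical names:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct reset_control *rst;
		struct clk *refclk;

		rst = devm_reset_control_get(dev, "phy");
		if (IS_ERR(rst))
			return dev_err_probe(dev, PTR_ERR(rst),
					     "failed to get phy reset\n");

		/* Prepared and enabled now; undone automatically on unbind. */
		refclk = devm_clk_get_enabled(dev, "refclk");
		if (IS_ERR(refclk))
			return dev_err_probe(dev, PTR_ERR(refclk),
					     "failed to get refclk\n");

		return 0;
	}
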
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 122ae0fdc785..d9701b6106d5 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
* Kever Yang <kever.yang@rock-chips.com>
*
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index f10afa3d7ff5..e2330b0894d6 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -33,6 +33,7 @@ config PHY_SAMSUNG_UFS
tristate "Exynos SoC series UFS PHY driver"
depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
select GENERIC_PHY
+ select MFD_SYSCON
help
Enable this to support the Samsung Exynos SoC UFS PHY driver for
Samsung Exynos SoCs. This driver provides the interface for UFS host
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index c421b495eb0f..46b8f6987c62 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* FSEL settings corresponding to reference clock */
- reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
@@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
- reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
return reg;
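
Both hunks above fix the same operator-precedence bug: `~` binds tighter than `|`, so `reg &= ~A | B | C` inverts only A and leaves the B and C bits set in the mask, meaning those fields were never cleared. A stand-alone illustration in plain C with hypothetical masks:

	#include <stdint.h>
	#include <stdio.h>

	#define A 0x0fu
	#define B 0xf0u

	int main(void)
	{
		uint32_t reg = 0xff;
		uint32_t wrong = reg & (~A | B);  /* B survives the AND */
		uint32_t right = reg & ~(A | B);  /* both fields cleared */

		printf("wrong=0x%02x right=0x%02x\n", wrong, right);
		/* prints: wrong=0xf0 right=0x00 */
		return 0;
	}
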
@@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ ret = exynos850_usbdrd_phy_exit(phy);
+ if (ret)
+ return ret;
+ }
+
+ exynos5_usbdrd_phy_isol(inst, true);
+
if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
return 0;
- ret = exynos850_usbdrd_phy_exit(phy);
- if (ret)
- return ret;
-
- exynos5_usbdrd_phy_isol(inst, true);
return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
phy_drd->regulators);
}
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 6c5d41552649..8e9ccd39f97e 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -13,11 +13,11 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/soc/samsung/exynos-pmu.h>
#include "phy-samsung-ufs.h"
@@ -268,8 +268,8 @@ static int samsung_ufs_phy_probe(struct platform_device *pdev)
goto out;
}
- phy->reg_pmu = exynos_get_pmu_regmap_by_phandle(dev->of_node,
- "samsung,pmu-syscon");
+ phy->reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,pmu-syscon");
if (IS_ERR(phy->reg_pmu)) {
err = PTR_ERR(phy->reg_pmu);
dev_err(dev, "failed syscon remap for pmu\n");
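
The driver now resolves the PMU block through the generic syscon helper instead of the Exynos-private exynos_get_pmu_regmap_by_phandle(), which is also why the Kconfig hunk earlier selects MFD_SYSCON. A minimal sketch of the lookup, wrapped in a hypothetical helper:

	#include <linux/device.h>
	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	static struct regmap *demo_get_pmu(struct device *dev)
	{
		struct regmap *pmu;

		/* Follow the phandle in the property to the syscon's regmap. */
		pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
						      "samsung,pmu-syscon");
		if (IS_ERR(pmu))
			dev_err(dev, "failed syscon remap for pmu\n");

		return pmu;
	}
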
diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c
index 49e9fa90a681..607b4d607eb5 100644
--- a/drivers/phy/st/phy-stm32-combophy.c
+++ b/drivers/phy/st/phy-stm32-combophy.c
@@ -111,6 +111,7 @@ static const struct clk_impedance imp_lookup[] = {
{ 4204000, { 511000, 609000, 706000, 802000 } },
{ 3999000, { 571000, 648000, 726000, 803000 } }
};
+#define DEFAULT_IMP_INDEX 3 /* Default impedance is 50 Ohm */
static int stm32_impedance_tune(struct stm32_combophy *combophy)
{
@@ -119,10 +120,9 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
u8 imp_of, vswing_of;
u32 max_imp = imp_lookup[0].microohm;
u32 min_imp = imp_lookup[imp_size - 1].microohm;
- u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1];
+ u32 max_vswing;
u32 min_vswing = imp_lookup[0].vswing[0];
u32 val;
- u32 regval;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) {
if (val < min_imp || val > max_imp) {
@@ -130,45 +130,43 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
return -EINVAL;
}
- regval = 0;
- for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) {
- if (imp_lookup[imp_of].microohm <= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of);
+ for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++)
+ if (imp_lookup[imp_of].microohm <= val)
break;
- }
- }
+
+ if (WARN_ON(imp_of == ARRAY_SIZE(imp_lookup)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n",
imp_lookup[imp_of].microohm);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_OHM,
- regval);
- } else {
- regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val);
- imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val);
- }
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of));
+ } else
+ imp_of = DEFAULT_IMP_INDEX;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) {
+ max_vswing = imp_lookup[imp_of].vswing[vswing_size - 1];
+
if (val < min_vswing || val > max_vswing) {
dev_err(combophy->dev, "Invalid value %u for output vswing\n", val);
return -EINVAL;
}
- regval = 0;
- for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) {
- if (imp_lookup[imp_of].vswing[vswing_of] >= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of);
+ for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++)
+ if (imp_lookup[imp_of].vswing[vswing_of] >= val)
break;
- }
- }
+
+ if (WARN_ON(vswing_of == ARRAY_SIZE(imp_lookup[imp_of].vswing)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u microvolt swing\n",
imp_lookup[imp_of].vswing[vswing_of]);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_VSWING,
- regval);
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of));
}
return 0;
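
The rewrite keeps only the table index from the search, derives the register value at write time with FIELD_PREP(), and turns falling off the end of the table into an explicit WARN_ON() failure instead of silently writing index 0. The idiom, condensed over a hypothetical descending lookup table and mask:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/bug.h>
	#include <linux/kernel.h>
	#include <linux/regmap.h>

	#define DEMO_IMP_MASK	GENMASK(5, 4)

	static const u32 demo_imp[] = { 80, 60, 50, 40 };	/* descending */

	static int demo_pick_imp(struct regmap *map, u32 reg, u32 want)
	{
		unsigned int i;

		/* First entry <= the request wins. */
		for (i = 0; i < ARRAY_SIZE(demo_imp); i++)
			if (demo_imp[i] <= want)
				break;

		if (WARN_ON(i == ARRAY_SIZE(demo_imp)))
			return -EINVAL;

		return regmap_update_bits(map, reg, DEMO_IMP_MASK,
					  FIELD_PREP(DEMO_IMP_MASK, i));
	}
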
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
index c591c958f1eb..f30cfb42b210 100644
--- a/drivers/phy/tegra/Kconfig
+++ b/drivers/phy/tegra/Kconfig
@@ -13,7 +13,8 @@ config PHY_TEGRA_XUSB
config PHY_TEGRA194_P2U
tristate "NVIDIA Tegra194 PIPE2UPHY PHY driver"
- depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || COMPILE_TEST
select GENERIC_PHY
help
- Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x SOCs.
+ Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x
+ and 234 SOCs.
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 0f60d5d1c167..fae6242aa730 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
+ u32 reg;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
return -ENODEV;
}
+ if (port->mode == USB_DR_MODE_OTG ||
+ port->mode == USB_DR_MODE_PERIPHERAL) {
+ /* reset VBUS&ID OVERRIDE */
+ reg = padctl_readl(padctl, USB2_VBUS_ID);
+ reg &= ~VBUS_OVERRIDE;
+ reg &= ~ID_OVERRIDE(~0);
+ reg |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, reg, USB2_VBUS_ID);
+ }
+
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_enable(port->supply);
if (err) {
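
The inserted block is a plain read-modify-write: clear the VBUS override bit, clear the whole ID override field (the (~0) argument expands to the field's full mask), then select the floating ID state, so a stale peripheral-mode override cannot leak into the next session. The same mask discipline in a generic sketch with a hypothetical register layout:

	#include <linux/bits.h>
	#include <linux/io.h>

	#define DEMO_VBUS_OVERRIDE	BIT(14)
	#define DEMO_ID_OVERRIDE(x)	(((x) & 0xf) << 18)
	#define DEMO_ID_FLOATING	DEMO_ID_OVERRIDE(8)

	static void demo_reset_overrides(void __iomem *reg)
	{
		u32 val = readl(reg);

		val &= ~DEMO_VBUS_OVERRIDE;
		val &= ~DEMO_ID_OVERRIDE(~0);	/* full 4-bit field mask */
		val |= DEMO_ID_FLOATING;
		writel(val, reg);
	}
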
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index e0ca59ae3153..ff5d5e29629f 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -424,6 +424,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
return 0;
}
+static const struct regmap_config phy_gmii_sel_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int phy_gmii_sel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -468,7 +474,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
- priv->regmap = device_node_to_regmap(node);
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to get base memory resource\n");
+
+ priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"Failed to get syscon\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 73dbf29c002f..cf6efa9c0364 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = BCM281XX_PIN_VC_CAM3_SDA,
+ .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4,
};
static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
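
The one-line fix reflects that regmap's .max_register is a register address in bytes, not a pin index: with .reg_stride = 4 the last reachable address is last_index * 4. Illustration with a hypothetical last index:

	#include <linux/regmap.h>

	#define DEMO_LAST_PIN	146	/* hypothetical last pin index */

	static const struct regmap_config demo_cfg = {
		.reg_bits     = 32,
		.val_bits     = 32,
		.reg_stride   = 4,
		.max_register = DEMO_LAST_PIN * 4,	/* byte address 0x248 */
	};
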
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 84af6aae36d1..a96be8f244e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "../pinctrl-utils.h"
@@ -254,7 +255,7 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio,
- level_low ? "true" : "false", falling ? "true" : "false");
+ str_true_false(level_low), str_true_false(falling));
return 0;
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 014297a3fbd2..0f32866a4aef 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -1068,7 +1069,7 @@ static void lochnagar_gpio_set(struct gpio_chip *chip,
value = !!value;
dev_dbg(priv->dev, "Set GPIO %s to %s\n",
- pin->name, value ? "high" : "low");
+ pin->name, str_high_low(value));
switch (pin->type) {
case LN_PTYPE_MUX:
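
These two hunks swap open-coded ternaries for the <linux/string_choices.h> helpers; str_true_false() returns "true"/"false" and str_high_low() returns "high"/"low". A minimal usage sketch:

	#include <linux/device.h>
	#include <linux/string_choices.h>

	static void demo_log(struct device *dev, bool level_low, bool value)
	{
		dev_dbg(dev, "level_low:%s line:%s\n",
			str_true_false(level_low), str_high_low(value));
	}
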
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index b3eec63c00ba..4bdbf6bb26e2 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1256,6 +1256,20 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
DL_FLAG_AUTOREMOVE_CONSUMER);
}
+static void pinctrl_cond_disable_mux_setting(struct pinctrl_state *state,
+ struct pinctrl_setting *target_setting)
+{
+ struct pinctrl_setting *setting;
+
+ list_for_each_entry(setting, &state->settings, node) {
+ if (target_setting && (&setting->node == &target_setting->node))
+ break;
+
+ if (setting->type == PIN_MAP_TYPE_MUX_GROUP)
+ pinmux_disable_setting(setting);
+ }
+}
+
/**
* pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
@@ -1263,7 +1277,7 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
*/
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
- struct pinctrl_setting *setting, *setting2;
+ struct pinctrl_setting *setting;
struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
@@ -1274,11 +1288,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &old_state->settings, node) {
- if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
- continue;
- pinmux_disable_setting(setting);
- }
+ pinctrl_cond_disable_mux_setting(old_state, NULL);
}
p->state = NULL;
@@ -1322,7 +1332,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
}
if (ret < 0) {
- goto unapply_new_state;
+ goto unapply_mux_setting;
}
/* Do not link hogs (circular dependency) */
@@ -1334,23 +1344,23 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
return 0;
+unapply_mux_setting:
+ pinctrl_cond_disable_mux_setting(state, NULL);
+ goto restore_old_state;
+
unapply_new_state:
dev_err(p->dev, "Error applying setting, reverse things back\n");
- list_for_each_entry(setting2, &state->settings, node) {
- if (&setting2->node == &setting->node)
- break;
- /*
- * All we can do here is pinmux_disable_setting.
- * That means that some pins are muxed differently now
- * than they were before applying the setting (We can't
- * "unmux a pin"!), but it's not a big deal since the pins
- * are free to be muxed by another apply_setting.
- */
- if (setting2->type == PIN_MAP_TYPE_MUX_GROUP)
- pinmux_disable_setting(setting2);
- }
+ /*
+ * All we can do here is pinmux_disable_setting.
+ * That means that some pins are muxed differently now
+ * than they were before applying the setting (We can't
+ * "unmux a pin"!), but it's not a big deal since the pins
+ * are free to be muxed by another apply_setting.
+ */
+ pinctrl_cond_disable_mux_setting(state, setting);
+restore_old_state:
/* There's no infinite recursive loop here because p->state is NULL */
if (old_state)
pinctrl_select_state(p, old_state);
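
The refactor folds both MUX_GROUP rollback loops into pinctrl_cond_disable_mux_setting(): walk the state's settings in list order, stop when @target_setting is reached (a NULL target walks the whole list), and disable every mux setting seen on the way. The shape of the idiom, reduced to a hypothetical demo type:

	#include <linux/list.h>

	struct demo_setting {
		struct list_head node;
		int type;
	};

	/* Disable settings in list order, stopping at @target (NULL = all). */
	static void demo_disable_until(struct list_head *settings,
				       struct demo_setting *target)
	{
		struct demo_setting *s;

		list_for_each_entry(s, settings, node) {
			if (target && s == target)
				break;
			/* ... undo s here ... */
		}
	}
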
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index a417a031659c..58f32818a0e6 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -202,6 +202,13 @@ config PINCTRL_MT7986
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
+config PINCTRL_MT7988
+ bool "Mediatek MT7988 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_MOORE
+
config PINCTRL_MT8167
bool "MediaTek MT8167 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 1405d434218e..721ae83476d0 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
obj-$(CONFIG_PINCTRL_MT7981) += pinctrl-mt7981.o
obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
+obj-$(CONFIG_PINCTRL_MT7988) += pinctrl-mt7988.o
obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
new file mode 100644
index 000000000000..68b4097792b8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -0,0 +1,1556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The MT7988 driver is based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+enum mt7988_pinctrl_reg_page {
+ GPIO_BASE,
+ IOCFG_TR_BASE,
+ IOCFG_BR_BASE,
+ IOCFG_RB_BASE,
+ IOCFG_LB_BASE,
+ IOCFG_TL_BASE,
+};
+
+#define MT7988_PIN(_number, _name) MTK_PIN(_number, _name, 0, _number, DRV_GRP4)
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
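
	/*
	 * Illustration (not part of the patch): for fixed-width fields packed
	 * into 32-bit registers, the PIN_FIELD()-style tables below reduce to
	 *
	 *	pins_per_reg = 32 / field_width;
	 *	reg          = base + (pin / pins_per_reg) * stride;
	 *	shift        = (pin % pins_per_reg) * field_width;
	 *
	 * e.g. for the 4-bit mode range below: pin 9 lives at
	 * 0x300 + (9 / 8) * 0x10 = 0x310, shift (9 % 8) * 4 = 4.
	 */
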
+
+static const struct mtk_pin_field_calc mt7988_pin_mode_range[] = {
+ PIN_FIELD(0, 83, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_dir_range[] = {
+ PIN_FIELD(0, 83, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_di_range[] = {
+ PIN_FIELD(0, 83, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_do_range[] = {
+ PIN_FIELD(0, 83, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x30, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x30, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x30, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x30, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x30, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x40, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x50, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x50, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x50, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x50, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x50, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x50, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x50, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x50, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x50, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x50, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x50, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x50, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x50, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x50, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x50, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x50, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x50, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x50, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x50, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x50, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x50, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x50, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x40, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x40, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x40, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x40, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x40, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x40, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x40, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x30, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x30, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x30, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x40, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x40, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x40, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x40, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0xc0, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0xc0, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0xc0, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0xc0, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0xc0, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0xc0, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0xb0, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0xe0, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0xe0, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0xe0, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0xe0, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0xc0, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0xc0, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0xc0, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0xc0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0xb0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x140, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x140, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x140, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x140, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x140, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x140, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x140, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x140, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x140, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x140, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x140, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x140, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x150, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x140, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x140, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x140, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x150, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x140, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x140, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x140, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x140, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x140, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x140, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x140, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x140, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x140, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x140, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x140, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x140, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x140, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x140, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x140, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x140, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x140, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0xe0, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0xe0, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0xe0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0xe0, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0xe0, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0xe0, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0xe0, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0xe0, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0xe0, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0xe0, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0xe0, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0xe0, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0xe0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0xe0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0xc0, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0xc0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0xc0, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0xb0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0xe0, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0xe0, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0xe0, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0xe0, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pu_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x60, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pd_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x40, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(63, 63, 1, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x40, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x40, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(1, 1, 5, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(2, 2, 5, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(3, 3, 5, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(4, 4, 5, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(5, 5, 5, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(6, 6, 5, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x00, 0x10, 28, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x00, 0x10, 9, 3),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(12, 12, 1, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(13, 13, 1, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(14, 14, 1, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(20, 20, 4, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x10, 0x10, 21, 3),
+ PIN_FIELD_BASE(22, 22, 3, 0x20, 0x10, 9, 3),
+ PIN_FIELD_BASE(23, 23, 3, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(24, 24, 3, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(25, 25, 3, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(26, 26, 3, 0x20, 0x10, 6, 3),
+ PIN_FIELD_BASE(27, 27, 3, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(28, 28, 3, 0x20, 0x10, 15, 3),
+ PIN_FIELD_BASE(29, 29, 3, 0x20, 0x10, 18, 3),
+ PIN_FIELD_BASE(30, 30, 3, 0x20, 0x10, 21, 3),
+ PIN_FIELD_BASE(31, 31, 3, 0x20, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 3, 0x20, 0x10, 24, 3),
+ PIN_FIELD_BASE(33, 33, 3, 0x30, 0x10, 6, 3),
+ PIN_FIELD_BASE(34, 34, 3, 0x30, 0x10, 3, 3),
+ PIN_FIELD_BASE(35, 35, 3, 0x20, 0x10, 27, 3),
+ PIN_FIELD_BASE(36, 36, 3, 0x30, 0x10, 0, 3),
+ PIN_FIELD_BASE(37, 37, 3, 0x30, 0x10, 9, 3),
+ PIN_FIELD_BASE(38, 38, 3, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(39, 39, 3, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(40, 40, 3, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(41, 41, 3, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(42, 42, 3, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(43, 43, 3, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(44, 44, 3, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(45, 45, 3, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(46, 46, 3, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(47, 47, 3, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(48, 48, 3, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(49, 49, 3, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(50, 50, 3, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(51, 51, 3, 0x10, 0x10, 6, 3),
+ PIN_FIELD_BASE(52, 52, 3, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(53, 53, 3, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x10, 0x10, 18, 3),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(57, 57, 1, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 1, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(63, 63, 1, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(64, 64, 1, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(65, 65, 1, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(66, 66, 1, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(67, 67, 1, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 1, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(70, 70, 5, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(74, 74, 4, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(75, 75, 4, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(76, 76, 4, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(77, 77, 4, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(78, 78, 4, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(79, 79, 4, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(81, 81, 1, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(82, 82, 1, 0x10, 0x10, 18, 3),
+ PIN_FIELD_BASE(83, 83, 1, 0x10, 0x10, 21, 3),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x50, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x60, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x70, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x70, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x70, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x70, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x70, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x70, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x70, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x70, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x70, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x70, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x70, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x70, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x70, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x70, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x70, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x70, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x70, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x70, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x70, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x70, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x70, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x70, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x70, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x70, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x60, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x60, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x60, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x60, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x60, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x50, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x50, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x60, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x60, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x60, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x60, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x60, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x80, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x70, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x90, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x90, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x90, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x90, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x90, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x90, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x90, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x90, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x90, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x90, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x90, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xa0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x90, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x90, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x90, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xa0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x90, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x90, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x90, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x80, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x80, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x80, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x80, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x80, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x80, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x80, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x80, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x80, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x80, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x70, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x80, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x80, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x80, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x80, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x70, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x90, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x80, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0xb0, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0xb0, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0xb0, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0xb0, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0xb0, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0xb0, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0xb0, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0xb0, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0xb0, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0xb0, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0xb0, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0xb0, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0xb0, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0xb0, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0xb0, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0xb0, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0xb0, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0xb0, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0xb0, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0xb0, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0xb0, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0xb0, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x90, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x80, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x90, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x90, 0x10, 15, 1),
+};
+
+static const unsigned int mt7988_pull_type[] = {
+ MTK_PULL_PUPD_R1R0_TYPE,/*0*/ MTK_PULL_PUPD_R1R0_TYPE,/*1*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*2*/ MTK_PULL_PUPD_R1R0_TYPE,/*3*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*4*/ MTK_PULL_PUPD_R1R0_TYPE,/*5*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/ MTK_PULL_PD_TYPE, /*15*/
+ MTK_PULL_PD_TYPE, /*16*/ MTK_PULL_PD_TYPE, /*17*/
+ MTK_PULL_PD_TYPE, /*18*/ MTK_PULL_PUPD_R1R0_TYPE,/*19*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*20*/ MTK_PULL_PUPD_R1R0_TYPE,/*21*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PUPD_R1R0_TYPE,/*29*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*30*/ MTK_PULL_PUPD_R1R0_TYPE,/*31*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*32*/ MTK_PULL_PUPD_R1R0_TYPE,/*33*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*34*/ MTK_PULL_PUPD_R1R0_TYPE,/*35*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*36*/ MTK_PULL_PUPD_R1R0_TYPE,/*37*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*38*/ MTK_PULL_PUPD_R1R0_TYPE,/*39*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*40*/ MTK_PULL_PUPD_R1R0_TYPE,/*41*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*42*/ MTK_PULL_PUPD_R1R0_TYPE,/*43*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PUPD_R1R0_TYPE,/*57*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*58*/ MTK_PULL_PUPD_R1R0_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PUPD_R1R0_TYPE,/*61*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE, /*63*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*64*/ MTK_PULL_PUPD_R1R0_TYPE,/*65*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PD_TYPE, /*71*/
+ MTK_PULL_PD_TYPE, /*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PUPD_R1R0_TYPE,/*83*/
+};
+
+static const struct mtk_pin_reg_calc mt7988_reg_cals[] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7988_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7988_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7988_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7988_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7988_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7988_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt7988_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt7988_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7988_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7988_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7988_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7988_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7988_pins[] = {
+ MT7988_PIN(0, "UART2_RXD"),
+ MT7988_PIN(1, "UART2_TXD"),
+ MT7988_PIN(2, "UART2_CTS"),
+ MT7988_PIN(3, "UART2_RTS"),
+ MT7988_PIN(4, "GPIO_A"),
+ MT7988_PIN(5, "SMI_0_MDC"),
+ MT7988_PIN(6, "SMI_0_MDIO"),
+ MT7988_PIN(7, "PCIE30_2L_0_WAKE_N"),
+ MT7988_PIN(8, "PCIE30_2L_0_CLKREQ_N"),
+ MT7988_PIN(9, "PCIE30_1L_1_WAKE_N"),
+ MT7988_PIN(10, "PCIE30_1L_1_CLKREQ_N"),
+ MT7988_PIN(11, "GPIO_P"),
+ MT7988_PIN(12, "WATCHDOG"),
+ MT7988_PIN(13, "GPIO_RESET"),
+ MT7988_PIN(14, "GPIO_WPS"),
+ MT7988_PIN(15, "PMIC_I2C_SCL"),
+ MT7988_PIN(16, "PMIC_I2C_SDA"),
+ MT7988_PIN(17, "I2C_1_SCL"),
+ MT7988_PIN(18, "I2C_1_SDA"),
+ MT7988_PIN(19, "PCIE30_2L_0_PRESET_N"),
+ MT7988_PIN(20, "PCIE30_1L_1_PRESET_N"),
+ MT7988_PIN(21, "PWMD1"),
+ MT7988_PIN(22, "SPI0_WP"),
+ MT7988_PIN(23, "SPI0_HOLD"),
+ MT7988_PIN(24, "SPI0_CSB"),
+ MT7988_PIN(25, "SPI0_MISO"),
+ MT7988_PIN(26, "SPI0_MOSI"),
+ MT7988_PIN(27, "SPI0_CLK"),
+ MT7988_PIN(28, "SPI1_CSB"),
+ MT7988_PIN(29, "SPI1_MISO"),
+ MT7988_PIN(30, "SPI1_MOSI"),
+ MT7988_PIN(31, "SPI1_CLK"),
+ MT7988_PIN(32, "SPI2_CLK"),
+ MT7988_PIN(33, "SPI2_MOSI"),
+ MT7988_PIN(34, "SPI2_MISO"),
+ MT7988_PIN(35, "SPI2_CSB"),
+ MT7988_PIN(36, "SPI2_HOLD"),
+ MT7988_PIN(37, "SPI2_WP"),
+ MT7988_PIN(38, "EMMC_RSTB"),
+ MT7988_PIN(39, "EMMC_DSL"),
+ MT7988_PIN(40, "EMMC_CK"),
+ MT7988_PIN(41, "EMMC_CMD"),
+ MT7988_PIN(42, "EMMC_DATA_7"),
+ MT7988_PIN(43, "EMMC_DATA_6"),
+ MT7988_PIN(44, "EMMC_DATA_5"),
+ MT7988_PIN(45, "EMMC_DATA_4"),
+ MT7988_PIN(46, "EMMC_DATA_3"),
+ MT7988_PIN(47, "EMMC_DATA_2"),
+ MT7988_PIN(48, "EMMC_DATA_1"),
+ MT7988_PIN(49, "EMMC_DATA_0"),
+ MT7988_PIN(50, "PCM_FS_I2S_LRCK"),
+ MT7988_PIN(51, "PCM_CLK_I2S_BCLK"),
+ MT7988_PIN(52, "PCM_DRX_I2S_DIN"),
+ MT7988_PIN(53, "PCM_DTX_I2S_DOUT"),
+ MT7988_PIN(54, "PCM_MCK_I2S_MCLK"),
+ MT7988_PIN(55, "UART0_RXD"),
+ MT7988_PIN(56, "UART0_TXD"),
+ MT7988_PIN(57, "PWMD0"),
+ MT7988_PIN(58, "JTAG_JTDI"),
+ MT7988_PIN(59, "JTAG_JTDO"),
+ MT7988_PIN(60, "JTAG_JTMS"),
+ MT7988_PIN(61, "JTAG_JTCLK"),
+ MT7988_PIN(62, "JTAG_JTRST_N"),
+ MT7988_PIN(63, "USB_DRV_VBUS_P1"),
+ MT7988_PIN(64, "LED_A"),
+ MT7988_PIN(65, "LED_B"),
+ MT7988_PIN(66, "LED_C"),
+ MT7988_PIN(67, "LED_D"),
+ MT7988_PIN(68, "LED_E"),
+ MT7988_PIN(69, "GPIO_B"),
+ MT7988_PIN(70, "GPIO_C"),
+ MT7988_PIN(71, "I2C_2_SCL"),
+ MT7988_PIN(72, "I2C_2_SDA"),
+ MT7988_PIN(73, "PCIE30_2L_1_PRESET_N"),
+ MT7988_PIN(74, "PCIE30_1L_0_PRESET_N"),
+ MT7988_PIN(75, "PCIE30_2L_1_WAKE_N"),
+ MT7988_PIN(76, "PCIE30_2L_1_CLKREQ_N"),
+ MT7988_PIN(77, "PCIE30_1L_0_WAKE_N"),
+ MT7988_PIN(78, "PCIE30_1L_0_CLKREQ_N"),
+ MT7988_PIN(79, "USB_DRV_VBUS_P0"),
+ MT7988_PIN(80, "UART1_RXD"),
+ MT7988_PIN(81, "UART1_TXD"),
+ MT7988_PIN(82, "UART1_CTS"),
+ MT7988_PIN(83, "UART1_RTS"),
+};
+
+/* jtag */
+static const int mt7988_tops_jtag0_0_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_tops_jtag0_0_funcs[] = { 2, 2, 2, 2, 2 };
+
+static const int mt7988_wo0_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo0_jtag_funcs[] = { 3, 3, 3, 3, 3 };
+
+static const int mt7988_wo1_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo1_jtag_funcs[] = { 4, 4, 4, 4, 4 };
+
+static const int mt7988_wo2_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo2_jtag_funcs[] = { 5, 5, 5, 5, 5 };
+
+static const int mt7988_jtag_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_jtag_funcs[] = { 1, 1, 1, 1, 1 };
+
+static const int mt7988_tops_jtag0_1_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_tops_jtag0_1_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* int_usxgmii */
+static const int mt7988_int_usxgmii_pins[] = { 2, 3 };
+static int mt7988_int_usxgmii_funcs[] = { 3, 3 };
+
+/* pwm */
+static const int mt7988_pwm0_pins[] = { 57 };
+static int mt7988_pwm0_funcs[] = { 1 };
+
+static const int mt7988_pwm1_pins[] = { 21 };
+static int mt7988_pwm1_funcs[] = { 1 };
+
+static const int mt7988_pwm2_pins[] = { 80 };
+static int mt7988_pwm2_funcs[] = { 2 };
+
+static const int mt7988_pwm2_0_pins[] = { 58 };
+static int mt7988_pwm2_0_funcs[] = { 5 };
+
+static const int mt7988_pwm3_pins[] = { 81 };
+static int mt7988_pwm3_funcs[] = { 2 };
+
+static const int mt7988_pwm3_0_pins[] = { 59 };
+static int mt7988_pwm3_0_funcs[] = { 5 };
+
+static const int mt7988_pwm4_pins[] = { 82 };
+static int mt7988_pwm4_funcs[] = { 2 };
+
+static const int mt7988_pwm4_0_pins[] = { 60 };
+static int mt7988_pwm4_0_funcs[] = { 5 };
+
+static const int mt7988_pwm5_pins[] = { 83 };
+static int mt7988_pwm5_funcs[] = { 2 };
+
+static const int mt7988_pwm5_0_pins[] = { 61 };
+static int mt7988_pwm5_0_funcs[] = { 5 };
+
+static const int mt7988_pwm6_pins[] = { 69 };
+static int mt7988_pwm6_funcs[] = { 3 };
+
+static const int mt7988_pwm6_0_pins[] = { 62 };
+static int mt7988_pwm6_0_funcs[] = { 5 };
+
+static const int mt7988_pwm7_pins[] = { 70 };
+static int mt7988_pwm7_funcs[] = { 3 };
+
+static const int mt7988_pwm7_0_pins[] = { 4 };
+static int mt7988_pwm7_0_funcs[] = { 3 };
+
+/* dfd */
+static const int mt7988_dfd_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_dfd_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2c */
+static const int mt7988_xfi_phy0_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy0_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy1_i2c0_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c0_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy_pll_i2c1_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_0_pins[] = { 5, 6 };
+static int mt7988_i2c0_0_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c1_sfp_pins[] = { 5, 6 };
+static int mt7988_i2c1_sfp_funcs[] = { 4, 4 };
+
+static const int mt7988_xfi_pextp_phy0_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_pextp_phy1_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_1_pins[] = { 15, 16 };
+static int mt7988_i2c0_1_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u30_phy_i2c0_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u32_phy_i2c0_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy0_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy0_i2c1_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy1_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c2_pins[] = { 15, 16 };
+static int mt7988_xfi_phy_pll_i2c2_funcs[] = { 7, 7 };
+
+static const int mt7988_i2c1_0_pins[] = { 17, 18 };
+static int mt7988_i2c1_0_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u30_phy_i2c1_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u32_phy_i2c1_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy_pll_i2c3_pins[] = { 17, 18 };
+static int mt7988_xfi_phy_pll_i2c3_funcs[] = { 4, 4 };
+
+static const int mt7988_sgmii0_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_sgmii1_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c1_2_pins[] = { 69, 70 };
+static int mt7988_i2c1_2_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c2_0_pins[] = { 69, 70 };
+static int mt7988_i2c2_0_funcs[] = { 4, 4 };
+
+static const int mt7988_i2c2_1_pins[] = { 71, 72 };
+static int mt7988_i2c2_1_funcs[] = { 1, 1 };
+
+/* eth */
+static const int mt7988_mdc_mdio0_pins[] = { 5, 6 };
+static int mt7988_mdc_mdio0_funcs[] = { 1, 1 };
+
+static const int mt7988_2p5g_ext_mdio_pins[] = { 28, 29 };
+static int mt7988_2p5g_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_gbe_ext_mdio_pins[] = { 30, 31 };
+static int mt7988_gbe_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_mdc_mdio1_pins[] = { 69, 70 };
+static int mt7988_mdc_mdio1_funcs[] = { 1, 1 };
+
+/* pcie */
+static const int mt7988_pcie_wake_n0_0_pins[] = { 7 };
+static int mt7988_pcie_wake_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_0_pins[] = { 8 };
+static int mt7988_pcie_clk_req_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n3_0_pins[] = { 9 };
+static int mt7988_pcie_wake_n3_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n3_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n3_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_1_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_p0_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p0_phy_i2c_funcs[] = { 3, 3 };
+
+static const int mt7988_pcie_p1_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p1_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p3_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_pcie_p3_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p2_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p2_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_ckm_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_ckm_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_pcie_wake_n0_1_pins[] = { 13 };
+static int mt7988_pcie_wake_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_wake_n3_1_pins[] = { 14 };
+static int mt7988_pcie_wake_n3_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_0_pereset_pins[] = { 19 };
+static int mt7988_pcie_2l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_1_pereset_pins[] = { 20 };
+static int mt7988_pcie_1l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_1_pins[] = { 63 };
+static int mt7988_pcie_clk_req_n2_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_1_pereset_pins[] = { 73 };
+static int mt7988_pcie_2l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_0_pereset_pins[] = { 74 };
+static int mt7988_pcie_1l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n1_0_pins[] = { 75 };
+static int mt7988_pcie_wake_n1_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n1_pins[] = { 76 };
+static int mt7988_pcie_clk_req_n1_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_0_pins[] = { 77 };
+static int mt7988_pcie_wake_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_0_pins[] = { 78 };
+static int mt7988_pcie_clk_req_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_1_pins[] = { 79 };
+static int mt7988_pcie_wake_n2_1_funcs[] = { 2 };
+
+/* pmic */
+static const int mt7988_pmic_pins[] = { 11 };
+static int mt7988_pmic_funcs[] = { 1 };
+
+/* watchdog */
+static const int mt7988_watchdog_pins[] = { 12 };
+static int mt7988_watchdog_funcs[] = { 1 };
+
+/* spi */
+static const int mt7988_spi0_wp_hold_pins[] = { 22, 23 };
+static int mt7988_spi0_wp_hold_funcs[] = { 1, 1 };
+
+static const int mt7988_spi0_pins[] = { 24, 25, 26, 27 };
+static int mt7988_spi0_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi1_pins[] = { 28, 29, 30, 31 };
+static int mt7988_spi1_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_pins[] = { 32, 33, 34, 35 };
+static int mt7988_spi2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_wp_hold_pins[] = { 36, 37 };
+static int mt7988_spi2_wp_hold_funcs[] = { 1, 1 };
+
+/* flash */
+static const int mt7988_snfi_pins[] = { 22, 23, 24, 25, 26, 27 };
+static int mt7988_snfi_funcs[] = { 2, 2, 2, 2, 2, 2 };
+
+static const int mt7988_emmc_45_pins[] = {
+ 21, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
+};
+static int mt7988_emmc_45_funcs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_sdcard_pins[] = { 32, 33, 34, 35, 36, 37 };
+static int mt7988_sdcard_funcs[] = { 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_emmc_51_pins[] = { 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49 };
+static int mt7988_emmc_51_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* uart */
+static const int mt7988_uart2_pins[] = { 0, 1, 2, 3 };
+static int mt7988_uart2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_tops_uart0_0_pins[] = { 22, 23 };
+static int mt7988_tops_uart0_0_funcs[] = { 3, 3 };
+
+static const int mt7988_uart2_0_pins[] = { 28, 29, 30, 31 };
+static int mt7988_uart2_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_0_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart1_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_1_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart2_1_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_net_wo0_uart_txd_0_pins[] = { 28 };
+static int mt7988_net_wo0_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_0_pins[] = { 29 };
+static int mt7988_net_wo1_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_0_pins[] = { 30 };
+static int mt7988_net_wo2_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_tops_uart1_0_pins[] = { 28, 29 };
+static int mt7988_tops_uart1_0_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart0_1_pins[] = { 30, 31 };
+static int mt7988_tops_uart0_1_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart1_1_pins[] = { 36, 37 };
+static int mt7988_tops_uart1_1_funcs[] = { 3, 3 };
+
+static const int mt7988_uart0_pins[] = { 55, 56 };
+static int mt7988_uart0_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart0_2_pins[] = { 55, 56 };
+static int mt7988_tops_uart0_2_funcs[] = { 2, 2 };
+
+static const int mt7988_uart2_2_pins[] = { 50, 51, 52, 53 };
+static int mt7988_uart2_2_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_1_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart1_1_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_3_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart2_3_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_uart1_2_pins[] = { 80, 81, 82, 83 };
+static int mt7988_uart1_2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_uart1_2_lite_pins[] = { 80, 81 };
+static int mt7988_uart1_2_lite_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart1_2_pins[] = { 80, 81 };
+static int mt7988_tops_uart1_2_funcs[] = { 4, 4 };
+
+static const int mt7988_net_wo0_uart_txd_1_pins[] = { 80 };
+static int mt7988_net_wo0_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_1_pins[] = { 81 };
+static int mt7988_net_wo1_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_1_pins[] = { 82 };
+static int mt7988_net_wo2_uart_txd_1_funcs[] = { 3 };
+
+/* udi */
+static const int mt7988_udi_pins[] = { 32, 33, 34, 35, 36 };
+static int mt7988_udi_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2s */
+static const int mt7988_i2s_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_i2s_funcs[] = { 1, 1, 1, 1, 1 };
+
+/* pcm */
+static const int mt7988_pcm_pins[] = { 50, 51, 52, 53 };
+static int mt7988_pcm_funcs[] = { 1, 1, 1, 1 };
+
+/* led */
+static const int mt7988_gbe0_led1_pins[] = { 58 };
+static int mt7988_gbe0_led1_funcs[] = { 6 };
+static const int mt7988_gbe1_led1_pins[] = { 59 };
+static int mt7988_gbe1_led1_funcs[] = { 6 };
+static const int mt7988_gbe2_led1_pins[] = { 60 };
+static int mt7988_gbe2_led1_funcs[] = { 6 };
+static const int mt7988_gbe3_led1_pins[] = { 61 };
+static int mt7988_gbe3_led1_funcs[] = { 6 };
+
+static const int mt7988_2p5gbe_led1_pins[] = { 62 };
+static int mt7988_2p5gbe_led1_funcs[] = { 6 };
+
+static const int mt7988_gbe0_led0_pins[] = { 64 };
+static int mt7988_gbe0_led0_funcs[] = { 1 };
+static const int mt7988_gbe1_led0_pins[] = { 65 };
+static int mt7988_gbe1_led0_funcs[] = { 1 };
+static const int mt7988_gbe2_led0_pins[] = { 66 };
+static int mt7988_gbe2_led0_funcs[] = { 1 };
+static const int mt7988_gbe3_led0_pins[] = { 67 };
+static int mt7988_gbe3_led0_funcs[] = { 1 };
+
+static const int mt7988_2p5gbe_led0_pins[] = { 68 };
+static int mt7988_2p5gbe_led0_funcs[] = { 1 };
+
+/* usb */
+static const int mt7988_drv_vbus_p1_pins[] = { 63 };
+static int mt7988_drv_vbus_p1_funcs[] = { 1 };
+
+static const int mt7988_drv_vbus_pins[] = { 79 };
+static int mt7988_drv_vbus_funcs[] = { 1 };
+
+static const struct group_desc mt7988_groups[] = {
+ /* @GPIO(0,1,2,3): uart2 */
+ PINCTRL_PIN_GROUP("uart2", mt7988_uart2),
+ /* @GPIO(0,1,2,3,4): tops_jtag0_0 */
+ PINCTRL_PIN_GROUP("tops_jtag0_0", mt7988_tops_jtag0_0),
+ /* @GPIO(2,3): int_usxgmii */
+ PINCTRL_PIN_GROUP("int_usxgmii", mt7988_int_usxgmii),
+ /* @GPIO(0,1,2,3,4): dfd */
+ PINCTRL_PIN_GROUP("dfd", mt7988_dfd),
+ /* @GPIO(0,1): xfi_phy0_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c0", mt7988_xfi_phy0_i2c0),
+ /* @GPIO(0,1): xfi_phy1_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c0", mt7988_xfi_phy1_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c0", mt7988_xfi_phy_pll_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c1", mt7988_xfi_phy_pll_i2c1),
+ /* @GPIO(4): pwm7 */
+ PINCTRL_PIN_GROUP("pwm7_0", mt7988_pwm7_0),
+ /* @GPIO(5,6) i2c0_0 */
+ PINCTRL_PIN_GROUP("i2c0_0", mt7988_i2c0_0),
+ /* @GPIO(5,6) i2c1_sfp */
+ PINCTRL_PIN_GROUP("i2c1_sfp", mt7988_i2c1_sfp),
+ /* @GPIO(5,6) xfi_pextp_phy0_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy0_i2c", mt7988_xfi_pextp_phy0_i2c),
+ /* @GPIO(5,6) xfi_pextp_phy1_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy1_i2c", mt7988_xfi_pextp_phy1_i2c),
+ /* @GPIO(5,6) mdc_mdio0 */
+ PINCTRL_PIN_GROUP("mdc_mdio0", mt7988_mdc_mdio0),
+ /* @GPIO(7): pcie_wake_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_0", mt7988_pcie_wake_n0_0),
+ /* @GPIO(8): pcie_clk_req_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_0", mt7988_pcie_clk_req_n0_0),
+ /* @GPIO(9): pcie_wake_n3_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_0", mt7988_pcie_wake_n3_0),
+ /* @GPIO(10): pcie_clk_req_n3 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n3", mt7988_pcie_clk_req_n3),
+ /* @GPIO(10): pcie_clk_req_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_1", mt7988_pcie_clk_req_n0_1),
+ /* @GPIO(7,8) pcie_p0_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p0_phy_i2c", mt7988_pcie_p0_phy_i2c),
+ /* @GPIO(7,8) pcie_p1_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p1_phy_i2c", mt7988_pcie_p1_phy_i2c),
+ /* @GPIO(7,8) pcie_p2_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p2_phy_i2c", mt7988_pcie_p2_phy_i2c),
+ /* @GPIO(9,10) pcie_p3_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p3_phy_i2c", mt7988_pcie_p3_phy_i2c),
+ /* @GPIO(9,10) ckm_phy_i2c */
+ PINCTRL_PIN_GROUP("ckm_phy_i2c", mt7988_ckm_phy_i2c),
+ /* @GPIO(11): pmic */
+ PINCTRL_PIN_GROUP("pcie_pmic", mt7988_pmic),
+ /* @GPIO(12): watchdog */
+ PINCTRL_PIN_GROUP("watchdog", mt7988_watchdog),
+ /* @GPIO(13): pcie_wake_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_1", mt7988_pcie_wake_n0_1),
+ /* @GPIO(14): pcie_wake_n3_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_1", mt7988_pcie_wake_n3_1),
+ /* @GPIO(15,16) i2c0_1 */
+ PINCTRL_PIN_GROUP("i2c0_1", mt7988_i2c0_1),
+ /* @GPIO(15,16) u30_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c0", mt7988_u30_phy_i2c0),
+ /* @GPIO(15,16) u32_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c0", mt7988_u32_phy_i2c0),
+ /* @GPIO(15,16) xfi_phy0_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c1", mt7988_xfi_phy0_i2c1),
+ /* @GPIO(15,16) xfi_phy1_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c1", mt7988_xfi_phy1_i2c1),
+ /* @GPIO(15,16) xfi_phy_pll_i2c2 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c2", mt7988_xfi_phy_pll_i2c2),
+ /* @GPIO(17,18) i2c1_0 */
+ PINCTRL_PIN_GROUP("i2c1_0", mt7988_i2c1_0),
+ /* @GPIO(17,18) u30_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c1", mt7988_u30_phy_i2c1),
+ /* @GPIO(17,18) u32_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c1", mt7988_u32_phy_i2c1),
+ /* @GPIO(17,18) xfi_phy_pll_i2c3 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c3", mt7988_xfi_phy_pll_i2c3),
+ /* @GPIO(17,18) sgmii0_i2c */
+ PINCTRL_PIN_GROUP("sgmii0_i2c", mt7988_sgmii0_i2c),
+ /* @GPIO(17,18) sgmii1_i2c */
+ PINCTRL_PIN_GROUP("sgmii1_i2c", mt7988_sgmii1_i2c),
+ /* @GPIO(19): pcie_2l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_0_pereset", mt7988_pcie_2l_0_pereset),
+ /* @GPIO(20): pcie_1l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_1_pereset", mt7988_pcie_1l_1_pereset),
+ /* @GPIO(21): pwm1 */
+ PINCTRL_PIN_GROUP("pwm1", mt7988_pwm1),
+ /* @GPIO(22,23) spi0_wp_hold */
+ PINCTRL_PIN_GROUP("spi0_wp_hold", mt7988_spi0_wp_hold),
+ /* @GPIO(24,25,26,27) spi0 */
+ PINCTRL_PIN_GROUP("spi0", mt7988_spi0),
+ /* @GPIO(28,29,30,31) spi1 */
+ PINCTRL_PIN_GROUP("spi1", mt7988_spi1),
+ /* @GPIO(32,33,34,35) spi2 */
+ PINCTRL_PIN_GROUP("spi2", mt7988_spi2),
+ /* @GPIO(36,37) spi2_wp_hold */
+ PINCTRL_PIN_GROUP("spi2_wp_hold", mt7988_spi2_wp_hold),
+ /* @GPIO(22,23,24,25,26,27) snfi */
+ PINCTRL_PIN_GROUP("snfi", mt7988_snfi),
+ /* @GPIO(22,23) tops_uart0_0 */
+ PINCTRL_PIN_GROUP("tops_uart0_0", mt7988_tops_uart0_0),
+ /* @GPIO(28,29,30,31) uart2_0 */
+ PINCTRL_PIN_GROUP("uart2_0", mt7988_uart2_0),
+ /* @GPIO(32,33,34,35) uart1_0 */
+ PINCTRL_PIN_GROUP("uart1_0", mt7988_uart1_0),
+ /* @GPIO(32,33,34,35) uart2_1 */
+ PINCTRL_PIN_GROUP("uart2_1", mt7988_uart2_1),
+ /* @GPIO(28) net_wo0_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_0", mt7988_net_wo0_uart_txd_0),
+ /* @GPIO(29) net_wo1_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_0", mt7988_net_wo1_uart_txd_0),
+ /* @GPIO(30) net_wo2_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_0", mt7988_net_wo2_uart_txd_0),
+ /* @GPIO(28,29) tops_uart1_0 */
+ PINCTRL_PIN_GROUP("tops_uart0_0", mt7988_tops_uart1_0),
+ /* @GPIO(30,31) tops_uart0_1 */
+ PINCTRL_PIN_GROUP("tops_uart0_1", mt7988_tops_uart0_1),
+ /* @GPIO(36,37) tops_uart1_1 */
+ PINCTRL_PIN_GROUP("tops_uart1_1", mt7988_tops_uart1_1),
+ /* @GPIO(32,33,34,35,36) udi */
+ PINCTRL_PIN_GROUP("udi", mt7988_udi),
+ /* @GPIO(21,28,29,30,31,32,33,34,35,36,37) emmc_45 */
+ PINCTRL_PIN_GROUP("emmc_45", mt7988_emmc_45),
+ /* @GPIO(32,33,34,35,36,37) sdcard */
+ PINCTRL_PIN_GROUP("sdcard", mt7988_sdcard),
+ /* @GPIO(38,39,40,41,42,43,44,45,46,47,48,49) emmc_51 */
+ PINCTRL_PIN_GROUP("emmc_51", mt7988_emmc_51),
+ /* @GPIO(28,29) 2p5g_ext_mdio */
+ PINCTRL_PIN_GROUP("2p5g_ext_mdio", mt7988_2p5g_ext_mdio),
+ /* @GPIO(30,31) gbe_ext_mdio */
+ PINCTRL_PIN_GROUP("gbe_ext_mdio", mt7988_gbe_ext_mdio),
+ /* @GPIO(50,51,52,53,54) i2s */
+ PINCTRL_PIN_GROUP("i2s", mt7988_i2s),
+ /* @GPIO(50,51,52,53) pcm */
+ PINCTRL_PIN_GROUP("pcm", mt7988_pcm),
+ /* @GPIO(55,56) uart0 */
+ PINCTRL_PIN_GROUP("uart0", mt7988_uart0),
+ /* @GPIO(55,56) tops_uart0_2 */
+ PINCTRL_PIN_GROUP("tops_uart0_2", mt7988_tops_uart0_2),
+ /* @GPIO(50,51,52,53) uart2_2 */
+ PINCTRL_PIN_GROUP("uart2_2", mt7988_uart2_2),
+ /* @GPIO(50,51,52,53,54) wo0_jtag */
+ PINCTRL_PIN_GROUP("wo0_jtag", mt7988_wo0_jtag),
+ /* @GPIO(50,51,52,53,54) wo1-wo1_jtag */
+ PINCTRL_PIN_GROUP("wo1_jtag", mt7988_wo1_jtag),
+ /* @GPIO(50,51,52,53,54) wo2_jtag */
+ PINCTRL_PIN_GROUP("wo2_jtag", mt7988_wo2_jtag),
+ /* @GPIO(57) pwm0 */
+ PINCTRL_PIN_GROUP("pwm0", mt7988_pwm0),
+ /* @GPIO(58) pwm2_0 */
+ PINCTRL_PIN_GROUP("pwm2_0", mt7988_pwm2_0),
+ /* @GPIO(59) pwm3_0 */
+ PINCTRL_PIN_GROUP("pwm3_0", mt7988_pwm3_0),
+ /* @GPIO(60) pwm4_0 */
+ PINCTRL_PIN_GROUP("pwm4_0", mt7988_pwm4_0),
+ /* @GPIO(61) pwm5_0 */
+ PINCTRL_PIN_GROUP("pwm5_0", mt7988_pwm5_0),
+ /* @GPIO(58,59,60,61,62) jtag */
+ PINCTRL_PIN_GROUP("jtag", mt7988_jtag),
+ /* @GPIO(58,59,60,61,62) tops_jtag0_1 */
+ PINCTRL_PIN_GROUP("tops_jtag0_1", mt7988_tops_jtag0_1),
+ /* @GPIO(58,59,60,61) uart2_3 */
+ PINCTRL_PIN_GROUP("uart2_3", mt7988_uart2_3),
+ /* @GPIO(58,59,60,61) uart1_1 */
+ PINCTRL_PIN_GROUP("uart1_1", mt7988_uart1_1),
+ /* @GPIO(58,59,60,61) gbe_led1 */
+ PINCTRL_PIN_GROUP("gbe0_led1", mt7988_gbe0_led1),
+ PINCTRL_PIN_GROUP("gbe1_led1", mt7988_gbe1_led1),
+ PINCTRL_PIN_GROUP("gbe2_led1", mt7988_gbe2_led1),
+ PINCTRL_PIN_GROUP("gbe3_led1", mt7988_gbe3_led1),
+ /* @GPIO(62) pwm6_0 */
+ PINCTRL_PIN_GROUP("pwm6_0", mt7988_pwm6_0),
+ /* @GPIO(62) 2p5gbe_led1 */
+ PINCTRL_PIN_GROUP("2p5gbe_led1", mt7988_2p5gbe_led1),
+ /* @GPIO(64,65,66,67) gbe_led0 */
+ PINCTRL_PIN_GROUP("gbe0_led0", mt7988_gbe0_led0),
+ PINCTRL_PIN_GROUP("gbe1_led0", mt7988_gbe1_led0),
+ PINCTRL_PIN_GROUP("gbe2_led0", mt7988_gbe2_led0),
+ PINCTRL_PIN_GROUP("gbe3_led0", mt7988_gbe3_led0),
+ /* @GPIO(68) 2p5gbe_led0 */
+ PINCTRL_PIN_GROUP("2p5gbe_led0", mt7988_2p5gbe_led0),
+ /* @GPIO(63) drv_vbus_p1 */
+ PINCTRL_PIN_GROUP("drv_vbus_p1", mt7988_drv_vbus_p1),
+ /* @GPIO(63) pcie_clk_req_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_1", mt7988_pcie_clk_req_n2_1),
+ /* @GPIO(69, 70) mdc_mdio1 */
+ PINCTRL_PIN_GROUP("mdc_mdio1", mt7988_mdc_mdio1),
+ /* @GPIO(69, 70) i2c1_2 */
+ PINCTRL_PIN_GROUP("i2c1_2", mt7988_i2c1_2),
+ /* @GPIO(69) pwm6 */
+ PINCTRL_PIN_GROUP("pwm6", mt7988_pwm6),
+ /* @GPIO(70) pwm7 */
+ PINCTRL_PIN_GROUP("pwm7", mt7988_pwm7),
+ /* @GPIO(69,70) i2c2_0 */
+ PINCTRL_PIN_GROUP("i2c2_0", mt7988_i2c2_0),
+ /* @GPIO(71,72) i2c2_1 */
+ PINCTRL_PIN_GROUP("i2c2_1", mt7988_i2c2_1),
+ /* @GPIO(73) pcie_2l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_1_pereset", mt7988_pcie_2l_1_pereset),
+ /* @GPIO(74) pcie_1l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_0_pereset", mt7988_pcie_1l_0_pereset),
+ /* @GPIO(75) pcie_wake_n1_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n1_0", mt7988_pcie_wake_n1_0),
+ /* @GPIO(76) pcie_clk_req_n1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n1", mt7988_pcie_clk_req_n1),
+ /* @GPIO(77) pcie_wake_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_0", mt7988_pcie_wake_n2_0),
+ /* @GPIO(78) pcie_clk_req_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_0", mt7988_pcie_clk_req_n2_0),
+ /* @GPIO(79) drv_vbus */
+ PINCTRL_PIN_GROUP("drv_vbus", mt7988_drv_vbus),
+ /* @GPIO(79) pcie_wake_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_1", mt7988_pcie_wake_n2_1),
+ /* @GPIO(80,81,82,83) uart1_2 */
+ PINCTRL_PIN_GROUP("uart1_2", mt7988_uart1_2),
+ /* @GPIO(80,81) uart1_2_lite */
+ PINCTRL_PIN_GROUP("uart1_2_lite", mt7988_uart1_2_lite),
+ /* @GPIO(80) pwm2 */
+ PINCTRL_PIN_GROUP("pwm2", mt7988_pwm2),
+ /* @GPIO(81) pwm3 */
+ PINCTRL_PIN_GROUP("pwm3", mt7988_pwm3),
+ /* @GPIO(82) pwm4 */
+ PINCTRL_PIN_GROUP("pwm4", mt7988_pwm4),
+ /* @GPIO(83) pwm5 */
+ PINCTRL_PIN_GROUP("pwm5", mt7988_pwm5),
+ /* @GPIO(80,81) tops_uart1_2 */
+ PINCTRL_PIN_GROUP("tops_uart1_2", mt7988_tops_uart1_2),
+ /* @GPIO(80) net_wo0_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_1", mt7988_net_wo0_uart_txd_1),
+ /* @GPIO(81) net_wo1_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_1", mt7988_net_wo1_uart_txd_1),
+ /* @GPIO(82) net_wo2_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_1", mt7988_net_wo2_uart_txd_1),
+};
+
+/* Join the groups that provide the same capability from the user's point of
+ * view, so that they can be selected together through the device tree.
+ */
+static const char * const mt7988_jtag_groups[] = {
+ "tops_jtag0_0", "wo0_jtag", "wo1_jtag",
+ "wo2_jtag", "jtag", "tops_jtag0_1",
+};
+static const char * const mt7988_int_usxgmii_groups[] = {
+ "int_usxgmii",
+};
+static const char * const mt7988_pwm_groups[] = {
+ "pwm0", "pwm1", "pwm2", "pwm2_0", "pwm3", "pwm3_0", "pwm4", "pwm4_0",
+ "pwm5", "pwm5_0", "pwm6", "pwm6_0", "pwm7", "pwm7_0",
+};
+static const char * const mt7988_dfd_groups[] = {
+ "dfd",
+};
+static const char * const mt7988_i2c_groups[] = {
+ "xfi_phy0_i2c0",
+ "xfi_phy1_i2c0",
+ "xfi_phy_pll_i2c0",
+ "xfi_phy_pll_i2c1",
+ "i2c0_0",
+ "i2c1_sfp",
+ "xfi_pextp_phy0_i2c",
+ "xfi_pextp_phy1_i2c",
+ "i2c0_1",
+ "u30_phy_i2c0",
+ "u32_phy_i2c0",
+ "xfi_phy0_i2c1",
+ "xfi_phy1_i2c1",
+ "xfi_phy_pll_i2c2",
+ "i2c1_0",
+ "u30_phy_i2c1",
+ "u32_phy_i2c1",
+ "xfi_phy_pll_i2c3",
+ "sgmii0_i2c",
+ "sgmii1_i2c",
+ "i2c1_2",
+ "i2c2_0",
+ "i2c2_1",
+};
+static const char * const mt7988_ethernet_groups[] = {
+ "mdc_mdio0",
+ "2p5g_ext_mdio",
+ "gbe_ext_mdio",
+ "mdc_mdio1",
+};
+static const char * const mt7988_pcie_groups[] = {
+ "pcie_wake_n0_0", "pcie_clk_req_n0_0", "pcie_wake_n3_0",
+ "pcie_clk_req_n3", "pcie_p0_phy_i2c", "pcie_p1_phy_i2c",
+ "pcie_p3_phy_i2c", "pcie_p2_phy_i2c", "ckm_phy_i2c",
+ "pcie_wake_n0_1", "pcie_wake_n3_1", "pcie_2l_0_pereset",
+ "pcie_1l_1_pereset", "pcie_clk_req_n2_1", "pcie_2l_1_pereset",
+ "pcie_1l_0_pereset", "pcie_wake_n1_0", "pcie_clk_req_n1",
+ "pcie_wake_n2_0", "pcie_clk_req_n2_0", "pcie_wake_n2_1",
+ "pcie_clk_req_n0_1"
+};
+static const char * const mt7988_pmic_groups[] = {
+ "pmic",
+};
+static const char * const mt7988_wdt_groups[] = {
+ "watchdog",
+};
+static const char * const mt7988_spi_groups[] = {
+ "spi0", "spi0_wp_hold", "spi1", "spi2", "spi2_wp_hold",
+};
+static const char * const mt7988_flash_groups[] = {
+ "emmc_45", "sdcard", "snfi", "emmc_51",
+};
+static const char * const mt7988_uart_groups[] = {
+ "uart2",
+ "tops_uart0_0",
+ "uart2_0",
+ "uart1_0",
+ "uart2_1",
+ "net_wo0_uart_txd_0",
+ "net_wo1_uart_txd_0",
+ "net_wo2_uart_txd_0",
+ "tops_uart1_0",
+ "ops_uart0_1",
+ "ops_uart1_1",
+ "uart0",
+ "tops_uart0_2",
+ "uart1_1",
+ "uart2_3",
+ "uart1_2",
+ "uart1_2_lite",
+ "tops_uart1_2",
+ "net_wo0_uart_txd_1",
+ "net_wo1_uart_txd_1",
+ "net_wo2_uart_txd_1",
+};
+static const char * const mt7988_udi_groups[] = {
+ "udi",
+};
+static const char * const mt7988_audio_groups[] = {
+ "i2s", "pcm",
+};
+static const char * const mt7988_led_groups[] = {
+ "gbe0_led1", "gbe1_led1", "gbe2_led1", "gbe3_led1", "2p5gbe_led1",
+ "gbe0_led0", "gbe1_led0", "gbe2_led0", "gbe3_led0", "2p5gbe_led0",
+ "wf5g_led0", "wf5g_led1",
+};
+static const char * const mt7988_usb_groups[] = {
+ "drv_vbus",
+ "drv_vbus_p1",
+};
+
+static const struct function_desc mt7988_functions[] = {
+ { { "audio", mt7988_audio_groups, ARRAY_SIZE(mt7988_audio_groups) },
+ NULL },
+ { { "jtag", mt7988_jtag_groups, ARRAY_SIZE(mt7988_jtag_groups) },
+ NULL },
+ { { "int_usxgmii", mt7988_int_usxgmii_groups,
+ ARRAY_SIZE(mt7988_int_usxgmii_groups) },
+ NULL },
+ { { "pwm", mt7988_pwm_groups, ARRAY_SIZE(mt7988_pwm_groups) }, NULL },
+ { { "dfd", mt7988_dfd_groups, ARRAY_SIZE(mt7988_dfd_groups) }, NULL },
+ { { "i2c", mt7988_i2c_groups, ARRAY_SIZE(mt7988_i2c_groups) }, NULL },
+ { { "eth", mt7988_ethernet_groups, ARRAY_SIZE(mt7988_ethernet_groups) },
+ NULL },
+ { { "pcie", mt7988_pcie_groups, ARRAY_SIZE(mt7988_pcie_groups) },
+ NULL },
+ { { "pmic", mt7988_pmic_groups, ARRAY_SIZE(mt7988_pmic_groups) },
+ NULL },
+ { { "watchdog", mt7988_wdt_groups, ARRAY_SIZE(mt7988_wdt_groups) },
+ NULL },
+ { { "spi", mt7988_spi_groups, ARRAY_SIZE(mt7988_spi_groups) }, NULL },
+ { { "flash", mt7988_flash_groups, ARRAY_SIZE(mt7988_flash_groups) },
+ NULL },
+ { { "uart", mt7988_uart_groups, ARRAY_SIZE(mt7988_uart_groups) },
+ NULL },
+ { { "udi", mt7988_udi_groups, ARRAY_SIZE(mt7988_udi_groups) }, NULL },
+ { { "usb", mt7988_usb_groups, ARRAY_SIZE(mt7988_usb_groups) }, NULL },
+ { { "led", mt7988_led_groups, ARRAY_SIZE(mt7988_led_groups) }, NULL },
+};
+
+static const struct mtk_eint_hw mt7988_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7988_pins),
+ .db_cnt = 16,
+};
+
+static const char * const mt7988_pinctrl_register_base_names[] = {
+ "gpio", "iocfg_tr", "iocfg_br",
+ "iocfg_rb", "iocfg_lb", "iocfg_tl",
+};
+
+static const struct mtk_pin_soc mt7988_data = {
+ .reg_cal = mt7988_reg_cals,
+ .pins = mt7988_pins,
+ .npins = ARRAY_SIZE(mt7988_pins),
+ .grps = mt7988_groups,
+ .ngrps = ARRAY_SIZE(mt7988_groups),
+ .funcs = mt7988_functions,
+ .nfuncs = ARRAY_SIZE(mt7988_functions),
+ .eint_hw = &mt7988_eint_hw,
+ .gpio_m = 0,
+ .ies_present = false,
+ .base_names = mt7988_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt7988_pinctrl_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set,
+ .bias_disable_get = mtk_pinconf_bias_disable_get,
+ .bias_set = mtk_pinconf_bias_set,
+ .bias_get = mtk_pinconf_bias_get,
+ .pull_type = mt7988_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt7988_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt7988-pinctrl" },
+ {}
+};
+
+static int mt7988_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_moore_pinctrl_probe(pdev, &mt7988_data);
+}
+
+static struct platform_driver mt7988_pinctrl_driver = {
+ .driver = {
+ .name = "mt7988-pinctrl",
+ .of_match_table = mt7988_pinctrl_of_match,
+ },
+ .probe = mt7988_pinctrl_probe,
+};
+
+static int __init mt7988_pinctrl_init(void)
+{
+ return platform_driver_register(&mt7988_pinctrl_driver);
+}
+arch_initcall(mt7988_pinctrl_init);
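Note on the table layout above: the driver relies on the MediaTek "moore" helper macro to pair each <group>_pins[] array with its <group>_funcs[] array by token pasting. The sketch below shows the assumed shape of that pairing (macro name suffixed _SKETCH to mark it as illustrative; the real macro lives in the moore helpers), so that PINCTRL_PIN_GROUP("uart2", mt7988_uart2) binds pins { 0, 1, 2, 3 } to mux function 1 under the group name "uart2".

/* Illustrative only: assumed expansion shape of PINCTRL_PIN_GROUP(). */
#define PINCTRL_PIN_GROUP_SKETCH(name, id)				\
	{								\
		.grp = PINCTRL_PINGROUP(name, id##_pins,		\
					ARRAY_SIZE(id##_pins)),		\
		.data = id##_funcs,	/* one mux value per pin */	\
	}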
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 54301fbba524..00e95682b9f8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1);
*/
static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
- u32 pullup, u32 arg)
+ u32 pullup, u32 arg, bool pd_only)
{
int err, pu, pd;
@@ -587,18 +587,16 @@ static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
pu = 0;
pd = 1;
} else {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
- if (err)
- goto out;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
+ if (!pd_only) {
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
+ if (err)
+ return err;
+ }
-out:
- return err;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
}
static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw,
@@ -737,7 +735,7 @@ static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
return err;
}
- return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable, false);
}
int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
@@ -758,8 +756,14 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
return 0;
}
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, true);
+ if (!err)
+ return 0;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, false);
if (!err)
return 0;
}
@@ -878,6 +882,29 @@ out:
return err;
}
+static int mtk_pinconf_bias_get_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, pd;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
+
+ if (pd == 0) {
+ *pullup = 0;
+ *enable = MTK_DISABLE;
+ } else if (pd == 1) {
+ *pullup = 0;
+ *enable = MTK_ENABLE;
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
u32 *pullup, u32 *enable)
@@ -947,6 +974,12 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
return 0;
}
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_get_pd(hw, desc, pullup, enable);
+ if (!err)
+ return 0;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
if (!err)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 23688ca6d04e..9c271dc2b521 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -24,6 +24,7 @@
* turned on/off itself. But it can't be selected pull up/down
*/
#define MTK_PULL_RSEL_TYPE BIT(3)
+#define MTK_PULL_PD_TYPE BIT(4)
/* MTK_PULL_PU_PD_RSEL_TYPE is a type which is controlled by
* MTK_PULL_PU_PD_TYPE and MTK_PULL_RSEL_TYPE.
*/
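The new MTK_PULL_PD_TYPE capability slots into the try-everything paths of mtk_pinconf_bias_set_combo()/get_combo() ahead of the generic PU/PD handling. A condensed sketch of the resulting set-side fallback chain follows (branch order taken from the hunks above; the initial error value and the remaining schemes are paraphrased, not literal):

/* Condensed sketch, not the literal function body. */
static int bias_set_combo_sketch(struct mtk_pinctrl *hw,
				 const struct mtk_pin_desc *desc,
				 u32 pullup, u32 arg, u32 try_all_type)
{
	int err = -ENOTSUPP;

	/* New: pads with only a PD register program just that bit */
	if (try_all_type & MTK_PULL_PD_TYPE) {
		err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, true);
		if (!err)
			return 0;
	}

	/* Existing: pads with both PU and PD registers */
	if (try_all_type & MTK_PULL_PU_PD_TYPE) {
		err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, false);
		if (!err)
			return 0;
	}

	/* Further schemes (PULLSEL/PULLEN, RSEL) follow the same shape */
	return err;
}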
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 4ce2e35a6373..8cd4ba5cf0bd 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -22,6 +22,7 @@
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mfd/abx500.h>
@@ -496,7 +497,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " %-9s", pull_up_down[pd]);
} else
- seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo");
+ seq_printf(s, " %-9s", str_hi_lo(chip->get(chip, offset)));
mode = abx500_get_mode(pctldev, chip, offset);
@@ -865,7 +866,7 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
pin, configs[i],
(param == PIN_CONFIG_OUTPUT) ? "output " : "input",
(param == PIN_CONFIG_OUTPUT) ?
- (argument ? "high" : "low") :
+ str_high_low(argument) :
(argument ? "pull up" : "pull down"));
/* on ABx500, there is no GPIO0, so adjust the offset */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f4f10c60c1d2..8940e04fcf4c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* Since we request GPIOs from ourself */
@@ -438,9 +439,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
* - Any spurious wake up event during switch sequence to be ignored and
* cleared
*/
-static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+static int nmk_gpio_glitch_slpm_init(unsigned int *slpm)
{
- int i;
+ int i, j, ret;
for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
@@ -449,11 +450,21 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
if (!chip)
break;
- clk_enable(chip->clk);
+ ret = clk_enable(chip->clk);
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ chip = nmk_gpio_chips[j];
+ clk_disable(chip->clk);
+ }
+
+ return ret;
+ }
slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
writel(temp, chip->addr + NMK_GPIO_SLPC);
}
+
+ return 0;
}
static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
@@ -923,7 +934,9 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
slpm[nmk_chip->bank] &= ~BIT(bit);
}
- nmk_gpio_glitch_slpm_init(slpm);
+ ret = nmk_gpio_glitch_slpm_init(slpm);
+ if (ret)
+ goto out_pre_slpm_init;
}
for (i = 0; i < g->grp.npins; i++) {
@@ -940,7 +953,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
g->grp.pins[i], g->altsetting);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ goto out_glitch;
+
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -988,6 +1004,7 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
unsigned int bit;
+ int ret;
if (!range) {
dev_err(npct->dev, "invalid range\n");
@@ -1004,7 +1021,9 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
find_nmk_gpio_from_pin(pin, &bit);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
/* There is no glitch when converting any pin to GPIO */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
clk_disable(nmk_chip->clk);
@@ -1058,6 +1077,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
+ int ret;
nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
if (!nmk_chip) {
@@ -1106,17 +1126,19 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
slpm_pull ? pullnames[pull] : "same",
slpm_output ? (output ? "output" : "input")
: "same",
- slpm_val ? (val ? "high" : "low") : "same");
+ slpm_val ? str_high_low(val) : "same");
}
dev_dbg(nmk_chip->chip.parent,
"pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n",
pin, cfg, pullnames[pull], slpmnames[slpm],
output ? "output " : "input",
- output ? (val ? "high" : "low") : "",
- lowemi ? "on" : "off");
+ output ? str_high_low(val) : "",
+ str_on_off(lowemi));
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
if (gpiomode)
/* No glitch when going to GPIO mode */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
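The nomadik changes all follow one pattern: clk_enable() can fail, so its return value must be checked, and any banks already enabled must be rolled back before propagating the error. A stripped-down sketch of the unwind idiom used in nmk_gpio_glitch_slpm_init() above (hypothetical helper, assuming <linux/clk.h>):

#include <linux/clk.h>

/* Hypothetical helper showing the enable-or-unwind idiom. */
static int enable_bank_clocks(struct clk **clks, int n)
{
	int i, j, ret;

	for (i = 0; i < n; i++) {
		ret = clk_enable(clks[i]);
		if (ret) {
			/* Roll back the clocks already enabled */
			for (j = 0; j < i; j++)
				clk_disable(clks[j]);
			return ret;
		}
	}

	return 0;
}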
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 471f644c5eef..d09a5e9b2eca 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -2374,6 +2374,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
pctrl->gpio_bank[id].gc.parent = dev;
pctrl->gpio_bank[id].gc.fwnode = child;
pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].gc.label == NULL)
+ return -ENOMEM;
+
pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0b13d7f17b32..42547f64453e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
- seq_printf(s, " (0x%x",
- pinconf_to_config_argument(config));
+ u32 val = pinconf_to_config_argument(config);
+
if (items[i].format)
- seq_printf(s, " %s)", items[i].format);
+ seq_printf(s, " (%u %s)", val, items[i].format);
else
- seq_puts(s, ")");
+ seq_printf(s, " (0x%x)", val);
}
}
}
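The pinconf-generic change prints the config argument in decimal when a unit string is available and keeps hex only for unit-less values, instead of always printing a hex value before the unit. A runnable userspace illustration of the new formatting (values and the "usec" unit string are examples, not taken from a specific config item):

#include <stdio.h>

/* Mirrors the seq_printf() logic in pinconf_generic_dump_one(). */
static void dump_arg(unsigned int val, const char *unit)
{
	if (unit)
		printf(" (%u %s)\n", val, unit);  /* e.g. " (125 usec)" */
	else
		printf(" (0x%x)\n", val);         /* e.g. " (0x8)" */
}

int main(void)
{
	dump_arg(125, "usec");  /* unit known: decimal plus unit */
	dump_arg(8, NULL);      /* no unit: raw hex, as before */
	return 0;
}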
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index fff6d4209ad5..1d7fdcdec4c8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include "core.h"
@@ -458,7 +459,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (err)
dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
- on ? "enable" : "disable");
+ str_enable_disable(on));
return 0;
}
@@ -908,12 +909,13 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-static int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
int i;
+ u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
@@ -925,11 +927,11 @@ static int amd_gpio_suspend(struct device *dev)
gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
/* mask any interrupts not intended to be a wake source */
- if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
+ if (!(gpio_dev->saved_regs[i] & wake_mask)) {
writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
gpio_dev->base + pin * 4);
- pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
- pin);
+ pm_pr_dbg("Disabling GPIO #%d interrupt for %s.\n",
+ pin, is_suspend ? "suspend" : "hibernate");
}
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -938,6 +940,16 @@ static int amd_gpio_suspend(struct device *dev)
return 0;
}
+static int amd_gpio_suspend(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, true);
+}
+
+static int amd_gpio_hibernate(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, false);
+}
+
static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
@@ -961,8 +973,12 @@ static int amd_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops amd_gpio_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
- amd_gpio_resume)
+ .suspend_late = amd_gpio_suspend,
+ .resume_early = amd_gpio_resume,
+ .freeze_late = amd_gpio_hibernate,
+ .thaw_early = amd_gpio_resume,
+ .poweroff_late = amd_gpio_hibernate,
+ .restore_early = amd_gpio_resume,
};
#endif
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 667be49c3f48..3a1e5bffaf6e 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -80,10 +80,9 @@
#define FUNCTION_MASK GENMASK(1, 0)
#define FUNCTION_INVALID GENMASK(7, 0)
-#define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \
- BIT(WAKE_CNTRL_OFF_S3) | \
- BIT(WAKE_CNTRL_OFF_S4) | \
- BIT(WAKECNTRL_Z_OFF))
+#define WAKE_SOURCE_SUSPEND (BIT(WAKE_CNTRL_OFF_S0I3) | \
+ BIT(WAKE_CNTRL_OFF_S3))
+#define WAKE_SOURCE_HIBERNATE BIT(WAKE_CNTRL_OFF_S4)
struct amd_function {
const char *name;
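Splitting WAKE_SOURCE lets the suspend and hibernate paths keep only the wake bits meaningful for the target sleep state: the S0i3/S3 bits for suspend, the S4 bit for hibernate; everything else gets its interrupt masked before sleeping. A minimal sketch of the resulting predicate (the helper name is illustrative; the driver open-codes this in the common path above):

/* Illustrative helper; not a function in the driver. */
static bool pin_keeps_irq_armed(u32 pin_reg, bool is_suspend)
{
	u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND
				   : WAKE_SOURCE_HIBERNATE;

	/* Pins without a matching wake bit are masked for this sleep state */
	return pin_reg & wake_mask;
}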
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 0d6c2027d4c1..d73004b4a45e 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -42,7 +42,7 @@
#define CY8C95X0_PORTSEL 0x18
/* Port settings, write PORTSEL first */
#define CY8C95X0_INTMASK 0x19
-#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_SELPWM 0x1A
#define CY8C95X0_INVERT 0x1B
#define CY8C95X0_DIRECTION 0x1C
/* Drive mode register change state on writing '1' */
@@ -328,14 +328,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
/*
- * Only 12 registers are present per port (see Table 6 in the
- * datasheet).
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
*/
- if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
- return true;
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -344,8 +344,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
{
- if (reg >= CY8C95X0_VIRTUAL)
- return true;
+ /*
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
+ */
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
@@ -353,6 +356,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
case CY8C95X0_DEVID:
return false;
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -365,8 +369,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
case CY8C95X0_INTMASK:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
- case CY8C95X0_PWMSEL:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
case CY8C95X0_DRV_PD:
@@ -395,7 +399,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
{
switch (reg) {
case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
@@ -466,7 +470,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
.max_register = 0, /* Updated at runtime */
.num_reg_defaults_raw = 0, /* Updated at runtime */
.use_single_read = true, /* Workaround for regcache bug */
+#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
+ .disable_locking = false,
+#else
.disable_locking = true,
+#endif
};
static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
@@ -789,7 +797,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DIRECTION;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT:
reg = CY8C95X0_OUTPUT;
@@ -868,7 +876,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DRV_PP_FAST;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
return cy8c95x0_pinmux_direction(chip, off, !arg);
@@ -1153,7 +1161,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
bitmap_zero(mask, MAX_LINE);
__set_bit(pin, mask);
- if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
seq_puts(s, "not available");
return;
}
@@ -1198,7 +1206,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
}
static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1347,7 +1355,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
ret = devm_request_threaded_irq(chip->dev, irq,
NULL, cy8c95x0_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (ret) {
dev_err(chip->dev, "failed to request irq %d\n", irq);
@@ -1438,15 +1446,15 @@ static int cy8c95x0_probe(struct i2c_client *client)
switch (chip->tpin) {
case 20:
strscpy(chip->name, cy8c95x0_id[0].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE - 1;
break;
case 40:
strscpy(chip->name, cy8c95x0_id[1].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE - 1;
break;
case 60:
strscpy(chip->name, cy8c95x0_id[2].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE - 1;
break;
default:
return -ENODEV;
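The cy8c95x0 regmap changes treat the virtual register space as one MUXED_STRIDE-sized block per port, with only the first 12 offsets of each block backed by hardware (Table 6 in the datasheet), and shrink range_max by one so it names the last valid address rather than one past the end. A sketch of the combined validity check, assuming CY8C95X0_VIRTUAL is stride-aligned as the driver's modulo test implies:

/* Sketch of the per-port window math; mirrors the checks above. */
static bool virtual_reg_backed(unsigned int reg, unsigned int nports)
{
	if (reg < CY8C95X0_VIRTUAL ||
	    reg > CY8C95X0_VIRTUAL + nports * MUXED_STRIDE - 1)
		return false;

	/* Offsets 12..MUXED_STRIDE-1 inside each port block are holes */
	return (reg % MUXED_STRIDE) < 12;
}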
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 631612539af7..e9f61927858d 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -2237,7 +2238,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2245,7 +2246,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2259,7 +2260,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2267,7 +2268,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2588,7 +2589,7 @@ static int gemini_pmx_probe(struct platform_device *pdev)
tmp = val;
for_each_set_bit(i, &tmp, PADS_MAXBIT) {
dev_dbg(dev, "pad group %s %s\n", gemini_padgroups[i],
- (val & BIT(i)) ? "enabled" : "disabled");
+ str_enabled_disabled(val & BIT(i)));
}
/* Check if flash pin is set */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 31703737731b..bc7ee54e062b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3699,7 +3699,7 @@ static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s", gpio_chip->label);
+ seq_puts(p, gpio_chip->label);
}
static const struct irq_chip ingenic_gpio_irqchip = {
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 61532a7a612a..329d54b11529 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1777,7 +1777,7 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct ocelot_match_data luton_desc = {
+static const struct ocelot_match_data luton_desc = {
.desc = {
.name = "luton-pinctrl",
.pins = luton_pins,
@@ -1788,7 +1788,7 @@ static struct ocelot_match_data luton_desc = {
},
};
-static struct ocelot_match_data serval_desc = {
+static const struct ocelot_match_data serval_desc = {
.desc = {
.name = "serval-pinctrl",
.pins = serval_pins,
@@ -1799,7 +1799,7 @@ static struct ocelot_match_data serval_desc = {
},
};
-static struct ocelot_match_data ocelot_desc = {
+static const struct ocelot_match_data ocelot_desc = {
.desc = {
.name = "ocelot-pinctrl",
.pins = ocelot_pins,
@@ -1810,7 +1810,7 @@ static struct ocelot_match_data ocelot_desc = {
},
};
-static struct ocelot_match_data jaguar2_desc = {
+static const struct ocelot_match_data jaguar2_desc = {
.desc = {
.name = "jaguar2-pinctrl",
.pins = jaguar2_pins,
@@ -1821,7 +1821,7 @@ static struct ocelot_match_data jaguar2_desc = {
},
};
-static struct ocelot_match_data servalt_desc = {
+static const struct ocelot_match_data servalt_desc = {
.desc = {
.name = "servalt-pinctrl",
.pins = servalt_pins,
@@ -1832,7 +1832,7 @@ static struct ocelot_match_data servalt_desc = {
},
};
-static struct ocelot_match_data sparx5_desc = {
+static const struct ocelot_match_data sparx5_desc = {
.desc = {
.name = "sparx5-pinctrl",
.pins = sparx5_pins,
@@ -1850,7 +1850,7 @@ static struct ocelot_match_data sparx5_desc = {
},
};
-static struct ocelot_match_data lan966x_desc = {
+static const struct ocelot_match_data lan966x_desc = {
.desc = {
.name = "lan966x-pinctrl",
.pins = lan966x_pins,
@@ -1867,7 +1867,7 @@ static struct ocelot_match_data lan966x_desc = {
},
};
-static struct ocelot_match_data lan969x_desc = {
+static const struct ocelot_match_data lan969x_desc = {
.desc = {
.name = "lan969x-pinctrl",
.pins = lan969x_pins,
@@ -2116,7 +2116,7 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_level_irqchip = {
+static const struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
@@ -2126,7 +2126,7 @@ static struct irq_chip ocelot_level_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
-static struct irq_chip ocelot_irqchip = {
+static const struct irq_chip ocelot_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 36d4eaf0ebd1..15145882950f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Pinctrl driver for Rockchip SoCs
- *
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
@@ -2003,6 +2003,151 @@ static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3562_DRV_BITS_PER_PIN 8
+#define RK3562_DRV_PINS_PER_REG 2
+#define RK3562_DRV_GPIO0_OFFSET 0x20070
+#define RK3562_DRV_GPIO1_OFFSET 0x200
+#define RK3562_DRV_GPIO2_OFFSET 0x240
+#define RK3562_DRV_GPIO3_OFFSET 0x10280
+#define RK3562_DRV_GPIO4_OFFSET 0x102C0
+
+static int rk3562_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_DRV_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_DRV_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_DRV_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_DRV_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_DRV_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_DRV_PINS_PER_REG;
+ *bit *= RK3562_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3562_PULL_BITS_PER_PIN 2
+#define RK3562_PULL_PINS_PER_REG 8
+#define RK3562_PULL_GPIO0_OFFSET 0x20020
+#define RK3562_PULL_GPIO1_OFFSET 0x80
+#define RK3562_PULL_GPIO2_OFFSET 0x90
+#define RK3562_PULL_GPIO3_OFFSET 0x100A0
+#define RK3562_PULL_GPIO4_OFFSET 0x100B0
+
+static int rk3562_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_PULL_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_PULL_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_PULL_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_PULL_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_PULL_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_PULL_PINS_PER_REG;
+ *bit *= RK3562_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3562_SMT_BITS_PER_PIN 2
+#define RK3562_SMT_PINS_PER_REG 8
+#define RK3562_SMT_GPIO0_OFFSET 0x20030
+#define RK3562_SMT_GPIO1_OFFSET 0xC0
+#define RK3562_SMT_GPIO2_OFFSET 0xD0
+#define RK3562_SMT_GPIO3_OFFSET 0x100E0
+#define RK3562_SMT_GPIO4_OFFSET 0x100F0
+
+static int rk3562_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_SMT_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_SMT_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_SMT_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_SMT_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_SMT_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3562_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_SMT_PINS_PER_REG;
+ *bit *= RK3562_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
#define RK3568_PULL_PMU_OFFSET 0x20
#define RK3568_PULL_GRF_OFFSET 0x80
#define RK3568_PULL_BITS_PER_PIN 2
@@ -2495,7 +2640,8 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3588_DRV_BITS_PER_PIN;
ret = strength;
goto config;
- } else if (ctrl->type == RK3568) {
+ } else if (ctrl->type == RK3562 ||
+ ctrl->type == RK3568) {
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
@@ -2639,6 +2785,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2699,6 +2846,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2810,6 +2958,7 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
data >>= bit;
switch (ctrl->type) {
+ case RK3562:
case RK3568:
return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1);
default:
@@ -2839,6 +2988,7 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
/* enable the write to the equivalent lower bits */
switch (ctrl->type) {
+ case RK3562:
case RK3568:
data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);
rmask = data | (data >> 16);
@@ -2965,6 +3115,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -4086,6 +4237,49 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3562_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS_OFFSET(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20000, 0x20008, 0x20010, 0x20018),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0, 0x08, 0x10, 0x18),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10040, 0x10048, 0x10050, 0x10058),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(4, 16, "gpio4",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0,
+ 0,
+ 0x10060, 0x10068, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rk3562_pin_ctrl __maybe_unused = {
+ .pin_banks = rk3562_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3562_pin_banks),
+ .label = "RK3562-GPIO",
+ .type = RK3562,
+ .pull_calc_reg = rk3562_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3562_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3562_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3568_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
@@ -4210,6 +4404,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3562-pinctrl",
+ .data = &rk3562_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
{ .compatible = "rockchip,rk3576-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 6ebbb0a88ce7..87a20cec8e21 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2020-2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
*
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
@@ -196,6 +196,7 @@ enum rockchip_pinctrl_type {
RK3328,
RK3368,
RK3399,
+ RK3562,
RK3568,
RK3576,
RK3588,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 521f6fef0b9f..aae01120dc52 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -380,7 +380,7 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "input %s ", str_high_low(val));
if (type)
seq_printf(s, "with internal pull-%s ",
- pupd ? "up" : "down");
+ str_up_down(pupd));
else
seq_printf(s, "%s ", pupd ? "floating" : "analog");
}
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 206226318e45..35f47660a56b 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -137,6 +137,12 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8917
+ tristate "Qualcomm 8917 pin controller driver"
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8917 platform.
+
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 9a23d41d801c..5c4100925cf9 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8917) += pinctrl-msm8917.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 796299cd2e4e..0d610b076da3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -233,7 +233,10 @@ enum ipq5424_functions {
msm_mux_sdc_clk,
msm_mux_sdc_cmd,
msm_mux_sdc_data,
- msm_mux_spi0,
+ msm_mux_spi0_clk,
+ msm_mux_spi0_cs,
+ msm_mux_spi0_miso,
+ msm_mux_spi0_mosi,
msm_mux_spi1,
msm_mux_spi10,
msm_mux_spi11,
@@ -297,8 +300,8 @@ static const char * const qspi_clk_groups[] = {
"gpio5",
};
-static const char * const spi0_groups[] = {
- "gpio6", "gpio7", "gpio8", "gpio9",
+static const char * const spi0_clk_groups[] = {
+ "gpio6",
};
static const char * const pwm1_groups[] = {
@@ -315,14 +318,26 @@ static const char * const qdss_tracedata_a_groups[] = {
"gpio38", "gpio39",
};
+static const char * const spi0_cs_groups[] = {
+ "gpio7",
+};
+
static const char * const cri_trng1_groups[] = {
"gpio7",
};
+static const char * const spi0_miso_groups[] = {
+ "gpio8",
+};
+
static const char * const cri_trng2_groups[] = {
"gpio8",
};
+static const char * const spi0_mosi_groups[] = {
+ "gpio9",
+};
+
static const char * const cri_trng3_groups[] = {
"gpio9",
};
@@ -680,7 +695,10 @@ static const struct pinfunction ipq5424_functions[] = {
MSM_PIN_FUNCTION(sdc_clk),
MSM_PIN_FUNCTION(sdc_cmd),
MSM_PIN_FUNCTION(sdc_data),
- MSM_PIN_FUNCTION(spi0),
+ MSM_PIN_FUNCTION(spi0_clk),
+ MSM_PIN_FUNCTION(spi0_cs),
+ MSM_PIN_FUNCTION(spi0_miso),
+ MSM_PIN_FUNCTION(spi0_mosi),
MSM_PIN_FUNCTION(spi1),
MSM_PIN_FUNCTION(spi10),
MSM_PIN_FUNCTION(spi11),
@@ -700,10 +718,10 @@ static const struct msm_pingroup ipq5424_groups[] = {
PINGROUP(3, sdc_data, qspi_data, pwm2, _, _, _, _, _, _),
PINGROUP(4, sdc_cmd, qspi_cs, _, _, _, _, _, _, _),
PINGROUP(5, sdc_clk, qspi_clk, _, _, _, _, _, _, _),
- PINGROUP(6, spi0, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
- PINGROUP(7, spi0, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
- PINGROUP(8, spi0, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
- PINGROUP(9, spi0, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(6, spi0_clk, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(7, spi0_cs, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(8, spi0_miso, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
+ PINGROUP(9, spi0_mosi, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
PINGROUP(10, uart0, pwm0, spi11, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(11, uart0, pwm0, spi1, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(12, uart0, pwm0, spi11, _, prng_rosc0, qdss_tracedata_a, _, _, _),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ec913c2e200f..47daa47153c9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -714,7 +715,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
}
seq_printf(s, " %-8s: %-3s", g->grp.name, is_out ? "out" : "in");
- seq_printf(s, " %-4s func%d", val ? "high" : "low", func);
+ seq_printf(s, " %-4s func%d", str_high_low(val), func);
seq_printf(s, " %dmA", msm_regval_to_drive(drive));
if (pctrl->soc->pull_no_keeper)
seq_printf(s, " %s", pulls_no_keeper[pull]);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
new file mode 100644
index 000000000000..cff137bb3b23
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8917_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "SDC1_CLK"),
+ PINCTRL_PIN(135, "SDC1_CMD"),
+ PINCTRL_PIN(136, "SDC1_DATA"),
+ PINCTRL_PIN(137, "SDC1_RCLK"),
+ PINCTRL_PIN(138, "SDC2_CLK"),
+ PINCTRL_PIN(139, "SDC2_CMD"),
+ PINCTRL_PIN(140, "SDC2_DATA"),
+ PINCTRL_PIN(141, "QDSD_CLK"),
+ PINCTRL_PIN(142, "QDSD_CMD"),
+ PINCTRL_PIN(143, "QDSD_DATA0"),
+ PINCTRL_PIN(144, "QDSD_DATA1"),
+ PINCTRL_PIN(145, "QDSD_DATA2"),
+ PINCTRL_PIN(146, "QDSD_DATA3"),
+};
+
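+/* Declare a single-entry pin array per GPIO so PINGROUP() can refer to it. */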
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+
+static const unsigned int sdc1_clk_pins[] = { 134 };
+static const unsigned int sdc1_cmd_pins[] = { 135 };
+static const unsigned int sdc1_data_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc2_clk_pins[] = { 138 };
+static const unsigned int sdc2_cmd_pins[] = { 139 };
+static const unsigned int sdc2_data_pins[] = { 140 };
+static const unsigned int qdsd_clk_pins[] = { 141 };
+static const unsigned int qdsd_cmd_pins[] = { 142 };
+static const unsigned int qdsd_data0_pins[] = { 143 };
+static const unsigned int qdsd_data1_pins[] = { 144 };
+static const unsigned int qdsd_data2_pins[] = { 145 };
+static const unsigned int qdsd_data3_pins[] = { 146 };
+
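+/*
+ * Each TLMM GPIO owns a 0x1000-byte register window; the ctl, io and
+ * interrupt registers sit at fixed offsets within it. The interrupt
+ * target bits share the intr_cfg register on this generation.
+ */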
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
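+/*
+ * SDC/QDSD pad groups only support pull and drive-strength control;
+ * fields set to -1 are unavailable for these pads.
+ */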
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
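+/* Every selectable function; msm_mux__ marks an unused slot in a group. */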
+enum msm8917_functions {
+ msm_mux_accel_int,
+ msm_mux_adsp_ext,
+ msm_mux_alsp_int,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac_to_gpio_native,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_atest_tsens,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_audio_ref,
+ msm_mux_audio_reset,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp6_spi,
+ msm_mux_blsp8_spi,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_i2c7,
+ msm_mux_blsp_i2c8,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_spi8,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6,
+ msm_mux_blsp_uart7,
+ msm_mux_blsp_uart8,
+ msm_mux_cam0_ldo,
+ msm_mux_cam1_rst,
+ msm_mux_cam1_standby,
+ msm_mux_cam2_rst,
+ msm_mux_cam2_standby,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cdc_pdm0,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_codec_mad,
+ msm_mux_coex_uart,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi_cdc,
+ msm_mux_ebi_ch0,
+ msm_mux_ext_lpass,
+ msm_mux_forced_usb,
+ msm_mux_fp_gpio,
+ msm_mux_fp_int,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_gsm0_tx,
+ msm_mux_key_focus,
+ msm_mux_key_snapshot,
+ msm_mux_key_volp,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_m_voc,
+ msm_mux_mag_int,
+ msm_mux_mdp_vsync,
+ msm_mux_mipi_dsi0,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_pps_in_a,
+ msm_mux_nav_pps_in_b,
+ msm_mux_nav_tsync,
+ msm_mux_nfc_pwr,
+ msm_mux_ov_ldo,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_sensor_rst,
+ msm_mux_smb_int,
+ msm_mux_ssbi_wtr1,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_us_emitter,
+ msm_mux_us_euro,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan2,
+ msm_mux_webcam_rst,
+ msm_mux_webcam_standby,
+ msm_mux_wsa_io,
+ msm_mux_wsa_irq,
+ msm_mux__,
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio0", "gpio1", "gpio6", "gpio7", "gpio12", "gpio13", "gpio23",
+ "gpio42", "gpio43", "gpio44", "gpio47", "gpio66", "gpio86", "gpio87",
+ "gpio88", "gpio92",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio1",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio2",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio2",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const pbs0_groups[] = {
+ "gpio8",
+};
+
+static const char * const pbs1_groups[] = {
+ "gpio9",
+};
+
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio9",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio10",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio4",
+};
+
+static const char * const atest_combodac_to_gpio_native_groups[] = {
+ "gpio4", "gpio12", "gpio13", "gpio20", "gpio21", "gpio28", "gpio29",
+ "gpio30", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44",
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio67", "gpio115",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio5",
+};
+
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio6",
+};
+
+static const char * const pbs2_groups[] = {
+ "gpio7",
+};
+
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio7",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio11",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const blsp_uart4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio12", "gpio13", "gpio94", "gpio95",
+};
+
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio12",
+};
+
+static const char * const codec_mad_groups[] = {
+ "gpio13",
+};
+
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio13",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio16",
+};
+
+static const char * const atest_bbrx1_groups[] = {
+ "gpio16",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_wlan0_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_bbrx0_groups[] = {
+ "gpio17",
+};
+
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio18",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio38", "gpio39",
+ "gpio40", "gpio50",
+};
+
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+
+static const char * const atest_wlan1_groups[] = {
+ "gpio23",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio29",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio33",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio34",
+};
+
+static const char * const cam1_standby_groups[] = {
+ "gpio35",
+};
+
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio35",
+};
+
+static const char * const cam1_rst_groups[] = {
+ "gpio36",
+};
+
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio36",
+};
+
+static const char * const forced_usb_groups[] = {
+ "gpio37",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio37",
+};
+
+static const char * const cam2_rst_groups[] = {
+ "gpio38",
+};
+
+static const char * const webcam_standby_groups[] = {
+ "gpio39",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio39",
+};
+
+static const char * const webcam_rst_groups[] = {
+ "gpio40",
+};
+
+static const char * const ov_ldo_groups[] = {
+ "gpio41",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio41",
+};
+
+static const char * const accel_int_groups[] = {
+ "gpio42",
+};
+
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio42",
+};
+
+static const char * const alsp_int_groups[] = {
+ "gpio43",
+};
+
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio43",
+};
+
+static const char * const mag_int_groups[] = {
+ "gpio44",
+};
+
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio44",
+};
+
+static const char * const blsp6_spi_groups[] = {
+ "gpio47",
+};
+
+static const char * const fp_int_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const uim_batt_groups[] = {
+ "gpio49",
+};
+
+static const char * const cam2_standby_groups[] = {
+ "gpio50",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio51",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio52",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio53",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio54",
+};
+
+static const char * const uim2_data_groups[] = {
+ "gpio55",
+};
+
+static const char * const uim2_clk_groups[] = {
+ "gpio56",
+};
+
+static const char * const uim2_reset_groups[] = {
+ "gpio57",
+};
+
+static const char * const uim2_present_groups[] = {
+ "gpio58",
+};
+
+static const char * const sensor_rst_groups[] = {
+ "gpio59",
+};
+
+static const char * const mipi_dsi0_groups[] = {
+ "gpio60",
+};
+
+static const char * const smb_int_groups[] = {
+ "gpio61",
+};
+
+static const char * const cam0_ldo_groups[] = {
+ "gpio62",
+};
+
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio63",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio63",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio63", "gpio65",
+};
+
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio66", "gpio85", "gpio86", "gpio88", "gpio94", "gpio95",
+};
+
+static const char * const sdcard_det_groups[] = {
+ "gpio67",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio67",
+};
+
+static const char * const ebi_cdc_groups[] = {
+ "gpio67", "gpio69", "gpio118", "gpio119", "gpio120", "gpio123",
+};
+
+static const char * const audio_reset_groups[] = {
+ "gpio68",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio68",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio69",
+};
+
+static const char * const cdc_pdm0_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74",
+};
+
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio69",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio70",
+};
+
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio71",
+};
+
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio72",
+};
+
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+
+static const char * const wcss_bt_groups[] = {
+ "gpio75", "gpio83", "gpio84",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio75",
+};
+
+static const char * const ebi_ch0_groups[] = {
+ "gpio75",
+};
+
+static const char * const wcss_wlan2_groups[] = {
+ "gpio76",
+};
+
+static const char * const wcss_wlan1_groups[] = {
+ "gpio77",
+};
+
+static const char * const wcss_wlan0_groups[] = {
+ "gpio78",
+};
+
+static const char * const wcss_wlan_groups[] = {
+ "gpio79", "gpio80",
+};
+
+static const char * const wcss_fm_groups[] = {
+ "gpio81", "gpio82",
+};
+
+static const char * const ext_lpass_groups[] = {
+ "gpio81",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio82",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio83",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio84",
+};
+
+static const char * const blsp_spi7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const blsp_uart7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio87",
+};
+
+static const char * const blsp_i2c7_groups[] = {
+ "gpio87", "gpio88",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio87",
+};
+
+static const char * const dmic0_clk_groups[] = {
+ "gpio89",
+};
+
+static const char * const dmic0_data_groups[] = {
+ "gpio90",
+};
+
+static const char * const key_volp_groups[] = {
+ "gpio91",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio91",
+};
+
+static const char * const us_emitter_groups[] = {
+ "gpio92",
+};
+
+static const char * const wsa_irq_groups[] = {
+ "gpio93",
+};
+
+static const char * const wsa_io_groups[] = {
+ "gpio94", "gpio95",
+};
+
+static const char * const blsp_spi8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_uart8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_i2c8_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const nav_pps_in_a_groups[] = {
+ "gpio115",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio116",
+};
+
+static const char * const modem_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_in_b_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_groups[] = {
+ "gpio117",
+};
+
+static const char * const gsm0_tx_groups[] = {
+ "gpio119",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio120",
+};
+
+static const char * const atest_tsens_groups[] = {
+ "gpio120",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio121", "gpio122",
+};
+
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio122", "gpio123",
+};
+
+static const char * const fp_gpio_groups[] = {
+ "gpio124",
+};
+
+static const char * const coex_uart_groups[] = {
+ "gpio124", "gpio127",
+};
+
+static const char * const key_snapshot_groups[] = {
+ "gpio127",
+};
+
+static const char * const key_focus_groups[] = {
+ "gpio128",
+};
+
+static const char * const nfc_pwr_groups[] = {
+ "gpio129",
+};
+
+static const char * const blsp8_spi_groups[] = {
+ "gpio130",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio132",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio133",
+};
+
+static const struct pinfunction msm8917_functions[] = {
+ MSM_PIN_FUNCTION(accel_int),
+ MSM_PIN_FUNCTION(adsp_ext),
+ MSM_PIN_FUNCTION(alsp_int),
+ MSM_PIN_FUNCTION(atest_bbrx0),
+ MSM_PIN_FUNCTION(atest_bbrx1),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_char0),
+ MSM_PIN_FUNCTION(atest_char1),
+ MSM_PIN_FUNCTION(atest_char2),
+ MSM_PIN_FUNCTION(atest_char3),
+ MSM_PIN_FUNCTION(atest_combodac_to_gpio_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest0_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest1_native),
+ MSM_PIN_FUNCTION(atest_tsens),
+ MSM_PIN_FUNCTION(atest_wlan0),
+ MSM_PIN_FUNCTION(atest_wlan1),
+ MSM_PIN_FUNCTION(audio_ref),
+ MSM_PIN_FUNCTION(audio_reset),
+ MSM_PIN_FUNCTION(bimc_dte0),
+ MSM_PIN_FUNCTION(bimc_dte1),
+ MSM_PIN_FUNCTION(blsp6_spi),
+ MSM_PIN_FUNCTION(blsp8_spi),
+ MSM_PIN_FUNCTION(blsp_i2c1),
+ MSM_PIN_FUNCTION(blsp_i2c2),
+ MSM_PIN_FUNCTION(blsp_i2c3),
+ MSM_PIN_FUNCTION(blsp_i2c4),
+ MSM_PIN_FUNCTION(blsp_i2c5),
+ MSM_PIN_FUNCTION(blsp_i2c6),
+ MSM_PIN_FUNCTION(blsp_i2c7),
+ MSM_PIN_FUNCTION(blsp_i2c8),
+ MSM_PIN_FUNCTION(blsp_spi1),
+ MSM_PIN_FUNCTION(blsp_spi2),
+ MSM_PIN_FUNCTION(blsp_spi3),
+ MSM_PIN_FUNCTION(blsp_spi4),
+ MSM_PIN_FUNCTION(blsp_spi5),
+ MSM_PIN_FUNCTION(blsp_spi6),
+ MSM_PIN_FUNCTION(blsp_spi7),
+ MSM_PIN_FUNCTION(blsp_spi8),
+ MSM_PIN_FUNCTION(blsp_uart1),
+ MSM_PIN_FUNCTION(blsp_uart2),
+ MSM_PIN_FUNCTION(blsp_uart3),
+ MSM_PIN_FUNCTION(blsp_uart4),
+ MSM_PIN_FUNCTION(blsp_uart5),
+ MSM_PIN_FUNCTION(blsp_uart6),
+ MSM_PIN_FUNCTION(blsp_uart7),
+ MSM_PIN_FUNCTION(blsp_uart8),
+ MSM_PIN_FUNCTION(cam0_ldo),
+ MSM_PIN_FUNCTION(cam1_rst),
+ MSM_PIN_FUNCTION(cam1_standby),
+ MSM_PIN_FUNCTION(cam2_rst),
+ MSM_PIN_FUNCTION(cam2_standby),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async),
+ MSM_PIN_FUNCTION(cci_i2c),
+ MSM_PIN_FUNCTION(cci_timer0),
+ MSM_PIN_FUNCTION(cci_timer1),
+ MSM_PIN_FUNCTION(cdc_pdm0),
+ MSM_PIN_FUNCTION(codec_int1),
+ MSM_PIN_FUNCTION(codec_int2),
+ MSM_PIN_FUNCTION(codec_mad),
+ MSM_PIN_FUNCTION(coex_uart),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(cri_trng0),
+ MSM_PIN_FUNCTION(cri_trng1),
+ MSM_PIN_FUNCTION(dbg_out),
+ MSM_PIN_FUNCTION(dmic0_clk),
+ MSM_PIN_FUNCTION(dmic0_data),
+ MSM_PIN_FUNCTION(ebi_cdc),
+ MSM_PIN_FUNCTION(ebi_ch0),
+ MSM_PIN_FUNCTION(ext_lpass),
+ MSM_PIN_FUNCTION(forced_usb),
+ MSM_PIN_FUNCTION(fp_gpio),
+ MSM_PIN_FUNCTION(fp_int),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_b),
+ MSM_PIN_FUNCTION(gcc_plltest),
+ MSM_PIN_FUNCTION(gcc_tlmm),
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(gsm0_tx),
+ MSM_PIN_FUNCTION(key_focus),
+ MSM_PIN_FUNCTION(key_snapshot),
+ MSM_PIN_FUNCTION(key_volp),
+ MSM_PIN_FUNCTION(ldo_en),
+ MSM_PIN_FUNCTION(ldo_update),
+ MSM_PIN_FUNCTION(lpass_slimbus),
+ MSM_PIN_FUNCTION(lpass_slimbus0),
+ MSM_PIN_FUNCTION(lpass_slimbus1),
+ MSM_PIN_FUNCTION(m_voc),
+ MSM_PIN_FUNCTION(mag_int),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(mipi_dsi0),
+ MSM_PIN_FUNCTION(modem_tsync),
+ MSM_PIN_FUNCTION(nav_pps),
+ MSM_PIN_FUNCTION(nav_pps_in_a),
+ MSM_PIN_FUNCTION(nav_pps_in_b),
+ MSM_PIN_FUNCTION(nav_tsync),
+ MSM_PIN_FUNCTION(nfc_pwr),
+ MSM_PIN_FUNCTION(ov_ldo),
+ MSM_PIN_FUNCTION(pa_indicator),
+ MSM_PIN_FUNCTION(pbs0),
+ MSM_PIN_FUNCTION(pbs1),
+ MSM_PIN_FUNCTION(pbs2),
+ MSM_PIN_FUNCTION(pri_mi2s),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(pri_mi2s_ws),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_a),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_b),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_a),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_b),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_a),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_b),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b1),
+ MSM_PIN_FUNCTION(qdss_traceclk_a),
+ MSM_PIN_FUNCTION(qdss_traceclk_b),
+ MSM_PIN_FUNCTION(qdss_tracectl_a),
+ MSM_PIN_FUNCTION(qdss_tracectl_b),
+ MSM_PIN_FUNCTION(qdss_tracedata_a),
+ MSM_PIN_FUNCTION(qdss_tracedata_b),
+ MSM_PIN_FUNCTION(sd_write),
+ MSM_PIN_FUNCTION(sdcard_det),
+ MSM_PIN_FUNCTION(sec_mi2s),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(sensor_rst),
+ MSM_PIN_FUNCTION(smb_int),
+ MSM_PIN_FUNCTION(ssbi_wtr1),
+ MSM_PIN_FUNCTION(ts_resout),
+ MSM_PIN_FUNCTION(ts_sample),
+ MSM_PIN_FUNCTION(uim1_clk),
+ MSM_PIN_FUNCTION(uim1_data),
+ MSM_PIN_FUNCTION(uim1_present),
+ MSM_PIN_FUNCTION(uim1_reset),
+ MSM_PIN_FUNCTION(uim2_clk),
+ MSM_PIN_FUNCTION(uim2_data),
+ MSM_PIN_FUNCTION(uim2_present),
+ MSM_PIN_FUNCTION(uim2_reset),
+ MSM_PIN_FUNCTION(uim_batt),
+ MSM_PIN_FUNCTION(us_emitter),
+ MSM_PIN_FUNCTION(us_euro),
+ MSM_PIN_FUNCTION(wcss_bt),
+ MSM_PIN_FUNCTION(wcss_fm),
+ MSM_PIN_FUNCTION(wcss_wlan),
+ MSM_PIN_FUNCTION(wcss_wlan0),
+ MSM_PIN_FUNCTION(wcss_wlan1),
+ MSM_PIN_FUNCTION(wcss_wlan2),
+ MSM_PIN_FUNCTION(webcam_rst),
+ MSM_PIN_FUNCTION(webcam_standby),
+ MSM_PIN_FUNCTION(wsa_io),
+ MSM_PIN_FUNCTION(wsa_irq),
+};
+
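+/* Up to nine alternate functions per GPIO; '_' leaves a slot unused. */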
+static const struct msm_pingroup msm8917_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, qdss_tracedata_b, _, _, _, _,
+ _, _),
+ PINGROUP(1, blsp_spi1, blsp_uart1, adsp_ext, _, _, _, _, _,
+ qdss_tracedata_b),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, prng_rosc, _, _, _,
+ _, _),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi2, blsp_uart2, ldo_update, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(5, blsp_spi2, blsp_uart2, ldo_en, _, _, _, _, _, _),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, gcc_gp1_clk_b,
+ qdss_tracedata_b, _, _, _, _),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, pbs2, _,
+ qdss_tracedata_b, _, atest_gpsadc_dtest0_native, _),
+ PINGROUP(8, blsp_spi3, blsp_uart3, pbs0, _, _, _, _, _, _),
+ PINGROUP(9, blsp_spi3, blsp_uart3, pbs1, pwr_modem_enabled_b, _, _,
+ _, _, _),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp2_clk_b, _, _,
+ _, _, _),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp3_clk_b, _, _,
+ _, _, _),
+ PINGROUP(12, blsp_spi4, blsp_uart4, sec_mi2s, pwr_nav_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(13, blsp_spi4, blsp_uart4, sec_mi2s, pwr_crypto_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(15, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, blsp_uart5, _, _, _, _, qdss_traceclk_a,
+ _, atest_bbrx1),
+ PINGROUP(17, blsp_spi5, blsp_uart5, m_voc, qdss_cti_trig_in_a0, _,
+ atest_bbrx0, _, _, _),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracectl_a, _,
+ atest_gpsadc_dtest1_native, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracedata_a, _,
+ _, _, _, _),
+ PINGROUP(20, blsp_spi6, blsp_uart6, _, _, _, _, _, _,
+ qdss_tracectl_b),
+ PINGROUP(21, blsp_spi6, blsp_uart6, m_voc, _, _, _, _, _,
+ qdss_cti_trig_in_b0),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_b, _,
+ atest_wlan0, _, _, _),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_tracedata_b, _,
+ atest_wlan1, _, _, _),
+ PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a, _, _, _,
+ _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(28, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(29, cci_i2c, pwr_modem_enabled_a, _, _, _, _, _,
+ qdss_tracedata_a, _),
+ PINGROUP(30, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(31, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(32, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(33, cci_timer0, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(34, cci_timer1, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(35, pwr_nav_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(36, pwr_crypto_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(37, _, _, _, _, _, qdss_cti_trig_out_b1, _, _, _),
+ PINGROUP(38, _, qdss_tracedata_a, _, _, _, _, _, _, _),
+ PINGROUP(39, cci_async, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(40, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native, _, _),
+ PINGROUP(41, sd_write, _, _, _, _, _, _, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(42, gcc_gp1_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(43, gcc_gp2_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(44, gcc_gp3_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(45, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(46, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(47, blsp6_spi, _, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(48, _, qdss_cti_trig_in_b1, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(49, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(50, qdss_tracedata_a, _, _, _, _, _, _, _, _),
+ PINGROUP(51, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(52, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim2_data, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim2_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim2_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(58, uim2_present, _, _, _, _, _, _, _, _),
+ PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ PINGROUP(60, _, _, _, _, _, _, _, _, _),
+ PINGROUP(61, _, _, _, _, _, _, _, _, _),
+ PINGROUP(62, _, _, _, _, _, _, _, _, _),
+ PINGROUP(63, atest_char3, dbg_out, bimc_dte0, _, _, _, _, _, _),
+ PINGROUP(64, _, _, _, _, _, _, _, _, _),
+ PINGROUP(65, bimc_dte0, _, _, _, _, _, _, _, _),
+ PINGROUP(66, sec_mi2s_mclk_b, pri_mi2s, _, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(67, atest_char1, ebi_cdc, _, atest_combodac_to_gpio_native,
+ _, _, _, _, _),
+ PINGROUP(68, atest_char0, _, _, _, _, _, _, _, _),
+ PINGROUP(69, audio_ref, cdc_pdm0, pri_mi2s_mclk_b, ebi_cdc, _, _, _,
+ _, _),
+ PINGROUP(70, lpass_slimbus, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(71, lpass_slimbus0, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(72, lpass_slimbus1, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(73, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(74, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(75, wcss_bt, atest_char2, _, ebi_ch0, _, _, _, _, _),
+ PINGROUP(76, wcss_wlan2, _, _, _, _, _, _, _, _),
+ PINGROUP(77, wcss_wlan1, _, _, _, _, _, _, _, _),
+ PINGROUP(78, wcss_wlan0, _, _, _, _, _, _, _, _),
+ PINGROUP(79, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(80, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(81, wcss_fm, ext_lpass, _, _, _, _, _, _, _),
+ PINGROUP(82, wcss_fm, cri_trng, _, _, _, _, _, _, _),
+ PINGROUP(83, wcss_bt, cri_trng1, _, _, _, _, _, _, _),
+ PINGROUP(84, wcss_bt, cri_trng0, _, _, _, _, _, _, _),
+ PINGROUP(85, pri_mi2s, blsp_spi7, blsp_uart7, _, _, _, _, _, _),
+ PINGROUP(86, pri_mi2s, blsp_spi7, blsp_uart7, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(87, pri_mi2s_ws, blsp_spi7, blsp_uart7, blsp_i2c7,
+ qdss_tracedata_b, gcc_tlmm, _, _, _),
+ PINGROUP(88, pri_mi2s, blsp_spi7, blsp_uart7, blsp_i2c7, _, _, _,
+ _, _),
+ PINGROUP(89, dmic0_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(90, dmic0_data, _, _, _, _, _, _, _, _),
+ PINGROUP(91, _, _, _, _, _, qdss_cti_trig_in_a1, _, _, _),
+ PINGROUP(92, _, _, _, _, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(93, _, _, _, _, _, _, _, _, _),
+ PINGROUP(94, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(95, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(96, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(97, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(98, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(99, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ PINGROUP(115, _, _, nav_pps_in_a, _, atest_combodac_to_gpio_native,
+ _, _, _, _),
+ PINGROUP(116, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(117, _, modem_tsync, nav_tsync, nav_pps_in_b, nav_pps, _,
+ _, _, _),
+ PINGROUP(118, _, ebi_cdc, _, _, _, _, _, _, _),
+ PINGROUP(119, gsm0_tx, _, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(120, _, atest_char, ebi_cdc, _, atest_tsens, _, _, _, _),
+ PINGROUP(121, _, _, _, bimc_dte1, _, _, _, _, _),
+ PINGROUP(122, _, ssbi_wtr1, _, _, bimc_dte1, _, _, _, _),
+ PINGROUP(123, _, ssbi_wtr1, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(124, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ PINGROUP(127, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ PINGROUP(130, blsp8_spi, _, _, _, _, _, _, _, _),
+ PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ PINGROUP(132, qdss_cti_trig_out_a0, _, _, _, _, _, _, _, _),
+ PINGROUP(133, qdss_cti_trig_out_a1, _, _, _, _, _, _, _, _),
+ SDC_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
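+/* npins covers GPIOs plus SDC/QDSD pads; only GPIOs 0-133 are muxable. */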
+static const struct msm_pinctrl_soc_data msm8917_pinctrl = {
+ .pins = msm8917_pins,
+ .npins = ARRAY_SIZE(msm8917_pins),
+ .functions = msm8917_functions,
+ .nfunctions = ARRAY_SIZE(msm8917_functions),
+ .groups = msm8917_groups,
+ .ngroups = ARRAY_SIZE(msm8917_groups),
+ .ngpios = 134,
+};
+
+static int msm8917_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8917_pinctrl);
+}
+
+static const struct of_device_id msm8917_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8917-pinctrl", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8917_pinctrl_of_match);
+
+static struct platform_driver msm8917_pinctrl_driver = {
+ .driver = {
+ .name = "msm8917-pinctrl",
+ .of_match_table = msm8917_pinctrl_of_match,
+ },
+ .probe = msm8917_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
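+/*
+ * Register at arch_initcall so the pin controller is available before
+ * client drivers start probing.
+ */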
+static int __init msm8917_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8917_pinctrl_driver);
+}
+arch_initcall(msm8917_pinctrl_init);
+
+static void __exit msm8917_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8917_pinctrl_driver);
+}
+module_exit(msm8917_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8917 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0c806b8128b6..c8ce61066070 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -14,6 +14,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spmi.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -702,7 +703,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
else
seq_printf(s, " %-4s",
pad->output_enabled ? "out" : "in");
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
seq_printf(s, " %-7s", pmic_gpio_functions[function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %-27s", biases[pad->pullup]);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 84de584cf7eb..7b28c5fb2402 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -544,7 +545,7 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %d", pad->aout_level);
if (pad->has_pullup)
seq_printf(s, " %-8s", biases[pad->pullup]);
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
if (pad->dtest)
seq_printf(s, " dtest%d", pad->dtest);
if (pad->paired)
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 2225dc49d477..82679417e25f 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -569,7 +570,7 @@ static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " VIN%d", pin->power_source);
seq_printf(s, " %-27s", biases[pin->bias]);
seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
- seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pin->output_value));
seq_printf(s, " %-7s", strengths[pin->output_strength]);
if (pin->inverted)
seq_puts(s, " inverted");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 9b1039c08aa6..4841bbfe4864 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -576,8 +577,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_puts(s, "out ");
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -589,8 +589,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
if (pin->output) {
seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -605,8 +604,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_printf(s, "dtest%d", pin->dtest);
} else {
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 7f3f41c7fe54..3c18d908b21e 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -41,6 +41,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
+ select PINCTRL_RZG2L if ARCH_R9A09G047
select PINCTRL_RZG2L if ARCH_R9A09G057
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 1df9cec2873f..ce4a07a3df49 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -26,6 +26,8 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h>
#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
#include "../core.h"
@@ -157,7 +159,7 @@
#define PWPR_REGWE_B BIT(5) /* OEN Register Write Enable, known only in RZ/V2H(P) */
#define PM_MASK 0x03
-#define PFC_MASK 0x07
+#define PFC_MASK 0x0f
#define IEN_MASK 0x01
#define IOLH_MASK 0x03
#define SR_MASK 0x01
@@ -381,13 +383,51 @@ static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
return 0;
}
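+/*
+ * Per-pin capability overrides for RZ/G3E ports whose pins do not all
+ * share the same configuration capabilities.
+ */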
+static const u64 r9a09g047_variable_pin_cfg[] = {
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 4, RZV2H_MPXED_PIN_FUNCS),
+};
+
static const u64 r9a09g057_variable_pin_cfg[] = {
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 0, RZV2H_MPXED_PIN_FUNCS),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
};
#ifdef CONFIG_RISCV
@@ -1962,6 +2002,73 @@ static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x2a, RZG3S_MPXED_PIN_FUNCS(A)), /* P18 */
};
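+/* Empty strings are placeholders for unpopulated ports (P9, PI, PN-PR). */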
+static const char * const rzg3e_gpio_names[] = {
+ "P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
+ "P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
+ "P20", "P21", "P22", "P23", "P24", "P25", "P26", "P27",
+ "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37",
+ "P40", "P41", "P42", "P43", "P44", "P45", "P46", "P47",
+ "P50", "P51", "P52", "P53", "P54", "P55", "P56", "P57",
+ "P60", "P61", "P62", "P63", "P64", "P65", "P66", "P67",
+ "P70", "P71", "P72", "P73", "P74", "P75", "P76", "P77",
+ "P80", "P81", "P82", "P83", "P84", "P85", "P86", "P87",
+ "", "", "", "", "", "", "", "",
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+ "PC0", "PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7",
+ "PD0", "PD1", "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PE0", "PE1", "PE2", "PE3", "PE4", "PE5", "PE6", "PE7",
+ "PF0", "PF1", "PF2", "PF3", "PF4", "PF5", "PF6", "PF7",
+ "PG0", "PG1", "PG2", "PG3", "PG4", "PG5", "PG6", "PG7",
+ "PH0", "PH1", "PH2", "PH3", "PH4", "PH5", "PH6", "PH7",
+ "", "", "", "", "", "", "", "",
+ "PJ0", "PJ1", "PJ2", "PJ3", "PJ4", "PJ5", "PJ6", "PJ7",
+ "PK0", "PK1", "PK2", "PK3", "PK4", "PK5", "PK6", "PK7",
+ "PL0", "PL1", "PL2", "PL3", "PL4", "PL5", "PL6", "PL7",
+ "PM0", "PM1", "PM2", "PM3", "PM4", "PM5", "PM6", "PM7",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "PS0", "PS1", "PS2", "PS3", "PS4", "PS5", "PS6", "PS7",
+};
+
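+/* 0x0 entries keep the index aligned with the port number for absent ports. */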
+static const u64 r9a09g047_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(8, 0x20, RZV2H_MPXED_PIN_FUNCS), /* P0 */
+ RZG2L_GPIO_PORT_PACK(8, 0x21, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P1 */
+ RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_COMMON_PIN_FUNCS(RZV2H) |
+ PIN_CFG_NOD), /* P2 */
+ RZG2L_GPIO_PORT_PACK(8, 0x23, RZV2H_MPXED_PIN_FUNCS), /* P3 */
+ RZG2L_GPIO_PORT_PACK(6, 0x24, RZV2H_MPXED_PIN_FUNCS), /* P4 */
+ RZG2L_GPIO_PORT_PACK(7, 0x25, RZV2H_MPXED_PIN_FUNCS), /* P5 */
+ RZG2L_GPIO_PORT_PACK(7, 0x26, RZV2H_MPXED_PIN_FUNCS), /* P6 */
+ RZG2L_GPIO_PORT_PACK(8, 0x27, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P7 */
+ RZG2L_GPIO_PORT_PACK(6, 0x28, RZV2H_MPXED_PIN_FUNCS), /* P8 */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2a), /* PA */
+ RZG2L_GPIO_PORT_PACK(8, 0x2b, RZV2H_MPXED_PIN_FUNCS), /* PB */
+ RZG2L_GPIO_PORT_PACK(3, 0x2c, RZV2H_MPXED_PIN_FUNCS), /* PC */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2d), /* PD */
+ RZG2L_GPIO_PORT_PACK(8, 0x2e, RZV2H_MPXED_PIN_FUNCS), /* PE */
+ RZG2L_GPIO_PORT_PACK(3, 0x2f, RZV2H_MPXED_PIN_FUNCS), /* PF */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x30), /* PG */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x31), /* PH */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(5, 0x33), /* PJ */
+ RZG2L_GPIO_PORT_PACK(4, 0x34, RZV2H_MPXED_PIN_FUNCS), /* PK */
+ RZG2L_GPIO_PORT_PACK(8, 0x35, RZV2H_MPXED_PIN_FUNCS), /* PL */
+ RZG2L_GPIO_PORT_PACK(8, 0x36, RZV2H_MPXED_PIN_FUNCS), /* PM */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ RZG2L_GPIO_PORT_PACK(4, 0x3c, RZV2H_MPXED_PIN_FUNCS), /* PS */
+};
+
static const char * const rzv2h_gpio_names[] = {
"P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
"P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
@@ -2085,6 +2192,8 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
PIN_CFG_SOFT_PS)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
+ { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x2, 0, PIN_CFG_IEN) },
+ { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x2, 1, PIN_CFG_IEN) },
{ "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0x6, 0, PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS) },
{ "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
@@ -2250,6 +2359,43 @@ static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
{ "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
};
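+/* Dedicated (non-multiplexed) pads on RZ/G3E. */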
+static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = {
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0PWEN", RZG2L_SINGLE_PIN_PACK(0x9, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0IOVS", RZG2L_SINGLE_PIN_PACK(0x9, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+};
+
static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
@@ -2760,6 +2906,9 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
BUILD_BUG_ON(ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
+ BUILD_BUG_ON(ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg3e_gpio_names));
+
BUILD_BUG_ON(ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzv2h_gpio_names));
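
For context, the BUILD_BUG_ON() added here turns the size relation between a SoC's packed port table and its name table into a compile-time invariant. A minimal, purely hypothetical sketch of the same check:

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_PINS_PER_PORT	8	/* hypothetical, stands in for RZG2L_PINS_PER_PORT */

static const char * const example_names[24];	/* hypothetical name table */
static const u64 example_configs[3];		/* hypothetical port table */

static inline void example_check_tables(void)
{
	/* Build fails if a packed port has no corresponding names. */
	BUILD_BUG_ON(ARRAY_SIZE(example_configs) * EXAMPLE_PINS_PER_PORT >
		     ARRAY_SIZE(example_names));
}
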
@@ -3158,6 +3307,29 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
+static struct rzg2l_pinctrl_data r9a09g047_data = {
+ .port_pins = rzg3e_gpio_names,
+ .port_pin_configs = r9a09g047_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g047_gpio_configs),
+ .dedicated_pins = rzg3e_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzg3e_dedicated_pins),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g047_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g047_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
+};
+
static struct rzg2l_pinctrl_data r9a09g057_data = {
.port_pins = rzv2h_gpio_names,
.port_pin_configs = r9a09g057_gpio_configs,
@@ -3195,6 +3367,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.data = &r9a08g045_data,
},
{
+ .compatible = "renesas,r9a09g047-pinctrl",
+ .data = &r9a09g047_data,
+ },
+ {
.compatible = "renesas,r9a09g057-pinctrl",
.data = &r9a09g057_data,
},
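
The port tables above rely on RZG2L_GPIO_PORT_PACK() to fold a port's pin count, register offset and PIN_CFG_* capability flags into a single u64 entry (0x0 marks a reserved port). A rough sketch of that packing idea; the field positions below are illustrative assumptions, not the driver's actual layout:

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical packing: pin count and offset in the high bits, flags low. */
#define EX_PORT_PACK(n, a, f) \
	(((u64)(n) << 28) | ((u64)(a) << 20) | ((u64)(f)))

#define EX_PORT_PINCNT(cfg)	(((cfg) >> 28) & GENMASK(3, 0))
#define EX_PORT_OFFSET(cfg)	(((cfg) >> 20) & GENMASK(7, 0))
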
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b79c211c0374..42093bae8bb7 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -23,6 +23,7 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
@@ -442,7 +443,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
- pr_info("wake %s for irq %u (%s-%lu)\n", on ? "enabled" : "disabled",
+ pr_info("wake %s for irq %u (%s-%lu)\n", str_enabled_disabled(on),
irqd->irq, bank->name, irqd->hwirq);
if (!on)
@@ -636,7 +637,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (clk_enable(b->drvdata->pclk)) {
dev_err(b->gpio_chip.parent,
"unable to enable clock for pending IRQs\n");
- return;
+ goto out;
}
}
@@ -652,6 +653,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (eintd->nr_banks)
clk_disable(eintd->banks[0]->drvdata->pclk);
+out:
chained_irq_exit(chip, desc);
}
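
The goto above exists because a chained handler must balance chained_irq_enter() with chained_irq_exit() on every path, including the clock-failure bail-out; returning early would leave the parent interrupt unacked. A condensed sketch of the required shape, with hypothetical helpers:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static bool example_enable_resources(void);	/* hypothetical, false on failure */
static void example_handle_pending(void);	/* hypothetical */
static void example_disable_resources(void);	/* hypothetical */

static void example_demux(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	if (!example_enable_resources())
		goto out;	/* never return without the exit below */

	example_handle_pending();
	example_disable_resources();
out:
	chained_irq_exit(chip, desc);
}
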
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index bbedd980ec67..cfced7afd4ca 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1172,7 +1172,7 @@ static void samsung_banks_node_get(struct device *dev, struct samsung_pinctrl_dr
else
dev_warn(dev, "Missing node for bank %s - invalid DTB\n",
bank->name);
- /* child reference dropped in samsung_drop_banks_of_node() */
+ /* child reference dropped in samsung_banks_node_put() */
}
}
@@ -1272,7 +1272,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
ret = platform_get_irq_optional(pdev, 0);
if (ret < 0 && ret != -ENXIO)
- return ret;
+ goto err_put_banks;
if (ret > 0)
drvdata->irq = ret;
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
index 168f8a5ffbb9..d6f6017fd097 100644
--- a/drivers/pinctrl/spacemit/Kconfig
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -4,9 +4,10 @@
#
config PINCTRL_SPACEMIT_K1
- tristate "SpacemiT K1 SoC Pinctrl driver"
+ bool "SpacemiT K1 SoC Pinctrl driver"
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on OF
+ default ARCH_SPACEMIT
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index a32579d73613..59fd555ff38d 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -1044,7 +1044,7 @@ static struct platform_driver k1_pinctrl_driver = {
.of_match_table = k1_pinctrl_ids,
},
};
-module_platform_driver(k1_pinctrl_driver);
+builtin_platform_driver(k1_pinctrl_driver);
MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC");
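
With the Kconfig symbol now bool, builtin_platform_driver() is the matching registration helper: it expands to a device_initcall() and omits the module exit path entirely. A minimal hypothetical skeleton of that pattern:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* hypothetical: map registers, register pinctrl */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-pinctrl",
	},
};
builtin_platform_driver(example_driver);
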
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 5b7fa77c1184..cc0b4d1d7cff 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -22,6 +22,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@@ -86,7 +87,6 @@ struct stm32_pinctrl_group {
struct stm32_gpio_bank {
void __iomem *base;
- struct clk *clk;
struct reset_control *rstc;
spinlock_t lock;
struct gpio_chip gpio_chip;
@@ -108,6 +108,7 @@ struct stm32_pinctrl {
unsigned ngroups;
const char **grp_names;
struct stm32_gpio_bank *banks;
+ struct clk_bulk_data *clks;
unsigned nbanks;
const struct stm32_pinctrl_match_data *match_data;
struct irq_domain *domain;
@@ -1217,7 +1218,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
case 0:
val = stm32_pconf_get(bank, offset, true);
seq_printf(s, "- %s - %s",
- val ? "high" : "low",
+ str_high_low(val),
biasing[bias]);
break;
@@ -1227,7 +1228,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
speed = stm32_pconf_get_speed(bank, offset);
val = stm32_pconf_get(bank, offset, false);
seq_printf(s, "- %s - %s - %s - %s %s",
- val ? "high" : "low",
+ str_high_low(val),
drive ? "open drain" : "push pull",
biasing[bias],
speeds[speed], "speed");
@@ -1308,12 +1309,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
if (IS_ERR(bank->base))
return PTR_ERR(bank->base);
- err = clk_prepare_enable(bank->clk);
- if (err) {
- dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
- return err;
- }
-
bank->gpio_chip = stm32_gpio_template;
fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label);
@@ -1360,26 +1355,20 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->fwnode, &stm32_gpio_domain_ops,
bank);
- if (!bank->domain) {
- err = -ENODEV;
- goto err_clk;
- }
+ if (!bank->domain)
+ return -ENODEV;
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
- if (!names) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names)
+ return -ENOMEM;
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name) {
names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
- if (!names[i]) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names[i])
+ return -ENOMEM;
} else {
names[i] = NULL;
}
@@ -1390,15 +1379,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
- goto err_clk;
+ return err;
}
dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
return 0;
-
-err_clk:
- clk_disable_unprepare(bank->clk);
- return err;
}
static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
@@ -1621,6 +1606,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (!pctl->banks)
return -ENOMEM;
+ pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks),
+ GFP_KERNEL);
+ if (!pctl->clks)
+ return -ENOMEM;
+
i = 0;
for_each_gpiochip_node(dev, child) {
struct stm32_gpio_bank *bank = &pctl->banks[i];
@@ -1632,24 +1622,27 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bank->clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(bank->clk)) {
+ pctl->clks[i].clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(pctl->clks[i].clk)) {
fwnode_handle_put(child);
- return dev_err_probe(dev, PTR_ERR(bank->clk),
+ return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk),
"failed to get clk\n");
}
+ pctl->clks[i].id = "pctl";
i++;
}
+ ret = clk_bulk_prepare_enable(banks, pctl->clks);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clk (%d)\n", ret);
+ return ret;
+ }
+
for_each_gpiochip_node(dev, child) {
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret) {
fwnode_handle_put(child);
-
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable_unprepare(pctl->banks[i].clk);
-
- return ret;
+ goto err_register;
}
pctl->nbanks++;
@@ -1658,6 +1651,15 @@ int stm32_pctl_probe(struct platform_device *pdev)
dev_info(dev, "Pinctrl STM32 initialized\n");
return 0;
+err_register:
+ for (i = 0; i < pctl->nbanks; i++) {
+ struct stm32_gpio_bank *bank = &pctl->banks[i];
+
+ gpiochip_remove(&bank->gpio_chip);
+ }
+
+ clk_bulk_disable_unprepare(banks, pctl->clks);
+ return ret;
}
static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
@@ -1726,10 +1728,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
- int i;
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable(pctl->banks[i].clk);
+ clk_bulk_disable(pctl->nbanks, pctl->clks);
return 0;
}
@@ -1738,10 +1738,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
struct stm32_pinctrl_group *g = pctl->groups;
- int i;
+ int i, ret;
- for (i = 0; i < pctl->nbanks; i++)
- clk_enable(pctl->banks[i].clk);
+ ret = clk_bulk_enable(pctl->nbanks, pctl->clks);
+ if (ret)
+ return ret;
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
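
The conversion above trades one struct clk per bank for a single clk_bulk_data array, so probe, the error path, suspend and resume each become one bulk call. A minimal sketch of the bulk pattern, assuming all clocks can be fetched in one lookup (the driver itself fills the array from of_clk_get_by_name() per bank):

#include <linux/clk.h>
#include <linux/device.h>

static int example_clks_on(struct device *dev, struct clk_bulk_data **out)
{
	struct clk_bulk_data *clks;
	int num, ret;

	/* Fetch every clock listed for the device in one call. */
	num = devm_clk_bulk_get_all(dev, &clks);
	if (num < 0)
		return num;

	/* One call prepares and enables the whole array. */
	ret = clk_bulk_prepare_enable(num, clks);
	if (ret)
		return ret;

	*out = clks;
	return num;
}
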
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index df90c75fb3c5..b97de80ae2f3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -256,72 +256,84 @@ static const struct sunxi_desc_pin a100_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3P */
SUNXI_FUNCTION(0x4, "dsi0"), /* DP3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3N */
SUNXI_FUNCTION(0x4, "dsi0"), /* DM3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0P */
SUNXI_FUNCTION(0x4, "spi1"), /* CS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0N */
SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1P */
SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1N */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2P */
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2N */
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3P */
SUNXI_FUNCTION(0x4, "uart4"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 18)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3N */
SUNXI_FUNCTION(0x4, "uart4"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 19)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index b7dbaf77b6db..1b2f2bd09662 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -237,12 +237,19 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_EC_TYPEC_ALTMODES
+ bool
+ help
+	  Selectable symbol to enable alternate mode support in cros-ec-typec.
+
config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
depends on CROS_USBPD_NOTIFY
depends on USB_ROLE_SWITCH
default MFD_CROS_EC_DEV
+ select CROS_EC_TYPEC_ALTMODES if TYPEC_DP_ALTMODE
+ select CROS_EC_TYPEC_ALTMODES if TYPEC_TBT_ALTMODE
help
If you say Y here, you get support for accessing Type C connector
information from the Chrome OS EC.
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index fb8335458a22..1a5a484563cc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -19,7 +19,11 @@ obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
obj-$(CONFIG_CROS_EC_UART) += cros_ec_uart.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
cros-ec-typec-objs := cros_ec_typec.o cros_typec_vdm.o
+ifneq ($(CONFIG_CROS_EC_TYPEC_ALTMODES),)
+ cros-ec-typec-objs += cros_typec_altmode.o
+endif
obj-$(CONFIG_CROS_EC_TYPEC) += cros-ec-typec.o
+
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index e821b3d39590..110771a8645e 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -204,6 +204,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
mutex_init(&ec_dev->lock);
lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);
+ /* Send RWSIG continue to jump to RW for devices using RWSIG. */
+ err = cros_ec_rwsig_continue(ec_dev);
+ if (err)
+ dev_info(dev, "Failed to continue RWSIG: %d\n", err);
+
err = cros_ec_query_all(ec_dev);
if (err) {
dev_err(dev, "Cannot identify the EC: error %d\n", err);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 62662ba5bf6e..38af97cdaab2 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -305,7 +305,8 @@ static int cros_ec_i2c_probe(struct i2c_client *client)
ec_dev->phys_name = client->adapter->name;
ec_dev->din_size = sizeof(struct ec_host_response_i2c) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request_i2c);
+ ec_dev->dout_size = sizeof(struct ec_host_request_i2c) +
+ sizeof(struct ec_params_rwsig_action);
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 5ac37bd024c8..7e7190b30cbb 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -557,7 +557,7 @@ static int cros_ec_dev_init(struct ishtp_cl_data *client_data)
ec_dev->phys_name = dev_name(dev);
ec_dev->din_size = sizeof(struct cros_ish_in_msg) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct cros_ish_out_msg);
+ ec_dev->dout_size = sizeof(struct cros_ish_out_msg) + sizeof(struct ec_params_rwsig_action);
return cros_ec_register(ec_dev);
}
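
Each transport bumps dout_size by sizeof(struct ec_params_rwsig_action) because cros_ec_register() now issues EC_CMD_RWSIG_ACTION before the protocol query, and the outgoing buffer must already have room for that payload. A sketch of the sizing rule, assuming the protocol-v3 host request header used by these transports:

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/types.h>

/* Hypothetical helper: headroom for the first command sent at register time. */
static inline size_t example_min_dout_size(void)
{
	return sizeof(struct ec_host_request) +
	       sizeof(struct ec_params_rwsig_action);
}
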
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 8470b7f2b135..5a2f1d98b350 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -70,13 +70,8 @@ struct lpc_driver_data {
/**
* struct cros_ec_lpc - LPC device-specific data
* @mmio_memory_base: The first I/O port addressing EC mapped memory.
- */
-struct cros_ec_lpc {
- u16 mmio_memory_base;
-};
-
-/**
- * struct lpc_driver_ops - LPC driver operations
+ * @base: For an EC that supports memory mapping, the base address of the mapped region.
+ * @mem32: Information about the memory mapped register region, if present.
* @read: Copy length bytes from EC address offset into buffer dest.
* Returns a negative error code on error, or the 8-bit checksum
* of all bytes read.
@@ -84,18 +79,21 @@ struct cros_ec_lpc {
* Returns a negative error code on error, or the 8-bit checksum
* of all bytes written.
*/
-struct lpc_driver_ops {
- int (*read)(unsigned int offset, unsigned int length, u8 *dest);
- int (*write)(unsigned int offset, unsigned int length, const u8 *msg);
+struct cros_ec_lpc {
+ u16 mmio_memory_base;
+ void __iomem *base;
+ struct acpi_resource_fixed_memory32 mem32;
+ int (*read)(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest);
+ int (*write)(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg);
};
-static struct lpc_driver_ops cros_ec_lpc_ops = { };
-
/*
* A generic instance of the read function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
+static int cros_ec_lpc_read_bytes(struct cros_ec_lpc *_, unsigned int offset, unsigned int length,
u8 *dest)
{
u8 sum = 0;
@@ -114,7 +112,7 @@ static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
* A generic instance of the write function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
+static int cros_ec_lpc_write_bytes(struct cros_ec_lpc *_, unsigned int offset, unsigned int length,
const u8 *msg)
{
u8 sum = 0;
@@ -133,8 +131,8 @@ static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
* An instance of the read function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
- u8 *dest)
+static int cros_ec_lpc_mec_read_bytes(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
@@ -145,15 +143,15 @@ static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
offset - EC_HOST_CMD_REGION0,
length, dest) :
- cros_ec_lpc_read_bytes(offset, length, dest);
+ cros_ec_lpc_read_bytes(ec_lpc, offset, length, dest);
}
/*
* An instance of the write function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
- const u8 *msg)
+static int cros_ec_lpc_mec_write_bytes(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
@@ -164,10 +162,50 @@ static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
offset - EC_HOST_CMD_REGION0,
length, (u8 *)msg) :
- cros_ec_lpc_write_bytes(offset, length, msg);
+ cros_ec_lpc_write_bytes(ec_lpc, offset, length, msg);
+}
+
+static int cros_ec_lpc_direct_read(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, u8 *dest)
+{
+ int sum = 0;
+ int i;
+
+ if (offset < EC_HOST_CMD_REGION0 || offset > EC_LPC_ADDR_MEMMAP +
+ EC_MEMMAP_SIZE) {
+ return cros_ec_lpc_read_bytes(ec_lpc, offset, length, dest);
+ }
+
+ for (i = 0; i < length; ++i) {
+ dest[i] = readb(ec_lpc->base + offset - EC_HOST_CMD_REGION0 + i);
+ sum += dest[i];
+ }
+
+ /* Return checksum of all bytes read */
+ return sum;
}
-static int ec_response_timed_out(void)
+static int cros_ec_lpc_direct_write(struct cros_ec_lpc *ec_lpc, unsigned int offset,
+ unsigned int length, const u8 *msg)
+{
+ int sum = 0;
+ int i;
+
+ if (offset < EC_HOST_CMD_REGION0 || offset > EC_LPC_ADDR_MEMMAP +
+ EC_MEMMAP_SIZE) {
+ return cros_ec_lpc_write_bytes(ec_lpc, offset, length, msg);
+ }
+
+ for (i = 0; i < length; ++i) {
+ writeb(msg[i], ec_lpc->base + offset - EC_HOST_CMD_REGION0 + i);
+ sum += msg[i];
+ }
+
+ /* Return checksum of all bytes written */
+ return sum;
+}
+
+static int ec_response_timed_out(struct cros_ec_lpc *ec_lpc)
{
unsigned long one_second = jiffies + HZ;
u8 data;
@@ -175,7 +213,7 @@ static int ec_response_timed_out(void)
usleep_range(200, 300);
do {
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &data);
if (ret < 0)
return ret;
if (!(data & EC_LPC_STATUS_BUSY_MASK))
@@ -189,6 +227,7 @@ static int ec_response_timed_out(void)
static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
struct ec_host_response response;
u8 sum;
int ret = 0;
@@ -199,17 +238,17 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
goto done;
/* Write buffer */
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
if (ret < 0)
goto done;
/* Here we go */
sum = EC_COMMAND_PROTOCOL_3;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &sum);
if (ret < 0)
goto done;
- ret = ec_response_timed_out();
+ ret = ec_response_timed_out(ec_lpc);
if (ret < 0)
goto done;
if (ret) {
@@ -219,7 +258,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
}
/* Check result */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_DATA, 1, &sum);
if (ret < 0)
goto done;
msg->result = ret;
@@ -229,7 +268,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
/* Read back response */
dout = (u8 *)&response;
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PACKET, sizeof(response),
dout);
if (ret < 0)
goto done;
@@ -246,7 +285,7 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
}
/* Read response and process checksum */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PACKET +
sizeof(response), response.data_len,
msg->data);
if (ret < 0)
@@ -270,6 +309,7 @@ done:
static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
struct ec_lpc_host_args args;
u8 sum;
int ret = 0;
@@ -291,7 +331,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Copy data and update checksum */
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_PARAM, msg->outsize,
msg->data);
if (ret < 0)
goto done;
@@ -299,18 +339,18 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
/* Finalize checksum and write args */
args.checksum = sum;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_ARGS, sizeof(args),
(u8 *)&args);
if (ret < 0)
goto done;
/* Here we go */
sum = msg->command;
- ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = ec_lpc->write(ec_lpc, EC_LPC_ADDR_HOST_CMD, 1, &sum);
if (ret < 0)
goto done;
- ret = ec_response_timed_out();
+ ret = ec_response_timed_out(ec_lpc);
if (ret < 0)
goto done;
if (ret) {
@@ -320,7 +360,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
}
/* Check result */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_DATA, 1, &sum);
if (ret < 0)
goto done;
msg->result = ret;
@@ -329,7 +369,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
goto done;
/* Read back args */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
if (ret < 0)
goto done;
@@ -345,7 +385,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Read response and update checksum */
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_HOST_PARAM, args.data_size,
msg->data);
if (ret < 0)
goto done;
@@ -381,7 +421,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* fixed length */
if (bytes) {
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + offset, bytes, s);
if (ret < 0)
return ret;
return bytes;
@@ -389,7 +429,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* string */
for (; i < EC_MEMMAP_SIZE; i++, s++) {
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + i, 1, s);
if (ret < 0)
return ret;
cnt++;
@@ -419,7 +459,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
return;
}
- if (ec_dev->mkbp_event_supported)
+ if (value == ACPI_NOTIFY_CROS_EC_MKBP && ec_dev->mkbp_event_supported)
do {
ret = cros_ec_get_next_event(ec_dev, NULL,
&ec_has_more_events);
@@ -453,6 +493,20 @@ static struct acpi_device *cros_ec_lpc_get_device(const char *id)
return adev;
}
+static acpi_status cros_ec_lpc_resources(struct acpi_resource *res, void *data)
+{
+ struct cros_ec_lpc *ec_lpc = data;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ ec_lpc->mem32 = res->data.fixed_memory32;
+ break;
+ default:
+ break;
+ }
+ return AE_OK;
+}
+
static int cros_ec_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -492,8 +546,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
if (quirks & CROS_EC_LPC_QUIRK_AML_MUTEX) {
- const char *name
- = driver_data->quirk_aml_mutex_name;
+ const char *name = driver_data->quirk_aml_mutex_name;
ret = cros_ec_lpc_mec_acpi_mutex(ACPI_COMPANION(dev), name);
if (ret) {
dev_err(dev, "failed to get AML mutex '%s'", name);
@@ -502,30 +555,49 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
dev_info(dev, "got AML mutex '%s'", name);
}
}
-
- /*
- * The Framework Laptop (and possibly other non-ChromeOS devices)
- * only exposes the eight I/O ports that are required for the Microchip EC.
- * Requesting a larger reservation will fail.
- */
- if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
- EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
- dev_err(dev, "couldn't reserve MEC region\n");
- return -EBUSY;
+ adev = ACPI_COMPANION(dev);
+ if (adev) {
+ /*
+ * Retrieve the resource information in the CRS register, if available.
+ */
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ cros_ec_lpc_resources, ec_lpc);
+ if (ACPI_SUCCESS(status) && ec_lpc->mem32.address_length) {
+ ec_lpc->base = devm_ioremap(dev,
+ ec_lpc->mem32.address,
+ ec_lpc->mem32.address_length);
+ if (!ec_lpc->base)
+ return -EINVAL;
+
+ ec_lpc->read = cros_ec_lpc_direct_read;
+ ec_lpc->write = cros_ec_lpc_direct_write;
+ }
}
+ if (!ec_lpc->read) {
+ /*
+ * The Framework Laptop (and possibly other non-ChromeOS devices)
+ * only exposes the eight I/O ports that are required for the Microchip EC.
+ * Requesting a larger reservation will fail.
+ */
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
+ EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve MEC region\n");
+ return -EBUSY;
+ }
- cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
- EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
+ cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
+ EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
- /*
- * Read the mapped ID twice, the first one is assuming the
- * EC is a Microchip Embedded Controller (MEC) variant, if the
- * protocol fails, fallback to the non MEC variant and try to
- * read again the ID.
- */
- cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes;
- cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
- ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
+ /*
+ * Read the mapped ID twice: the first read assumes the
+ * EC is a Microchip Embedded Controller (MEC) variant; if
+ * that protocol fails, fall back to the non-MEC variant and
+ * read the ID again.
+ */
+ ec_lpc->read = cros_ec_lpc_mec_read_bytes;
+ ec_lpc->write = cros_ec_lpc_mec_write_bytes;
+ }
+ ret = ec_lpc->read(ec_lpc, EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
if (ret < 0)
return ret;
if (buf[0] != 'E' || buf[1] != 'C') {
@@ -536,9 +608,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
/* Re-assign read/write operations for the non MEC variant */
- cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
- cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
- ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
+ ec_lpc->read = cros_ec_lpc_read_bytes;
+ ec_lpc->write = cros_ec_lpc_write_bytes;
+ ret = ec_lpc->read(ec_lpc, ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
buf);
if (ret < 0)
return ret;
@@ -573,7 +645,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_dev->cmd_readmem = cros_ec_lpc_readmem;
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_dev->priv = ec_lpc;
/*
@@ -598,7 +670,6 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
* Connect a notify handler to process MKBP messages if we have a
* companion ACPI device.
*/
- adev = ACPI_COMPANION(dev);
if (adev) {
status = acpi_install_notify_handler(adev->handle,
ACPI_ALL_NOTIFY,
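
The probe now prefers a memory-mapped window advertised in the companion device's _CRS over legacy port I/O. A condensed, hypothetical restatement of that selection (error handling and the MEC fallback trimmed):

#include <linux/acpi.h>
#include <linux/io.h>

static int example_pick_access(struct device *dev, struct acpi_device *adev,
			       struct cros_ec_lpc *ec_lpc)
{
	acpi_status status;

	status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
				     cros_ec_lpc_resources, ec_lpc);
	if (ACPI_SUCCESS(status) && ec_lpc->mem32.address_length) {
		ec_lpc->base = devm_ioremap(dev, ec_lpc->mem32.address,
					    ec_lpc->mem32.address_length);
		if (!ec_lpc->base)
			return -EINVAL;

		/* MMIO fast path: readb()/writeb() instead of port I/O. */
		ec_lpc->read = cros_ec_lpc_direct_read;
		ec_lpc->write = cros_ec_lpc_direct_write;
	}

	return 0;	/* !ec_lpc->read means: fall back to MEC/port I/O */
}
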
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 5c9a53dffcf9..877b107fee4b 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -15,6 +15,8 @@
#include "cros_ec_trace.h"
#define EC_COMMAND_RETRIES 50
+#define RWSIG_CONTINUE_RETRIES 8
+#define RWSIG_CONTINUE_MAX_ERRORS_IN_ROW 3
static const int cros_ec_error_map[] = {
[EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
@@ -288,6 +290,64 @@ exit:
return ret;
}
+int cros_ec_rwsig_continue(struct cros_ec_device *ec_dev)
+{
+ struct cros_ec_command *msg;
+ struct ec_params_rwsig_action *rwsig_action;
+ int ret = 0;
+ int error_count = 0;
+
+ ec_dev->proto_version = 3;
+
+ msg = kmalloc(sizeof(*msg) + sizeof(*rwsig_action), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_RWSIG_ACTION;
+ msg->insize = 0;
+ msg->outsize = sizeof(*rwsig_action);
+
+ rwsig_action = (struct ec_params_rwsig_action *)msg->data;
+ rwsig_action->action = RWSIG_ACTION_CONTINUE;
+
+ for (int i = 0; i < RWSIG_CONTINUE_RETRIES; i++) {
+ ret = cros_ec_send_command(ec_dev, msg);
+
+ if (ret < 0) {
+ if (++error_count >= RWSIG_CONTINUE_MAX_ERRORS_IN_ROW)
+ break;
+ } else if (msg->result == EC_RES_INVALID_COMMAND) {
+ /*
+ * If EC_RES_INVALID_COMMAND is returned, it means RWSIG
+ * is not supported or EC is already in RW, so there is
+ * nothing left to do.
+ */
+ break;
+ } else if (msg->result != EC_RES_SUCCESS) {
+ /* Unexpected command error. */
+ ret = cros_ec_map_error(msg->result);
+ break;
+ } else {
+ /*
+ * The EC_CMD_RWSIG_ACTION succeeded. Send the command
+ * a few more times to make sure the EC is in RW. A
+ * subsequent command can time out, because the EC may need
+ * some time to initialize after the jump to RW.
+ */
+ error_count = 0;
+ }
+
+ if (ret != -ETIMEDOUT)
+ usleep_range(90000, 100000);
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_rwsig_continue);
+
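
The loop above is deliberately tolerant while the EC jumps from RO to RW: consecutive errors are capped, any success refills the error budget, and EC_RES_INVALID_COMMAND ends the loop early because RWSIG is either unsupported or already past. The same consecutive-error-budget pattern distilled, with a hypothetical send callback:

/* Hypothetical retry skeleton: give up after max_err failures in a
 * row, but keep probing (up to retries) while sends succeed. */
static int example_retry(int (*send)(void), int retries, int max_err)
{
	int err_in_row = 0, ret = 0;

	for (int i = 0; i < retries; i++) {
		ret = send();
		if (ret < 0) {
			if (++err_in_row >= max_err)
				break;
		} else {
			err_in_row = 0;	/* success refills the budget */
		}
	}

	return ret;
}
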
static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
struct cros_ec_command *msg;
@@ -306,15 +366,6 @@ static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
msg->insize = sizeof(*info);
ret = cros_ec_send_command(ec_dev, msg);
- /*
- * Send command once again when timeout occurred.
- * Fingerprint MCU (FPMCU) is restarted during system boot which
- * introduces small window in which FPMCU won't respond for any
- * messages sent by kernel. There is no need to wait before next
- * attempt because we waited at least EC_MSG_DEADLINE_MS.
- */
- if (ret == -ETIMEDOUT)
- ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 39d3b50a7c09..bc2666491db1 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -231,7 +231,7 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
ec_dev->phys_name = dev_name(&rpdev->dev);
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
dev_set_drvdata(dev, ec_dev);
ec_rpmsg->rpdev = rpdev;
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 86a3d32a7763..8ca0f854e7ac 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -715,7 +715,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
int err;
ec_spi->high_pri_worker =
- kthread_create_worker(0, "cros_ec_spi_high_pri");
+ kthread_run_worker(0, "cros_ec_spi_high_pri");
if (IS_ERR(ec_spi->high_pri_worker)) {
err = PTR_ERR(ec_spi->high_pri_worker);
@@ -766,7 +766,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
ec_dev->din_size = EC_MSG_PREAMBLE_COUNT +
sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_spi->last_transfer_ns = ktime_get_ns();
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 425e9441b7ca..9827b3117597 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -122,8 +122,10 @@
TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_READ_MSG), \
TRACE_SYMBOL(EC_CMD_CEC_SET), \
TRACE_SYMBOL(EC_CMD_CEC_GET), \
+ TRACE_SYMBOL(EC_CMD_CEC_PORT_COUNT), \
TRACE_SYMBOL(EC_CMD_EC_CODEC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
@@ -161,11 +163,18 @@
TRACE_SYMBOL(EC_CMD_ADC_READ), \
TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
TRACE_SYMBOL(EC_CMD_AP_RESET), \
+ TRACE_SYMBOL(EC_CMD_PCHG_COUNT), \
+ TRACE_SYMBOL(EC_CMD_PCHG), \
+ TRACE_SYMBOL(EC_CMD_PCHG_UPDATE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \
TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \
TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \
TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_DISCOVERY), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TYPEC_VDM_RESPONSE), \
TRACE_SYMBOL(EC_CMD_CR51_BASE), \
TRACE_SYMBOL(EC_CMD_CR51_LAST), \
TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
@@ -184,6 +193,7 @@
TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_ACK), \
TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index ae2f86296954..6ee182101bc9 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -18,6 +18,7 @@
#include "cros_ec_typec.h"
#include "cros_typec_vdm.h"
+#include "cros_typec_altmode.h"
#define DRV_NAME "cros-ec-typec"
@@ -290,30 +291,32 @@ static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
+ memset(&desc, 0, sizeof(desc));
desc.svid = USB_TYPEC_DP_SID;
desc.mode = USB_TYPEC_DP_MODE;
desc.vdo = DP_PORT_VDO;
- amode = typec_port_register_altmode(port->port, &desc);
+ amode = cros_typec_register_displayport(port, &desc,
+ typec->ap_driven_altmode);
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
- typec_altmode_set_drvdata(amode, port);
- amode->ops = &port_amode_ops;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
- * if it doesn't support it, so it's safe to register it unconditionally
- * here for now.
+ * if it doesn't support it, and by design it will not enter the mode
+ * automatically, so we can use the |ap_driven_altmode| feature to
+ * decide whether to register it.
*/
- memset(&desc, 0, sizeof(desc));
- desc.svid = USB_TYPEC_TBT_SID;
- desc.mode = TYPEC_ANY_MODE;
- amode = typec_port_register_altmode(port->port, &desc);
- if (IS_ERR(amode))
- return PTR_ERR(amode);
- port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
- typec_altmode_set_drvdata(amode, port);
- amode->ops = &port_amode_ops;
+ if (typec->ap_driven_altmode) {
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID;
+ desc.mode = TBT_MODE;
+ desc.inactive = true;
+ amode = cros_typec_register_thunderbolt(port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
+ }
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
@@ -576,6 +579,10 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
if (!ret)
ret = typec_mux_set(port->mux, &port->state);
+ if (!ret)
+ ret = cros_typec_displayport_status_update(port->state.alt,
+ port->state.data);
+
return ret;
}
@@ -619,6 +626,7 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
};
struct ec_params_usb_pd_mux_ack mux_ack;
enum typec_orientation orientation;
+ struct cros_typec_altmode_node *node;
int ret;
ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
@@ -677,6 +685,14 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
port->mux_flags);
}
+ /* Iterate all partner alt-modes and set the active alternate mode. */
+ list_for_each_entry(node, &port->partner_mode_list, list) {
+ typec_altmode_update_active(
+ node->amode,
+ port->state.alt &&
+ node->amode->svid == port->state.alt->svid);
+ }
+
mux_ack:
if (!typec->needs_mux_ack)
return ret;
@@ -1244,6 +1260,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+ typec->ap_driven_altmode = cros_ec_check_features(
+ ec_dev, EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY);
ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
diff --git a/drivers/platform/chrome/cros_ec_typec.h b/drivers/platform/chrome/cros_ec_typec.h
index deda180a646f..9fd5342bb0ad 100644
--- a/drivers/platform/chrome/cros_ec_typec.h
+++ b/drivers/platform/chrome/cros_ec_typec.h
@@ -39,6 +39,7 @@ struct cros_typec_data {
struct work_struct port_work;
bool typec_cmd_supported;
bool needs_mux_ack;
+ bool ap_driven_altmode;
};
/* Per port data. */
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 62bc24f6dcc7..19c179d49c90 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -283,7 +283,7 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
ec_dev->pkt_xfer = cros_ec_uart_pkt_xfer;
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 7bdb489354c5..963c4db23055 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -15,7 +15,7 @@
#define DRV_NAME "cros-ec-vbc"
static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *att, char *buf,
+ const struct bin_attribute *att, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -59,7 +59,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -99,16 +99,16 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
return data_sz;
}
-static BIN_ATTR_RW(vboot_context, 16);
+static const BIN_ATTR_RW(vboot_context, 16);
-static struct bin_attribute *cros_ec_vbc_bin_attrs[] = {
+static const struct bin_attribute *const cros_ec_vbc_bin_attrs[] = {
&bin_attr_vboot_context,
NULL
};
static const struct attribute_group cros_ec_vbc_attr_group = {
.name = "vbc",
- .bin_attrs = cros_ec_vbc_bin_attrs,
+ .bin_attrs_new = cros_ec_vbc_bin_attrs,
};
static int cros_ec_vbc_probe(struct platform_device *pd)
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index 78097c8a4966..fc27bd7fc4b9 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -121,7 +121,17 @@ static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
#endif /* CONFIG_ACPI */
-#if IS_ENABLED(CONFIG_CROS_EC)
+#if IS_ENABLED(CONFIG_MFD_CROS_EC_DEV)
+static int keyboard_led_init_ec_pwm_mfd(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = cros_ec;
+
+ return 0;
+}
static int
keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
@@ -169,44 +179,6 @@ keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
return resp->percent;
}
-static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
-{
- struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
-
- keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
- if (!keyboard_led->ec) {
- dev_err(&pdev->dev, "no parent EC device\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
- .init = keyboard_led_init_ec_pwm,
- .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
- .brightness_get = keyboard_led_get_brightness_ec_pwm,
- .max_brightness = KEYBOARD_BACKLIGHT_MAX,
-};
-
-#else /* IS_ENABLED(CONFIG_CROS_EC) */
-
-static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
-
-#endif /* IS_ENABLED(CONFIG_CROS_EC) */
-
-#if IS_ENABLED(CONFIG_MFD_CROS_EC_DEV)
-static int keyboard_led_init_ec_pwm_mfd(struct platform_device *pdev)
-{
- struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
- struct cros_ec_device *cros_ec = ec_dev->ec_dev;
- struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
-
- keyboard_led->ec = cros_ec;
-
- return 0;
-}
-
static const struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm_mfd = {
.init = keyboard_led_init_ec_pwm_mfd,
.brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
@@ -229,7 +201,7 @@ static int keyboard_led_probe(struct platform_device *pdev)
{
const struct keyboard_led_drvdata *drvdata;
struct keyboard_led *keyboard_led;
- int error;
+ int err;
if (keyboard_led_is_mfd_device(pdev))
drvdata = &keyboard_led_drvdata_ec_pwm_mfd;
@@ -244,9 +216,9 @@ static int keyboard_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keyboard_led);
if (drvdata->init) {
- error = drvdata->init(pdev);
- if (error)
- return error;
+ err = drvdata->init(pdev);
+ if (err)
+ return err;
}
keyboard_led->cdev.name = "chromeos::kbd_backlight";
@@ -256,13 +228,10 @@ static int keyboard_led_probe(struct platform_device *pdev)
keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
keyboard_led->cdev.brightness_get = drvdata->brightness_get;
- error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
- if (error == -EEXIST) /* Already bound via other mechanism */
+ err = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
+ if (err == -EEXIST) /* Already bound via other mechanism */
return -ENODEV;
- if (error)
- return error;
-
- return 0;
+ return err;
}
#ifdef CONFIG_ACPI
@@ -273,17 +242,6 @@ static const struct acpi_device_id keyboard_led_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id keyboard_led_of_match[] = {
- {
- .compatible = "google,cros-kbd-led-backlight",
- .data = &keyboard_led_drvdata_ec_pwm,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
-#endif
-
static const struct platform_device_id keyboard_led_id[] = {
{ "cros-keyboard-leds", 0 },
{}
@@ -294,7 +252,6 @@ static struct platform_driver keyboard_led_driver = {
.driver = {
.name = "cros-keyboard-leds",
.acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
- .of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
.id_table = keyboard_led_id,
diff --git a/drivers/platform/chrome/cros_typec_altmode.c b/drivers/platform/chrome/cros_typec_altmode.c
new file mode 100644
index 000000000000..557340b53af0
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_altmode.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Alt-mode implementation on ChromeOS EC.
+ *
+ * Copyright 2024 Google LLC
+ * Author: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
+ */
+#include "cros_ec_typec.h"
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/pd_vdo.h>
+
+#include "cros_typec_altmode.h"
+
+struct cros_typec_altmode_data {
+ struct work_struct work;
+ struct cros_typec_port *port;
+ struct typec_altmode *alt;
+ bool ap_mode_entry;
+
+ struct mutex lock;
+ u32 header;
+ u32 *vdo_data;
+ u8 vdo_size;
+
+ u16 sid;
+ u8 mode;
+};
+
+struct cros_typec_dp_data {
+ struct cros_typec_altmode_data adata;
+ struct typec_displayport_data data;
+ bool configured;
+ bool pending_status_update;
+};
+
+static void cros_typec_altmode_work(struct work_struct *work)
+{
+ struct cros_typec_altmode_data *data =
+ container_of(work, struct cros_typec_altmode_data, work);
+
+ mutex_lock(&data->lock);
+
+ if (typec_altmode_vdm(data->alt, data->header, data->vdo_data,
+ data->vdo_size))
+ dev_err(&data->alt->dev, "VDM 0x%x failed\n", data->header);
+
+ data->header = 0;
+ data->vdo_data = NULL;
+ data->vdo_size = 0;
+
+ mutex_unlock(&data->lock);
+}
+
+static int cros_typec_altmode_enter(struct typec_altmode *alt, u32 *vdo)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+ struct ec_params_typec_control req = {
+ .port = adata->port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ };
+ int svdm_version;
+ int ret;
+
+ if (!adata->ap_mode_entry) {
+ dev_warn(&alt->dev,
+ "EC does not support AP driven mode entry\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (adata->sid == USB_TYPEC_DP_SID)
+ req.mode_to_enter = CROS_EC_ALTMODE_DP;
+ else if (adata->sid == USB_TYPEC_TBT_SID)
+ req.mode_to_enter = CROS_EC_ALTMODE_TBT;
+ else
+ return -EOPNOTSUPP;
+
+ ret = cros_ec_cmd(adata->port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ adata->header = VDO(adata->sid, 1, svdm_version, CMD_ENTER_MODE);
+ adata->header |= VDO_OPOS(adata->mode);
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = NULL;
+ adata->vdo_size = 1;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+ return ret;
+}
+
+static int cros_typec_altmode_exit(struct typec_altmode *alt)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+ struct ec_params_typec_control req = {
+ .port = adata->port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ };
+ int svdm_version;
+ int ret;
+
+ if (!adata->ap_mode_entry) {
+ dev_warn(&alt->dev,
+ "EC does not support AP driven mode exit\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = cros_ec_cmd(adata->port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+
+ if (ret < 0)
+ return ret;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ adata->header = VDO(adata->sid, 1, svdm_version, CMD_EXIT_MODE);
+ adata->header |= VDO_OPOS(adata->mode);
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = NULL;
+ adata->vdo_size = 1;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+ return ret;
+}
+
+static int cros_typec_displayport_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_dp_data *dp_data = typec_altmode_get_drvdata(alt);
+ struct cros_typec_altmode_data *adata = &dp_data->adata;
+
+ int cmd_type = PD_VDO_CMDT(header);
+ int cmd = PD_VDO_CMD(header);
+ int svdm_version;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ if (PD_VDO_SVDM_VER(header) < svdm_version) {
+ typec_partner_set_svdm_version(adata->port->partner,
+ PD_VDO_SVDM_VER(header));
+ svdm_version = PD_VDO_SVDM_VER(header);
+ }
+
+ adata->header = VDO(adata->sid, 1, svdm_version, cmd);
+ adata->header |= VDO_OPOS(adata->mode);
+
+ /*
+ * DP_CMD_CONFIGURE: We can't actually do anything with the
+ * provided VDO yet, so just send back an ACK.
+ *
+ * DP_CMD_STATUS_UPDATE: We wait for mux changes before
+ * sending DPStatus ACKs.
+ */
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp_data->data.conf = *data;
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ dp_data->configured = true;
+ schedule_work(&adata->work);
+ break;
+ case DP_CMD_STATUS_UPDATE:
+ dp_data->pending_status_update = true;
+ break;
+ default:
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ schedule_work(&adata->work);
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&adata->lock);
+ return 0;
+}
+
+static int cros_typec_thunderbolt_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+
+ int cmd_type = PD_VDO_CMDT(header);
+ int cmd = PD_VDO_CMD(header);
+ int svdm_version;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ if (PD_VDO_SVDM_VER(header) < svdm_version) {
+ typec_partner_set_svdm_version(adata->port->partner,
+ PD_VDO_SVDM_VER(header));
+ svdm_version = PD_VDO_SVDM_VER(header);
+ }
+
+ adata->header = VDO(adata->sid, 1, svdm_version, cmd);
+ adata->header |= VDO_OPOS(adata->mode);
+
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /* Don't respond to the enter mode vdm because it
+ * triggers mux configuration. This is handled directly
+ * by the cros_ec_typec driver so the Thunderbolt driver
+ * doesn't need to be involved.
+ */
+ break;
+ default:
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ schedule_work(&adata->work);
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&adata->lock);
+ return 0;
+}
+
+static int cros_typec_altmode_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+
+ if (!adata->ap_mode_entry)
+ return -EOPNOTSUPP;
+
+ if (adata->sid == USB_TYPEC_DP_SID)
+ return cros_typec_displayport_vdm(alt, header, data, count);
+
+ if (adata->sid == USB_TYPEC_TBT_SID)
+ return cros_typec_thunderbolt_vdm(alt, header, data, count);
+
+ return -EINVAL;
+}
+
+static const struct typec_altmode_ops cros_typec_altmode_ops = {
+ .enter = cros_typec_altmode_enter,
+ .exit = cros_typec_altmode_exit,
+ .vdm = cros_typec_altmode_vdm,
+};
+
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data)
+{
+ struct cros_typec_dp_data *dp_data =
+ typec_altmode_get_drvdata(altmode);
+ struct cros_typec_altmode_data *adata = &dp_data->adata;
+
+ if (!dp_data->pending_status_update) {
+ dev_dbg(&altmode->dev,
+ "Got DPStatus without a pending request\n");
+ return 0;
+ }
+
+ if (dp_data->configured && dp_data->data.conf != data->conf)
+ dev_dbg(&altmode->dev,
+ "DP Conf doesn't match. Requested 0x%04x, Actual 0x%04x\n",
+ dp_data->data.conf, data->conf);
+
+ mutex_lock(&adata->lock);
+
+ dp_data->data = *data;
+ dp_data->pending_status_update = false;
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = &dp_data->data.status;
+ adata->vdo_size = 2;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+
+ return 0;
+}
+
+struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry)
+{
+ struct typec_altmode *alt;
+ struct cros_typec_dp_data *dp_data;
+ struct cros_typec_altmode_data *adata;
+
+ alt = typec_port_register_altmode(port->port, desc);
+ if (IS_ERR(alt))
+ return alt;
+
+ dp_data = devm_kzalloc(&alt->dev, sizeof(*dp_data), GFP_KERNEL);
+ if (!dp_data) {
+ typec_unregister_altmode(alt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ adata = &dp_data->adata;
+ INIT_WORK(&adata->work, cros_typec_altmode_work);
+ mutex_init(&adata->lock);
+ adata->alt = alt;
+ adata->port = port;
+ adata->ap_mode_entry = ap_mode_entry;
+ adata->sid = desc->svid;
+ adata->mode = desc->mode;
+
+ typec_altmode_set_ops(alt, &cros_typec_altmode_ops);
+ typec_altmode_set_drvdata(alt, adata);
+
+ return alt;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_TYPEC_TBT_ALTMODE)
+struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc)
+{
+ struct typec_altmode *alt;
+ struct cros_typec_altmode_data *adata;
+
+ alt = typec_port_register_altmode(port->port, desc);
+ if (IS_ERR(alt))
+ return alt;
+
+ adata = devm_kzalloc(&alt->dev, sizeof(*adata), GFP_KERNEL);
+ if (!adata) {
+ typec_unregister_altmode(alt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&adata->work, cros_typec_altmode_work);
+ adata->alt = alt;
+ adata->port = port;
+ adata->ap_mode_entry = true;
+ adata->sid = desc->svid;
+ adata->mode = desc->mode;
+
+ typec_altmode_set_ops(alt, &cros_typec_altmode_ops);
+ typec_altmode_set_drvdata(alt, adata);
+
+ return alt;
+}
+#endif
diff --git a/drivers/platform/chrome/cros_typec_altmode.h b/drivers/platform/chrome/cros_typec_altmode.h
new file mode 100644
index 000000000000..3f2aa95d065a
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_altmode.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __CROS_TYPEC_ALTMODE_H__
+#define __CROS_TYPEC_ALTMODE_H__
+
+#include <linux/kconfig.h>
+#include <linux/usb/typec.h>
+
+struct cros_typec_port;
+struct typec_altmode;
+struct typec_altmode_desc;
+struct typec_displayport_data;
+
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry);
+
+int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data);
+#else
+static inline struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry)
+{
+ return typec_port_register_altmode(port->port, desc);
+}
+
+static inline int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_TYPEC_TBT_ALTMODE)
+struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc);
+#else
+static inline struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc)
+{
+ return typec_port_register_altmode(port->port, desc);
+}
+#endif
+
+#endif /* __CROS_TYPEC_ALTMODE_H__ */
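For context, a minimal sketch of how a port driver might consume these registration helpers when walking the discovered SVIDs. The wrapper function, the desc initialisation and the hard-coded ap_mode_entry value below are illustrative assumptions, not part of this patch; only the three registration calls and the SID macros come from the code above.

#include <linux/err.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_tbt.h>

#include "cros_typec_altmode.h"

/* Hypothetical helper: pick the right registration path for one SVID. */
static int example_register_altmode(struct cros_typec_port *port,
				    u16 svid, u8 mode, u32 vdo)
{
	struct typec_altmode_desc desc = {
		.svid = svid,
		.mode = mode,
		.vdo = vdo,
	};
	struct typec_altmode *amode;

	if (svid == USB_TYPEC_DP_SID)
		/* ap_mode_entry would normally come from EC feature flags */
		amode = cros_typec_register_displayport(port, &desc, true);
	else if (svid == USB_TYPEC_TBT_SID)
		amode = cros_typec_register_thunderbolt(port, &desc);
	else
		amode = typec_port_register_altmode(port->port, &desc);

	return PTR_ERR_OR_ZERO(amode);
}

When the corresponding Kconfig option is disabled, the stubs above fall back to a plain typec_port_register_altmode(), so callers need no conditional compilation of their own.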
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index cd71f1caea81..7ce75e2e039e 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/string_choices.h>
#define DRV_NAME "cros-usbpd-logger"
@@ -135,8 +136,8 @@ static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
len += append_str(buf, len, "Power supply fault: %s", fault);
break;
case PD_EVENT_VIDEO_DP_MODE:
- len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
- "en" : "dis");
+ len += append_str(buf, len, "DP mode %s",
+ str_enabled_disabled(r->data == 1));
break;
case PD_EVENT_VIDEO_CODEC:
minfo = (struct mcdp_info *)r->payload;
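str_enabled_disabled() comes from <linux/string_choices.h> and simply maps a boolean to the string "enabled" or "disabled"; the point of the conversion is that the full words are easier to grep for than the old "%sabled" construction. A trivial illustration, assuming the same r->data field as above:

	bool dp_enabled = (r->data == 1);

	/* Prints "DP mode enabled" or "DP mode disabled" */
	pr_info("DP mode %s\n", str_enabled_disabled(dp_enabled));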
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index 49c383eb6785..13e37b49d9d0 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -6,6 +6,7 @@
menuconfig CZNIC_PLATFORMS
bool "Platform support for CZ.NIC's Turris hardware"
+ depends on ARCH_MVEBU || COMPILE_TEST
help
Say Y here to be able to choose driver support for CZ.NIC's Turris
devices. This option alone does not add any kernel code.
diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c
index 58f9afae2867..770e680b96f9 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-base.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-base.c
@@ -52,6 +52,7 @@ int omnia_cmd_write_read(const struct i2c_client *client,
return 0;
}
+EXPORT_SYMBOL_GPL(omnia_cmd_write_read);
static int omnia_get_version_hash(struct omnia_mcu *mcu, bool bootloader,
char version[static OMNIA_FW_VERSION_HEX_LEN])
@@ -257,6 +258,8 @@ static int omnia_mcu_read_features(struct omnia_mcu *mcu)
_DEF_FEAT(NEW_INT_API, "new interrupt API"),
_DEF_FEAT(POWEROFF_WAKEUP, "poweroff and wakeup"),
_DEF_FEAT(TRNG, "true random number generator"),
+ _DEF_FEAT(BRIGHTNESS_INT, "LED panel brightness change interrupt"),
+ _DEF_FEAT(LED_GAMMA_CORRECTION, "LED gamma correction"),
#undef _DEF_FEAT
};
struct i2c_client *client = mcu->client;
diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h
index 2b13e28ee323..088541be3f4c 100644
--- a/drivers/platform/cznic/turris-omnia-mcu.h
+++ b/drivers/platform/cznic/turris-omnia-mcu.h
@@ -8,7 +8,6 @@
#ifndef __TURRIS_OMNIA_MCU_H
#define __TURRIS_OMNIA_MCU_H
-#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
@@ -17,8 +16,6 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/workqueue.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
struct i2c_client;
struct rtc_device;
@@ -93,133 +90,6 @@ struct omnia_mcu {
#endif
};
-int omnia_cmd_write_read(const struct i2c_client *client,
- void *cmd, unsigned int cmd_len,
- void *reply, unsigned int reply_len);
-
-static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd,
- unsigned int len)
-{
- return omnia_cmd_write_read(client, cmd, len, NULL, 0);
-}
-
-static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd,
- u8 val)
-{
- u8 buf[2] = { cmd, val };
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd,
- u16 val)
-{
- u8 buf[3];
-
- buf[0] = cmd;
- put_unaligned_le16(val, &buf[1]);
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd,
- u32 val)
-{
- u8 buf[5];
-
- buf[0] = cmd;
- put_unaligned_le32(val, &buf[1]);
-
- return omnia_cmd_write(client, buf, sizeof(buf));
-}
-
-static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd,
- void *reply, unsigned int len)
-{
- return omnia_cmd_write_read(client, &cmd, 1, reply, len);
-}
-
-static inline unsigned int
-omnia_compute_reply_length(unsigned long mask, bool interleaved,
- unsigned int offset)
-{
- if (!mask)
- return 0;
-
- return ((__fls(mask) >> 3) << interleaved) + 1 + offset;
-}
-
-/* Returns 0 on success */
-static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd,
- unsigned long bits, unsigned long *dst)
-{
- __le32 reply;
- int err;
-
- if (!bits) {
- *dst = 0;
- return 0;
- }
-
- err = omnia_cmd_read(client, cmd, &reply,
- omnia_compute_reply_length(bits, false, 0));
- if (err)
- return err;
-
- *dst = le32_to_cpu(reply) & bits;
-
- return 0;
-}
-
-static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd,
- unsigned long bit)
-{
- unsigned long reply;
- int err;
-
- err = omnia_cmd_read_bits(client, cmd, bit, &reply);
- if (err)
- return err;
-
- return !!reply;
-}
-
-static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd,
- u32 *dst)
-{
- __le32 reply;
- int err;
-
- err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
- if (err)
- return err;
-
- *dst = le32_to_cpu(reply);
-
- return 0;
-}
-
-static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd,
- u16 *dst)
-{
- __le16 reply;
- int err;
-
- err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
- if (err)
- return err;
-
- *dst = le16_to_cpu(reply);
-
- return 0;
-}
-
-static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd,
- u8 *reply)
-{
- return omnia_cmd_read(client, cmd, reply, sizeof(*reply));
-}
-
#ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO
extern const u8 omnia_int_to_gpio_idx[32];
extern const struct attribute_group omnia_mcu_gpio_group;
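The command helpers deleted from this private header, together with the new EXPORT_SYMBOL_GPL() on omnia_cmd_write_read() above, indicate that the MCU command API is now shared with code outside this module (presumably through a public header). A hedged sketch of what a consumer of the exported primitive looks like; the command ID and reply layout are invented for illustration:

#include <linux/i2c.h>
#include <asm/byteorder.h>

static int example_read_mcu_field(const struct i2c_client *client, u16 *out)
{
	u8 cmd = 0x2a;	/* hypothetical command id */
	__le16 reply;
	int err;

	/* One transaction: write the command byte, read back two bytes */
	err = omnia_cmd_write_read(client, &cmd, 1, &reply, sizeof(reply));
	if (err)
		return err;

	*out = le16_to_cpu(reply);
	return 0;
}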
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index c5b36837e694..9cae07348d5e 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -177,7 +177,7 @@ static ssize_t post_reset_wdog_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t post_reset_wdog_store(struct device *dev,
@@ -206,7 +206,7 @@ static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
if (action < 0)
return action;
- return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
}
static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
@@ -274,14 +274,14 @@ static ssize_t lifecycle_state_show(struct device *dev,
* due to using the test bits.
*/
if (test_state) {
- return sprintf(buf, "%s(test)\n",
+ return sysfs_emit(buf, "%s(test)\n",
mlxbf_bootctl_lifecycle_states[lc_state]);
} else if (use_dev_key &&
(lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
- return sprintf(buf, "Secured (development)\n");
+ return sysfs_emit(buf, "Secured (development)\n");
}
- return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
}
static ssize_t secure_boot_fuse_state_show(struct device *dev,
@@ -332,9 +332,9 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
else
status = valid ? "Invalid" : "Free";
}
- buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ buf_len += sysfs_emit_at(buf, buf_len, "%d:%s ", key, status);
}
- buf_len += sprintf(buf + buf_len, "\n");
+ buf_len += sysfs_emit_at(buf, buf_len, "\n");
return buf_len;
}
@@ -939,7 +939,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos,
size_t count)
{
@@ -971,9 +971,9 @@ static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
return p - buf;
}
-static struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
+static const struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
.attr = { .name = "bootfifo", .mode = 0400 },
- .read = mlxbf_bootctl_bootfifo_read,
+ .read_new = mlxbf_bootctl_bootfifo_read,
};
static bool mlxbf_bootctl_guid_match(const guid_t *guid,
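The conversions in this file track the sysfs transition toward const binary attributes: .read_new takes a const struct bin_attribute pointer, and the attribute itself can be declared const. A minimal sketch of the new-style pattern, with the attribute name and payload invented for illustration:

#include <linux/fs.h>
#include <linux/sysfs.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr,
			    char *buf, loff_t off, size_t count)
{
	/* Bounds-checked copy out of the blob stashed in ->private */
	return memory_read_from_buffer(buf, count, &off, attr->private,
				       attr->size);
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = 0400 },
	.size = 16,
	.read_new = example_read,
};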
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 9d18dfca6a67..36a00692347d 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -88,6 +88,7 @@
#define MLXBF_PMC_CRSPACE_PERFMON_CTL(n) (n * MLXBF_PMC_CRSPACE_PERFMON_REG0_SZ)
#define MLXBF_PMC_CRSPACE_PERFMON_EN BIT(30)
#define MLXBF_PMC_CRSPACE_PERFMON_CLR BIT(28)
+#define MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0x4)
#define MLXBF_PMC_CRSPACE_PERFMON_VAL0(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0xc)
/**
@@ -114,6 +115,7 @@ struct mlxbf_pmc_attribute {
* @attr_event: Attributes for "event" sysfs files
* @attr_event_list: Attributes for "event_list" sysfs files
* @attr_enable: Attributes for "enable" sysfs files
+ * @attr_count_clock: Attributes for "count_clock" sysfs files
* @block_attr: All attributes needed for the block
* @block_attr_grp: Attribute group for the block
*/
@@ -126,6 +128,7 @@ struct mlxbf_pmc_block_info {
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
struct mlxbf_pmc_attribute attr_enable;
+ struct mlxbf_pmc_attribute attr_count_clock;
struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
struct attribute_group block_attr_grp;
};
@@ -859,6 +862,37 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_miss_events[] = {
{75, "HISTOGRAM_HISTOGRAM_BIN9"},
};
+static const struct mlxbf_pmc_events mlxbf_pmc_clock_events[] = {
+ { 0x0, "FMON_CLK_LAST_COUNT_PLL_D1_INST0" },
+ { 0x4, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST0" },
+ { 0x8, "FMON_CLK_LAST_COUNT_PLL_D1_INST1" },
+ { 0xc, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST1" },
+ { 0x10, "FMON_CLK_LAST_COUNT_PLL_G1" },
+ { 0x14, "REFERENCE_WINDOW_WIDTH_PLL_G1" },
+ { 0x18, "FMON_CLK_LAST_COUNT_PLL_W1" },
+ { 0x1c, "REFERENCE_WINDOW_WIDTH_PLL_W1" },
+ { 0x20, "FMON_CLK_LAST_COUNT_PLL_T1" },
+ { 0x24, "REFERENCE_WINDOW_WIDTH_PLL_T1" },
+ { 0x28, "FMON_CLK_LAST_COUNT_PLL_A0" },
+ { 0x2c, "REFERENCE_WINDOW_WIDTH_PLL_A0" },
+ { 0x30, "FMON_CLK_LAST_COUNT_PLL_C0" },
+ { 0x34, "REFERENCE_WINDOW_WIDTH_PLL_C0" },
+ { 0x38, "FMON_CLK_LAST_COUNT_PLL_N1" },
+ { 0x3c, "REFERENCE_WINDOW_WIDTH_PLL_N1" },
+ { 0x40, "FMON_CLK_LAST_COUNT_PLL_I1" },
+ { 0x44, "REFERENCE_WINDOW_WIDTH_PLL_I1" },
+ { 0x48, "FMON_CLK_LAST_COUNT_PLL_R1" },
+ { 0x4c, "REFERENCE_WINDOW_WIDTH_PLL_R1" },
+ { 0x50, "FMON_CLK_LAST_COUNT_PLL_P1" },
+ { 0x54, "REFERENCE_WINDOW_WIDTH_PLL_P1" },
+ { 0x58, "FMON_CLK_LAST_COUNT_REF_100_INST0" },
+ { 0x5c, "REFERENCE_WINDOW_WIDTH_REF_100_INST0" },
+ { 0x60, "FMON_CLK_LAST_COUNT_REF_100_INST1" },
+ { 0x64, "REFERENCE_WINDOW_WIDTH_REF_100_INST1" },
+ { 0x68, "FMON_CLK_LAST_COUNT_REF_156" },
+ { 0x6c, "REFERENCE_WINDOW_WIDTH_REF_156" },
+};
+
static struct mlxbf_pmc_context *pmc;
/* UUID used to probe ATF service. */
@@ -1032,6 +1066,9 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size
} else if (strstr(blk, "llt")) {
events = mlxbf_pmc_llt_events;
size = ARRAY_SIZE(mlxbf_pmc_llt_events);
+ } else if (strstr(blk, "clock_measure")) {
+ events = mlxbf_pmc_clock_events;
+ size = ARRAY_SIZE(mlxbf_pmc_clock_events);
} else {
events = NULL;
size = 0;
@@ -1168,7 +1205,7 @@ static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 e
/* Method to handle crspace counter programming */
static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- void *addr;
+ void __iomem *addr;
u32 word;
int ret;
@@ -1192,7 +1229,7 @@ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num,
/* Method to clear crspace counter value */
static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
{
- void *addr;
+ void __iomem *addr;
addr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
@@ -1405,7 +1442,7 @@ static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *resul
static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
u32 word, evt;
- void *addr;
+ void __iomem *addr;
int ret;
addr = pmc->block[blk_num].mmio_base +
@@ -1466,14 +1503,15 @@ static int mlxbf_pmc_read_event(unsigned int blk_num, u32 cnt_num, bool is_l3, u
/* Method to read a register */
static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
{
- u32 ecc_out;
+ u32 reg;
- if (strstr(pmc->block_name[blk_num], "ecc")) {
+ if ((strstr(pmc->block_name[blk_num], "ecc")) ||
+ (strstr(pmc->block_name[blk_num], "clock_measure"))) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
- &ecc_out))
+ &reg))
return -EFAULT;
- *result = ecc_out;
+ *result = reg;
return 0;
}
@@ -1487,6 +1525,9 @@ static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
/* Method to write to a register */
static int mlxbf_pmc_write_reg(unsigned int blk_num, u32 offset, u64 data)
{
+ if (strstr(pmc->block_name[blk_num], "clock_measure"))
+ return -EINVAL;
+
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_WRITE_REG_32, data);
@@ -1763,6 +1804,49 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
return count;
}
+/* Show function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+
+ blk_num = attr_count_clock->nr;
+
+ if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ &reg))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%u\n", reg);
+}
+
+/* Store function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+ int err;
+
+ blk_num = attr_count_clock->nr;
+
+ err = kstrtouint(buf, 0, &reg);
+ if (err < 0)
+ return err;
+
+ mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ MLXBF_PMC_WRITE_REG_32, reg);
+
+ return count;
+}
+
/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_num)
{
@@ -1801,6 +1885,21 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
attr = NULL;
}
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
+ /* Program crspace counters to count clock cycles using "count_clock" sysfs */
+ attr = &pmc->block[blk_num].attr_count_clock;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_count_clock_show;
+ attr->dev_attr.store = mlxbf_pmc_count_clock_store;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "count_clock");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+
pmc->block[blk_num].attr_counter = devm_kcalloc(
dev, pmc->block[blk_num].counters,
sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
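Once loaded, the new attribute is a plain decimal sysfs file. A small userspace sketch for reading it; the device path is a placeholder, since the real location depends on how the PMC platform device and its crspace blocks are named:

#include <stdio.h>

int main(void)
{
	/* Placeholder path; substitute the actual crspace block directory. */
	const char *path =
		"/sys/bus/platform/devices/MLNXBFD1:00/<crspace-block>/count_clock";
	unsigned int val;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &val) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("count_clock = %u\n", val);
	return 0;
}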
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 6aa2a4650367..b347000e4329 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -232,7 +232,7 @@ static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
regval = !!(regval & data->mask);
}
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
}
#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 595276206baf..97fefe6c38d1 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -126,7 +126,7 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
mutex_unlock(&priv->io_lock);
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
access_error:
mutex_unlock(&priv->io_lock);
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d4f32ad66530..a594d5fcfcfd 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -371,7 +371,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
NULL,
};
-/* Devices for Surface Pro 9 (Intel/x86) and 10 */
+/* Devices for Surface Pro 9, 10 and 11 (Intel/x86) */
static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_root,
&ssam_node_hub_kip,
@@ -430,6 +430,9 @@ static const struct acpi_device_id ssam_platform_hub_acpi_match[] = {
/* Surface Pro 10 */
{ "MSHW0510", (unsigned long)ssam_node_group_sp9 },
+ /* Surface Pro 11 */
+ { "MSHW0583", (unsigned long)ssam_node_group_sp9 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 08db878f1d7d..0e479e35e66e 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -40,7 +40,7 @@ struct ssam_tmp_profile_info {
struct ssam_platform_profile_device {
struct ssam_device *sdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
bool has_fan;
};
@@ -154,14 +154,14 @@ static int convert_profile_to_ssam_fan(struct ssam_device *sdev, enum platform_p
}
}
-static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct ssam_platform_profile_device *tpd;
enum ssam_tmp_profile tp;
int status;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
status = ssam_tmp_profile_get(tpd->sdev, &tp);
if (status)
@@ -175,13 +175,13 @@ static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct ssam_platform_profile_device *tpd;
int tp;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
tp = convert_profile_to_ssam_tmp(tpd->sdev, profile);
if (tp < 0)
@@ -201,6 +201,22 @@ static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
return tp;
}
+static int ssam_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops ssam_platform_profile_ops = {
+ .probe = ssam_platform_profile_probe,
+ .profile_get = ssam_platform_profile_get,
+ .profile_set = ssam_platform_profile_set,
+};
+
static int surface_platform_profile_probe(struct ssam_device *sdev)
{
struct ssam_platform_profile_device *tpd;
@@ -210,23 +226,14 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
return -ENOMEM;
tpd->sdev = sdev;
-
- tpd->handler.profile_get = ssam_platform_profile_get;
- tpd->handler.profile_set = ssam_platform_profile_set;
+ ssam_device_set_drvdata(sdev, tpd);
tpd->has_fan = device_property_read_bool(&sdev->dev, "has_fan");
- set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+ tpd->ppdev = devm_platform_profile_register(&sdev->dev, "Surface Platform Profile",
+ tpd, &ssam_platform_profile_ops);
- return platform_profile_register(&tpd->handler);
-}
-
-static void surface_platform_profile_remove(struct ssam_device *sdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(tpd->ppdev);
}
static const struct ssam_device_id ssam_platform_profile_match[] = {
@@ -237,7 +244,6 @@ MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
static struct ssam_device_driver surface_platform_profile = {
.probe = surface_platform_profile_probe,
- .remove = surface_platform_profile_remove,
.match_table = ssam_platform_profile_match,
.driver = {
.name = "surface_platform_profile",
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d09baa3d3d90..69336bd778ee 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -30,7 +30,10 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
#include <linux/hwmon.h>
+#include <linux/units.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
@@ -67,10 +70,16 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+#define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23
-#define ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET 0x54
+#define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8)
-#define ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK GENMASK(20, 8)
+#define ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK GENMASK_ULL(7, 0)
+#define ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK GENMASK_ULL(15, 8)
+#define ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK GENMASK_ULL(23, 8)
+#define ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK GENMASK_ULL(39, 24)
/*
* Acer ACPI method GUIDs
@@ -95,12 +104,33 @@ enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
WMID_GAMING_TURBO_KEY_EVENT = 0x7,
+ WMID_AC_EVENT = 0x8,
};
enum acer_wmi_predator_v4_sys_info_command {
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x02,
- ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED = 0x0201,
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED = 0x0601,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS = 0x0000,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING = 0x0001,
+ ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x0002,
+};
+
+enum acer_wmi_predator_v4_sensor_id {
+ ACER_WMID_SENSOR_CPU_TEMPERATURE = 0x01,
+ ACER_WMID_SENSOR_CPU_FAN_SPEED = 0x02,
+ ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2 = 0x03,
+ ACER_WMID_SENSOR_GPU_FAN_SPEED = 0x06,
+ ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A,
+};
+
+enum acer_wmi_predator_v4_oc {
+ ACER_WMID_OC_NORMAL = 0x0000,
+ ACER_WMID_OC_TURBO = 0x0002,
+};
+
+enum acer_wmi_gaming_misc_setting {
+ ACER_WMID_MISC_SETTING_OC_1 = 0x0005,
+ ACER_WMID_MISC_SETTING_OC_2 = 0x0007,
+ ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES = 0x000A,
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE = 0x000B,
};
static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -246,7 +276,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_TURBO_LED BIT(8)
#define ACER_CAP_TURBO_FAN BIT(9)
#define ACER_CAP_PLATFORM_PROFILE BIT(10)
-#define ACER_CAP_FAN_SPEED_READ BIT(11)
+#define ACER_CAP_HWMON BIT(11)
/*
* Interface type flags
@@ -271,6 +301,7 @@ static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
static bool cycle_gaming_thermal_profile = true;
static bool predator_v4;
+static u64 supported_sensors;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -358,7 +389,7 @@ static void __init set_quirks(void)
if (quirks->predator_v4)
interface->capability |= ACER_CAP_PLATFORM_PROFILE |
- ACER_CAP_FAN_SPEED_READ;
+ ACER_CAP_HWMON;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -393,6 +424,20 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
.gpu_fans = 1,
};
+static struct quirk_entry quirk_acer_predator_ph16_72 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
+static struct quirk_entry quirk_acer_predator_pt14_51 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
static struct quirk_entry quirk_acer_predator_v4 = {
.predator_v4 = 1,
};
@@ -566,6 +611,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Nitro AN515-58",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Nitro AN515-58"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH315-53",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -593,6 +647,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Predator PH16-72",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-72"),
+ },
+ .driver_data = &quirk_acer_predator_ph16_72,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH18-71",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -601,6 +664,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PT14-51",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PT14-51"),
+ },
+ .driver_data = &quirk_acer_predator_pt14_51,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -713,29 +785,24 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static bool platform_profile_support;
/*
* The profile used before turbo mode. This variable is needed for
* returning from turbo mode when the mode key is in toggle mode.
*/
-static int last_non_turbo_profile;
-
-enum acer_predator_v4_thermal_profile_ec {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x04,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x03,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x02,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x01,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x00,
-};
+static int last_non_turbo_profile = INT_MIN;
+
+/* The most performant supported profile */
+static int acer_predator_v4_max_perf;
-enum acer_predator_v4_thermal_profile_wmi {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI = 0x060B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI = 0x050B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI = 0x040B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI = 0x0B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI = 0x010B,
+enum acer_predator_v4_thermal_profile {
+ ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x00,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x01,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x04,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x05,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x06,
};
/* Find which quirks are needed for a particular vendor/ model pair */
@@ -1448,6 +1515,45 @@ WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
return status;
}
+static int WMI_gaming_execute_u32_u64(u32 method_id, u32 in, u64 *out)
+{
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input = {
+ .length = sizeof(in),
+ .pointer = &in,
+ };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = result.pointer;
+ if (obj && out) {
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ *out = obj->integer.value;
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length < sizeof(*out))
+ ret = -ENOMSG;
+ else
+ *out = get_unaligned_le64(obj->buffer.pointer);
+
+ break;
+ default:
+ ret = -ENOMSG;
+ break;
+ }
+ }
+
+ kfree(obj);
+
+ return ret;
+}
+
static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
{
u32 method_id = 0;
@@ -1462,9 +1568,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
case ACER_CAP_TURBO_FAN:
method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
break;
- case ACER_CAP_TURBO_OC:
- method_id = ACER_WMID_SET_GAMING_MISC_SETTING_METHODID;
- break;
default:
return AE_BAD_PARAMETER;
}
@@ -1497,6 +1600,24 @@ static acpi_status WMID_gaming_get_u64(u64 *value, u32 cap)
return status;
}
+static int WMID_gaming_get_sys_info(u32 command, u64 *out)
+{
+ acpi_status status;
+ u64 result;
+
+ status = WMI_gaming_execute_u64(ACER_WMID_GET_GAMING_SYS_INFO_METHODID, command, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK, result))
+ return -EIO;
+
+ *out = result;
+
+ return 0;
+}
+
static void WMID_gaming_set_fan_mode(u8 fan_mode)
{
/* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/
@@ -1518,6 +1639,48 @@ static void WMID_gaming_set_fan_mode(u8 fan_mode)
WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
}
+static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value)
+{
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_VALUE_MASK, value);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ return 0;
+}
+
+static int WMID_gaming_get_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 *value)
+{
+ u64 input = 0;
+ u64 result;
+ int ret;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+
+ ret = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_MISC_SETTING_METHODID, input,
+ &result);
+ if (ret < 0)
+ return ret;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ *value = FIELD_GET(ACER_GAMING_MISC_SETTING_VALUE_MASK, result);
+
+ return 0;
+}
+
/*
* Generic Device (interface-independent)
*/
@@ -1744,26 +1907,6 @@ static int acer_gsensor_event(void)
return 0;
}
-static int acer_get_fan_speed(int fan)
-{
- if (quirks->predator_v4) {
- acpi_status status;
- u64 fanspeed;
-
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- fan == 0 ? ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED :
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED,
- &fanspeed);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return FIELD_GET(ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK, fanspeed);
- }
- return -EOPNOTSUPP;
-}
-
/*
* Predator series turbo button
*/
@@ -1783,8 +1926,12 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x1);
/* Set OC to normal */
- WMID_gaming_set_u64(0x5, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x7, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_NORMAL);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_NORMAL);
+ }
} else {
/* Turn on turbo led */
WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
@@ -1793,22 +1940,25 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x2);
/* Set OC to turbo mode */
- WMID_gaming_set_u64(0x205, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x207, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_TURBO);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_TURBO);
+ }
}
return turbo_led_state;
}
static int
-acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u8 tp;
int err;
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET, &tp);
-
- if (err < 0)
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &tp);
+ if (err)
return err;
switch (tp) {
@@ -1835,74 +1985,112 @@ acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
}
static int
-acer_predator_v4_platform_profile_set(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- int tp;
- acpi_status status;
+ int err, tp;
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
break;
case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
break;
case PLATFORM_PROFILE_QUIET:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
break;
case PLATFORM_PROFILE_LOW_POWER:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
break;
default:
return -EOPNOTSUPP;
}
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
+ if (tp != acer_predator_v4_max_perf)
last_non_turbo_profile = tp;
return 0;
}
-static int acer_platform_profile_setup(void)
+static int
+acer_predator_v4_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ unsigned long supported_profiles = 0;
+ int err;
+
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES,
+ (u8 *)&supported_profiles);
+ if (err)
+ return err;
+
+ /* Iterate through supported profiles in order of increasing performance */
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_ECO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+
+ /*
+ * We only use this profile as a fallback option in case no prior
+ * profile is supported.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+
+ /*
+ * We need to handle the hypothetical case where only the turbo profile
+ * is supported. In this case the turbo toggle will essentially be a
+ * no-op.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+ }
+
+ return 0;
+}
+
+static const struct platform_profile_ops acer_predator_v4_platform_profile_ops = {
+ .probe = acer_predator_v4_platform_profile_probe,
+ .profile_get = acer_predator_v4_platform_profile_get,
+ .profile_set = acer_predator_v4_platform_profile_set,
+};
+
+static int acer_platform_profile_setup(struct platform_device *device)
{
if (quirks->predator_v4) {
- int err;
-
- platform_profile_handler.profile_get =
- acer_predator_v4_platform_profile_get;
- platform_profile_handler.profile_set =
- acer_predator_v4_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_QUIET,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_LOW_POWER,
- platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(
+ &device->dev, "acer-wmi", NULL, &acer_predator_v4_platform_profile_ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
platform_profile_support = true;
-
- /* Set default non-turbo profile */
- last_non_turbo_profile =
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
}
return 0;
}
@@ -1910,83 +2098,41 @@ static int acer_platform_profile_setup(void)
static int acer_thermal_profile_change(void)
{
/*
- * This mode key can rotate each mode or toggle turbo mode.
- * On battery, only ECO and BALANCED mode are available.
+ * This mode key will either cycle through each mode or toggle the
+ * most performant profile.
*/
if (quirks->predator_v4) {
u8 current_tp;
- int tp, err;
- u64 on_AC;
- acpi_status status;
-
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET,
- &current_tp);
+ int err, tp;
- if (err < 0)
- return err;
+ if (cycle_gaming_thermal_profile) {
+ platform_profile_cycle();
+ } else {
+ /* Do nothing if no suitable platform profiles were found */
+ if (last_non_turbo_profile < 0)
+ return 0;
- /* Check power source */
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS, &on_AC);
+ err = WMID_gaming_get_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &current_tp);
+ if (err)
+ return err;
- if (ACPI_FAILURE(status))
- return -EIO;
-
- switch (current_tp) {
- case ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else
+ if (current_tp == acer_predator_v4_max_perf)
tp = last_non_turbo_profile;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_ECO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- default:
- return -EOPNOTSUPP;
- }
+ tp = acer_predator_v4_max_perf;
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- /* Store non-turbo profile for turbo mode toggle*/
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
- last_non_turbo_profile = tp;
+ /* Store last profile for toggle */
+ if (current_tp != acer_predator_v4_max_perf)
+ last_non_turbo_profile = current_tp;
- platform_profile_notify();
+ platform_profile_notify(platform_profile_device);
+ }
}
return 0;
@@ -2280,6 +2426,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
acer_thermal_profile_change();
break;
+ case WMID_AC_EVENT:
+ /* We ignore AC events here */
+ break;
default:
pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
@@ -2530,12 +2679,12 @@ static int acer_platform_probe(struct platform_device *device)
goto error_rfkill;
if (has_cap(ACER_CAP_PLATFORM_PROFILE)) {
- err = acer_platform_profile_setup();
+ err = acer_platform_profile_setup(device);
if (err)
goto error_platform_profile;
}
- if (has_cap(ACER_CAP_FAN_SPEED_READ)) {
+ if (has_cap(ACER_CAP_HWMON)) {
err = acer_wmi_hwmon_init();
if (err)
goto error_hwmon;
@@ -2544,8 +2693,6 @@ static int acer_platform_probe(struct platform_device *device)
return 0;
error_hwmon:
- if (platform_profile_support)
- platform_profile_remove();
error_platform_profile:
acer_rfkill_exit();
error_rfkill:
@@ -2566,9 +2713,6 @@ static void acer_platform_remove(struct platform_device *device)
acer_backlight_exit();
acer_rfkill_exit();
-
- if (platform_profile_support)
- platform_profile_remove();
}
#ifdef CONFIG_PM_SLEEP
@@ -2655,43 +2799,86 @@ static void __init create_debugfs(void)
&interface->debug.wmid_devices);
}
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_temp_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_TEMPERATURE,
+ [1] = ACER_WMID_SENSOR_GPU_TEMPERATURE,
+ [2] = ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2,
+};
+
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_FAN_SPEED,
+ [1] = ACER_WMID_SENSOR_GPU_FAN_SPEED,
+};
+
static umode_t acer_wmi_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
{
+ enum acer_wmi_predator_v4_sensor_id sensor_id;
+ const u64 *supported_sensors = data;
+
switch (type) {
+ case hwmon_temp:
+ sensor_id = acer_wmi_temp_channel_to_sensor_id[channel];
+ break;
case hwmon_fan:
- if (acer_get_fan_speed(channel) >= 0)
- return 0444;
+ sensor_id = acer_wmi_fan_channel_to_sensor_id[channel];
break;
default:
return 0;
}
+ if (*supported_sensors & BIT(sensor_id - 1))
+ return 0444;
+
return 0;
}
static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
+ u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;
+ u64 result;
int ret;
switch (type) {
+ case hwmon_temp:
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_temp_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
+ if (ret < 0)
+ return ret;
+
+ result = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ *val = result * MILLIDEGREE_PER_DEGREE;
+ return 0;
case hwmon_fan:
- ret = acer_get_fan_speed(channel);
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_fan_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
if (ret < 0)
return ret;
- *val = ret;
- break;
+
+ *val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ return 0;
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
- HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT), NULL
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT
+ ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT
+ ),
+ NULL
};
static const struct hwmon_ops acer_wmi_hwmon_ops = {
@@ -2708,9 +2895,20 @@ static int acer_wmi_hwmon_init(void)
{
struct device *dev = &acer_platform_device->dev;
struct device *hwmon;
+ u64 result;
+ int ret;
+
+ ret = WMID_gaming_get_sys_info(ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS, &result);
+ if (ret < 0)
+ return ret;
+
+ /* Return early if no sensors are available */
+ supported_sensors = FIELD_GET(ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK, result);
+ if (!supported_sensors)
+ return 0;
hwmon = devm_hwmon_device_register_with_info(dev, "acer",
- &acer_platform_driver,
+ &supported_sensors,
&acer_wmi_hwmon_chip_info,
NULL);
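The sensor interface above packs everything into single u64 values: the command and sensor index go out through the low bits, and the reading comes back in bits 23:8. A standalone sketch of that encoding/decoding using the masks defined earlier in the file; the temperature scaling mirrors the hwmon read path, and the sensor ID is just an example:

#include <linux/bitfield.h>
#include <linux/units.h>

static u64 example_sensor_query(enum acer_wmi_predator_v4_sensor_id id)
{
	u64 cmd = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;

	/* The sensor index is carried in bits 15:8 of the input argument */
	return cmd | FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK, id);
}

static long example_temp_millicelsius(u64 result)
{
	/* The raw reading lives in bits 23:8; temperatures are whole degrees C */
	return FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result) *
	       MILLIDEGREE_PER_DEGREE;
}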
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index 7d10d4462a45..d6f7a62d55b5 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -7,7 +7,7 @@ config AMD_HSMP
tristate
menu "AMD HSMP Driver"
- depends on AMD_NB || COMPILE_TEST
+ depends on AMD_NODE || COMPILE_TEST
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index e981d45e1c12..c1eccb3c80c5 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/device.h>
@@ -24,6 +23,8 @@
#include <uapi/asm-generic/errno-base.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -226,7 +227,7 @@ static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
}
static ssize_t hsmp_metric_tbl_acpi_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -285,19 +286,19 @@ static int init_acpi(struct device *dev)
return ret;
}
-static struct bin_attribute hsmp_metric_tbl_attr = {
+static const struct bin_attribute hsmp_metric_tbl_attr = {
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444},
- .read = hsmp_metric_tbl_acpi_read,
+ .read_new = hsmp_metric_tbl_acpi_read,
.size = sizeof(struct hsmp_metric_table),
};
-static struct bin_attribute *hsmp_attr_list[] = {
+static const struct bin_attribute *hsmp_attr_list[] = {
&hsmp_metric_tbl_attr,
NULL
};
-static struct attribute_group hsmp_attr_grp = {
- .bin_attrs = hsmp_attr_list,
+static const struct attribute_group hsmp_attr_grp = {
+ .bin_attrs_new = hsmp_attr_list,
.is_bin_visible = hsmp_is_sock_attr_visible,
};
@@ -321,8 +322,8 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return -ENODEV;
hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 227b4ad4a51a..a3ac09a90de4 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -8,7 +8,6 @@
*/
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -33,7 +32,13 @@
#define HSMP_WR true
#define HSMP_RD false
-#define DRIVER_VERSION "2.3"
+#define DRIVER_VERSION "2.4"
+
+/*
+ * When the same message number is used for both GET and SET operations,
+ * bit 31 indicates whether it is a GET or a SET operation.
+ */
+#define CHECK_GET_BIT BIT(31)
static struct hsmp_plat_device hsmp_pdev;
@@ -167,11 +172,28 @@ static int validate_message(struct hsmp_message *msg)
if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
return -ENOMSG;
- /* num_args and response_sz against the HSMP spec */
- if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
- msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ /*
+ * The num_args passed by the user should match the num_args specified
+ * in the message description table.
+ */
+ if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args)
return -EINVAL;
+ /*
+ * Some older HSMP SET messages have been updated to also support GET in
+ * the same message. For these messages, GET returns the current value and
+ * SET also returns the successfully set value. To support GET and SET in
+ * a single message while maintaining backward compatibility for HSMP
+ * users, hsmp_msg_desc_table[] specifies only the maximum allowed
+ * response_sz.
+ */
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET) {
+ if (msg->response_sz > hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ } else {
+ /* only HSMP_SET or HSMP_GET messages go through this strict check */
+ if (msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ }
return 0;
}
@@ -239,6 +261,18 @@ int hsmp_test(u16 sock_ind, u32 value)
}
EXPORT_SYMBOL_NS_GPL(hsmp_test, "AMD_HSMP");
+static bool is_get_msg(struct hsmp_message *msg)
+{
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_GET)
+ return true;
+
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET &&
+ (msg->args[0] & CHECK_GET_BIT))
+ return true;
+
+ return false;
+}
+
long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int __user *arguser = (int __user *)arg;
@@ -261,7 +295,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_WRONLY mode
* Execute only set/configure commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+ if (is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ:
@@ -269,7 +303,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_RDONLY mode
* Execute only get/monitor commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+ if (!is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ | FMODE_WRITE:
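For the new SET_GET messages, userspace opts into the GET behaviour by setting bit 31 of args[0], which is what is_get_msg() checks above. A hedged userspace sketch of such a query; the message ID is hypothetical, and the field names follow the uapi header <asm/amd_hsmp.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/amd_hsmp.h>	/* struct hsmp_message, HSMP_IOCTL_CMD */

int main(void)
{
	struct hsmp_message msg = { 0 };
	int fd = open("/dev/hsmp", O_RDONLY);	/* read-only: GET commands only */

	if (fd < 0)
		return 1;

	msg.msg_id = 0x3f;		/* hypothetical SET_GET message id */
	msg.num_args = 1;
	msg.response_sz = 1;
	msg.args[0] = 1u << 31;		/* CHECK_GET_BIT: read the current value */
	msg.sock_ind = 0;

	if (ioctl(fd, HSMP_IOCTL_CMD, &msg))
		perror("hsmp ioctl");
	else
		printf("current value: 0x%x\n", msg.args[0]);

	close(fd);
	return 0;
}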
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index e852f0a947e4..af8b21f821d6 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -21,8 +21,6 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
-#define MAX_AMD_SOCKETS 8
-
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
@@ -41,7 +39,6 @@ struct hsmp_socket {
void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
- struct pci_dev *root;
struct device *dev;
u16 sock_ind;
int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw);
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index a61f815c9f80..b9782a078dbd 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -10,14 +10,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -34,32 +36,16 @@
#define SMN_HSMP_MSG_RESP 0x0010980
#define SMN_HSMP_MSG_DATA 0x00109E0
-#define HSMP_INDEX_REG 0xc4
-#define HSMP_DATA_REG 0xc8
-
static struct hsmp_plat_device *hsmp_pdev;
static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
- int ret;
-
- if (!sock->root)
- return -ENODEV;
-
- ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
- sock->mbinfo.base_addr + offset);
- if (ret)
- return ret;
-
- ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
-
- return ret;
+ return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write);
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct hsmp_socket *sock;
@@ -95,15 +81,20 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* Static array of 8 + 1(for NULL) elements is created below
* to create sysfs groups for sockets.
* is_bin_visible function is used to show / hide the necessary groups.
+ *
+ * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes,
+ * then the attributes and groups below must be adjusted.
*/
+static_assert(MAX_AMD_NUM_NODES == 8);
+
#define HSMP_BIN_ATTR(index, _list) \
-static struct bin_attribute attr##index = { \
+static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
.private = (void *)index, \
- .read = hsmp_metric_tbl_plat_read, \
+ .read_new = hsmp_metric_tbl_plat_read, \
.size = sizeof(struct hsmp_metric_table), \
}; \
-static struct bin_attribute _list[] = { \
+static const struct bin_attribute _list[] = { \
&attr##index, \
NULL \
}
@@ -118,8 +109,8 @@ HSMP_BIN_ATTR(6, *sock6_attr_list);
HSMP_BIN_ATTR(7, *sock7_attr_list);
#define HSMP_BIN_ATTR_GRP(index, _list, _name) \
-static struct attribute_group sock##index##_attr_grp = { \
- .bin_attrs = _list, \
+static const struct attribute_group sock##index##_attr_grp = { \
+ .bin_attrs_new = _list, \
.is_bin_visible = hsmp_is_sock_attr_visible, \
.name = #_name, \
}
@@ -159,10 +150,7 @@ static int init_platform_device(struct device *dev)
int ret, i;
for (i = 0; i < hsmp_pdev->num_sockets; i++) {
- if (!node_to_amd_nb(i))
- return -ENODEV;
sock = &hsmp_pdev->sock[i];
- sock->root = node_to_amd_nb(i)->root;
sock->sock_ind = i;
sock->dev = dev;
sock->mbinfo.base_addr = SMN_HSMP_BASE;
@@ -305,11 +293,11 @@ static int __init hsmp_plt_init(void)
return -ENOMEM;
/*
- * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ * amd_num_nodes() returns the number of SMN/DF interfaces present in the system;
* if we have N SMN/DF interfaces, that ideally means N sockets
*/
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return ret;
ret = platform_driver_register(&amd_hsmp_driver);
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
index 94f9563d8be7..eeffdafd686e 100644
--- a/drivers/platform/x86/amd/pmc/Kconfig
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -5,7 +5,7 @@
config AMD_PMC
tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ depends on ACPI && PCI && RTC_CLASS && AMD_NODE
depends on SUSPEND
select SERIO
help
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index f1d9ab19d24c..255d94ddf999 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -4,6 +4,6 @@
# AMD Power Management Controller Driver
#
-amd-pmc-objs := pmc.o pmc-quirks.o
+amd-pmc-objs := pmc.o pmc-quirks.o mp1_stb.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c
new file mode 100644
index 000000000000..c005f00988f7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/mp1_stb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP1 Smart Trace Buffer (STB) Layer
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sanket Goswami <Sanket.Goswami@amd.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_nb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "pmc.h"
+
+/* STB Spill to DRAM Parameters */
+#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
+#define S2D_TELEMETRY_BYTES_MAX 0x100000U
+#define S2D_RSVD_RAM_SPACE 0x100000
+
+/* STB Registers */
+#define AMD_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_DUMMY_PC 0xC6000007
+
+/* STB Spill to DRAM Message Definition */
+#define STB_FORCE_FLUSH_DATA 0xCF
+#define FIFO_SIZE 4096
+
+/* STB S2D (Spill to DRAM) uses a different message port offset */
+#define AMD_S2D_REGISTER_MESSAGE 0xA20
+#define AMD_S2D_REGISTER_RESPONSE 0xA80
+#define AMD_S2D_REGISTER_ARGUMENT 0xA88
+
+/* STB S2D (Spill to DRAM) message port offsets for the 44h model */
+#define AMD_GNR_REGISTER_MESSAGE 0x524
+#define AMD_GNR_REGISTER_RESPONSE 0x570
+#define AMD_GNR_REGISTER_ARGUMENT 0xA40
+
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
+static bool dump_custom_stb;
+module_param(dump_custom_stb, bool, 0644);
+MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
+
+enum s2d_arg {
+ S2D_TELEMETRY_SIZE = 0x01,
+ S2D_PHYS_ADDR_LOW,
+ S2D_PHYS_ADDR_HIGH,
+ S2D_NUM_SAMPLES,
+ S2D_DRAM_SIZE,
+};
+
+struct amd_stb_v2_data {
+ size_t size;
+ u8 data[] __counted_by(size);
+};
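The flexible array is annotated with __counted_by(size) so bounds checks can use the size member; the allocations below pair it with struct_size(), which computes the total with saturation instead of wrapping on overflow. Roughly:

/* struct_size(stb_data_arr, data, fsize) evaluates to (saturating at SIZE_MAX): */
size_t bytes = sizeof(struct amd_stb_v2_data) + fsize * sizeof(u8);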
+
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = amd_smn_write(0, AMD_STB_PMI_0, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ for (i = 0; i < FIFO_SIZE; i++) {
+ err = amd_smn_read(0, AMD_STB_PMI_0, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_stb_read(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open,
+ .read = amd_stb_debugfs_read,
+ .release = amd_stb_debugfs_release,
+};
+
+/* Enhanced STB Firmware Reporting Mechanism */
+static int amd_stb_handle_efr(struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ struct amd_stb_v2_data *stb_data_arr;
+ u32 fsize;
+
+ fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 fsize, num_samples, val, stb_rdptr_offset = 0;
+ struct amd_stb_v2_data *stb_data_arr;
+ int ret;
+
+ /* Write dummy postcode while reading the STB buffer */
+ ret = amd_stb_write(dev, AMD_PMC_STB_DUMMY_PC);
+ if (ret)
+ dev_err(dev->dev, "error writing to STB: %d\n", ret);
+
+ /* Spill to DRAM num_samples uses a separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
+ if (ret)
+ dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
+
+ /*
+ * We have a custom STB size and the PMFW is supposed to report the
+ * enhanced DRAM size. Note that we land here only on platforms that
+ * support enhanced DRAM size reporting.
+ */
+ if (dump_custom_stb)
+ return amd_stb_handle_efr(filp);
+
+ /* Get the num_samples to calculate the last push location */
+ ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->stb_arg.s2d_msg_id, true);
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+ if (ret) {
+ dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
+ return ret;
+ }
+
+ fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+
+ /*
+ * Start capturing data from the last push location.
+ * This is for general cases, where the stb limits
+ * are meant for standard usage.
+ */
+ if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
+ /* First, read the oldest data, starting just after the last write, to the end of the ring buffer */
+ stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
+ fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
+
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
+ /* Then copy the newer samples from offset 0 up to the last write */
+ memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
+ } else {
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ }
+
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
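The two memcpy_fromio() calls above linearize a ring buffer: once more than S2D_TELEMETRY_BYTES_MAX bytes have been pushed, the oldest byte sits at the write pointer. The same logic over plain memory, as a self-contained sketch (names hypothetical):

#include <string.h>

/* Copy ring[] (capacity cap, total bytes ever pushed) into out[] in
 * oldest-to-newest order, mirroring the memcpy_fromio() pair above.
 */
static void linearize_ring(unsigned char *out, const unsigned char *ring,
                           size_t cap, size_t pushed)
{
        if (pushed > cap) {
                size_t rd = pushed % cap;       /* write pointer == oldest byte */
                size_t tail = cap - rd;

                memcpy(out, ring + rd, tail);   /* oldest run, up to end of ring */
                memcpy(out + tail, ring, rd);   /* newer run, from the start */
        } else {
                memcpy(out, ring, pushed);      /* ring never wrapped */
        }
}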
+
+static ssize_t amd_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amd_stb_v2_data *data = filp->private_data;
+
+ return simple_read_from_buffer(buf, size, pos, data->data, data->size);
+}
+
+static int amd_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops_v2 = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open_v2,
+ .read = amd_stb_debugfs_read_v2,
+ .release = amd_stb_debugfs_release_v2,
+};
+
+static void amd_stb_update_args(struct amd_pmc_dev *dev)
+{
+ if (cpu_feature_enabled(X86_FEATURE_ZEN5))
+ switch (boot_cpu_data.x86_model) {
+ case 0x44:
+ dev->stb_arg.msg = AMD_GNR_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_GNR_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_GNR_REGISTER_RESPONSE;
+ return;
+ default:
+ break;
+ }
+
+ dev->stb_arg.msg = AMD_S2D_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_S2D_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_S2D_REGISTER_RESPONSE;
+}
+
+static bool amd_is_stb_supported(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ if (boot_cpu_data.x86_model == 0x44)
+ dev->stb_arg.s2d_msg_id = 0x9B;
+ else
+ dev->stb_arg.s2d_msg_id = 0xBE;
+ break;
+ case AMD_CPU_ID_PS:
+ dev->stb_arg.s2d_msg_id = 0x85;
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+ if (boot_cpu_data.x86_model == 0x70)
+ dev->stb_arg.s2d_msg_id = 0xF1;
+ else
+ dev->stb_arg.s2d_msg_id = 0xDE;
+ break;
+ default:
+ return false;
+ }
+
+ amd_stb_update_args(dev);
+ return true;
+}
+
+int amd_stb_s2d_init(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 stb_phys_addr;
+ u32 size = 0;
+ int ret;
+
+ if (!enable_stb)
+ return 0;
+
+ if (amd_is_stb_supported(dev)) {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops_v2);
+ } else {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops);
+ return 0;
+ }
+
+ /* The Spill to DRAM feature uses a separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->stb_arg.s2d_msg_id, true);
+ if (size != S2D_TELEMETRY_BYTES_MAX)
+ return -EIO;
+
+ /* Get DRAM size */
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->stb_arg.s2d_msg_id, true);
+ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->stb_arg.s2d_msg_id, true);
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->stb_arg.s2d_msg_id, true);
+
+ stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+
+ dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
+ if (!dev->stb_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
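The (u64) cast before the shift matters: without it, phys_addr_hi << 32 is evaluated in 32 bits and the high half is lost. For example:

u32 hi = 0x00000001, low = 0x80000000;
u64 stb_phys_addr = ((u64)hi << 32) | low;      /* 0x0000000180000000 */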
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index a254debb9256..e6124498b195 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -10,8 +10,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
@@ -28,6 +28,8 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <asm/amd_node.h>
+
#include "pmc.h"
/* SMU communication registers */
@@ -40,24 +42,9 @@
#define AMD_PMC_SCRATCH_REG_1AH 0xF14
/* STB Registers */
-#define AMD_PMC_STB_PMI_0 0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
-#define AMD_PMC_STB_DUMMY_PC 0xC6000007
-
-/* STB S2D(Spill to DRAM) has different message port offset */
-#define AMD_S2D_REGISTER_MESSAGE 0xA20
-#define AMD_S2D_REGISTER_RESPONSE 0xA80
-#define AMD_S2D_REGISTER_ARGUMENT 0xA88
-
-/* STB Spill to DRAM Parameters */
-#define S2D_TELEMETRY_BYTES_MAX 0x100000U
-#define S2D_RSVD_RAM_SPACE 0x100000
-#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
-
-/* STB Spill to DRAM Message Definition */
-#define STB_FORCE_FLUSH_DATA 0xCF
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_MAPPING_SIZE 0x01000
@@ -97,7 +84,6 @@
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
-#define FIFO_SIZE 4096
enum amd_pmc_def {
MSG_TEST = 0x01,
@@ -105,24 +91,39 @@ enum amd_pmc_def {
MSG_OS_HINT_RN,
};
-enum s2d_arg {
- S2D_TELEMETRY_SIZE = 0x01,
- S2D_PHYS_ADDR_LOW,
- S2D_PHYS_ADDR_HIGH,
- S2D_NUM_SAMPLES,
- S2D_DRAM_SIZE,
-};
-
-struct amd_pmc_stb_v2_data {
- size_t size;
- u8 data[] __counted_by(size);
-};
-
struct amd_pmc_bit_map {
const char *name;
u32 bit_mask;
};
+static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"VDD_CCX", BIT(4)},
+ {"ACP", BIT(5)},
+ {"VCN_0", BIT(6)},
+ {"VCN_1", BIT(7)},
+ {"ISP", BIT(8)},
+ {"NBIO", BIT(9)},
+ {"DF", BIT(10)},
+ {"USB3_0", BIT(11)},
+ {"USB3_1", BIT(12)},
+ {"LAPIC", BIT(13)},
+ {"USB3_2", BIT(14)},
+ {"USB4_RT0", BIT(15)},
+ {"USB4_RT1", BIT(16)},
+ {"USB4_0", BIT(17)},
+ {"USB4_1", BIT(18)},
+ {"MPM", BIT(19)},
+ {"JPEG_0", BIT(20)},
+ {"JPEG_1", BIT(21)},
+ {"IPU", BIT(22)},
+ {"UMSCH", BIT(23)},
+ {"VPE", BIT(24)},
+};
+
static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"DISPLAY", BIT(0)},
{"CPU", BIT(1)},
@@ -146,25 +147,13 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"IPU", BIT(19)},
{"UMSCH", BIT(20)},
{"VPE", BIT(21)},
- {}
};
-static bool enable_stb;
-module_param(enable_stb, bool, 0644);
-MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
-
static bool disable_workarounds;
module_param(disable_workarounds, bool, 0644);
MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
-static bool dump_custom_stb;
-module_param(dump_custom_stb, bool, 0644);
-MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
-
static struct amd_pmc_dev pmc;
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -193,155 +182,6 @@ struct smu_metrics {
u64 timecondition_notmet_totaltime[32];
} __packed;
-static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 size = FIFO_SIZE * sizeof(u32);
- u32 *buf;
- int rc;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = amd_pmc_read_stb(dev, buf);
- if (rc) {
- kfree(buf);
- return rc;
- }
-
- filp->private_data = buf;
- return rc;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- if (!filp->private_data)
- return -EINVAL;
-
- return simple_read_from_buffer(buf, size, pos, filp->private_data,
- FIFO_SIZE * sizeof(u32));
-}
-
-static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open,
- .read = amd_pmc_stb_debugfs_read,
- .release = amd_pmc_stb_debugfs_release,
-};
-
-/* Enhanced STB Firmware Reporting Mechanism */
-static int amd_pmc_stb_handle_efr(struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- u32 fsize;
-
- fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 fsize, num_samples, val, stb_rdptr_offset = 0;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- int ret;
-
- /* Write dummy postcode while reading the STB buffer */
- ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
- if (ret)
- dev_err(dev->dev, "error writing to STB: %d\n", ret);
-
- /* Spill to DRAM num_samples uses separate SMU message port */
- dev->msg_port = 1;
-
- ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
- if (ret)
- dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
-
- /*
- * We have a custom stb size and the PMFW is supposed to give
- * the enhanced dram size. Note that we land here only for the
- * platforms that support enhanced dram size reporting.
- */
- if (dump_custom_stb)
- return amd_pmc_stb_handle_efr(filp);
-
- /* Get the num_samples to calculate the last push location */
- ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
- if (ret) {
- dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
- return ret;
- }
-
- fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
-
- /*
- * Start capturing data from the last push location.
- * This is for general cases, where the stb limits
- * are meant for standard usage.
- */
- if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
- /* First read oldest data starting 1 behind last write till end of ringbuffer */
- stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
- fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
-
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
- /* Second copy the newer samples from offset 0 - last write */
- memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
- } else {
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- }
-
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- struct amd_pmc_stb_v2_data *data = filp->private_data;
-
- return simple_read_from_buffer(buf, size, pos, data->data, data->size);
-}
-
-static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open_v2,
- .read = amd_pmc_stb_debugfs_read_v2,
- .release = amd_pmc_stb_debugfs_release_v2,
-};
-
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -350,18 +190,23 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
dev->num_ips = 12;
- dev->s2d_msg_id = 0xBE;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case AMD_CPU_ID_PS:
dev->num_ips = 21;
- dev->s2d_msg_id = 0x85;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- dev->num_ips = 22;
- dev->s2d_msg_id = 0xDE;
+ if (boot_cpu_data.x86_model == 0x70) {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk_v2);
+ dev->ips_ptr = soc15_ip_blk_v2;
+ } else {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk);
+ dev->ips_ptr = soc15_ip_blk;
+ }
dev->smu_msg = 0x938;
break;
}
@@ -529,8 +374,8 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
seq_puts(s, "\n=== Active time (in us) ===\n");
for (idx = 0 ; idx < dev->num_ips ; idx++) {
- if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
- seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ if (dev->ips_ptr[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", dev->ips_ptr[idx].name,
table.timecondition_notmet_lastcapture[idx]);
}
@@ -625,20 +470,6 @@ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
debugfs_remove_recursive(dev->dbgfs_dir);
}
-static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
-{
- switch (dev->cpu_id) {
- case AMD_CPU_ID_YC:
- case AMD_CPU_ID_CB:
- case AMD_CPU_ID_PS:
- case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
- case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- return true;
- default:
- return false;
- }
-}
-
static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
@@ -648,14 +479,17 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&s0ix_stats_fops);
debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
&amd_pmc_idlemask_fops);
- /* Enable STB only when the module_param is set */
- if (enable_stb) {
- if (amd_pmc_is_stb_supported(dev))
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops_v2);
- else
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops);
+}
+
+static char *amd_pmc_get_msg_port(struct amd_pmc_dev *dev)
+{
+ switch (dev->msg_port) {
+ case MSG_PORT_PMC:
+ return "PMC";
+ case MSG_PORT_S2D:
+ return "S2D";
+ default:
+ return "Invalid message port";
}
}
@@ -663,10 +497,10 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value, message, argument, response;
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -674,26 +508,26 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
}
value = amd_pmc_reg_read(dev, response);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, argument);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, message);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", amd_pmc_get_msg_port(dev), value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
int rc;
u32 val, message, argument, response;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -706,7 +540,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -724,7 +558,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -738,21 +572,19 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
case AMD_PMC_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
case AMD_PMC_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmc_dump_registers(dev);
return rc;
}
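The goto-based unlock paths disappear because guard(mutex) from <linux/cleanup.h> releases the lock automatically when the scope exits, on every return path. A minimal sketch of the pattern (precondition() is a hypothetical helper):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int do_locked_work(struct mutex *lock)
{
        guard(mutex)(lock);     /* dropped automatically at scope exit */

        if (!precondition())    /* hypothetical check */
                return -EBUSY;  /* no goto out_unlock needed */

        return 0;
}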
@@ -881,7 +713,7 @@ static void amd_pmc_s2idle_prepare(void)
return;
}
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -900,7 +732,7 @@ static void amd_pmc_s2idle_check(void)
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_CHECK);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -927,7 +759,7 @@ static void amd_pmc_s2idle_restore(void)
/* Let SMU know that we are looking for stats */
amd_pmc_dump_data(pdev);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
@@ -981,74 +813,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
-static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
-{
- u32 phys_addr_low, phys_addr_hi;
- u64 stb_phys_addr;
- u32 size = 0;
- int ret;
-
- /* Spill to DRAM feature uses separate SMU message port */
- dev->msg_port = 1;
-
- amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
- if (size != S2D_TELEMETRY_BYTES_MAX)
- return -EIO;
-
- /* Get DRAM size */
- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
- if (ret || !dev->dram_size)
- dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
-
- /* Get STB DRAM address */
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);
-
- if (!phys_addr_hi && !phys_addr_low) {
- dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n");
- return -EINVAL;
- }
-
- stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
-
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
-
- dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
- if (!dev->stb_virt_addr)
- return -ENOMEM;
-
- return 0;
-}
-
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
-{
- int err;
-
- err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
- if (err) {
- dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
-
- return 0;
-}
-
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
-{
- int i, err;
-
- for (i = 0; i < FIFO_SIZE; i++) {
- err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
- if (err) {
- dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
- }
-
- return 0;
-}
-
static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
@@ -1106,12 +870,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
/* Get num of IP blocks within the SoC */
amd_pmc_get_ip_info(dev);
- if (enable_stb && amd_pmc_is_stb_supported(dev)) {
- err = amd_pmc_s2d_init(dev);
- if (err)
- goto err_pci_dev_put;
- }
-
platform_set_drvdata(pdev, dev);
if (IS_ENABLED(CONFIG_SUSPEND)) {
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
@@ -1122,6 +880,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
amd_pmc_dbgfs_register(dev);
+ err = amd_stb_s2d_init(dev);
+ if (err)
+ goto err_pci_dev_put;
+
if (IS_ENABLED(CONFIG_AMD_MP2_STB))
amd_mp2_stb_init(dev);
pm_report_max_hw_sleep(U64_MAX);
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f1166d15c856..f43f0253b0f5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,11 @@
#include <linux/types.h>
#include <linux/mutex.h>
+enum s2d_msg_port {
+ MSG_PORT_PMC,
+ MSG_PORT_S2D,
+};
+
struct amd_mp2_dev {
void __iomem *mmio;
void __iomem *vslbase;
@@ -25,24 +30,31 @@ struct amd_mp2_dev {
bool is_stb_data;
};
+struct stb_arg {
+ u32 s2d_msg_id;
+ u32 msg;
+ u32 arg;
+ u32 resp;
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
void __iomem *stb_virt_addr;
void __iomem *fch_virt_addr;
- bool msg_port;
u32 base_addr;
u32 cpu_id;
- u32 active_ips;
u32 dram_size;
+ u32 active_ips;
+ const struct amd_pmc_bit_map *ips_ptr;
u32 num_ips;
- u32 s2d_msg_id;
u32 smu_msg;
/* SMU version information */
u8 smu_program;
u8 major;
u8 minor;
u8 rev;
+ u8 msg_port;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
@@ -50,6 +62,7 @@ struct amd_pmc_dev {
struct quirk_entry *quirks;
bool disable_8042_wakeup;
struct amd_mp2_dev *mp2;
+ struct stb_arg stb_arg;
};
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
@@ -70,4 +83,9 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
+int amd_stb_s2d_init(struct amd_pmc_dev *dev);
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf);
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data);
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+
#endif /* PMC_H */
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index 99d67cdbd91e..25b8f7ae3abd 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -7,7 +7,7 @@ config AMD_PMF
tristate "AMD Platform Management Framework"
depends on ACPI && PCI
depends on POWER_SUPPLY
- depends on AMD_NB
+ depends on AMD_NODE
select ACPI_PLATFORM_PROFILE
depends on TEE && AMDTEE
depends on AMD_SFH_HID
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 7d6079b02589..6b26e48ce8ad 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o pmf-quirks.o
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 1b9c7acf0ddf..dd5780a1d06e 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -321,17 +321,29 @@ int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req
req, sizeof(*req));
}
+static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req);
+ if (ret)
+ dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret);
+}
+
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
{
struct amd_pmf_dev *pmf_dev = data;
struct apmf_sbios_req req;
int ret;
- mutex_lock(&pmf_dev->update_mutex);
+ guard(mutex)(&pmf_dev->update_mutex);
ret = apmf_get_sbios_requests(pmf_dev, &req);
if (ret) {
dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
- goto out;
+ return;
}
if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
@@ -353,8 +365,6 @@ static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
if (pmf_dev->amt_enabled)
amd_pmf_update_2_cql(pmf_dev, req.cql_event);
}
-out:
- mutex_unlock(&pmf_dev->update_mutex);
}
static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
@@ -430,6 +440,15 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
apmf_event_handler(ahandle, 0, pmf_dev);
}
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler_v2, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler for custom BIOS inputs\n");
+ return -ENODEV;
+ }
+ }
+
return 0;
}
@@ -480,6 +499,9 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2)
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler_v2);
}
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 06a97c533cb8..a2cb2d5544f5 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -8,13 +8,13 @@
* Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
-#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <asm/amd_node.h>
#include "pmf.h"
/* PMF-SMU communication registers */
@@ -127,7 +127,8 @@ static void amd_pmf_get_metrics(struct work_struct *work)
ktime_t time_elapsed_ms;
int socket_power;
- mutex_lock(&dev->update_mutex);
+ guard(mutex)(&dev->update_mutex);
+
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
@@ -149,7 +150,6 @@ static void amd_pmf_get_metrics(struct work_struct *work)
dev->start_time = ktime_to_ms(ktime_get());
schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
- mutex_unlock(&dev->update_mutex);
}
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
@@ -181,7 +181,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
int rc;
u32 val;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
@@ -189,7 +189,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -207,7 +207,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -221,21 +221,19 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
case AMD_PMF_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
case AMD_PMF_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmf_dump_registers(dev);
return rc;
}
@@ -373,7 +371,6 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
power_supply_unreg_notifier(&dev->pwr_src_notifier);
- amd_pmf_deinit_sps(dev);
}
if (dev->smart_pc_enabled) {
@@ -455,8 +452,8 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ mutex_init(&dev->cb_mutex);
- amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
@@ -481,6 +478,7 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
+ mutex_destroy(&dev->cb_mutex);
kfree(dev->buf);
}
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
deleted file mode 100644
index 7cde5733b9ca..000000000000
--- a/drivers/platform/x86/amd/pmf/pmf-quirks.c
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * AMD Platform Management Framework Driver Quirks
- *
- * Copyright (c) 2024, Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Author: Mario Limonciello <mario.limonciello@amd.com>
- */
-
-#include <linux/dmi.h>
-
-#include "pmf.h"
-
-struct quirk_entry {
- u32 supported_func;
-};
-
-static struct quirk_entry quirk_no_sps_bug = {
- .supported_func = 0x4003,
-};
-
-static const struct dmi_system_id fwbug_list[] = {
- {
- .ident = "ROG Zephyrus G14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA403U"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ROG Ally X",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "RC72LA"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ASUS TUF Gaming A14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FA401W"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {}
-};
-
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
-{
- const struct dmi_system_id *dmi_id;
- struct quirk_entry *quirks;
-
- dmi_id = dmi_first_match(fwbug_list);
- if (!dmi_id)
- return;
-
- quirks = dmi_id->driver_data;
- if (quirks->supported_func) {
- dev->supported_func = quirks->supported_func;
- pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
- dmi_id->ident);
- }
-}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index a79808fda1d8..e6bdee68ccf3 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -106,9 +106,12 @@ struct cookie_header {
#define PMF_TA_IF_VERSION_MAJOR 1
#define TA_PMF_ACTION_MAX 32
#define TA_PMF_UNDO_MAX 8
-#define TA_OUTPUT_RESERVED_MEM 906
+#define TA_OUTPUT_RESERVED_MEM 922
#define MAX_OPERATION_PARAMS 4
+#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002
+#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d
+
#define PMF_IF_V1 1
#define PMF_IF_V2 2
@@ -338,7 +341,7 @@ struct amd_pmf_dev {
struct mutex lock; /* protects the PMF interface */
u32 supported_func;
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile class device */
struct dentry *dbgfs_dir;
int hb_interval; /* SBIOS heartbeat interval */
struct delayed_work heart_beat;
@@ -370,6 +373,8 @@ struct amd_pmf_dev {
struct input_dev *pmf_idev;
size_t mtable_size;
struct resource *res;
+ struct apmf_sbios_req_v2 req; /* pending custom BIOS request */
+ struct mutex cb_mutex;
};
struct apmf_sps_prop_granular_v2 {
@@ -616,6 +621,30 @@ enum ta_slider {
TA_MAX,
};
+enum apmf_smartpc_custom_bios_inputs {
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT1,
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT2,
+};
+
+enum apmf_preq_smartpc {
+ NOTIFY_CUSTOM_BIOS_INPUT1 = 5,
+ NOTIFY_CUSTOM_BIOS_INPUT2,
+};
+
+enum platform_type {
+ PTYPE_UNKNOWN = 0,
+ LID_CLOSE,
+ CLAMSHELL,
+ FLAT,
+ TENT,
+ STAND,
+ TABLET,
+ BOOK,
+ PRESENTATION,
+ PULL_FWD,
+ PTYPE_INVALID = 0xf,
+};
+
/* Command ids for TA communication */
enum ta_pmf_command {
TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE,
@@ -657,7 +686,8 @@ struct ta_pmf_condition_info {
u32 power_slider;
u32 lid_state;
bool user_present;
- u32 rsvd1[2];
+ u32 bios_input1;
+ u32 bios_input2;
u32 monitor_count;
u32 rsvd2[2];
u32 bat_design;
@@ -667,7 +697,9 @@ struct ta_pmf_condition_info {
u32 device_state;
u32 socket_power;
u32 skin_temperature;
- u32 rsvd3[5];
+ u32 rsvd3[2];
+ u32 platform_type;
+ u32 rsvd3_1[2];
u32 ambient_light;
u32 length;
u32 avg_c0residency;
@@ -751,7 +783,6 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table);
int amd_pmf_init_sps(struct amd_pmf_dev *dev);
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
bool is_pprof_balanced(struct amd_pmf_dev *pmf);
@@ -797,7 +828,4 @@ int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
-/* Quirk infrastructure */
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
-
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 06226eb0eab3..1d90f9382024 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -16,6 +16,46 @@
#include "pmf.h"
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *platform_type_as_str(u16 platform_type)
+{
+ switch (platform_type) {
+ case CLAMSHELL:
+ return "CLAMSHELL";
+ case FLAT:
+ return "FLAT";
+ case TENT:
+ return "TENT";
+ case STAND:
+ return "STAND";
+ case TABLET:
+ return "TABLET";
+ case BOOK:
+ return "BOOK";
+ case PRESENTATION:
+ return "PRESENTATION";
+ case PULL_FWD:
+ return "PULL_FWD";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *laptop_placement_as_str(u16 device_state)
+{
+ switch (device_state) {
+ case ON_TABLE:
+ return "ON_TABLE";
+ case ON_LAP_MOTION:
+ return "ON_LAP_MOTION";
+ case IN_BAG:
+ return "IN_BAG";
+ case OUT_OF_BAG:
+ return "OUT_OF_BAG";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static const char *ta_slider_as_str(unsigned int state)
{
switch (state) {
@@ -47,12 +87,38 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
+ dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type));
+ dev_dbg(dev->dev, "Laptop placement: %s\n",
+ laptop_placement_as_str(in->ev_info.device_state));
+ dev_dbg(dev->dev, "Custom BIOS input1: %u\n", in->ev_info.bios_input1);
+ dev_dbg(dev->dev, "Custom BIOS input2: %u\n", in->ev_info.bios_input2);
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
#endif
+static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev,
+ struct ta_pmf_enact_table *in)
+{
+ if (!pdev->req.pending_req)
+ return;
+
+ switch (pdev->req.pending_req) {
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT1):
+ in->ev_info.bios_input1 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT1];
+ break;
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT2):
+ in->ev_info.bios_input2 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT2];
+ break;
+ default:
+ dev_dbg(pdev->dev, "Invalid preq for BIOS input: 0x%x\n", pdev->req.pending_req);
+ }
+
+ /* Clear pending requests after handling */
+ memset(&pdev->req, 0, sizeof(pdev->req));
+}
+
static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
{
u16 max, avg = 0;
@@ -153,12 +219,14 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
switch (dev->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
val = TA_BEST_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
val = TA_BETTER_PERFORMANCE;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
val = TA_BEST_BATTERY;
break;
default:
@@ -190,6 +258,14 @@ static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact
} else {
dev_dbg(dev->dev, "HPD is not enabled/detected\n");
}
+
+ /* Get SRA (Secondary Accelerometer) data */
+ if (!amd_get_sfh_info(&sfh_info, MT_SRA)) {
+ in->ev_info.platform_type = sfh_info.platform_type;
+ in->ev_info.device_state = sfh_info.laptop_placement;
+ } else {
+ dev_dbg(dev->dev, "SRA is not enabled/detected\n");
+ }
}
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
@@ -201,4 +277,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
amd_pmf_get_battery_info(dev, in);
amd_pmf_get_slider_info(dev, in);
amd_pmf_get_sensor_info(dev, in);
+ amd_pmf_get_custom_bios_inputs(dev, in);
}
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 92f7fb22277d..d3083383f11f 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -282,10 +282,10 @@ bool is_pprof_balanced(struct amd_pmf_dev *pmf)
return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
}
-static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
*profile = pmf->current_profile;
return 0;
@@ -297,12 +297,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
switch (pmf->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
mode = POWER_MODE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
mode = POWER_MODE_BALANCED_POWER;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
mode = POWER_MODE_POWER_SAVER;
break;
default:
@@ -363,10 +365,10 @@ int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
int ret = 0;
pmf->current_profile = profile;
@@ -387,10 +389,32 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
return 0;
}
-int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices)
{
- int err;
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops amd_pmf_profile_ops = {
+ .probe = amd_pmf_profile_probe,
+ .hidden_choices = amd_pmf_hidden_choices,
+ .profile_get = amd_pmf_profile_get,
+ .profile_set = amd_pmf_profile_set,
+};
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
@@ -405,24 +429,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
amd_pmf_set_sps_power_limits(dev);
}
- dev->pprof.profile_get = amd_pmf_profile_get;
- dev->pprof.profile_set = amd_pmf_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
-
/* Create platform_profile structure and register */
- err = platform_profile_register(&dev->pprof);
- if (err)
- dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
- err);
-
- return err;
-}
+ dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev,
+ &amd_pmf_profile_ops);
+ if (IS_ERR(dev->ppdev))
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %ld\n",
+ PTR_ERR(dev->ppdev));
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(dev->ppdev);
}
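Both platform-profile conversions in this series follow the same shape: a const ops table plus devm_platform_profile_register(), with driver data recovered via dev_get_drvdata() in the callbacks and teardown handled by devres (hence the deleted deinit/remove paths). A condensed sketch with hypothetical names:

static const struct platform_profile_ops my_profile_ops = {
        .probe       = my_profile_probe,        /* fills in the supported choices */
        .profile_get = my_profile_get,
        .profile_set = my_profile_set,
};

/* In probe: */
ppdev = devm_platform_profile_register(dev, "my-driver", drvdata, &my_profile_ops);
if (IS_ERR(ppdev))
        return PTR_ERR(ppdev);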
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 8c88769ea1d8..a1e43873a07b 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444);
MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries to debug policy failures");
#endif
-static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
- 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
+static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
+ 0xcc, 0x2b, 0x2b, 0x60, 0xd6),
+ UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
+ 0x29, 0xb1, 0x3d, 0x85, 0x43),
+ };
static const char *amd_pmf_uevent_as_str(unsigned int state)
{
@@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
*/
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
- dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return -EIO;
+ return res;
}
return 0;
@@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
}
-static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
{
struct tee_ioctl_open_session_arg sess_arg = {};
int rc;
- export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
+ export_uuid(sess_arg.uuid, uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
@@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
{
u32 size;
int ret;
@@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
return PTR_ERR(dev->tee_ctx);
}
- ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
if (ret) {
dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
ret = -EINVAL;
@@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
{
- int ret;
+ bool status;
+ int ret, i;
ret = apmf_check_smart_pc(dev);
if (ret) {
@@ -502,26 +506,22 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return -ENODEV;
}
- ret = amd_pmf_tee_init(dev);
- if (ret)
- return ret;
-
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto error;
+ goto err_cancel_work;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
if (IS_ERR(dev->policy_base)) {
ret = PTR_ERR(dev->policy_base);
- goto error;
+ goto err_free_dram_buf;
}
dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
if (!dev->policy_buf) {
ret = -ENOMEM;
- goto error;
+ goto err_free_dram_buf;
}
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
@@ -531,24 +531,60 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
if (!dev->prev_data) {
ret = -ENOMEM;
- goto error;
+ goto err_free_policy;
}
- ret = amd_pmf_start_policy_engine(dev);
- if (ret)
- goto error;
+ for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
+ ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
+ if (ret)
+ goto err_free_prev_data;
+
+ ret = amd_pmf_start_policy_engine(dev);
+ switch (ret) {
+ case TA_PMF_TYPE_SUCCESS:
+ status = true;
+ break;
+ case TA_ERROR_CRYPTO_INVALID_PARAM:
+ case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
+ amd_pmf_tee_deinit(dev);
+ status = false;
+ break;
+ default:
+ ret = -EINVAL;
+ amd_pmf_tee_deinit(dev);
+ goto err_free_prev_data;
+ }
+
+ if (status)
+ break;
+ }
+
+ if (!status && !pb_side_load) {
+ ret = -EINVAL;
+ goto err_free_prev_data;
+ }
if (pb_side_load)
amd_pmf_open_pb(dev, dev->dbgfs_dir);
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto error;
+ goto err_pmf_remove_pb;
return 0;
-error:
- amd_pmf_deinit_smart_pc(dev);
+err_pmf_remove_pb:
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+ amd_pmf_tee_deinit(dev);
+err_free_prev_data:
+ kfree(dev->prev_data);
+err_free_policy:
+ kfree(dev->policy_buf);
+err_free_dram_buf:
+ kfree(dev->buf);
+err_cancel_work:
+ cancel_delayed_work_sync(&dev->pb_work);
return ret;
}
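Stripped of the error unwinding, the new loop above is a first-match fallback across TA images; the real code additionally distinguishes crypto rejections (try the next UUID) from hard failures (abort). A simplified sketch:

for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
        ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
        if (ret)
                return ret;                     /* TEE setup failed: abort */

        if (amd_pmf_start_policy_engine(dev) == TA_PMF_TYPE_SUCCESS)
                return 0;                       /* this TA accepted the policy */

        amd_pmf_tee_deinit(dev);                /* rejected: try the next UUID */
}
return -EINVAL;                                 /* no TA accepted the policy */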
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a5933980ade3..3f8b2a324efd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -50,7 +50,8 @@ MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-do
static struct quirk_entry *quirks;
static bool atkbd_reports_vol_keys;
-static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port)
+static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended_e0;
static bool extended_e1;
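The filter callback type (i8042_filter_t) now carries a void *context that i8042 hands back on every invocation; the second argument to i8042_install_filter() supplies it (asus-wmi passes NULL below since it still keeps file-scope state). A hedged sketch with hypothetical driver types:

static bool my_i8042_filter(unsigned char data, unsigned char str,
                            struct serio *port, void *context)
{
        struct my_drv *drv = context;           /* hypothetical driver data */

        return my_handle_scancode(drv, data);   /* true: consume the byte */
}

/* At probe time, register the filter together with its context: */
err = i8042_install_filter(my_i8042_filter, drv);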
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8bd187e8b47f..38ef778e8c19 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -313,7 +313,7 @@ struct asus_wmi {
bool mid_fan_curve_available;
struct fan_curve_data custom_fan_curves[3];
- struct platform_profile_handler platform_profile_handler;
+ struct device *ppdev;
bool platform_profile_support;
// The RSOC controls the maximum charging percentage.
@@ -3782,7 +3782,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
* Ensure that platform_profile updates userspace with the change to ensure
* that platform_profile and throttle_thermal_policy_mode are in sync.
*/
- platform_profile_notify();
+ platform_profile_notify(asus->ppdev);
return count;
}
@@ -3793,13 +3793,13 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
static DEVICE_ATTR_RW(throttle_thermal_policy);
/* Platform profile ***********************************************************/
-static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
tp = asus->throttle_thermal_policy_mode;
switch (tp) {
@@ -3819,13 +3819,13 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
@@ -3845,6 +3845,21 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
return throttle_thermal_policy_write(asus);
}
+static int asus_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops asus_wmi_platform_profile_ops = {
+ .probe = asus_wmi_platform_profile_probe,
+ .profile_get = asus_wmi_platform_profile_get,
+ .profile_set = asus_wmi_platform_profile_set,
+};
+
static int platform_profile_setup(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
@@ -3869,22 +3884,11 @@ static int platform_profile_setup(struct asus_wmi *asus)
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
- asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
- asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- asus->platform_profile_handler.choices);
-
- err = platform_profile_register(&asus->platform_profile_handler);
- if (err == -EEXIST) {
- pr_warn("%s, a platform_profile handler is already registered\n", __func__);
- return 0;
- } else if (err) {
- pr_err("%s, failed at platform_profile_register: %d\n", __func__, err);
- return err;
+ asus->ppdev = devm_platform_profile_register(dev, "asus-wmi", asus,
+ &asus_wmi_platform_profile_ops);
+ if (IS_ERR(asus->ppdev)) {
+ dev_err(dev, "Failed to register a platform_profile class device\n");
+ return PTR_ERR(asus->ppdev);
}
asus->platform_profile_support = true;
@@ -4815,7 +4819,7 @@ static int asus_wmi_add(struct platform_device *pdev)
}
if (asus->driver->i8042_filter) {
- err = i8042_install_filter(asus->driver->i8042_filter);
+ err = i8042_install_filter(asus->driver->i8042_filter, NULL);
if (err)
pr_warn("Unable to install key filter - %d\n", err);
}
@@ -4842,8 +4846,6 @@ fail_input:
fail_sysfs:
fail_custom_fan_curve:
fail_platform_profile_setup:
- if (asus->platform_profile_support)
- platform_profile_remove();
fail_fan_boost_mode:
fail_platform:
kfree(asus);
@@ -4869,9 +4871,6 @@ static void asus_wmi_remove(struct platform_device *device)
throttle_thermal_policy_set_default(asus);
asus_wmi_battery_exit(asus);
- if (asus->platform_profile_support)
- platform_profile_remove();
-
kfree(asus);
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index d02f15fd3482..018dfde4025e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -73,8 +73,7 @@ struct asus_wmi_driver {
void (*key_filter) (struct asus_wmi_driver *driver, int *code,
unsigned int *value, bool *autorelease);
/* Optional standard i8042 filter */
- bool (*i8042_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+ i8042_filter_t i8042_filter;
int (*probe) (struct platform_device *device);
void (*detect_quirks) (struct asus_wmi_driver *driver);
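
A hedged sketch of the filter signature change above: i8042_install_filter() now takes a context pointer that is handed back on every invocation, so filters no longer need module-level globals to reach their driver data. The "demo" names are hypothetical:

#include <linux/i8042.h>
#include <linux/serio.h>

struct demo_ctx {
	bool swallow_next;
};

static bool demo_filter(unsigned char data, unsigned char str,
			struct serio *port, void *context)
{
	struct demo_ctx *ctx = context;

	if (ctx->swallow_next) {
		ctx->swallow_next = false;
		return true;	/* consume this byte */
	}
	return false;		/* pass the byte through */
}

static struct demo_ctx demo_ctx;

static int demo_install(void)
{
	return i8042_install_filter(demo_filter, &demo_ctx);
}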
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 2dddafb3f7fa..d09060aedd3f 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -152,6 +152,7 @@ config DELL_SMBIOS_SMM
config DELL_SMO8800
tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
default m
+ depends on I2C
depends on ACPI || COMPILE_TEST
help
Say Y here if you want to support SMO88XX freefall devices
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index 79d60f1bf4c1..bb3cbd470a46 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -15,6 +15,7 @@ dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
dell-wmi-objs := dell-wmi-base.o
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 341d01d3e3e4..e252e0cf47ef 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -385,12 +385,6 @@ struct color_platform {
u8 red;
} __packed;
-struct platform_zone {
- u8 location;
- struct device_attribute *attr;
- struct color_platform colors;
-};
-
struct wmax_brightness_args {
u32 led_mask;
u32 percentage;
@@ -420,22 +414,9 @@ struct wmax_u32_args {
};
static struct platform_device *platform_device;
-static struct device_attribute *zone_dev_attrs;
-static struct attribute **zone_attrs;
-static struct platform_zone *zone_data;
-static struct platform_profile_handler pp_handler;
+static struct color_platform colors[4];
static enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
-static struct platform_driver platform_driver = {
- .driver = {
- .name = "alienware-wmi",
- }
-};
-
-static struct attribute_group zone_attribute_group = {
- .name = "rgb_zones",
-};
-
static u8 interface;
static u8 lighting_control_state;
static u8 global_brightness;
@@ -443,7 +424,7 @@ static u8 global_brightness;
/*
* Helpers used for zone control
*/
-static int parse_rgb(const char *buf, struct platform_zone *zone)
+static int parse_rgb(const char *buf, struct color_platform *colors)
{
long unsigned int rgb;
int ret;
@@ -463,28 +444,14 @@ static int parse_rgb(const char *buf, struct platform_zone *zone)
repackager.package = rgb & 0x0f0f0f0f;
pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
repackager.cp.red, repackager.cp.green, repackager.cp.blue);
- zone->colors = repackager.cp;
+ *colors = repackager.cp;
return 0;
}
-static struct platform_zone *match_zone(struct device_attribute *attr)
-{
- u8 zone;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- if ((struct device_attribute *)zone_data[zone].attr == attr) {
- pr_debug("alienware-wmi: matched zone location: %d\n",
- zone_data[zone].location);
- return &zone_data[zone];
- }
- }
- return NULL;
-}
-
/*
* Individual RGB zone control
*/
-static int alienware_update_led(struct platform_zone *zone)
+static int alienware_update_led(u8 location)
{
int method_id;
acpi_status status;
@@ -493,8 +460,8 @@ static int alienware_update_led(struct platform_zone *zone)
struct legacy_led_args legacy_args;
struct wmax_led_args wmax_basic_args;
if (interface == WMAX) {
- wmax_basic_args.led_mask = 1 << zone->location;
- wmax_basic_args.colors = zone->colors;
+ wmax_basic_args.led_mask = 1 << location;
+ wmax_basic_args.colors = colors[location];
wmax_basic_args.state = lighting_control_state;
guid = WMAX_CONTROL_GUID;
method_id = WMAX_METHOD_ZONE_CONTROL;
@@ -502,7 +469,7 @@ static int alienware_update_led(struct platform_zone *zone)
input.length = sizeof(wmax_basic_args);
input.pointer = &wmax_basic_args;
} else {
- legacy_args.colors = zone->colors;
+ legacy_args.colors = colors[location];
legacy_args.brightness = global_brightness;
legacy_args.state = 0;
if (lighting_control_state == LEGACY_BOOTING ||
@@ -511,7 +478,7 @@ static int alienware_update_led(struct platform_zone *zone)
legacy_args.state = lighting_control_state;
} else
guid = LEGACY_CONTROL_GUID;
- method_id = zone->location + 1;
+ method_id = location + 1;
input.length = sizeof(legacy_args);
input.pointer = &legacy_args;
@@ -525,35 +492,153 @@ static int alienware_update_led(struct platform_zone *zone)
}
static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf, u8 location)
{
- struct platform_zone *target_zone;
- target_zone = match_zone(attr);
- if (target_zone == NULL)
- return sprintf(buf, "red: -1, green: -1, blue: -1\n");
return sprintf(buf, "red: %d, green: %d, blue: %d\n",
- target_zone->colors.red,
- target_zone->colors.green, target_zone->colors.blue);
+ colors[location].red, colors[location].green,
+ colors[location].blue);
}
-static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count, u8 location)
{
- struct platform_zone *target_zone;
int ret;
- target_zone = match_zone(attr);
- if (target_zone == NULL) {
- pr_err("alienware-wmi: invalid target zone\n");
- return 1;
- }
- ret = parse_rgb(buf, target_zone);
+
+ ret = parse_rgb(buf, &colors[location]);
if (ret)
return ret;
- ret = alienware_update_led(target_zone);
+
+ ret = alienware_update_led(location);
+
return ret ? ret : count;
}
+static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 0);
+}
+
+static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 0);
+}
+
+static DEVICE_ATTR_RW(zone00);
+
+static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 1);
+}
+
+static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 1);
+}
+
+static DEVICE_ATTR_RW(zone01);
+
+static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 2);
+}
+
+static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 2);
+}
+
+static DEVICE_ATTR_RW(zone02);
+
+static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 3);
+}
+
+static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 3);
+}
+
+static DEVICE_ATTR_RW(zone03);
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t lighting_control_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (lighting_control_state == LEGACY_BOOTING)
+ return sysfs_emit(buf, "[booting] running suspend\n");
+ else if (lighting_control_state == LEGACY_SUSPEND)
+ return sysfs_emit(buf, "booting running [suspend]\n");
+
+ return sysfs_emit(buf, "booting [running] suspend\n");
+}
+
+static ssize_t lighting_control_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 val;
+
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+
+ lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ lighting_control_state);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(lighting_control_state);
+
+static umode_t zone_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (n < quirks->num_zones + 1)
+ return attr->mode;
+
+ return 0;
+}
+
+static bool zone_group_visible(struct kobject *kobj)
+{
+ return quirks->num_zones > 0;
+}
+DEFINE_SYSFS_GROUP_VISIBLE(zone);
+
+static struct attribute *zone_attrs[] = {
+ &dev_attr_lighting_control_state.attr,
+ &dev_attr_zone00.attr,
+ &dev_attr_zone01.attr,
+ &dev_attr_zone02.attr,
+ &dev_attr_zone03.attr,
+ NULL
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+ .is_visible = SYSFS_GROUP_VISIBLE(zone),
+ .attrs = zone_attrs,
+};
+
/*
* LED Brightness (Global)
*/
@@ -582,7 +667,7 @@ static void global_led_set(struct led_classdev *led_cdev,
if (interface == WMAX)
ret = wmax_brightness(brightness);
else
- ret = alienware_update_led(&zone_data[0]);
+ ret = alienware_update_led(0);
if (ret)
pr_err("LED brightness update failed\n");
}
@@ -598,46 +683,8 @@ static struct led_classdev global_led = {
.name = "alienware::global_brightness",
};
-/*
- * Lighting control state device attribute (Global)
- */
-static ssize_t show_control_state(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (lighting_control_state == LEGACY_BOOTING)
- return sysfs_emit(buf, "[booting] running suspend\n");
- else if (lighting_control_state == LEGACY_SUSPEND)
- return sysfs_emit(buf, "booting running [suspend]\n");
- return sysfs_emit(buf, "booting [running] suspend\n");
-}
-
-static ssize_t store_control_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- long unsigned int val;
- if (strcmp(buf, "booting\n") == 0)
- val = LEGACY_BOOTING;
- else if (strcmp(buf, "suspend\n") == 0)
- val = LEGACY_SUSPEND;
- else if (interface == LEGACY)
- val = LEGACY_RUNNING;
- else
- val = WMAX_RUNNING;
- lighting_control_state = val;
- pr_debug("alienware-wmi: updated control state to %d\n",
- lighting_control_state);
- return count;
-}
-
-static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
- store_control_state);
-
static int alienware_zone_init(struct platform_device *dev)
{
- u8 zone;
- char *name;
-
if (interface == WMAX) {
lighting_control_state = WMAX_RUNNING;
} else if (interface == LEGACY) {
@@ -646,68 +693,15 @@ static int alienware_zone_init(struct platform_device *dev)
global_led.max_brightness = 0x0F;
global_brightness = global_led.max_brightness;
- /*
- * - zone_dev_attrs num_zones + 1 is for individual zones and then
- * null terminated
- * - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
- * the lighting control + null terminated
- * - zone_data num_zones is for the distinct zones
- */
- zone_dev_attrs =
- kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
- GFP_KERNEL);
- if (!zone_dev_attrs)
- return -ENOMEM;
-
- zone_attrs =
- kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
- GFP_KERNEL);
- if (!zone_attrs)
- return -ENOMEM;
-
- zone_data =
- kcalloc(quirks->num_zones, sizeof(struct platform_zone),
- GFP_KERNEL);
- if (!zone_data)
- return -ENOMEM;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- name = kasprintf(GFP_KERNEL, "zone%02hhX", zone);
- if (name == NULL)
- return 1;
- sysfs_attr_init(&zone_dev_attrs[zone].attr);
- zone_dev_attrs[zone].attr.name = name;
- zone_dev_attrs[zone].attr.mode = 0644;
- zone_dev_attrs[zone].show = zone_show;
- zone_dev_attrs[zone].store = zone_set;
- zone_data[zone].location = zone;
- zone_attrs[zone] = &zone_dev_attrs[zone].attr;
- zone_data[zone].attr = &zone_dev_attrs[zone];
- }
- zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
- zone_attribute_group.attrs = zone_attrs;
-
- led_classdev_register(&dev->dev, &global_led);
-
- return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+ return led_classdev_register(&dev->dev, &global_led);
}
static void alienware_zone_exit(struct platform_device *dev)
{
- u8 zone;
-
if (!quirks->num_zones)
return;
- sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
led_classdev_unregister(&global_led);
- if (zone_dev_attrs) {
- for (zone = 0; zone < quirks->num_zones; zone++)
- kfree(zone_dev_attrs[zone].attr.name);
- }
- kfree(zone_dev_attrs);
- kfree(zone_data);
- kfree(zone_attrs);
}
static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
@@ -742,14 +736,15 @@ static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
* The HDMI mux sysfs node indicates the status of the HDMI input mux.
* It can toggle between standard system GPU output and HDMI input.
*/
-static ssize_t show_hdmi_cable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_CABLE, &out_data);
@@ -763,14 +758,15 @@ static ssize_t show_hdmi_cable(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static ssize_t show_hdmi_source(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_STATUS, &out_data);
@@ -785,12 +781,12 @@ static ssize_t show_hdmi_source(struct device *dev,
return sysfs_emit(buf, "input gpu [unknown]\n");
}
-static ssize_t toggle_hdmi_source(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
+
if (strcmp(buf, "gpu\n") == 0)
args.arg = 1;
else if (strcmp(buf, "input\n") == 0)
@@ -808,9 +804,14 @@ static ssize_t toggle_hdmi_source(struct device *dev,
return count;
}
-static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
-static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
- toggle_hdmi_source);
+static DEVICE_ATTR_RO(cable);
+static DEVICE_ATTR_RW(source);
+
+static bool hdmi_group_visible(struct kobject *kobj)
+{
+ return quirks->hdmi_mux;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
static struct attribute *hdmi_attrs[] = {
&dev_attr_cable.attr,
@@ -820,38 +821,24 @@ static struct attribute *hdmi_attrs[] = {
static const struct attribute_group hdmi_attribute_group = {
.name = "hdmi",
+ .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
.attrs = hdmi_attrs,
};
-static void remove_hdmi(struct platform_device *dev)
-{
- if (quirks->hdmi_mux > 0)
- sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
-}
-
-static int create_hdmi(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
- if (ret)
- remove_hdmi(dev);
- return ret;
-}
-
/*
* Alienware GFX amplifier support
* - Currently supports reading cable status
* - Leaving expansion room to possibly support dock/undock events later
*/
-static ssize_t show_amplifier_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_AMPLIFIER_CABLE, &out_data);
@@ -865,7 +852,13 @@ static ssize_t show_amplifier_status(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+static DEVICE_ATTR_RO(status);
+
+static bool amplifier_group_visible(struct kobject *kobj)
+{
+ return quirks->amplifier;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
static struct attribute *amplifier_attrs[] = {
&dev_attr_status.attr,
@@ -874,37 +867,23 @@ static struct attribute *amplifier_attrs[] = {
static const struct attribute_group amplifier_attribute_group = {
.name = "amplifier",
+ .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
.attrs = amplifier_attrs,
};
-static void remove_amplifier(struct platform_device *dev)
-{
- if (quirks->amplifier > 0)
- sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
-}
-
-static int create_amplifier(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
- if (ret)
- remove_amplifier(dev);
- return ret;
-}
-
/*
* Deep Sleep Control support
* - Modifies BIOS setting for deep sleep control allowing extra wakeup events
*/
-static ssize_t show_deepsleep_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_DEEP_SLEEP_STATUS, &out_data);
if (ACPI_SUCCESS(status)) {
@@ -919,12 +898,11 @@ static ssize_t show_deepsleep_status(struct device *dev,
return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
}
-static ssize_t toggle_deepsleep(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
if (strcmp(buf, "disabled\n") == 0)
args.arg = 0;
@@ -943,7 +921,13 @@ static ssize_t toggle_deepsleep(struct device *dev,
return count;
}
-static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+static DEVICE_ATTR_RW(deepsleep);
+
+static bool deepsleep_group_visible(struct kobject *kobj)
+{
+ return quirks->deepslp;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
static struct attribute *deepsleep_attrs[] = {
&dev_attr_deepsleep.attr,
@@ -952,25 +936,10 @@ static struct attribute *deepsleep_attrs[] = {
static const struct attribute_group deepsleep_attribute_group = {
.name = "deepsleep",
+ .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
.attrs = deepsleep_attrs,
};
-static void remove_deepsleep(struct platform_device *dev)
-{
- if (quirks->deepslp > 0)
- sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
-}
-
-static int create_deepsleep(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
- if (ret)
- remove_deepsleep(dev);
- return ret;
-}
-
/*
* Thermal Profile control
* - Provides thermal profile control through the Platform Profile API
@@ -1000,13 +969,13 @@ static bool is_wmax_thermal_code(u32 code)
static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = arg,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_THERMAL_INFORMATION,
@@ -1023,13 +992,13 @@ static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
static int wmax_thermal_control(u8 profile)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = WMAX_OPERATION_ACTIVATE_PROFILE,
.arg1 = profile,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
u32 out_data;
status = alienware_wmax_command(&in_args, sizeof(in_args),
@@ -1047,13 +1016,13 @@ static int wmax_thermal_control(u8 profile)
static int wmax_game_shift_status(u8 operation, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = 0,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_GAME_SHIFT_STATUS,
@@ -1068,7 +1037,7 @@ static int wmax_game_shift_status(u8 operation, u32 *out_data)
return 0;
}
-static int thermal_profile_get(struct platform_profile_handler *pprof,
+static int thermal_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u32 out_data;
@@ -1094,7 +1063,7 @@ static int thermal_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int thermal_profile_set(struct platform_profile_handler *pprof,
+static int thermal_profile_set(struct device *dev,
enum platform_profile_option profile)
{
if (quirks->gmode) {
@@ -1120,13 +1089,13 @@ static int thermal_profile_set(struct platform_profile_handler *pprof,
return wmax_thermal_control(supported_thermal_profiles[profile]);
}
-static int create_thermal_profile(void)
+static int thermal_profile_probe(void *drvdata, unsigned long *choices)
{
- u32 out_data;
+ enum platform_profile_option profile;
+ enum wmax_thermal_mode mode;
u8 sys_desc[4];
u32 first_mode;
- enum wmax_thermal_mode mode;
- enum platform_profile_option profile;
+ u32 out_data;
int ret;
ret = wmax_thermal_information(WMAX_OPERATION_SYS_DESCRIPTION,
@@ -1153,31 +1122,56 @@ static int create_thermal_profile(void)
profile = wmax_mode_to_platform_profile[mode];
supported_thermal_profiles[profile] = out_data;
- set_bit(profile, pp_handler.choices);
+ set_bit(profile, choices);
}
- if (bitmap_empty(pp_handler.choices, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
return -ENODEV;
if (quirks->gmode) {
supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
WMAX_THERMAL_MODE_GMODE;
- set_bit(PLATFORM_PROFILE_PERFORMANCE, pp_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
- pp_handler.profile_get = thermal_profile_get;
- pp_handler.profile_set = thermal_profile_set;
-
- return platform_profile_register(&pp_handler);
+ return 0;
}
-static void remove_thermal_profile(void)
+static const struct platform_profile_ops awcc_platform_profile_ops = {
+ .probe = thermal_profile_probe,
+ .profile_get = thermal_profile_get,
+ .profile_set = thermal_profile_set,
+};
+
+static int create_thermal_profile(struct platform_device *platform_device)
{
- if (quirks->thermal)
- platform_profile_remove();
+ struct device *ppdev;
+
+ ppdev = devm_platform_profile_register(&platform_device->dev, "alienware-wmi",
+ NULL, &awcc_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(ppdev);
}
+/*
+ * Platform Driver
+ */
+static const struct attribute_group *alienfx_groups[] = {
+ &zone_attribute_group,
+ &hdmi_attribute_group,
+ &amplifier_attribute_group,
+ &deepsleep_attribute_group,
+ NULL
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .dev_groups = alienfx_groups,
+ },
+};
+
static int __init alienware_wmi_init(void)
{
int ret;
@@ -1217,26 +1211,8 @@ static int __init alienware_wmi_init(void)
if (ret)
goto fail_platform_device2;
- if (quirks->hdmi_mux > 0) {
- ret = create_hdmi(platform_device);
- if (ret)
- goto fail_prep_hdmi;
- }
-
- if (quirks->amplifier > 0) {
- ret = create_amplifier(platform_device);
- if (ret)
- goto fail_prep_amplifier;
- }
-
- if (quirks->deepslp > 0) {
- ret = create_deepsleep(platform_device);
- if (ret)
- goto fail_prep_deepsleep;
- }
-
if (quirks->thermal) {
- ret = create_thermal_profile();
+ ret = create_thermal_profile(platform_device);
if (ret)
goto fail_prep_thermal_profile;
}
@@ -1251,11 +1227,7 @@ static int __init alienware_wmi_init(void)
fail_prep_zones:
alienware_zone_exit(platform_device);
- remove_thermal_profile();
fail_prep_thermal_profile:
-fail_prep_deepsleep:
-fail_prep_amplifier:
-fail_prep_hdmi:
platform_device_del(platform_device);
fail_platform_device2:
platform_device_put(platform_device);
@@ -1269,13 +1241,9 @@ module_init(alienware_wmi_init);
static void __exit alienware_wmi_exit(void)
{
- if (platform_device) {
- alienware_zone_exit(platform_device);
- remove_hdmi(platform_device);
- remove_thermal_profile();
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
- }
+ alienware_zone_exit(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
}
module_exit(alienware_wmi_exit);
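
The conversion above replaces manual sysfs_create_group()/sysfs_remove_group() calls with static groups attached through .dev_groups, gated by visibility callbacks. A minimal sketch of that pattern, assuming the DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE()/SYSFS_GROUP_VISIBLE() helpers used in the diff; the "demo" names are hypothetical:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static bool demo_feature_present;

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%d\n", demo_feature_present);
}
static DEVICE_ATTR_RO(demo);

/* the whole group appears only when the feature was detected */
static bool demo_group_visible(struct kobject *kobj)
{
	return demo_feature_present;
}
DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(demo);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};

static const struct attribute_group demo_group = {
	.name = "demo",
	.is_visible = SYSFS_GROUP_VISIBLE(demo),
	.attrs = demo_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL
};

/* the driver core creates and removes the groups around probe/remove */
static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.dev_groups = demo_groups,
	},
};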
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 0aeb8149c16b..8149be25fa26 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -163,7 +163,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
}
static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -176,7 +176,7 @@ static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -636,9 +636,9 @@ static struct notifier_block dcdbas_reboot_nb = {
.priority = INT_MIN
};
-static DCDBAS_BIN_ATTR_RW(smi_data);
+static const BIN_ATTR_ADMIN_RW(smi_data, 0);
-static struct bin_attribute *dcdbas_bin_attrs[] = {
+static const struct bin_attribute *const dcdbas_bin_attrs[] = {
&bin_attr_smi_data,
NULL
};
@@ -662,7 +662,7 @@ static struct attribute *dcdbas_dev_attrs[] = {
static const struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
- .bin_attrs = dcdbas_bin_attrs,
+ .bin_attrs_new = dcdbas_bin_attrs,
};
static int dcdbas_probe(struct platform_device *dev)
diff --git a/drivers/platform/x86/dell/dcdbas.h b/drivers/platform/x86/dell/dcdbas.h
index 942a23ddded0..a05d7f667586 100644
--- a/drivers/platform/x86/dell/dcdbas.h
+++ b/drivers/platform/x86/dell/dcdbas.h
@@ -56,14 +56,6 @@
#define DCDBAS_DEV_ATTR_WO(_name) \
DEVICE_ATTR(_name,0200,NULL,_name##_store);
-#define DCDBAS_BIN_ATTR_RW(_name) \
-struct bin_attribute bin_attr_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = 0600 }, \
- .read = _name##_read, \
- .write = _name##_write, \
-}
-
struct smi_cmd {
__u32 magic;
__u32 ebx;
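
A short sketch of the const bin_attribute pattern dcdbas adopts above, assuming BIN_ATTR_ADMIN_RW() wires the read/write handlers to <name>_read()/<name>_write() as in the diff; "demo" is hypothetical:

#include <linux/sysfs.h>

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t pos, size_t count)
{
	return 0;	/* nothing to return in this sketch */
}

static ssize_t demo_write(struct file *filp, struct kobject *kobj,
			  const struct bin_attribute *bin_attr,
			  char *buf, loff_t pos, size_t count)
{
	return count;	/* accept and discard */
}

/* mode 0600, size 0 (unlimited), handlers bound by name */
static const BIN_ATTR_ADMIN_RW(demo, 0);

static const struct bin_attribute *const demo_bin_attrs[] = {
	&bin_attr_demo,
	NULL
};

static const struct attribute_group demo_group = {
	.bin_attrs_new = demo_bin_attrs,
};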
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 5671bd0deee7..57748c3ea24f 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -103,15 +103,15 @@ static bool mute_led_registered;
struct battery_mode_info {
int token;
- const char *label;
+ enum power_supply_charge_type charge_type;
};
static const struct battery_mode_info battery_modes[] = {
- { BAT_PRI_AC_MODE_TOKEN, "Trickle" },
- { BAT_EXPRESS_MODE_TOKEN, "Fast" },
- { BAT_STANDARD_MODE_TOKEN, "Standard" },
- { BAT_ADAPTIVE_MODE_TOKEN, "Adaptive" },
- { BAT_CUSTOM_MODE_TOKEN, "Custom" },
+ { BAT_PRI_AC_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_TRICKLE },
+ { BAT_EXPRESS_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_FAST },
+ { BAT_STANDARD_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_STANDARD },
+ { BAT_ADAPTIVE_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE },
+ { BAT_CUSTOM_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_CUSTOM },
};
static u32 battery_supported_modes;
@@ -725,8 +725,8 @@ static void dell_update_rfkill(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -884,7 +884,7 @@ static int __init dell_setup_rfkill(void)
pr_warn("Unable to register dell rbtn notifier\n");
goto err_filter;
} else {
- ret = i8042_install_filter(dell_laptop_i8042_filter);
+ ret = i8042_install_filter(dell_laptop_i8042_filter, NULL);
if (ret) {
pr_warn("Unable to install key filter\n");
goto err_filter;
@@ -2261,46 +2261,42 @@ static ssize_t charge_types_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ssize_t count = 0;
+ enum power_supply_charge_type charge_type;
int i;
for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
- bool active;
+ charge_type = battery_modes[i].charge_type;
- if (!(battery_supported_modes & BIT(i)))
+ if (!(battery_supported_modes & BIT(charge_type)))
continue;
- active = dell_battery_mode_is_active(battery_modes[i].token);
- count += sysfs_emit_at(buf, count, active ? "[%s] " : "%s ",
- battery_modes[i].label);
- }
+ if (!dell_battery_mode_is_active(battery_modes[i].token))
+ continue;
- /* convert the last space to a newline */
- if (count > 0)
- count--;
- count += sysfs_emit_at(buf, count, "\n");
+ return power_supply_charge_types_show(dev, battery_supported_modes,
+ charge_type, buf);
+ }
- return count;
+ /* No active mode found */
+ return -EIO;
}
static ssize_t charge_types_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- bool matched = false;
- int err, i;
+ int charge_type, err, i;
- for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
- if (!(battery_supported_modes & BIT(i)))
- continue;
+ charge_type = power_supply_charge_types_parse(battery_supported_modes, buf);
+ if (charge_type < 0)
+ return charge_type;
- if (sysfs_streq(battery_modes[i].label, buf)) {
- matched = true;
+ for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
+ if (battery_modes[i].charge_type == charge_type)
break;
- }
}
- if (!matched)
- return -EINVAL;
+ if (i == ARRAY_SIZE(battery_modes))
+ return -ENOENT;
err = dell_battery_set_mode(battery_modes[i].token);
if (err)
@@ -2430,7 +2426,7 @@ static u32 __init battery_get_supported_modes(void)
for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
if (dell_smbios_find_token(battery_modes[i].token))
- modes |= BIT(i);
+ modes |= BIT(battery_modes[i].charge_type);
}
return modes;
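
A sketch of the charge_types sysfs pattern dell-laptop moves to above, assuming the power_supply_charge_types_show()/power_supply_charge_types_parse() helpers with the signatures used in the diff; the supported mask and the "demo" state are hypothetical:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/power_supply.h>
#include <linux/sysfs.h>

static u32 demo_supported = BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
			    BIT(POWER_SUPPLY_CHARGE_TYPE_FAST);
static enum power_supply_charge_type demo_active =
			    POWER_SUPPLY_CHARGE_TYPE_STANDARD;

static ssize_t charge_types_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* emits e.g. "[Standard] Fast\n", bracketing the active type */
	return power_supply_charge_types_show(dev, demo_supported,
					      demo_active, buf);
}

static ssize_t charge_types_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int type = power_supply_charge_types_parse(demo_supported, buf);

	if (type < 0)
		return type;	/* unknown or unsupported type name */

	demo_active = type;
	return size;
}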
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
new file mode 100644
index 000000000000..efe26d667973
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lis3lv02d i2c-client instantiation for ACPI SMO88xx devices without I2C resources.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device/bus.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include "dell-smo8800-ids.h"
+
+#define LIS3_WHO_AM_I 0x0f
+
+#define DELL_LIS3LV02D_DMI_ENTRY(product_name, i2c_addr) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), \
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, product_name), \
+ }, \
+ .driver_data = (void *)(uintptr_t)(i2c_addr), \
+ }
+
+/*
+ * The accelerometer's I2C address is specified in neither DMI nor ACPI,
+ * so a mapping table based on DMI product names is needed.
+ */
+static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
+ /*
 * The Dell platform team told us that these Latitude devices have
 * an ST Microelectronics accelerometer at I2C address 0x29.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5250", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5450", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5550", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440 ATG", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6540", 0x29),
+ /*
+ * Additional individual entries were added after verification.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude 5480", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 7590", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 9550", 0x29),
+ { }
+};
+
+static u8 i2c_addr;
+static struct i2c_client *i2c_dev;
+static bool notifier_registered;
+
+static bool probe_i2c_addr;
+module_param(probe_i2c_addr, bool, 0444);
+MODULE_PARM_DESC(probe_i2c_addr, "Probe the i801 I2C bus for the accelerometer on models where the address is unknown; this may be dangerous.");
+
+static int detect_lis3lv02d(struct i2c_adapter *adap, unsigned short addr)
+{
+ union i2c_smbus_data smbus_data;
+ int err;
+
+ dev_info(&adap->dev, "Probing for lis3lv02d on address 0x%02x\n", addr);
+
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, LIS3_WHO_AM_I,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (err < 0)
+ return 0; /* Not found */
+
+ /* valid who-am-i values are from drivers/misc/lis3lv02d/lis3lv02d.c */
+ switch (smbus_data.byte) {
+ case 0x32:
+ case 0x33:
+ case 0x3a:
+ case 0x3b:
+ break;
+ default:
+ dev_warn(&adap->dev, "Unknown who-am-i register value 0x%02x\n",
+ smbus_data.byte);
+ return 0; /* Not found */
+ }
+
+ dev_info(&adap->dev,
+ "Detected lis3lv02d on address 0x%02x, please report this upstream to platform-driver-x86@vger.kernel.org so that a quirk can be added\n",
+ addr);
+
+ return 1; /* Found */
+}
+
+static bool i2c_adapter_is_main_i801(struct i2c_adapter *adap)
+{
+ /*
+ * Only match the main I801 adapter and reject secondary adapters
+ * whose names start with "SMBus I801 IDF adapter".
+ */
+ return strstarts(adap->name, "SMBus I801 adapter");
+}
+
+static int find_i801(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap, **adap_ret = data;
+
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ return 0;
+
+ if (!i2c_adapter_is_main_i801(adap))
+ return 0;
+
+ *adap_ret = i2c_get_adapter(adap->nr);
+ return 1;
+}
+
+static void instantiate_i2c_client(struct work_struct *work)
+{
+ struct i2c_board_info info = { };
+ struct i2c_adapter *adap = NULL;
+
+ if (i2c_dev)
+ return;
+
+ /*
+ * Use bus_for_each_dev() rather than i2c_for_each_dev() to avoid
+ * a deadlock when find_i801() calls i2c_get_adapter().
+ */
+ bus_for_each_dev(&i2c_bus_type, NULL, &adap, find_i801);
+ if (!adap)
+ return;
+
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+
+ if (i2c_addr) {
+ info.addr = i2c_addr;
+ i2c_dev = i2c_new_client_device(adap, &info);
+ } else {
+ /* First try address 0x29 (most used) and then try 0x1d */
+ static const unsigned short addr_list[] = { 0x29, 0x1d, I2C_CLIENT_END };
+
+ i2c_dev = i2c_new_scanned_device(adap, &info, addr_list, detect_lis3lv02d);
+ }
+
+ if (IS_ERR(i2c_dev)) {
+ dev_err(&adap->dev, "error %ld registering i2c_client\n", PTR_ERR(i2c_dev));
+ i2c_dev = NULL;
+ } else {
+ dev_dbg(&adap->dev, "registered lis3lv02d on address 0x%02x\n", info.addr);
+ }
+
+ i2c_put_adapter(adap);
+}
+static DECLARE_WORK(i2c_work, instantiate_i2c_client);
+
+static int i2c_bus_notify(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct i2c_client *client;
+ struct i2c_adapter *adap;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ break;
+
+ if (i2c_adapter_is_main_i801(adap))
+ queue_work(system_long_wq, &i2c_work);
+ break;
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ client = i2c_verify_client(dev);
+ if (!client)
+ break;
+
+ if (i2c_dev == client) {
+ dev_dbg(&client->adapter->dev, "lis3lv02d i2c_client removed\n");
+ i2c_dev = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+static struct notifier_block i2c_nb = { .notifier_call = i2c_bus_notify };
+
+static int __init match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+static int __init dell_lis3lv02d_init(void)
+{
+ const struct dmi_system_id *lis3lv02d_dmi_id;
+ struct device *dev;
+ int err;
+
+ /*
+ * First check for a matching platform_device. This protects against
+ * SMO88xx ACPI fwnodes which actually do have an I2C resource; those
+ * will already have an i2c_client (not a platform_device) instantiated.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, smo8800_ids, match_acpi_device_ids);
+ if (!dev) {
+ pr_debug("No SMO88xx platform-device found\n");
+ return 0;
+ }
+ put_device(dev);
+
+ lis3lv02d_dmi_id = dmi_first_match(lis3lv02d_devices);
+ if (!lis3lv02d_dmi_id && !probe_i2c_addr) {
+ pr_warn("accelerometer is present on SMBus but its address is unknown, skipping registration\n");
+ pr_info("Pass dell_lis3lv02d.probe_i2c_addr=1 on the kernel command line to probe, this may be dangerous!\n");
+ return 0;
+ }
+
+ if (lis3lv02d_dmi_id)
+ i2c_addr = (long)lis3lv02d_dmi_id->driver_data;
+
+ /*
+ * Register i2c-bus notifier + queue initial scan for lis3lv02d
+ * i2c_client instantiation.
+ */
+ err = bus_register_notifier(&i2c_bus_type, &i2c_nb);
+ if (err)
+ return err;
+
+ notifier_registered = true;
+
+ queue_work(system_long_wq, &i2c_work);
+ return 0;
+}
+module_init(dell_lis3lv02d_init);
+
+static void __exit dell_lis3lv02d_module_exit(void)
+{
+ if (!notifier_registered)
+ return;
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c_nb);
+ cancel_work_sync(&i2c_work);
+ i2c_unregister_device(i2c_dev);
+}
+module_exit(dell_lis3lv02d_module_exit);
+
+MODULE_DESCRIPTION("lis3lv02d i2c-client instantiation for ACPI SMO88xx devices");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 972385ca1990..483240bb36e7 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -18,10 +18,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_profile.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dell-smbios.h"
+static struct platform_device *platform_device;
+static int supported_modes;
+
static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell Inc.",
@@ -105,8 +109,6 @@ MODULE_DEVICE_TABLE(dmi, dell_device_table);
#define DELL_ACC_SET_FIELD GENMASK(11, 8)
#define DELL_THERMAL_SUPPORTED GENMASK(3, 0)
-static struct platform_profile_handler *thermal_handler;
-
enum thermal_mode_bits {
DELL_BALANCED = BIT(0),
DELL_COOL_BOTTOM = BIT(1),
@@ -182,7 +184,7 @@ static int thermal_set_mode(enum thermal_mode_bits state)
return dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
}
-static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
switch (profile) {
@@ -199,7 +201,7 @@ static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
}
}
-static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int ret;
@@ -228,10 +230,30 @@ static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (supported_modes & DELL_QUIET)
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ if (supported_modes & DELL_COOL_BOTTOM)
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ if (supported_modes & DELL_BALANCED)
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ if (supported_modes & DELL_PERFORMANCE)
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dell_pc_platform_profile_ops = {
+ .probe = thermal_platform_profile_probe,
+ .profile_get = thermal_platform_profile_get,
+ .profile_set = thermal_platform_profile_set,
+};
+
static int thermal_init(void)
{
+ struct device *ppdev;
int ret;
- int supported_modes;
/* If thermal commands are not supported, exit without error */
if (!dell_smbios_class_is_supported(CLASS_INFO))
@@ -244,37 +266,28 @@ static int thermal_init(void)
if (!supported_modes)
return 0;
- thermal_handler = kzalloc(sizeof(*thermal_handler), GFP_KERNEL);
- if (!thermal_handler)
- return -ENOMEM;
- thermal_handler->profile_get = thermal_platform_profile_get;
- thermal_handler->profile_set = thermal_platform_profile_set;
+ platform_device = platform_device_register_simple("dell-pc", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(platform_device))
+ return PTR_ERR(platform_device);
- if (supported_modes & DELL_QUIET)
- set_bit(PLATFORM_PROFILE_QUIET, thermal_handler->choices);
- if (supported_modes & DELL_COOL_BOTTOM)
- set_bit(PLATFORM_PROFILE_COOL, thermal_handler->choices);
- if (supported_modes & DELL_BALANCED)
- set_bit(PLATFORM_PROFILE_BALANCED, thermal_handler->choices);
- if (supported_modes & DELL_PERFORMANCE)
- set_bit(PLATFORM_PROFILE_PERFORMANCE, thermal_handler->choices);
-
- /* Clean up if failed */
- ret = platform_profile_register(thermal_handler);
- if (ret) {
- kfree(thermal_handler);
- thermal_handler = NULL;
+ ppdev = devm_platform_profile_register(&platform_device->dev, "dell-pc",
+ NULL, &dell_pc_platform_profile_ops);
+ if (IS_ERR(ppdev)) {
+ ret = PTR_ERR(ppdev);
+ goto cleanup_platform_device;
}
+ return 0;
+
+cleanup_platform_device:
+ platform_device_unregister(platform_device);
+
return ret;
}
static void thermal_cleanup(void)
{
- if (thermal_handler) {
- platform_profile_remove();
- kfree(thermal_handler);
- }
+ platform_device_unregister(platform_device);
}
static int __init dell_init(void)
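
A sketch of the lifetime pattern dell-pc switches to above: a throwaway parent platform device owns the devm-registered profile handler, so the single platform_device_unregister() in thermal_cleanup() releases both. The "demo" wrapper is hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>

static struct platform_device *demo_pdev;

static int demo_register(const struct platform_profile_ops *ops)
{
	struct device *ppdev;

	demo_pdev = platform_device_register_simple("demo",
						    PLATFORM_DEVID_NONE,
						    NULL, 0);
	if (IS_ERR(demo_pdev))
		return PTR_ERR(demo_pdev);

	ppdev = devm_platform_profile_register(&demo_pdev->dev, "demo",
					       NULL, ops);
	if (IS_ERR(ppdev)) {
		platform_device_unregister(demo_pdev);
		return PTR_ERR(ppdev);
	}
	return 0;
}

static void demo_unregister(void)
{
	/* devm teardown also unregisters the profile handler */
	platform_device_unregister(demo_pdev);
}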
diff --git a/drivers/platform/x86/dell/dell-smo8800-ids.h b/drivers/platform/x86/dell/dell-smo8800-ids.h
new file mode 100644
index 000000000000..ec58e229ba7a
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smo8800-ids.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ACPI SMO88XX lis3lv02d freefall / accelerometer device-ids.
+ *
+ * Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
+ */
+#ifndef _DELL_SMO8800_IDS_H_
+#define _DELL_SMO8800_IDS_H_
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+static const struct acpi_device_id smo8800_ids[] = {
+ { "SMO8800" },
+ { "SMO8801" },
+ { "SMO8810" },
+ { "SMO8811" },
+ { "SMO8820" },
+ { "SMO8821" },
+ { "SMO8830" },
+ { "SMO8831" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smo8800_ids);
+
+#endif
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 87fe03f23f24..8872f9b57fce 100644
--- a/drivers/platform/x86/dell/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -14,10 +14,10 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include "dell-smo8800-ids.h"
struct smo8800_device {
u32 irq; /* acpi device irq */
@@ -163,20 +163,6 @@ static void smo8800_remove(struct platform_device *device)
dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
}
-/* NOTE: Keep this list in sync with drivers/i2c/busses/i2c-i801.c */
-static const struct acpi_device_id smo8800_ids[] = {
- { "SMO8800", 0 },
- { "SMO8801", 0 },
- { "SMO8810", 0 },
- { "SMO8811", 0 },
- { "SMO8820", 0 },
- { "SMO8821", 0 },
- { "SMO8830", 0 },
- { "SMO8831", 0 },
- { "", 0 },
-};
-MODULE_DEVICE_TABLE(acpi, smo8800_ids);
-
static struct platform_driver smo8800_driver = {
.probe = smo8800_probe,
.remove = smo8800_remove,
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 6e5dc7e3674f..50002ef13d5d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -159,7 +159,7 @@ static int dell_uart_set_bl_power(struct dell_uart_backlight *dell_bl, int power
set_power[0] = DELL_SOF(SET_CMD_LEN);
set_power[1] = CMD_SET_BL_POWER;
- set_power[2] = (power == FB_BLANK_UNBLANK) ? 1 : 0;
+ set_power[2] = (power == BACKLIGHT_POWER_ON) ? 1 : 0;
set_power[3] = dell_uart_checksum(set_power, 3);
ret = dell_uart_bl_command(dell_bl, set_power, SET_CMD_LEN, resp, SET_RESP_LEN);
@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
init_waitqueue_head(&dell_bl->wait_queue);
dell_bl->dev = dev;
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
/* 9600 bps, no flow control, these are the default but set them to be sure */
serdev_device_set_baudrate(serdev, 9600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, dell_bl);
- serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
get_version[0] = DELL_SOF(GET_CMD_LEN);
get_version[1] = CMD_GET_VERSION;
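
The reordering above is a probe-time race fix: drvdata and client ops must be set before the serdev is opened, otherwise a receive callback can fire against uninitialized state. A hedged sketch of the corrected ordering (the ops structure, data type, and receive_buf signature are assumptions):

#include <linux/serdev.h>

struct demo_bl_data {
	int power;
};

static size_t demo_receive_buf(struct serdev_device *serdev,
			       const u8 *buf, size_t count)
{
	return count;	/* consume everything in this sketch */
}

static const struct serdev_device_ops demo_serdev_ops = {
	.receive_buf = demo_receive_buf,
	.write_wakeup = serdev_device_write_wakeup,
};

static int demo_serdev_probe(struct serdev_device *serdev)
{
	struct demo_bl_data *data;

	data = devm_kzalloc(&serdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* wire up state first ... */
	serdev_device_set_drvdata(serdev, data);
	serdev_device_set_client_ops(serdev, &demo_serdev_ops);

	/* ... then open; RX callbacks may run from this point on */
	return devm_serdev_device_open(&serdev->dev, serdev);
}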
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 40ddc6eb7562..d00389b860e4 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
-static const struct class *fw_attr_class;
/**
@@ -541,15 +540,11 @@ static int __init sysman_init(void)
goto err_exit_bios_attr_pass_interface;
}
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_exit_bios_attr_pass_interface;
-
- wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(wmi_priv.class_dev)) {
ret = PTR_ERR(wmi_priv.class_dev);
- goto err_unregister_class;
+ goto err_exit_bios_attr_pass_interface;
}
wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
@@ -602,10 +597,7 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
-
-err_unregister_class:
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_exit_bios_attr_pass_interface:
exit_bios_attr_pass_interface();
@@ -619,8 +611,7 @@ err_exit_bios_attr_set_interface:
static void __exit sysman_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
exit_bios_attr_set_interface();
exit_bios_attr_pass_interface();
}
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 9f51e0fcab04..e30ca325938c 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -475,7 +475,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
}
static ssize_t data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -492,7 +492,7 @@ static ssize_t data_read(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return ret_count;
}
-static BIN_ATTR_RO(data, 0);
+static const BIN_ATTR_RO(data, 0);
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
@@ -530,7 +530,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
}
static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -540,7 +540,7 @@ static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
}
static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int rc = count;
@@ -597,10 +597,10 @@ static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RW(image_type, 0);
+static const BIN_ATTR_RW(image_type, 0);
static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -613,7 +613,7 @@ static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
}
static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
unsigned long temp;
@@ -626,9 +626,9 @@ static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return count;
}
-static BIN_ATTR_RW(packet_size, 0);
+static const BIN_ATTR_RW(packet_size, 0);
-static struct bin_attribute *rbu_bin_attrs[] = {
+static const struct bin_attribute *const rbu_bin_attrs[] = {
&bin_attr_data,
&bin_attr_image_type,
&bin_attr_packet_size,
@@ -636,7 +636,7 @@ static struct bin_attribute *rbu_bin_attrs[] = {
};
static const struct attribute_group rbu_group = {
- .bin_attrs = rbu_bin_attrs,
+ .bin_attrs_new = rbu_bin_attrs,
};
static int __init dcdrbu_init(void)
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index 182a07d8ae3d..736e96c186d9 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -2,51 +2,25 @@
/* Firmware attributes class helper module */
-#include <linux/mutex.h>
-#include <linux/device/class.h>
#include <linux/module.h>
#include "firmware_attributes_class.h"
-static DEFINE_MUTEX(fw_attr_lock);
-static int fw_attr_inuse;
-
-static const struct class firmware_attributes_class = {
+const struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
+EXPORT_SYMBOL_GPL(firmware_attributes_class);
-int fw_attributes_class_get(const struct class **fw_attr_class)
+static __init int fw_attributes_class_init(void)
{
- int err;
-
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) { /*first time class is being used*/
- err = class_register(&firmware_attributes_class);
- if (err) {
- mutex_unlock(&fw_attr_lock);
- return err;
- }
- }
- fw_attr_inuse++;
- *fw_attr_class = &firmware_attributes_class;
- mutex_unlock(&fw_attr_lock);
- return 0;
+ return class_register(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+module_init(fw_attributes_class_init);
-int fw_attributes_class_put(void)
+static __exit void fw_attributes_class_exit(void)
{
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) {
- mutex_unlock(&fw_attr_lock);
- return -EINVAL;
- }
- fw_attr_inuse--;
- if (!fw_attr_inuse) /* No more consumers */
- class_unregister(&firmware_attributes_class);
- mutex_unlock(&fw_attr_lock);
- return 0;
+ class_unregister(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_put);
+module_exit(fw_attributes_class_exit);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("Firmware attributes class helper module");
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
index 363c75f1ac1b..d27abe54fcf9 100644
--- a/drivers/platform/x86/firmware_attributes_class.h
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -5,7 +5,8 @@
#ifndef FW_ATTR_CLASS_H
#define FW_ATTR_CLASS_H
-int fw_attributes_class_get(const struct class **fw_attr_class);
-int fw_attributes_class_put(void);
+#include <linux/device/class.h>
+
+extern const struct class firmware_attributes_class;
#endif /* FW_ATTR_CLASS_H */
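
With the class exported and registered once at module init, consumers drop the get/put refcounting and reference it directly. A minimal hedged sketch of the simplified consumer side, mirroring the sysman and bioscfg changes; "demo" is hypothetical:

#include <linux/device.h>
#include <linux/kdev_t.h>
#include "firmware_attributes_class.h"

static struct device *demo_classdev;

static int demo_register(void)
{
	demo_classdev = device_create(&firmware_attributes_class, NULL,
				      MKDEV(0, 0), NULL, "demo");
	return PTR_ERR_OR_ZERO(demo_classdev);
}

static void demo_unregister(void)
{
	device_destroy(&firmware_attributes_class, MKDEV(0, 0));
}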
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ae992ac1ab4a..a0eae24ca9e6 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -505,8 +505,8 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
return -ENOMEM;
fujitsu_bl = priv;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
pr_info("ACPI: %s [%s]\n",
@@ -891,8 +891,8 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
fext = device;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
/* kfifo */
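
A small sketch of the strscpy() conversion above: the two-argument form infers the destination size at compile time and guarantees NUL-termination, which strcpy() does not. The buffer and names are arbitrary:

#include <linux/string.h>

static void demo_copy(const char *src)
{
	char name[16];

	/* equivalent to strscpy(name, src, sizeof(name)) */
	strscpy(name, src);
}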
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 2dc50152158a..0b277b7e37dd 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
-static const struct class *fw_attr_class;
-
ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -972,11 +970,7 @@ static int __init hp_init(void)
if (ret)
return ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_unregister_class;
-
- bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(bioscfg_drv.class_dev)) {
ret = PTR_ERR(bioscfg_drv.class_dev);
@@ -1043,10 +1037,9 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_unregister_class:
- fw_attributes_class_put();
hp_exit_attr_set_interface();
return ret;
@@ -1055,9 +1048,8 @@ err_unregister_class:
static void __exit hp_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
- fw_attributes_class_put();
hp_exit_attr_set_interface();
}
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 20c55bab3b8c..db5fdee2109c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -45,6 +45,10 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define HP_FAN_SPEED_AUTOMATIC 0x00
+#define HP_POWER_LIMIT_DEFAULT 0x00
+#define HP_POWER_LIMIT_NO_CHANGE 0xFF
+
#define ACPI_AC_CLASS "ac_adapter"
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
@@ -83,11 +87,16 @@ static const char * const omen_timed_thermal_profile_boards[] = {
"8BAD", "8A42", "8A15"
};
-/* DMI Board names of Victus laptops */
+/* DMI Board names of Victus 16-d1xxx laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
+/* DMI Board names of Victus 16-s1000 laptops */
+static const char * const victus_s_thermal_profile_boards[] = {
+ "8C9C"
+};
+
enum hp_wmi_radio {
HPWMI_WIFI = 0x0,
HPWMI_BLUETOOTH = 0x1,
@@ -147,12 +156,32 @@ enum hp_wmi_commandtype {
HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
+struct victus_power_limits {
+ u8 pl1;
+ u8 pl2;
+ u8 pl4;
+ u8 cpu_gpu_concurrent_limit;
+};
+
+struct victus_gpu_power_modes {
+ u8 ctgp_enable;
+ u8 ppab_enable;
+ u8 dstate;
+ u8 gpu_slowdown_temp;
+};
+
enum hp_wmi_gm_commandtype {
- HPWMI_FAN_SPEED_GET_QUERY = 0x11,
- HPWMI_SET_PERFORMANCE_MODE = 0x1A,
- HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
- HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
- HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+ HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+ HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+ HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+ HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_COUNT_GET_QUERY = 0x10,
+ HPWMI_GET_GPU_THERMAL_MODES_QUERY = 0x21,
+ HPWMI_SET_GPU_THERMAL_MODES_QUERY = 0x22,
+ HPWMI_SET_POWER_LIMITS_QUERY = 0x29,
+ HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY = 0x2D,
+ HPWMI_FAN_SPEED_SET_QUERY = 0x2E,
};
enum hp_wmi_command {
@@ -211,6 +240,11 @@ enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_QUIET = 0x03,
};
+enum hp_thermal_profile_victus_s {
+ HP_VICTUS_S_THERMAL_PROFILE_DEFAULT = 0x00,
+ HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE = 0x01,
+};
+
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
@@ -273,7 +307,7 @@ static DEFINE_MUTEX(active_platform_profile_lock);
static struct input_dev *hp_wmi_input_dev;
static struct input_dev *camera_shutter_input_dev;
static struct platform_device *hp_wmi_platform_dev;
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static struct notifier_block platform_power_source_nb;
static enum platform_profile_option active_platform_profile;
static bool platform_profile_support;
@@ -411,6 +445,26 @@ out_free:
return ret;
}
+/*
+ * Calling hp_wmi_get_fan_count_userdefine_trigger() also switches the laptop
+ * into, and/or keeps it in, the user-defined thermal and fan state instead of
+ * the fallback state. After a 120 second timeout, however, the laptop
+ * reverts to its fallback state.
+ */
+static int hp_wmi_get_fan_count_userdefine_trigger(void)
+{
+ u8 fan_data[4] = {};
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_COUNT_GET_QUERY, HPWMI_GM,
+ &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[0]; /* The other bytes do not carry the fan count */
+}
+
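Because the user-defined state only survives for roughly 120 seconds after
the last trigger, a driver that wanted to hold a manual fan state would have
to re-issue this query periodically. A purely hypothetical sketch using
delayed work (the work item and the 100 second period are illustrative, not
part of this driver):

    #include <linux/workqueue.h>

    static void fan_state_keepalive(struct work_struct *work);
    static DECLARE_DELAYED_WORK(fan_keepalive_work, fan_state_keepalive);

    static void fan_state_keepalive(struct work_struct *work)
    {
            /* Re-arm the user-defined state before the ~120 s fallback hits */
            hp_wmi_get_fan_count_userdefine_trigger();
            schedule_delayed_work(&fan_keepalive_work, 100 * HZ);
    }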
static int hp_wmi_get_fan_speed(int fan)
{
u8 fsh, fsl;
@@ -429,6 +483,23 @@ static int hp_wmi_get_fan_speed(int fan)
return (fsh << 8) | fsl;
}
+static int hp_wmi_get_fan_speed_victus_s(int fan)
+{
+ u8 fan_data[128] = {};
+ int ret;
+
+ if (fan < 0 || fan >= sizeof(fan_data))
+ return -EINVAL;
+
+ ret = hp_wmi_perform_query(HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY,
+ HPWMI_GM, &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[fan] * 100;
+}
+
static int hp_wmi_read_int(int query)
{
int val = 0, ret;
@@ -557,6 +628,30 @@ static int hp_wmi_fan_speed_max_set(int enabled)
return enabled;
}
+static int hp_wmi_fan_speed_reset(void)
+{
+ u8 fan_speed[2] = { HP_FAN_SPEED_AUTOMATIC, HP_FAN_SPEED_AUTOMATIC };
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_SET_QUERY, HPWMI_GM,
+ &fan_speed, sizeof(fan_speed), 0);
+
+ return ret;
+}
+
+static int hp_wmi_fan_speed_max_reset(void)
+{
+ int ret;
+
+ ret = hp_wmi_fan_speed_max_set(0);
+ if (ret)
+ return ret;
+
+ /* Disabling max fan speed on Victus s1xxx laptops needs a second step: */
+ ret = hp_wmi_fan_speed_reset();
+ return ret;
+}
+
static int hp_wmi_fan_speed_max_get(void)
{
int val = 0, ret;
@@ -1221,7 +1316,7 @@ static int platform_profile_omen_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+static int platform_profile_omen_get(struct device *dev,
enum platform_profile_option *profile)
{
/*
@@ -1318,7 +1413,7 @@ static int platform_profile_omen_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+static int platform_profile_omen_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1345,7 +1440,7 @@ static int thermal_profile_set(int thermal_profile)
sizeof(thermal_profile), 0);
}
-static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int tp;
@@ -1374,7 +1469,7 @@ static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int err, tp;
@@ -1440,11 +1535,11 @@ static int platform_profile_victus_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_victus_get(struct platform_profile_handler *pprof,
+static int platform_profile_victus_get(struct device *dev,
enum platform_profile_option *profile)
{
/* Same behaviour as platform_profile_omen_get */
- return platform_profile_omen_get(pprof, profile);
+ return platform_profile_omen_get(dev, profile);
}
static int platform_profile_victus_set_ec(enum platform_profile_option profile)
@@ -1472,7 +1567,162 @@ static int platform_profile_victus_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_victus_set(struct platform_profile_handler *pprof,
+static bool is_victus_s_thermal_profile(void)
+{
+ const char *board_name;
+
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board_name)
+ return false;
+
+ return match_string(victus_s_thermal_profile_boards,
+ ARRAY_SIZE(victus_s_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static int victus_s_gpu_thermal_profile_get(bool *ctgp_enable,
+ bool *ppab_enable,
+ u8 *dstate,
+ u8 *gpu_slowdown_temp)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_GET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes),
+ sizeof(gpu_power_modes));
+ if (ret == 0) {
+ *ctgp_enable = gpu_power_modes.ctgp_enable ? true : false;
+ *ppab_enable = gpu_power_modes.ppab_enable ? true : false;
+ *dstate = gpu_power_modes.dstate;
+ *gpu_slowdown_temp = gpu_power_modes.gpu_slowdown_temp;
+ }
+
+ return ret;
+}
+
+static int victus_s_gpu_thermal_profile_set(bool ctgp_enable,
+ bool ppab_enable,
+ u8 dstate)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ bool current_ctgp_state, current_ppab_state;
+ u8 current_dstate, current_gpu_slowdown_temp;
+
+ /* Retrieve the GPU slowdown temperature so it can be kept unchanged */
+ ret = victus_s_gpu_thermal_profile_get(&current_ctgp_state,
+ &current_ppab_state,
+ &current_dstate,
+ &current_gpu_slowdown_temp);
+ if (ret < 0) {
+ pr_warn("GPU modes not updated, unable to get slowdown temp\n");
+ return ret;
+ }
+
+ gpu_power_modes.ctgp_enable = ctgp_enable ? 0x01 : 0x00;
+ gpu_power_modes.ppab_enable = ppab_enable ? 0x01 : 0x00;
+ gpu_power_modes.dstate = dstate;
+ gpu_power_modes.gpu_slowdown_temp = current_gpu_slowdown_temp;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes), 0);
+
+ return ret;
+}
+
+/* Note: HP_POWER_LIMIT_DEFAULT can be used to restore default PL1 and PL2 */
+static int victus_s_set_cpu_pl1_pl2(u8 pl1, u8 pl2)
+{
+ struct victus_power_limits power_limits;
+ int ret;
+
+ /* We need to know both PL1 and PL2 values in order to check them */
+ if (pl1 == HP_POWER_LIMIT_NO_CHANGE || pl2 == HP_POWER_LIMIT_NO_CHANGE)
+ return -EINVAL;
+
+ /* PL2 is not supposed to be lower than PL1 */
+ if (pl2 < pl1)
+ return -EINVAL;
+
+ power_limits.pl1 = pl1;
+ power_limits.pl2 = pl2;
+ power_limits.pl4 = HP_POWER_LIMIT_NO_CHANGE;
+ power_limits.cpu_gpu_concurrent_limit = HP_POWER_LIMIT_NO_CHANGE;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_POWER_LIMITS_QUERY, HPWMI_GM,
+ &power_limits, sizeof(power_limits), 0);
+
+ return ret;
+}
+
+static int platform_profile_victus_s_set_ec(enum platform_profile_option profile)
+{
+ bool gpu_ctgp_enable, gpu_ppab_enable;
+ u8 gpu_dstate; /* Testing shows 1 = 100%, 2 = 50%, 3 = 25%, 4 = 12.5% */
+ int err, tp;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE;
+ gpu_ctgp_enable = true;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = false;
+ gpu_dstate = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hp_wmi_get_fan_count_userdefine_trigger();
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0) {
+ pr_err("Failed to set platform profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ err = victus_s_gpu_thermal_profile_set(gpu_ctgp_enable,
+ gpu_ppab_enable,
+ gpu_dstate);
+ if (err < 0) {
+ pr_err("Failed to set GPU profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int platform_profile_victus_s_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ int err;
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ err = platform_profile_victus_s_set_ec(profile);
+ if (err < 0)
+ return err;
+
+ active_platform_profile = profile;
+
+ return 0;
+}
+
+static int platform_profile_victus_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1488,6 +1738,26 @@ static int platform_profile_victus_set(struct platform_profile_handler *pprof,
return 0;
}
+static int hp_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (is_omen_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ } else if (is_victus_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ } else if (is_victus_s_thermal_profile()) {
+ /* Add an equivalent of the HP Omen software's ECO mode: */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ } else {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ }
+
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
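The choices bitmap filled in by this probe callback is what userspace
ultimately sees through the platform profile ABI. A small userspace sketch
reading it back (error handling trimmed for brevity):

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/firmware/acpi/platform_profile_choices", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("available profiles: %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }

On a Victus 16-s1000 board this would list low-power, balanced and
performance, matching the bits set above.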
static int omen_powersource_event(struct notifier_block *nb,
unsigned long value,
void *data)
@@ -1545,6 +1815,39 @@ static int omen_powersource_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int victus_s_powersource_event(struct notifier_block *nb,
+ unsigned long value,
+ void *data)
+{
+ struct acpi_bus_event *event_entry = data;
+ int err;
+
+ if (strcmp(event_entry->device_class, ACPI_AC_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ pr_debug("Received power source device event\n");
+
+ /*
+ * Switching to the battery power source while Performance mode is active
+ * requires manually re-applying the CPU power limits, and the same applies
+ * when switching to the AC power source. The other modes behave correctly
+ * without any manual action.
+ * Seen on HP 16-s1034nf (board 8C9C) with F.11 and F.13 BIOS versions.
+ */
+
+ if (active_platform_profile == PLATFORM_PROFILE_PERFORMANCE) {
+ pr_debug("Triggering CPU PL1/PL2 actualization\n");
+ err = victus_s_set_cpu_pl1_pl2(HP_POWER_LIMIT_DEFAULT,
+ HP_POWER_LIMIT_DEFAULT);
+ if (err)
+ pr_warn("Failed to actualize power limits: %d\n", err);
+
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
static int omen_register_powersource_event_handler(void)
{
int err;
@@ -1560,13 +1863,57 @@ static int omen_register_powersource_event_handler(void)
return 0;
}
+static int victus_s_register_powersource_event_handler(void)
+{
+ int err;
+
+ platform_power_source_nb.notifier_call = victus_s_powersource_event;
+ err = register_acpi_notifier(&platform_power_source_nb);
+ if (err < 0) {
+ pr_warn("Failed to install ACPI power source notify handler\n");
+ return err;
+ }
+
+ return 0;
+}
+
static inline void omen_unregister_powersource_event_handler(void)
{
unregister_acpi_notifier(&platform_power_source_nb);
}
-static int thermal_profile_setup(void)
+static inline void victus_s_unregister_powersource_event_handler(void)
{
+ unregister_acpi_notifier(&platform_power_source_nb);
+}
+
+static const struct platform_profile_ops platform_profile_omen_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_omen_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_victus_get,
+ .profile_set = platform_profile_victus_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_s_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_victus_s_set,
+};
+
+static const struct platform_profile_ops hp_wmi_platform_profile_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = hp_wmi_platform_profile_get,
+ .profile_set = hp_wmi_platform_profile_set,
+};
+
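All four variants now share the same shape: a const ops table wired up to
devm_platform_profile_register(), replacing the old global
platform_profile_handler with its embedded function pointers and choices
bitmap. A minimal sketch of the pattern for a hypothetical driver (the
example_get/example_set callbacks are assumed):

    static int example_probe_cb(void *drvdata, unsigned long *choices)
    {
            set_bit(PLATFORM_PROFILE_BALANCED, choices);
            set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
            return 0;
    }

    static const struct platform_profile_ops example_ops = {
            .probe       = example_probe_cb,
            .profile_get = example_get,
            .profile_set = example_set,
    };

    /* in probe; registration is devm-managed, so no explicit remove */
    ppdev = devm_platform_profile_register(&pdev->dev, "example",
                                           drvdata, &example_ops);
    if (IS_ERR(ppdev))
            return PTR_ERR(ppdev);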
+static int thermal_profile_setup(struct platform_device *device)
+{
+ const struct platform_profile_ops *ops;
int err, tp;
if (is_omen_thermal_profile()) {
@@ -1582,10 +1929,7 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_omen_get;
- platform_profile_handler.profile_set = platform_profile_omen_set;
-
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &platform_profile_omen_ops;
} else if (is_victus_thermal_profile()) {
err = platform_profile_victus_get_ec(&active_platform_profile);
if (err < 0)
@@ -1599,10 +1943,19 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_victus_get;
- platform_profile_handler.profile_set = platform_profile_victus_set;
+ ops = &platform_profile_victus_ops;
+ } else if (is_victus_s_thermal_profile()) {
+ /*
+ * The laptop's current thermal profile cannot be retrieved during
+ * setup, so set it to Balanced by default.
+ */
+ active_platform_profile = PLATFORM_PROFILE_BALANCED;
+
+ err = platform_profile_victus_s_set_ec(active_platform_profile);
+ if (err < 0)
+ return err;
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
+ ops = &platform_profile_victus_s_ops;
} else {
tp = thermal_profile_get();
@@ -1617,20 +1970,15 @@ static int thermal_profile_setup(void)
if (err)
return err;
- platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
- platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &hp_wmi_platform_profile_ops;
}
- set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(&device->dev, "hp-wmi",
+ NULL, ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
+ pr_info("Registered as platform profile handler\n");
platform_profile_support = true;
return 0;
@@ -1663,7 +2011,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (err < 0)
return err;
- thermal_profile_setup();
+ thermal_profile_setup(device);
return 0;
}
@@ -1689,9 +2037,6 @@ static void __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
-
- if (platform_profile_support)
- platform_profile_remove();
}
static int hp_wmi_resume_handler(struct device *device)
@@ -1759,8 +2104,13 @@ static umode_t hp_wmi_hwmon_is_visible(const void *data,
case hwmon_pwm:
return 0644;
case hwmon_fan:
- if (hp_wmi_get_fan_speed(channel) >= 0)
- return 0444;
+ if (is_victus_s_thermal_profile()) {
+ if (hp_wmi_get_fan_speed_victus_s(channel) >= 0)
+ return 0444;
+ } else {
+ if (hp_wmi_get_fan_speed(channel) >= 0)
+ return 0444;
+ }
break;
default:
return 0;
@@ -1776,8 +2126,10 @@ static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_fan:
- ret = hp_wmi_get_fan_speed(channel);
-
+ if (is_victus_s_thermal_profile())
+ ret = hp_wmi_get_fan_speed_victus_s(channel);
+ else
+ ret = hp_wmi_get_fan_speed(channel);
if (ret < 0)
return ret;
*val = ret;
@@ -1810,11 +2162,17 @@ static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_pwm:
switch (val) {
case 0:
+ if (is_victus_s_thermal_profile())
+ hp_wmi_get_fan_count_userdefine_trigger();
/* 0 is no fan speed control (max), which is 1 for us */
return hp_wmi_fan_speed_max_set(1);
case 2:
/* 2 is automatic speed control, which is 0 for us */
- return hp_wmi_fan_speed_max_set(0);
+ if (is_victus_s_thermal_profile()) {
+ hp_wmi_get_fan_count_userdefine_trigger();
+ return hp_wmi_fan_speed_max_reset();
+ }
+ return hp_wmi_fan_speed_max_set(0);
default:
/* we don't support manual fan speed control */
return -EINVAL;
@@ -1893,6 +2251,10 @@ static int __init hp_wmi_init(void)
err = omen_register_powersource_event_handler();
if (err)
goto err_unregister_device;
+ } else if (is_victus_s_thermal_profile()) {
+ err = victus_s_register_powersource_event_handler();
+ if (err)
+ goto err_unregister_device;
}
return 0;
@@ -1912,6 +2274,9 @@ static void __exit hp_wmi_exit(void)
if (is_omen_thermal_profile() || is_victus_thermal_profile())
omen_unregister_powersource_event_handler();
+ if (is_victus_s_thermal_profile())
+ victus_s_unregister_powersource_event_handler();
+
if (wmi_has_guid(HPWMI_EVENT_GUID))
hp_wmi_input_destroy();
diff --git a/drivers/platform/x86/hp/hp_accel.c b/drivers/platform/x86/hp/hp_accel.c
index 39a6530f5072..10d5af18d639 100644
--- a/drivers/platform/x86/hp/hp_accel.c
+++ b/drivers/platform/x86/hp/hp_accel.c
@@ -267,7 +267,7 @@ static struct delayed_led_classdev hpled_led = {
};
static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -326,7 +326,7 @@ static int lis3lv02d_probe(struct platform_device *device)
/* filter to remove HPQ6000 accelerometer data
* from keyboard bus stream */
if (strstr(dev_name(&device->dev), "HPQ6000"))
- i8042_install_filter(hp_accel_i8042_filter);
+ i8042_install_filter(hp_accel_i8042_filter, NULL);
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index e980dd18e5f6..30bd366d7b58 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -142,7 +142,7 @@ enum {
struct ideapad_dytc_priv {
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile device */
struct mutex mutex; /* protects the DYTC interface */
struct ideapad_private *priv;
};
@@ -933,10 +933,10 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
*profile = dytc->current_profile;
return 0;
@@ -986,10 +986,10 @@ static int dytc_cql_command(struct ideapad_private *priv, unsigned long cmd,
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
struct ideapad_private *priv = dytc->priv;
unsigned long output;
int err;
@@ -1023,6 +1023,15 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
return -EINTR;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
static void dytc_profile_refresh(struct ideapad_private *priv)
{
enum platform_profile_option profile;
@@ -1041,7 +1050,7 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
if (profile != priv->dytc->current_profile) {
priv->dytc->current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(priv->dytc->ppdev);
}
}
@@ -1063,6 +1072,12 @@ static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
{}
};
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static int ideapad_dytc_profile_init(struct ideapad_private *priv)
{
int err, dytc_version;
@@ -1103,18 +1118,15 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
mutex_init(&priv->dytc->mutex);
priv->dytc->priv = priv;
- priv->dytc->pprof.profile_get = dytc_profile_get;
- priv->dytc->pprof.profile_set = dytc_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->dytc->pprof.choices);
/* Create platform_profile structure and register */
- err = platform_profile_register(&priv->dytc->pprof);
- if (err)
+ priv->dytc->ppdev = devm_platform_profile_register(&priv->platform_device->dev,
+ "ideapad-laptop", priv->dytc,
+ &dytc_profile_ops);
+ if (IS_ERR(priv->dytc->ppdev)) {
+ err = PTR_ERR(priv->dytc->ppdev);
goto pp_reg_failed;
+ }
/* Ensure initial values are correct */
dytc_profile_refresh(priv);
@@ -1134,7 +1146,6 @@ static void ideapad_dytc_profile_exit(struct ideapad_private *priv)
if (!priv->dytc)
return;
- platform_profile_remove();
mutex_destroy(&priv->dytc->mutex);
kfree(priv->dytc);
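The drvdata pointer handed to devm_platform_profile_register() above is the
same pointer the ops callbacks receive back through dev_get_drvdata(), which
is what makes the old container_of() lookup unnecessary. The round trip, as
used in this hunk:

    /* registration: the third argument becomes the callbacks' drvdata */
    ppdev = devm_platform_profile_register(dev, "ideapad-laptop",
                                           priv->dytc, &dytc_profile_ops);

    /* callback: the same pointer comes back via the class device */
    struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);

And since registration is now devm-managed, the exit path only has to tear
down what it still owns (the mutex and the allocation).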
diff --git a/drivers/platform/x86/inspur_platform_profile.c b/drivers/platform/x86/inspur_platform_profile.c
index 8440defa6788..e02f5a55a6c5 100644
--- a/drivers/platform/x86/inspur_platform_profile.c
+++ b/drivers/platform/x86/inspur_platform_profile.c
@@ -32,7 +32,7 @@ enum inspur_tmp_profile {
struct inspur_wmi_priv {
struct wmi_device *wdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
};
static int inspur_wmi_perform_query(struct wmi_device *wdev,
@@ -84,11 +84,10 @@ out_free:
* 0x0: No Error
* 0x1: Error
*/
-static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -132,11 +131,10 @@ static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
* 0x1: Performance Mode
* 0x2: Power Saver Mode
*/
-static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -166,6 +164,21 @@ static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int inspur_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops inspur_platform_profile_ops = {
+ .probe = inspur_platform_profile_probe,
+ .profile_get = inspur_platform_profile_get,
+ .profile_set = inspur_platform_profile_set,
+};
+
static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct inspur_wmi_priv *priv;
@@ -177,19 +190,10 @@ static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
priv->wdev = wdev;
dev_set_drvdata(&wdev->dev, priv);
- priv->handler.profile_get = inspur_platform_profile_get;
- priv->handler.profile_set = inspur_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->handler.choices);
+ priv->ppdev = devm_platform_profile_register(&wdev->dev, "inspur-wmi", priv,
+ &inspur_platform_profile_ops);
- return platform_profile_register(&priv->handler);
-}
-
-static void inspur_wmi_remove(struct wmi_device *wdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(priv->ppdev);
}
static const struct wmi_device_id inspur_wmi_id_table[] = {
@@ -206,7 +210,6 @@ static struct wmi_driver inspur_wmi_driver = {
},
.id_table = inspur_wmi_id_table,
.probe = inspur_wmi_probe,
- .remove = inspur_wmi_remove,
.no_singleton = true,
};
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index eb698dcb9af9..19a2246f2770 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -83,6 +83,7 @@ config INTEL_BXTWC_PMIC_TMU
config INTEL_BYTCRC_PWRSRC
tristate "Intel Bay Trail Crystal Cove power source driver"
depends on INTEL_SOC_PMIC
+ depends on POWER_SUPPLY
help
This option adds a power source driver for Crystal Cove PMICs
on Intel Bay Trail devices.
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
index 3edc2a9dab38..68ac040082df 100644
--- a/drivers/platform/x86/intel/bytcrc_pwrsrc.c
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -8,13 +8,22 @@
* Copyright (C) 2013 Intel Corporation
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
+#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#define CRYSTALCOVE_PWRSRC_IRQ 0x03
#define CRYSTALCOVE_SPWRSRC_REG 0x1E
+#define CRYSTALCOVE_SPWRSRC_USB BIT(0)
+#define CRYSTALCOVE_SPWRSRC_DC BIT(1)
+#define CRYSTALCOVE_SPWRSRC_BATTERY BIT(2)
#define CRYSTALCOVE_RESETSRC0_REG 0x20
#define CRYSTALCOVE_RESETSRC1_REG 0x21
#define CRYSTALCOVE_WAKESRC_REG 0x22
@@ -22,6 +31,7 @@
struct crc_pwrsrc_data {
struct regmap *regmap;
struct dentry *debug_dentry;
+ struct power_supply *psy;
unsigned int resetsrc0;
unsigned int resetsrc1;
unsigned int wakesrc;
@@ -118,13 +128,60 @@ static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
return regmap_write(data->regmap, reg, *val);
}
+static irqreturn_t crc_pwrsrc_irq_handler(int irq, void *_data)
+{
+ struct crc_pwrsrc_data *data = _data;
+ unsigned int irq_mask;
+
+ if (regmap_read(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, &irq_mask))
+ return IRQ_NONE;
+
+ regmap_write(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, irq_mask);
+
+ power_supply_changed(data->psy);
+ return IRQ_HANDLED;
+}
+
+static int crc_pwrsrc_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct crc_pwrsrc_data *data = power_supply_get_drvdata(psy);
+ unsigned int pwrsrc;
+ int ret;
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &pwrsrc);
+ if (ret)
+ return ret;
+
+ val->intval = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB |
+ CRYSTALCOVE_SPWRSRC_DC));
+ return 0;
+}
+
+static const enum power_supply_property crc_pwrsrc_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc crc_pwrsrc_psy_desc = {
+ .name = "crystal_cove_pwrsrc",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = crc_pwrsrc_psy_props,
+ .num_properties = ARRAY_SIZE(crc_pwrsrc_psy_props),
+ .get_property = crc_pwrsrc_psy_get_property,
+};
+
static int crc_pwrsrc_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
struct crc_pwrsrc_data *data;
- int ret;
+ int irq, ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -149,6 +206,24 @@ static int crc_pwrsrc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (device_property_read_bool(dev->parent, "linux,register-pwrsrc-power_supply")) {
+ struct power_supply_config psy_cfg = { .drv_data = data };
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data->psy = devm_power_supply_register(dev, &crc_pwrsrc_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power-supply\n");
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ crc_pwrsrc_irq_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+ }
+
data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 927a2993f616..88a1a9ff2f34 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -139,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
+ {
+ .ident = "Microsoft Surface Go 4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 5c3c0dfa1bf8..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -23,12 +23,14 @@
* IFS Image
* ---------
*
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under the "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
* family-model-stepping. IFS Images are not applicable for some test types.
* Wherever applicable the sysfs directory would provide a "current_batch" file
* (see below) for loading the image.
*
+ * .. [#f1] https://intel.com/InFieldScan
*
* IFS Image Loading
* -----------------
@@ -125,9 +127,6 @@
* 2) Hardware allows for some number of cores to be tested in parallel.
* The driver does not make use of this, it only tests one core at a time.
*
- * .. [#f1] https://github.com/intel/TBD
- *
- *
* Structural Based Functional Test at Field (SBAF):
* -------------------------------------------------
*
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 0cc80603a8a9..3b48cd7a4075 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -83,8 +83,12 @@ static void int0002_irq_ack(struct irq_data *data)
static void int0002_irq_unmask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
+ gpiochip_enable_irq(gc, hwirq);
+
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
@@ -92,11 +96,15 @@ static void int0002_irq_unmask(struct irq_data *data)
static void int0002_irq_mask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
@@ -140,12 +148,14 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_irqchip = {
+static const struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
.irq_unmask = int0002_irq_unmask,
.irq_set_wake = int0002_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
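Marking the irq_chip const with IRQCHIP_IMMUTABLE follows the gpiolib
immutable-irqchip conversion: the core no longer patches the chip at
runtime, so the driver must account for the IRQ itself by calling
gpiochip_enable_irq()/gpiochip_disable_irq() in its unmask/mask paths (as
added above) and pull in the resource helpers. The skeleton of the pattern:

    static void example_irq_unmask(struct irq_data *data)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

            gpiochip_enable_irq(gc, irqd_to_hwirq(data));
            /* ... unmask in hardware ... */
    }

    static void example_irq_mask(struct irq_data *data)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

            /* ... mask in hardware ... */
            gpiochip_disable_irq(gc, irqd_to_hwirq(data));
    }

    static const struct irq_chip example_irqchip = {
            .irq_mask   = example_irq_mask,
            .irq_unmask = example_irq_unmask,
            .flags      = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };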
static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
@@ -203,7 +213,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = &int0002_irqchip;
+ gpio_irq_chip_set_chip(girq, &int0002_irqchip);
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index b3a2578e06c1..1638be8fa71e 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -70,6 +70,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return -ENODEV;
}
+ dev_dbg(dev, "Sensor name %s\n", acpi_dev_name(sensor));
+
*name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(sensor));
if (!*name_ret)
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index d881b2cfcdfc..092252eb95a8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -55,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -70,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity);
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
int ret;
@@ -87,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, polarity);
+ agpio, func, gpio_flags);
if (ret)
return ret;
@@ -100,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -111,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
if (ret)
return ERR_PTR(ret);
@@ -122,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return desc;
}
-static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
+/**
+ * struct int3472_gpio_map - Map GPIOs to whatever is expected by the
+ * sensor driver (as in DT bindings)
+ * @hid: The ACPI HID of the device without the instance number e.g. INT347E
+ * @type_from: The GPIO type from ACPI ?SDT
+ * @type_to: The assigned GPIO type, typically same as @type_from
+ * @func: The function, e.g. "enable"
+ * @polarity_low: Use GPIO_ACTIVE_LOW if true,
+ * GPIO_ACTIVE_HIGH otherwise
+ */
+struct int3472_gpio_map {
+ const char *hid;
+ u8 type_from;
+ u8 type_to;
+ bool polarity_low;
+ const char *func;
+};
+
+static const struct int3472_gpio_map int3472_gpio_map[] = {
+ { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
+};
+
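This table remaps a firmware-reported GPIO to the name and polarity a sensor
driver's DT bindings expect, keyed on the sensor's ACPI HID. Extending it is
a one-line change; a purely hypothetical entry (the "INT9999" HID and
"shutdown" function are invented for illustration):

    static const struct int3472_gpio_map int3472_gpio_map[] = {
            { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET,
              false, "enable" },
            /* hypothetical: active-low "shutdown" instead of "powerdown" */
            { "INT9999", INT3472_GPIO_TYPE_POWERDOWN,
              INT3472_GPIO_TYPE_POWERDOWN, true, "shutdown" },
    };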
+static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **func, unsigned long *gpio_flags)
{
- switch (type) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
+ /*
+ * Map the firmware-provided GPIO to whatever a driver expects
+ * (as in DT bindings). First check if the type matches with the
+ * GPIO map, then further check that the device _HID matches.
+ */
+ if (*type != int3472_gpio_map[i].type_from)
+ continue;
+
+ if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
+ continue;
+
+ *type = int3472_gpio_map[i].type_to;
+ *gpio_flags = int3472_gpio_map[i].polarity_low ?
+ GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
+ *func = int3472_gpio_map[i].func;
+ return;
+ }
+
+ switch (*type) {
case INT3472_GPIO_TYPE_RESET:
*func = "reset";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
*func = "powerdown";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
*func = "clk-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
*func = "privacy-led";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*func = "power-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
*func = "unknown";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
}
@@ -178,11 +223,11 @@ static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polar
* to create clocks and regulators via the usual frameworks.
*
* Return:
- * * 1 - To continue the loop
- * * 0 - When all resources found are handled properly.
- * * -EINVAL - If the resource is not a GPIO IO resource
- * * -ENODEV - If the resource has no corresponding _DSM entry
- * * -Other - Errors propagated from one of the sub-functions.
+ * * 1      - Continue the loop without adding a copy of the resource to
+ *            the list passed to acpi_dev_get_resources()
+ * * 0      - Continue the loop after adding a copy of the resource to
+ *            the list passed to acpi_dev_get_resources()
+ * * -errno - Error, break loop
*/
static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
void *data)
@@ -194,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
struct gpio_desc *gpio;
const char *err_msg;
const char *func;
- u32 polarity;
+ unsigned long gpio_flags;
int ret;
if (!acpi_gpio_get_io_resource(ares, &agpio))
@@ -217,26 +262,26 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(type, &func, &polarity);
+ int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
- if (pin != agpio->pin_table[0])
- dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin,
- agpio->pin_table[0]);
+ /* Pin field is not really used under Windows and wraps around at 8 bits */
+ if (pin != (agpio->pin_table[0] & 0xff))
+ dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
+ func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
- polarity ^= GPIO_ACTIVE_LOW;
+ gpio_flags ^= GPIO_ACTIVE_LOW;
dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
agpio->resource_source.string_ptr, agpio->pin_table[0],
- str_high_low(polarity == GPIO_ACTIVE_HIGH));
+ str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -244,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, polarity);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
@@ -289,7 +334,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
if (ret < 0)
return dev_err_probe(int3472->dev, ret, err_msg);
- return ret;
+ /* Tell acpi_dev_get_resources() to not make a copy of the resource */
+ return 1;
}
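The return convention documented above is the acpi_dev_get_resources()
pre-processing contract: 1 consumes the resource (no copy is added to the
list), 0 lets the core add a copy, and a negative errno aborts the walk. A
minimal sketch of a callback honoring it (the function name is hypothetical):

    static int example_handle_resource(struct acpi_resource *ares, void *data)
    {
            struct acpi_resource_gpio *agpio;

            /* Not a GpioIo resource: let the core copy it to the list */
            if (!acpi_gpio_get_io_resource(ares, &agpio))
                    return 0;

            /* ... claim and handle the GPIO here ... */

            return 1; /* handled: don't add a copy to the list */
    }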
static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
@@ -336,6 +382,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
struct int3472_cldb cldb;
int ret;
+ if (!adev)
+ return -ENODEV;
+
ret = skl_int3472_fill_cldb(adev, &cldb);
if (ret) {
dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 1e107fd49f82..81ac4c691963 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
int ret;
int i;
+ if (!adev)
+ return -ENODEV;
+
n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
if (n_consumers < 0)
return n_consumers;
diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c
index 691d43c3592c..2b55347a5a93 100644
--- a/drivers/platform/x86/intel/plr_tpmi.c
+++ b/drivers/platform/x86/intel/plr_tpmi.c
@@ -262,7 +262,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia
struct resource *res;
struct tpmi_plr *plr;
void __iomem *base;
- char name[16];
+ char name[17];
int err;
plat_info = tpmi_get_platform_data(auxdev);
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 3e7f99ac8c94..1ee0fb5f8250 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <linux/units.h>
+#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -625,8 +626,8 @@ static u32 convert_ltr_scale(u32 val)
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
- u32 ltr_raw_data, scale, val;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
+ u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
unsigned int i, index, ltr_index = 0;
@@ -935,13 +936,13 @@ static unsigned int pmc_core_get_crystal_freq(void)
{
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
- if (boot_cpu_data.cpuid_level < 0x15)
+ if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
- /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
- cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+ /* TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;
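CPUID leaf 0x15 reports the TSC/crystal clock ratio as EBX/EAX, with the
crystal frequency optionally in ECX, so the TSC frequency follows directly.
A worked sketch (the 38.4 MHz crystal and 176/2 ratio are example values):

    /* TSC Hz = crystal Hz * (EBX / EAX), per CPUID.15H */
    static unsigned long long tsc_hz(unsigned int eax_denominator,
                                     unsigned int ebx_numerator,
                                     unsigned int ecx_crystal_hz)
    {
            if (!eax_denominator || !ebx_numerator || !ecx_crystal_hz)
                    return 0;
            return (unsigned long long)ecx_crystal_hz * ebx_numerator /
                   eax_denominator;
    }

    /* e.g. 38400000 * 176 / 2 = 3379200000 -> a 3.3792 GHz TSC */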
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 8ed54b7a3333..7233b654bbad 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT");
*/
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct intel_pmt_entry *entry = container_of(attr,
@@ -308,7 +308,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
entry->pmt_bin_attr.attr.name = ns->name;
entry->pmt_bin_attr.attr.mode = 0440;
entry->pmt_bin_attr.mmap = intel_pmt_mmap;
- entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.read_new = intel_pmt_read;
entry->pmt_bin_attr.size = entry->size;
ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index cd0ba84cc8e4..bafac8aa2baf 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -131,39 +131,6 @@ static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
}
/**
- * intel_punit_ipc_simple_command() - Simple IPC command
- * @cmd: IPC command code.
- * @para1: First 8bit parameter, set 0 if not used.
- * @para2: Second 8bit parameter, set 0 if not used.
- *
- * Send a IPC command to P-Unit when there is no data transaction
- *
- * Return: IPC error code or 0 on success.
- */
-int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
-{
- IPC_DEV *ipcdev = punit_ipcdev;
- IPC_TYPE type;
- u32 val;
- int ret;
-
- mutex_lock(&ipcdev->lock);
-
- reinit_completion(&ipcdev->cmd_complete);
- type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
-
- val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
- val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
- ipc_write_cmd(ipcdev, type, val);
- ret = intel_punit_ipc_check_status(ipcdev, type);
-
- mutex_unlock(&ipcdev->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(intel_punit_ipc_simple_command);
-
-/**
* intel_punit_ipc_command() - IPC command with data and pointers
* @cmd: IPC command code.
* @para1: First 8bit parameter, set 0 if not used.
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 33f33b1070fd..30d1c2caf984 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -398,8 +398,8 @@ free_payload:
}
static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -409,11 +409,11 @@ static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
-static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -423,7 +423,7 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
-static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
static ssize_t
certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
@@ -469,7 +469,7 @@ free_buffer:
static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -477,11 +477,11 @@ state_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -489,11 +489,11 @@ meter_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_current_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -502,11 +502,11 @@ meter_current_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -528,9 +528,9 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
+static const BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
-static struct bin_attribute *sdsi_bin_attrs[] = {
+static const struct bin_attribute *const sdsi_bin_attrs[] = {
&bin_attr_registers,
&bin_attr_state_certificate,
&bin_attr_meter_certificate,
@@ -576,7 +576,7 @@ static struct attribute *sdsi_attrs[] = {
static const struct attribute_group sdsi_group = {
.attrs = sdsi_attrs,
- .bin_attrs = sdsi_bin_attrs,
+ .bin_attrs_new = sdsi_bin_attrs,
.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 8272f1dd0fbc..db3c031d1757 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -404,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};
+/* DMR OOBMSM info */
+static const struct intel_vsec_platform_info dmr_oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
+};
+
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
.caps = VSEC_CAP_TELEMETRY,
@@ -420,6 +425,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
@@ -430,6 +436,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
diff --git a/drivers/platform/x86/lenovo-wmi-camera.c b/drivers/platform/x86/lenovo-wmi-camera.c
index 0c0bedaf7407..eb60fb9a5b3f 100644
--- a/drivers/platform/x86/lenovo-wmi-camera.c
+++ b/drivers/platform/x86/lenovo-wmi-camera.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/wmi.h>
+#include <linux/cleanup.h>
#define WMI_LENOVO_CAMERABUTTON_EVENT_GUID "50C76F1F-D8E4-D895-0A3D-62F4EA400013"
@@ -26,10 +27,38 @@ enum {
SW_CAMERA_ON = 1,
};
+static int camera_shutter_input_setup(struct wmi_device *wdev, u8 camera_mode)
+{
+ struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ int err;
+
+ priv->idev = input_allocate_device();
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "Lenovo WMI Camera Button";
+ priv->idev->phys = "wmi/input0";
+ priv->idev->id.bustype = BUS_HOST;
+ priv->idev->dev.parent = &wdev->dev;
+
+ input_set_capability(priv->idev, EV_SW, SW_CAMERA_LENS_COVER);
+
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER,
+ camera_mode == SW_CAMERA_ON ? 0 : 1);
+ input_sync(priv->idev);
+
+ err = input_register_device(priv->idev);
+ if (err) {
+ input_free_device(priv->idev);
+ priv->idev = NULL;
+ }
+
+ return err;
+}
+
static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- unsigned int keycode;
u8 camera_mode;
if (obj->type != ACPI_TYPE_BUFFER) {
@@ -53,22 +82,24 @@ static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
return;
}
- mutex_lock(&priv->notify_lock);
+ guard(mutex)(&priv->notify_lock);
- keycode = camera_mode == SW_CAMERA_ON ?
- KEY_CAMERA_ACCESS_ENABLE : KEY_CAMERA_ACCESS_DISABLE;
- input_report_key(priv->idev, keycode, 1);
- input_sync(priv->idev);
- input_report_key(priv->idev, keycode, 0);
- input_sync(priv->idev);
+ if (!priv->idev) {
+ if (camera_shutter_input_setup(wdev, camera_mode))
+ dev_warn(&wdev->dev, "Failed to register input device\n");
+ return;
+ }
- mutex_unlock(&priv->notify_lock);
+ if (camera_mode == SW_CAMERA_ON)
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 0);
+ else
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 1);
+ input_sync(priv->idev);
}
static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct lenovo_wmi_priv *priv;
- int ret;
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -76,21 +107,6 @@ static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
dev_set_drvdata(&wdev->dev, priv);
- priv->idev = devm_input_allocate_device(&wdev->dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "Lenovo WMI Camera Button";
- priv->idev->phys = "wmi/input0";
- priv->idev->id.bustype = BUS_HOST;
- priv->idev->dev.parent = &wdev->dev;
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_ENABLE);
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_DISABLE);
-
- ret = input_register_device(priv->idev);
- if (ret)
- return ret;
-
mutex_init(&priv->notify_lock);
return 0;
@@ -100,6 +116,9 @@ static void lenovo_wmi_remove(struct wmi_device *wdev)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ if (priv->idev)
+ input_unregister_device(priv->idev);
+
mutex_destroy(&priv->notify_lock);
}
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index d2699ca24f34..a96b215cd2c5 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
if (ret)
return ret;
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
serdev_device_set_baudrate(serdev, 600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, fc);
- serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
if (ret)
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index e5391a37014d..c4b150fa093f 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -806,8 +806,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
-static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -996,7 +996,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
if (result)
goto fail_input;
- result = i8042_install_filter(msi_laptop_i8042_filter);
+ result = i8042_install_filter(msi_laptop_i8042_filter, NULL);
if (result) {
pr_err("Unable to install key filter\n");
goto fail_filter;
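These i8042 filter changes track an API update that adds a void *context argument, letting drivers reach their private data without file-scope globals. A hedged sketch of a filter written against the new signature (the priv structure and swallow logic are illustrative):

#include <linux/i8042.h>
#include <linux/serio.h>

struct demo_priv {
	unsigned char watch_scancode;
};

static bool demo_i8042_filter(unsigned char data, unsigned char str,
			      struct serio *port, void *context)
{
	struct demo_priv *priv = context;

	if (str & I8042_STR_AUXDATA)
		return false;	/* ignore mouse (AUX) traffic */

	/* returning true swallows the byte before the keyboard driver sees it */
	return data == priv->watch_scancode;
}

/* installation now passes the context pointer alongside the callback:
 * err = i8042_install_filter(demo_i8042_filter, priv);
 */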
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 22ca70eb8227..2987b4db6009 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -260,7 +260,7 @@ struct pcc_acpi {
* keypress events over the PS/2 kbd interface, filter these out.
*/
static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -1100,7 +1100,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->platform = NULL;
}
- i8042_install_filter(panasonic_i8042_filter);
+ i8042_install_filter(panasonic_i8042_filter, NULL);
return 0;
out_platform:
diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c
index 8d540a1c8602..c332c7cdaff5 100644
--- a/drivers/platform/x86/quickstart.c
+++ b/drivers/platform/x86/quickstart.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
index bcf3a0c356ea..57eac75805e2 100644
--- a/drivers/platform/x86/serdev_helpers.h
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -22,32 +22,14 @@
#include <linux/string.h>
static inline struct device *
-get_serdev_controller(const char *serial_ctrl_hid,
- const char *serial_ctrl_uid,
- int serial_ctrl_port,
- const char *serdev_ctrl_name)
+get_serdev_controller_from_parent(struct device *ctrl_dev,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
{
- struct device *ctrl_dev, *child;
- struct acpi_device *ctrl_adev;
+ struct device *child;
char name[32];
int i;
- ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s serial-ctrl adev\n",
- serial_ctrl_hid, serial_ctrl_uid);
- return ERR_PTR(-ENODEV);
- }
-
- /* get_first_physical_node() returns a weak ref */
- ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s serial-ctrl physical node\n",
- serial_ctrl_hid, serial_ctrl_uid);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
- }
-
/* Walk host -> uart-ctrl -> port -> serdev-ctrl */
for (i = 0; i < 3; i++) {
switch (i) {
@@ -67,14 +49,40 @@ get_serdev_controller(const char *serial_ctrl_hid,
put_device(ctrl_dev);
if (!child) {
pr_err("error could not find '%s' device\n", name);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
+ return ERR_PTR(-ENODEV);
}
ctrl_dev = child;
}
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
return ctrl_dev;
}
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct acpi_device *adev;
+ struct device *parent;
+
+ adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ parent = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!parent) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* This puts our reference on parent and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(parent, serial_ctrl_port, serdev_ctrl_name);
+}
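The refactor splits the ACPI lookup from the parent-to-serdev-controller walk so that PCI-enumerated UARTs can reuse the walk. Both helpers hand the caller a device with one reference held; a hedged usage sketch (the HID/UID values are illustrative):

static int demo_lookup(void)
{
	struct device *ctrl;

	ctrl = get_serdev_controller("80860F0A", "2", 0, "serial0");
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* ... create and register the serdev client here ... */

	put_device(ctrl);	/* drop the reference taken by the helper */
	return 0;
}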
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index ed6b28505cd6..db030b0f176a 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -384,6 +384,17 @@ static const struct smi_node cs35l57_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node tas2781_hda = {
+ .instances = {
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -396,6 +407,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "CSC3556", (unsigned long)&cs35l56_hda },
{ "CSC3557", (unsigned long)&cs35l57_hda },
{ "INT3515", (unsigned long)&int3515_data },
+ { "TXNW2781", (unsigned long)&tas2781_hda },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
{ "CLSA0101", (unsigned long)&cs35l41_hda },
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 38de0cb20d77..323316ac6783 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -194,7 +194,6 @@ static const char * const level_options[] = {
[TLMI_LEVEL_MASTER] = "master",
};
static struct think_lmi tlmi_priv;
-static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj)
@@ -1446,11 +1445,7 @@ static int tlmi_sysfs_init(void)
{
int i, ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- return ret;
-
- tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", "thinklmi");
if (IS_ERR(tlmi_priv.class_dev)) {
ret = PTR_ERR(tlmi_priv.class_dev);
@@ -1563,9 +1558,8 @@ static int tlmi_sysfs_init(void)
fail_create_attr:
tlmi_release_attr();
fail_device_created:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
fail_class_created:
- fw_attributes_class_put();
return ret;
}
@@ -1788,8 +1782,7 @@ fail_clear_attr:
static void tlmi_remove(struct wmi_device *wdev)
{
tlmi_release_attr();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
}
static int tlmi_probe(struct wmi_device *wdev, const void *context)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2cfb2ac3f465..1cc91173e012 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -963,6 +963,7 @@ static const struct proc_ops dispatch_proc_ops = {
static struct platform_device *tpacpi_pdev;
static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
+static struct device *tpacpi_pprof;
static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
@@ -3275,6 +3276,7 @@ static const struct key_entry keymap_lenovo[] __initconst = {
* scancodes to preserve uAPI compatibility, see tpacpi_input_send_key().
*/
{ KE_KEY, 0x131d, { KEY_VENDOR } }, /* System debug info, similar to old ThinkPad key */
+ { KE_KEY, 0x1320, { KEY_LINK_PHONE } },
{ KE_KEY, TP_HKEY_EV_TRACK_DOUBLETAP /* 0x8036 */, { KEY_PROG4 } },
{ KE_END }
};
@@ -7883,6 +7885,7 @@ static struct ibm_struct volume_driver_data = {
#define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */
#define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */
+#define FAN_CLOCK_TPM (22500*60) /* Ticks per minute for a 22.5 kHz clock */
enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
@@ -7938,6 +7941,7 @@ static int fan_watchdog_maxinterval;
static bool fan_with_ns_addr;
static bool ecfw_with_fan_dec_rpm;
+static bool fan_speed_in_tpr;
static struct mutex fan_mutex;
@@ -8140,8 +8144,11 @@ static int fan_get_speed(unsigned int *speed)
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
if (!acpi_ec_read(fan_rpm_status_ns, &lo))
@@ -8174,8 +8181,11 @@ static int fan2_get_speed(unsigned int *speed)
if (rc)
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
@@ -8786,6 +8796,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */
+#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8815,6 +8826,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
+ TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8885,6 +8897,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_TPR)
+ fan_speed_in_tpr = true;
/* Try and probe the 2nd fan */
tp_features.second_fan = 1; /* needed for get_speed to work */
res = fan2_get_speed(&speed);
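With the TPACPI_FAN_TPR quirk the EC reports the number of 22.5 kHz clock ticks per fan revolution instead of RPM, so the reading is converted by dividing the clock's ticks-per-minute figure by the reported value. A small worked sketch of that conversion:

#define FAN_CLOCK_TPM (22500 * 60)	/* 1,350,000 ticks per minute */

/* Convert a ticks-per-revolution EC reading to RPM; 0 means the fan is stopped */
static unsigned int demo_tpr_to_rpm(unsigned int ticks)
{
	return ticks ? FAN_CLOCK_TPM / ticks : 0;
}

/* Example: a reading of 450 ticks/rev gives 1350000 / 450 = 3000 RPM */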
@@ -9958,6 +9972,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
* Individual addressing is broken on models that expose the
* primary battery as BAT1.
*/
+ TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */
TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
TPACPI_Q_LNV('J', '7', true), /* B5400 */
TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
@@ -10317,6 +10332,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */
#define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */
+#define DYTC_MODE_PSCV9_LOWPOWER 1 /* Low power mode */
+#define DYTC_MODE_PSCV9_BALANCE 3 /* Default mode aka balanced */
+#define DYTC_MODE_PSCV9_PERFORM 4 /* High power mode aka performance */
+
#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */
#define DYTC_ERR_SUCCESS 1 /* CMD completed successfully */
@@ -10337,6 +10356,10 @@ static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
+static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER;
+static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE;
+static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM;
+
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
{
@@ -10358,19 +10381,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
}
return 0;
case DYTC_FUNCTION_PSC:
- switch (dytcmode) {
- case DYTC_MODE_PSC_LOWPOWER:
+ if (dytcmode == platform_psc_profile_lowpower)
*profile = PLATFORM_PROFILE_LOW_POWER;
- break;
- case DYTC_MODE_PSC_BALANCE:
+ else if (dytcmode == platform_psc_profile_balanced)
*profile = PLATFORM_PROFILE_BALANCED;
- break;
- case DYTC_MODE_PSC_PERFORM:
+ else if (dytcmode == platform_psc_profile_performance)
*profile = PLATFORM_PROFILE_PERFORMANCE;
- break;
- default: /* Unknown mode */
+ else
return -EINVAL;
- }
+
return 0;
case DYTC_FUNCTION_AMT:
/* For now return balanced. It's the closest we have to 'auto' */
@@ -10391,19 +10410,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_LOWPOWER;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_LOWPOWER;
+ *perfmode = platform_psc_profile_lowpower;
break;
case PLATFORM_PROFILE_BALANCED:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_BALANCE;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_BALANCE;
+ *perfmode = platform_psc_profile_balanced;
break;
case PLATFORM_PROFILE_PERFORMANCE:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_PERFORM;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_PERFORM;
+ *perfmode = platform_psc_profile_performance;
break;
default: /* Unknown profile */
return -EOPNOTSUPP;
@@ -10415,7 +10434,7 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
*profile = dytc_current_profile;
@@ -10490,7 +10509,7 @@ static int dytc_cql_command(int command, int *output)
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int perfmode;
@@ -10539,6 +10558,21 @@ unlock:
return err;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static void dytc_profile_refresh(void)
{
enum platform_profile_option profile;
@@ -10567,24 +10601,14 @@ static void dytc_profile_refresh(void)
err = convert_dytc_to_profile(funcmode, perfmode, &profile);
if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(tpacpi_pprof);
}
}
-static struct platform_profile_handler dytc_profile = {
- .profile_get = dytc_profile_get,
- .profile_set = dytc_profile_set,
-};
-
static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
{
int err, output;
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
-
err = dytc_command(DYTC_CMD_QUERY, &output);
if (err)
return err;
@@ -10592,6 +10616,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (output & BIT(DYTC_QUERY_ENABLE_BIT))
dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version);
/* Check DYTC is enabled and supports mode setting */
if (dytc_version < 5)
return -ENODEV;
@@ -10630,6 +10655,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
pr_debug("PSC is supported\n");
+ if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */
+ platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER;
+ platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE;
+ platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM;
+ }
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
return -ENODEV;
@@ -10639,12 +10669,13 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
"DYTC version %d: thermal mode available\n", dytc_version);
/* Create platform_profile structure and register */
- err = platform_profile_register(&dytc_profile);
+ tpacpi_pprof = platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi-profile",
+ NULL, &dytc_profile_ops);
/*
* If for some reason platform_profiles aren't enabled
* don't quit terminally.
*/
- if (err)
+ if (IS_ERR(tpacpi_pprof))
return -ENODEV;
/* Ensure initial values are correct */
@@ -10659,7 +10690,8 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
static void dytc_profile_exit(void)
{
- platform_profile_remove();
+ if (!IS_ERR_OR_NULL(tpacpi_pprof))
+ platform_profile_remove(tpacpi_pprof);
}
static struct ibm_struct dytc_profile_driver_data = {
@@ -11681,7 +11713,7 @@ static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
if (strlen(val) > sizeof(ibms_init[i].param) - 1)
return -ENOSPC;
- strcpy(ibms_init[i].param, val);
+ strscpy(ibms_init[i].param, val);
return 0;
}
}
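The DYTC changes migrate from the old global platform_profile_handler to the device-based platform profile API: a probe() callback reports the supported choices, and platform_profile_register() returns a class device that is passed to later notify and remove calls. A minimal registration sketch under those assumptions (names are illustrative, get/set bodies stubbed):

static int demo_profile_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int demo_profile_get(struct device *dev, enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;
	return 0;
}

static int demo_profile_set(struct device *dev, enum platform_profile_option profile)
{
	return 0;
}

static const struct platform_profile_ops demo_profile_ops = {
	.probe = demo_profile_probe,
	.profile_get = demo_profile_get,
	.profile_set = demo_profile_set,
};

/* pprof = platform_profile_register(parent, "demo-profile", drvdata, &demo_profile_ops);
 * if (IS_ERR(pprof)) ...;
 * later: platform_profile_notify(pprof); platform_profile_remove(pprof);
 */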
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 78a5aac2dcfd..5ad3a7183d33 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2755,7 +2755,7 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
}
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
if (str & I8042_STR_AUXDATA)
return false;
@@ -2915,7 +2915,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
- error = i8042_install_filter(toshiba_acpi_i8042_filter);
+ error = i8042_install_filter(toshiba_acpi_i8042_filter, NULL);
if (error) {
pr_err("Error installing key filter\n");
goto err_free_dev;
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index df6f0ae6e6c7..3e33da36da8a 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -20,66 +20,66 @@
#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
-struct bmof_priv {
- union acpi_object *bmofdata;
- struct bin_attribute bmof_bin_attr;
-};
-
-static ssize_t read_bmof(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t bmof_read(struct file *filp, struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- struct bmof_priv *priv = container_of(attr, struct bmof_priv, bmof_bin_attr);
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
- return memory_read_from_buffer(buf, count, &off, priv->bmofdata->buffer.pointer,
- priv->bmofdata->buffer.length);
+ return memory_read_from_buffer(buf, count, &off, obj->buffer.pointer, obj->buffer.length);
}
-static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+static const BIN_ATTR_ADMIN_RO(bmof, 0);
+
+static const struct bin_attribute * const bmof_attrs[] = {
+ &bin_attr_bmof,
+ NULL
+};
+
+static size_t bmof_bin_size(struct kobject *kobj, const struct bin_attribute *attr, int n)
{
- struct bmof_priv *priv;
- int ret;
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
+
+ return obj->buffer.length;
+}
- priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+static const struct attribute_group bmof_group = {
+ .bin_size = bmof_bin_size,
+ .bin_attrs_new = bmof_attrs,
+};
+
+static const struct attribute_group *bmof_groups[] = {
+ &bmof_group,
+ NULL
+};
- dev_set_drvdata(&wdev->dev, priv);
+static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+{
+ union acpi_object *obj;
- priv->bmofdata = wmidev_block_query(wdev, 0);
- if (!priv->bmofdata) {
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
dev_err(&wdev->dev, "failed to read Binary MOF\n");
return -EIO;
}
- if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
+ if (obj->type != ACPI_TYPE_BUFFER) {
dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
- ret = -EIO;
- goto err_free;
+ kfree(obj);
+ return -EIO;
}
- sysfs_bin_attr_init(&priv->bmof_bin_attr);
- priv->bmof_bin_attr.attr.name = "bmof";
- priv->bmof_bin_attr.attr.mode = 0400;
- priv->bmof_bin_attr.read = read_bmof;
- priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
-
- ret = device_create_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- if (ret)
- goto err_free;
+ dev_set_drvdata(&wdev->dev, obj);
return 0;
-
- err_free:
- kfree(priv->bmofdata);
- return ret;
}
static void wmi_bmof_remove(struct wmi_device *wdev)
{
- struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
+ union acpi_object *obj = dev_get_drvdata(&wdev->dev);
- device_remove_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- kfree(priv->bmofdata);
+ kfree(obj);
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
@@ -90,6 +90,7 @@ static const struct wmi_device_id wmi_bmof_id_table[] = {
static struct wmi_driver wmi_bmof_driver = {
.driver = {
.name = "wmi-bmof",
+ .dev_groups = bmof_groups,
},
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
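The wmi-bmof rewrite drops the hand-rolled device_create_bin_file() in favor of a driver-level dev_groups table whose bin_size() callback sizes the attribute per device when the group is created. A hedged sketch of that pattern (the blob type and accessor are illustrative):

struct demo_blob {
	void *data;
	size_t len;
};

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	struct demo_blob *blob = dev_get_drvdata(kobj_to_dev(kobj));

	return memory_read_from_buffer(buf, count, &off, blob->data, blob->len);
}
static const BIN_ATTR_ADMIN_RO(demo, 0);	/* size 0: supplied by bin_size() */

static size_t demo_bin_size(struct kobject *kobj,
			    const struct bin_attribute *attr, int n)
{
	struct demo_blob *blob = dev_get_drvdata(kobj_to_dev(kobj));

	return blob->len;	/* evaluated when the attribute group is created */
}

static const struct bin_attribute *const demo_attrs[] = { &bin_attr_demo, NULL };

static const struct attribute_group demo_group = {
	.bin_size = demo_bin_size,
	.bin_attrs_new = demo_attrs,	/* wired up via the driver's .dev_groups */
};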
diff --git a/drivers/platform/x86/x86-android-tablets/Makefile b/drivers/platform/x86/x86-android-tablets/Makefile
index 41ece5a37137..313be30548bc 100644
--- a/drivers/platform/x86/x86-android-tablets/Makefile
+++ b/drivers/platform/x86/x86-android-tablets/Makefile
@@ -3,7 +3,7 @@
# X86 Android tablet support Makefile
#
+obj-$(CONFIG_X86_ANDROID_TABLETS) += vexia_atla10_ec.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
-
x86-android-tablets-y := core.o dmi.o shared-psy-info.o \
asus.o lenovo.o other.o
diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c
index 07fbeab2319a..7dde63b9943f 100644
--- a/drivers/platform/x86/x86-android-tablets/asus.c
+++ b/drivers/platform/x86/x86-android-tablets/asus.c
@@ -145,8 +145,8 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst =
static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
{
- .ctrl_hid = "80860F0A",
- .ctrl_uid = "2",
+ .ctrl.acpi.hid = "80860F0A",
+ .ctrl.acpi.uid = "2",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E3A",
},
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index 4218afcec0e9..2a9c47178505 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -157,7 +157,7 @@ static struct gpiod_lookup_table * const *gpiod_lookup_tables;
static const struct software_node *bat_swnode;
static void (*exit_handler)(void);
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_handle(const struct x86_i2c_client_info *client_info)
{
acpi_handle handle;
@@ -177,7 +177,7 @@ static __init int match_parent(struct device *dev, const void *data)
return dev->parent == data;
}
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_pci_parent(const struct x86_i2c_client_info *client_info)
{
struct i2c_adapter *adap = NULL;
@@ -212,7 +212,7 @@ static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info
if (board_info.irq < 0)
return board_info.irq;
- if (dev_info->use_pci_devname)
+ if (dev_info->use_pci)
adap = get_i2c_adap_by_pci_parent(client_info);
else
adap = get_i2c_adap_by_handle(client_info);
@@ -271,15 +271,32 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
return 0;
}
-static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
+static __init struct device *
+get_serdev_controller_by_pci_parent(const struct x86_serdev_info *info)
{
+ struct pci_dev *pdev;
+
+ pdev = pci_get_domain_bus_and_slot(0, 0, info->ctrl.pci.devfn);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* This puts our reference on pdev and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(&pdev->dev, 0, info->ctrl_devname);
+}
+
+static __init int x86_instantiate_serdev(const struct x86_dev_info *dev_info, int idx)
+{
+ const struct x86_serdev_info *info = &dev_info->serdev_info[idx];
struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
- info->ctrl_devname);
+ if (dev_info->use_pci)
+ ctrl_dev = get_serdev_controller_by_pci_parent(info);
+ else
+ ctrl_dev = get_serdev_controller(info->ctrl.acpi.hid, info->ctrl.acpi.uid,
+ 0, info->ctrl_devname);
if (IS_ERR(ctrl_dev))
return PTR_ERR(ctrl_dev);
@@ -446,7 +463,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
serdev_count = dev_info->serdev_count;
for (i = 0; i < serdev_count; i++) {
- ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ ret = x86_instantiate_serdev(dev_info, i);
if (ret < 0) {
x86_android_tablet_remove(pdev);
return ret;
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index ae087f1471c1..1241a97cda39 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -178,8 +178,8 @@ static const struct platform_device_info lenovo_yb1_x90_pdevs[] __initconst = {
*/
static const struct x86_serdev_info lenovo_yb1_x90_serdevs[] __initconst = {
{
- .ctrl_hid = "8086228A",
- .ctrl_uid = "1",
+ .ctrl.acpi.hid = "8086228A",
+ .ctrl.acpi.uid = "1",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E1A",
},
@@ -601,7 +601,7 @@ static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_
.num_consumer_supplies = 1,
};
-struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+static struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
};
@@ -726,7 +726,7 @@ static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initcon
},
};
-const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
"bq24190_charger", /* For the Vbus regulator for lc824206xa */
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 735df818f76b..1d93d9edb23f 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -602,14 +602,14 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
* Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
-const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
+static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-const struct software_node vexia_edu_atla10_ulpmc_node = {
+static const struct software_node vexia_edu_atla10_ulpmc_node = {
.properties = vexia_edu_atla10_ulpmc_props,
};
@@ -715,6 +715,14 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
+static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+ {
+ .ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
+ .ctrl_devname = "serial0",
+ .serdev_hid = "OBDA8723",
+ },
+};
+
static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
@@ -755,9 +763,11 @@ static int __init vexia_edu_atla10_init(struct device *dev)
const struct x86_dev_info vexia_edu_atla10_info __initconst = {
.i2c_client_info = vexia_edu_atla10_i2c_clients,
.i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
+ .serdev_info = vexia_edu_atla10_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
.gpiod_lookup_tables = vexia_edu_atla10_gpios,
.init = vexia_edu_atla10_init,
- .use_pci_devname = true,
+ .use_pci = true,
};
/*
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
new file mode 100644
index 000000000000..5d02af1c5aaa
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * power_supply class (battery) driver for the I2C attached embedded controller
+ * found on Vexia EDU ATLA 10 (9V version) tablets.
+ *
+ * This is based on the ACPI Battery device in the DSDT, which should work
+ * except that it expects the I2C controller to be enumerated as an ACPI
+ * device, while the tablet's BIOS enumerates all LPSS devices as PCI devices
+ * (and changing the LPSS BIOS settings from PCI -> ACPI does not work).
+ *
+ * Copyright (c) 2024 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
+
+/* State field uses ACPI Battery spec status bits */
+#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
+#define ACPI_BATTERY_STATE_CHARGING BIT(1)
+
+#define ATLA10_EC_BATTERY_STATE_COMMAND 0x87
+#define ATLA10_EC_BATTERY_INFO_COMMAND 0x88
+
+/* From broken ACPI battery device in DSDT */
+#define ATLA10_EC_VOLTAGE_MIN_DESIGN_uV 3750000
+
+/* Update data every 5 seconds */
+#define UPDATE_INTERVAL_JIFFIES (5 * HZ)
+
+struct atla10_ec_battery_state {
+ u8 status; /* Using ACPI Battery spec status bits */
+ u8 capacity; /* Percent */
+ __le16 charge_now_mAh;
+ __le16 voltage_now_mV;
+ __le16 current_now_mA;
+ __le16 charge_full_mAh;
+ __le16 temp; /* centi degrees Celsius */
+} __packed;
+
+struct atla10_ec_battery_info {
+ __le16 charge_full_design_mAh;
+ __le16 voltage_now_mV; /* Should be the design voltage, but apparently is not */

+ __le16 charge_full_design2_mAh;
+} __packed;
+
+struct atla10_ec_data {
+ struct i2c_client *client;
+ struct power_supply *psy;
+ struct delayed_work work;
+ struct mutex update_lock;
+ struct atla10_ec_battery_info info;
+ struct atla10_ec_battery_state state;
+ bool valid; /* true if state is valid */
+ unsigned long last_update; /* In jiffies */
+};
+
+static int atla10_ec_cmd(struct atla10_ec_data *data, u8 cmd, u8 len, u8 *values)
+{
+ struct device *dev = &data->client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = i2c_smbus_read_block_data(data->client, cmd, buf);
+ if (ret != len) {
+ dev_err(dev, "I2C command 0x%02x error: %d\n", cmd, ret);
+ return -EIO;
+ }
+
+ memcpy(values, buf, len);
+ return 0;
+}
+
+static int atla10_ec_update(struct atla10_ec_data *data)
+{
+ int ret;
+
+ if (data->valid && time_before(jiffies, data->last_update + UPDATE_INTERVAL_JIFFIES))
+ return 0;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_STATE_COMMAND,
+ sizeof(data->state), (u8 *)&data->state);
+ if (ret)
+ return ret;
+
+ data->last_update = jiffies;
+ data->valid = true;
+ return 0;
+}
+
+static int atla10_ec_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+ int charge_now_mAh, charge_full_mAh, ret;
+
+ guard(mutex)(&data->update_lock);
+
+ ret = atla10_ec_update(data);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (data->state.status & ACPI_BATTERY_STATE_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->state.capacity == 100)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = data->state.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ /*
+ * The EC has a bug where it reports charge-full-design as
+ * charge-now when the battery is full. Clamp charge-now to
+ * charge-full to work around this.
+ */
+ charge_now_mAh = le16_to_cpu(data->state.charge_now_mAh);
+ charge_full_mAh = le16_to_cpu(data->state.charge_full_mAh);
+ val->intval = min(charge_now_mAh, charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = le16_to_cpu(data->state.voltage_now_mV) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = le16_to_cpu(data->state.current_now_mA) * 1000;
+ /*
+ * Documentation/ABI/testing/sysfs-class-power specifies
+ * negative current for discharging.
+ */
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = -val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = le16_to_cpu(data->state.charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = le16_to_cpu(data->state.temp) / 10;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = le16_to_cpu(data->info.charge_full_design_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = ATLA10_EC_VOLTAGE_MIN_DESIGN_uV;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void atla10_ec_external_power_changed_work(struct work_struct *work)
+{
+ struct atla10_ec_data *data = container_of(work, struct atla10_ec_data, work.work);
+
+ dev_dbg(&data->client->dev, "External power changed\n");
+ data->valid = false;
+ power_supply_changed(data->psy);
+}
+
+static void atla10_ec_external_power_changed(struct power_supply *psy)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+
+ /* After charger plug in/out wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &data->work, HZ / 2);
+}
+
+static const enum power_supply_property atla10_ec_psy_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static const struct power_supply_desc atla10_ec_psy_desc = {
+ .name = "atla10_ec_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = atla10_ec_psy_props,
+ .num_properties = ARRAY_SIZE(atla10_ec_psy_props),
+ .get_property = atla10_ec_psy_get_property,
+ .external_power_changed = atla10_ec_external_power_changed,
+};
+
+static int atla10_ec_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = { };
+ struct device *dev = &client->dev;
+ struct atla10_ec_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = data;
+ data->client = client;
+
+ ret = devm_mutex_init(dev, &data->update_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_delayed_work_autocancel(dev, &data->work,
+ atla10_ec_external_power_changed_work);
+ if (ret)
+ return ret;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_INFO_COMMAND,
+ sizeof(data->info), (u8 *)&data->info);
+ if (ret)
+ return ret;
+
+ data->psy = devm_power_supply_register(dev, &atla10_ec_psy_desc, &psy_cfg);
+ return PTR_ERR_OR_ZERO(data->psy);
+}
+
+static const struct i2c_device_id atla10_ec_id_table[] = {
+ { "vexia_atla10_ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atla10_ec_id_table);
+
+static struct i2c_driver atla10_ec_driver = {
+ .driver = {
+ .name = "vexia_atla10_ec",
+ },
+ .probe = atla10_ec_probe,
+ .id_table = atla10_ec_id_table,
+};
+module_i2c_driver(atla10_ec_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Battery driver for Vexia EDU ATLA 10 tablet EC");
+MODULE_LICENSE("GPL");
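The EC answers SMBus block reads with little-endian packed payloads matching the structs above. A hedged sketch of decoding a state payload, including the sysfs sign convention for discharge current (the helper name is illustrative):

/* Decode a raw ATLA10_EC_BATTERY_STATE_COMMAND payload; the 13-byte
 * layout mirrors struct atla10_ec_battery_state above. */
static void demo_decode_state(const struct atla10_ec_battery_state *s)
{
	unsigned int uV = le16_to_cpu(s->voltage_now_mV) * 1000;
	int uA = le16_to_cpu(s->current_now_mA) * 1000;

	/* sysfs convention: discharging current is reported as negative */
	if (s->status & ACPI_BATTERY_STATE_DISCHARGING)
		uA = -uA;

	pr_debug("capacity=%u%% voltage=%uuV current=%duA\n",
		 s->capacity, uV, uA);
}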
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 0fc7e8cff672..63a38a0069ba 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -57,8 +57,15 @@ struct x86_spi_dev_info {
};
struct x86_serdev_info {
- const char *ctrl_hid;
- const char *ctrl_uid;
+ union {
+ struct {
+ const char *hid;
+ const char *uid;
+ } acpi;
+ struct {
+ unsigned int devfn;
+ } pci;
+ } ctrl;
const char *ctrl_devname;
/*
* ATM the serdev core only supports OF or ACPI matching; and so far all
@@ -91,7 +98,7 @@ struct x86_dev_info {
int gpio_button_count;
int (*init)(struct device *dev);
void (*exit)(void);
- bool use_pci_devname;
+ bool use_pci;
};
int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
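With the new ctrl union a board description names either an ACPI HID/UID pair or a PCI devfn for the UART controller, and x86_dev_info.use_pci selects which member is valid. A hedged sketch of both initializer forms (the values are illustrative):

/* ACPI-enumerated UART: match by HID/UID */
static const struct x86_serdev_info demo_acpi_serdev __initconst = {
	.ctrl.acpi.hid = "80860F0A",	/* illustrative HID */
	.ctrl.acpi.uid = "2",
	.ctrl_devname = "serial0",
	.serdev_hid = "BCM2E3A",
};

/* PCI-enumerated UART: match by device/function on bus 0 */
static const struct x86_serdev_info demo_pci_serdev __initconst = {
	.ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),	/* illustrative devfn */
	.ctrl_devname = "serial0",
	.serdev_hid = "OBDA8723",
};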
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 42ce41a2fe3a..ff76ea36835e 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_VI_CLK2, 0),
/* ETH is for ethernet online wakeup, and should be always on */
SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON),
- SEC_PD(T7_ISP, 0),
+ TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID),
SEC_PD(T7_MIPI_ISP, 0),
TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID),
TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID),
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index a7784a8bb5db..86b531e15b85 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -96,6 +96,14 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
continue;
}
+ /*
+ * Register an explicit power-on request with the firmware so
+ * that the domain is tracked as in use by the OSPM agent and is
+ * not accidentally turned off without OSPM's knowledge.
+ */
+ if (state == SCMI_POWER_STATE_GENERIC_ON)
+ power_ops->state_set(ph, i, state);
+
scmi_pd->domain = i;
scmi_pd->ph = ph;
scmi_pd->name = power_ops->name_get(ph, i);
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 20a9efebbcb7..6c94137865c9 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -3180,6 +3180,8 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
if (!err)
genpd_state->residency_ns = 1000LL * residency;
+ of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
+
genpd_state->power_on_latency_ns = 1000LL * exit_latency;
genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
@@ -3458,7 +3460,10 @@ static int idle_states_show(struct seq_file *s, void *data)
seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
- idle_time += genpd->states[i].idle_time;
+ struct genpd_power_state *state = &genpd->states[i];
+ char state_name[15];
+
+ idle_time += state->idle_time;
if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
now = ktime_get_mono_fast_ns();
@@ -3468,9 +3473,13 @@ static int idle_states_show(struct seq_file *s, void *data)
}
}
+ if (!state->name)
+ snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
+
do_div(idle_time, NSEC_PER_MSEC);
- seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
- genpd->states[i].usage, genpd->states[i].rejected);
+ seq_printf(s, "%-14s %-14llu %-14llu %llu\n",
+ state->name ?: state_name, idle_time,
+ state->usage, state->rejected);
}
genpd_unlock(genpd);
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 9bdb80fd7210..958d34d4821b 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1437,6 +1437,7 @@ static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
.pm = &imx_pgc_domain_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
@@ -1549,6 +1550,7 @@ static struct platform_driver imx_gpc_driver = {
.driver = {
.name = "imx-gpcv2",
.of_match_table = imx_gpcv2_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = imx_gpcv2_probe,
};
diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index 23db85b7aa9e..912802b5215b 100644
--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
@@ -894,6 +894,7 @@ static struct platform_driver imx8m_blk_ctrl_driver = {
.name = "imx8m-blk-ctrl",
.pm = &imx8m_blk_ctrl_pm_ops,
.of_match_table = imx8m_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8m_blk_ctrl_driver);
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3a0f64c144c..34576be606e3 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
@@ -862,6 +862,7 @@ static struct platform_driver imx8mp_blk_ctrl_driver = {
.name = "imx8mp-blk-ctrl",
.pm = &imx8mp_blk_ctrl_pm_ops,
.of_match_table = imx8mp_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8mp_blk_ctrl_driver);
diff --git a/drivers/pmdomain/mediatek/Kconfig b/drivers/pmdomain/mediatek/Kconfig
index 21305c4f17fe..0e34a517ab7d 100644
--- a/drivers/pmdomain/mediatek/Kconfig
+++ b/drivers/pmdomain/mediatek/Kconfig
@@ -26,4 +26,16 @@ config MTK_SCPSYS_PM_DOMAINS
Control Processor System (SCPSYS) has several power management related
tasks in the system.
+config AIROHA_CPU_PM_DOMAIN
+ tristate "Airoha CPU power domain"
+ default ARCH_AIROHA
+ depends on HAVE_ARM_SMCCC
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say Y here to enable CPU power domain support for Airoha SoCs.
+
+ CPU frequency and power are controlled by ATF via SMC commands
+ that set performance states.
+
endmenu
diff --git a/drivers/pmdomain/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile
index 8cde09e654b3..18ba92e3c418 100644
--- a/drivers/pmdomain/mediatek/Makefile
+++ b/drivers/pmdomain/mediatek/Makefile
@@ -1,3 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
+obj-$(CONFIG_AIROHA_CPU_PM_DOMAIN) += airoha-cpu-pmdomain.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_airoha-cpu-pmdomain.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
new file mode 100644
index 000000000000..0fd88d2f9ac2
--- /dev/null
+++ b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#define AIROHA_SIP_AVS_HANDLE 0x82000301
+#define AIROHA_AVS_OP_BASE 0xddddddd0
+#define AIROHA_AVS_OP_MASK GENMASK(1, 0)
+#define AIROHA_AVS_OP_FREQ_DYN_ADJ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x1))
+#define AIROHA_AVS_OP_GET_FREQ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x2))
+
+struct airoha_cpu_pmdomain_priv {
+ struct clk_hw hw;
+ struct generic_pm_domain pd;
+};
+
+static long airoha_cpu_pmdomain_clk_round(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long airoha_cpu_pmdomain_clk_get(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_GET_FREQ,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ /* SMCCC returns freq in MHz */
+ return (int)(res.a0 * 1000 * 1000);
+}
+
+/* The Airoha CPU clk exposed via SMCCC is always enabled */
+static int airoha_cpu_pmdomain_clk_is_enabled(struct clk_hw *hw)
+{
+ return true;
+}
+
+static const struct clk_ops airoha_cpu_pmdomain_clk_ops = {
+ .recalc_rate = airoha_cpu_pmdomain_clk_get,
+ .is_enabled = airoha_cpu_pmdomain_clk_is_enabled,
+ .round_rate = airoha_cpu_pmdomain_clk_round,
+};
+
+static int airoha_cpu_pmdomain_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_FREQ_DYN_ADJ,
+ 0, state, 0, 0, 0, 0, &res);
+
+ /* The SMC call signals successful application by clearing BIT(0) */
+ return res.a0 & BIT(0) ? -EINVAL : 0;
+}
+
+static int airoha_cpu_pmdomain_probe(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv;
+ struct device *dev = &pdev->dev;
+ const struct clk_init_data init = {
+ .name = "cpu",
+ .ops = &airoha_cpu_pmdomain_clk_ops,
+ /* Clock with no set_rate, can't cache */
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ struct generic_pm_domain *pd;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init and register a get-only clk for Cpufreq */
+ priv->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ret;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &priv->hw);
+ if (ret)
+ return ret;
+
+ /* Init and register a PD for CPU */
+ pd = &priv->pd;
+ pd->name = "cpu_pd";
+ pd->flags = GENPD_FLAG_ALWAYS_ON;
+ pd->set_performance_state = airoha_cpu_pmdomain_set_performance_state;
+
+ ret = pm_genpd_init(pd, NULL, false);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, pd);
+ if (ret)
+ goto err_add_provider;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_add_provider:
+ pm_genpd_remove(pd);
+
+ return ret;
+}
+
+static void airoha_cpu_pmdomain_remove(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv = platform_get_drvdata(pdev);
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&priv->pd);
+}
+
+static const struct of_device_id airoha_cpu_pmdomain_of_match[] = {
+ { .compatible = "airoha,en7581-cpufreq" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, airoha_cpu_pmdomain_of_match);
+
+static struct platform_driver airoha_cpu_pmdomain_driver = {
+ .probe = airoha_cpu_pmdomain_probe,
+ .remove = airoha_cpu_pmdomain_remove,
+ .driver = {
+ .name = "airoha-cpu-pmdomain",
+ .of_match_table = airoha_cpu_pmdomain_of_match,
+ },
+};
+module_platform_driver(airoha_cpu_pmdomain_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPU PM domain driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
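The AVS command words are a fixed base OR-ed with a two-bit opcode via FIELD_PREP, so FREQ_DYN_ADJ encodes to 0xddddddd1 and GET_FREQ to 0xddddddd2. A hedged sketch of the get-frequency call, scaling the MHz result to Hz as the clk op above does:

#include <linux/arm-smccc.h>

static unsigned long demo_get_cpu_freq(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_GET_FREQ,
			     0, 0, 0, 0, 0, 0, &res);

	return res.a0 * 1000 * 1000;	/* firmware reports the frequency in MHz */
}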
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 0e4bd749d067..82df7e44250b 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
@@ -51,6 +53,56 @@ struct ti_sci_pm_domain {
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+static inline bool ti_sci_pd_is_valid_constraint(s32 val)
+{
+ return val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void ti_sci_pd_set_lat_constraint(struct device *dev, s32 val)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ u16 val_ms;
+ int ret;
+
+ /* PM QoS latency unit is usecs, TI SCI uses msecs */
+ val_ms = val / USEC_PER_MSEC;
+ ret = ti_sci->ops.pm_ops.set_latency_constraint(ti_sci, val_ms, TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ dev_err(dev, "ti_sci_pd: set latency constraint failed: ret=%d\n",
+ ret);
+ else
+ dev_dbg(dev, "ti_sci_pd: ID:%d set latency constraint %d\n",
+ pd->idx, val);
+}
+#endif
+
+static inline void ti_sci_pd_set_wkup_constraint(struct device *dev)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ int ret;
+
+ if (device_may_wakeup(dev)) {
+ /*
+ * If the device can wake up via IO daisy-chain wakeups,
+ * we do not want to set a constraint.
+ */
+ if (dev->power.wakeirq) {
+ dev_dbg(dev, "%s: has wake IRQ, not setting constraints\n", __func__);
+ return;
+ }
+
+ ret = ti_sci->ops.pm_ops.set_device_constraint(ti_sci, pd->idx,
+ TISCI_MSG_CONSTRAINT_SET);
+ if (!ret)
+ dev_dbg(dev, "ti_sci_pd: ID:%d set device constraint.\n", pd->idx);
+ }
+}
+
/*
* ti_sci_pd_power_off(): genpd power down hook
* @domain: pointer to the powerdomain to power off
@@ -79,6 +131,28 @@ static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
+#ifdef CONFIG_PM_SLEEP
+static int ti_sci_pd_suspend(struct device *dev)
+{
+ int ret;
+ s32 val;
+
+ ret = pm_generic_suspend(dev);
+ if (ret)
+ return ret;
+
+ val = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (ti_sci_pd_is_valid_constraint(val))
+ ti_sci_pd_set_lat_constraint(dev, val);
+
+ ti_sci_pd_set_wkup_constraint(dev);
+
+ return 0;
+}
+#else
+#define ti_sci_pd_suspend NULL
+#endif
+
/*
* ti_sci_pd_xlate(): translation service for TI SCI genpds
* @genpdspec: DT identification data for the genpd
@@ -182,6 +256,13 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
pd->pd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pd->idx = args.args[0];
pd->parent = pd_provider;
+ /*
+ * If SCI constraint functions are present, then firmware
+ * supports the constraints API.
+ */
+ if (pd_provider->ti_sci->ops.pm_ops.set_device_constraint &&
+ pd_provider->ti_sci->ops.pm_ops.set_latency_constraint)
+ pd->pd.domain.ops.suspend = ti_sci_pd_suspend;
pm_genpd_init(&pd->pd, NULL, true);
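The suspend hook forwards a latency constraint to firmware only when a consumer has published one. A hedged sketch of a consumer driver setting a resume-latency QoS request that ti_sci_pd_suspend() would then read via dev_pm_qos_read_value() (the 10 ms value is illustrative):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request demo_req;

static int demo_set_latency(struct device *dev)
{
	/* QoS latency values are in usecs; the domain converts to msecs */
	return dev_pm_qos_add_request(dev, &demo_req,
				      DEV_PM_QOS_RESUME_LATENCY,
				      10 * USEC_PER_MSEC);
}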
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index f5fc33a8bf44..60bf0ca64cf3 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -26,7 +26,7 @@ config POWER_RESET_AT91_POWEROFF
config POWER_RESET_AT91_RESET
tristate "Atmel AT91 reset driver"
depends on ARCH_AT91
- default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
+ default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
help
This driver supports restart for Atmel AT91SAM9 and SAMA5
SoCs
@@ -34,7 +34,7 @@ config POWER_RESET_AT91_RESET
config POWER_RESET_AT91_SAMA5D2_SHDWC
tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
depends on ARCH_AT91
- default SOC_SAM9X60 || SOC_SAMA5
+ default SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
help
This driver supports the alternate shutdown controller for some Atmel
SAMA5 SoCs. It is present for example on SAMA5D2 SoC.
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index bb26fa6fa67c..8075382cbc36 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -57,8 +57,6 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
SYS_OFF_PRIO_DEFAULT,
as3722_pm_power_off,
as3722_poweroff);
-
- return 0;
}
static struct platform_driver as3722_poweroff_driver = {
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index edb0df86aff4..c2801bd6384d 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -326,6 +326,7 @@ static const struct of_device_id at91_pmc_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sam9x60-pmc" },
{ .compatible = "microchip,sama7g5-pmc" },
+ { .compatible = "microchip,sam9x7-pmc" },
{ /* Sentinel. */ }
};
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 52cfeee2cb28..3eaae352ffb9 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -44,7 +44,13 @@ static int gpio_poweroff_do_poweroff(struct sys_off_data *data)
/* give it some time */
mdelay(gpio_poweroff->timeout_ms);
- WARN_ON(1);
+ /*
+ * If code reaches this point, it means that gpio-poweroff has failed
+ * to actually power off the system.
+ * Warn the user that the attempt to poweroff via gpio-poweroff
+ * has gone wrong.
+ */
+ WARN(1, "Failed to poweroff via gpio-poweroff mechanism\n");
return NOTIFY_DONE;
}
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index cfaa54ced0d0..d9268d150e1f 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -87,26 +87,16 @@ static int rsctrl_probe(struct platform_device *pdev)
return -ENODEV;
/* get regmaps */
- pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
+ pllctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-pll",
+ 1, &rspll_offset);
if (IS_ERR(pllctrl_regs))
return PTR_ERR(pllctrl_regs);
- devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &rsmux_offset);
if (IS_ERR(devctrl_regs))
return PTR_ERR(devctrl_regs);
- ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
- if (ret) {
- dev_err(dev, "couldn't read the reset pll offset!\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
- if (ret) {
- dev_err(dev, "couldn't read the rsmux offset!\n");
- return -EINVAL;
- }
-
/* set soft/hard reset */
val = of_property_read_bool(np, "ti,soft-reset");
val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;
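syscon_regmap_lookup_by_phandle_args() collapses the former two-step dance of looking up the regmap and then fetching the trailing phandle cells with of_property_read_u32_index(). A hedged sketch against a one-cell property (the property name and register values are illustrative):

/* Matches a DT property of the form: ti,syscon-pll = <&pllctrl 0xe4>; */
static int demo_get_syscon(struct device_node *np)
{
	struct regmap *regs;
	unsigned int offset;

	regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-pll",
						    1, &offset);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* offset now holds the first argument cell, e.g. 0xe4 */
	return regmap_update_bits(regs, offset, 0x3, 0x3);
}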
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 1a6fc8d38e20..90c664d344d0 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -162,11 +162,11 @@ static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
data->wde_interval = 300L * NSEC_PER_MSEC;
data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
- hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
+ hrtimer_setup(&data->timer_trigger, ltc2952_poweroff_timer_trigger, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_wde.function = ltc2952_poweroff_timer_wde;
+ hrtimer_setup(&data->timer_wde, ltc2952_poweroff_timer_wde, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static int ltc2952_poweroff_init(struct platform_device *pdev)
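hrtimer_setup() folds hrtimer_init() plus the separate function-pointer assignment into one call, so a timer can no longer be armed with its callback unset. A hedged sketch of the new-style initialization (names and the 500 ms delay are illustrative):

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* one-shot timer: do the work, do not rearm */
	return HRTIMER_NORESTART;
}

static void demo_timer_init(struct hrtimer *timer)
{
	/* replaces: hrtimer_init(...); timer->function = demo_timer_fn; */
	hrtimer_setup(timer, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(timer, ms_to_ktime(500), HRTIMER_MODE_REL);
}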
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
index 682a9beac69e..e8f5030f2639 100644
--- a/drivers/power/sequencing/pwrseq-qcom-wcn.c
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -272,6 +272,24 @@ static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
.targets = pwrseq_qcom_wcn_targets,
};
+static const char *const pwrseq_wcn6750_vregs[] = {
+ "vddaon",
+ "vddasd",
+ "vddpmu",
+ "vddrfa0p8",
+ "vddrfa1p2",
+ "vddrfa1p7",
+ "vddrfa2p2",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn6750_of_data = {
+ .vregs = pwrseq_wcn6750_vregs,
+ .num_vregs = ARRAY_SIZE(pwrseq_wcn6750_vregs),
+ .pwup_delay_ms = 50,
+ .gpio_enable_delay_ms = 5,
+ .targets = pwrseq_qcom_wcn_targets,
+};
+
static const char *const pwrseq_wcn6855_vregs[] = {
"vddio",
"vddaon",
@@ -378,6 +396,13 @@ static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio),
"Failed to get the Bluetooth enable GPIO\n");
+ /*
+ * FIXME: This should actually be GPIOD_OUT_LOW, but doing so would
+ * toggle the WLAN power and bring the PCIe link down. The PCIe
+ * controller driver does not currently handle link down, which
+ * leaves the device unusable, so keep this workaround until link
+ * down handling is implemented in the controller driver.
+ */
ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable",
GPIOD_ASIS);
if (IS_ERR(ctx->wlan_gpio))
@@ -431,6 +456,10 @@ static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
.compatible = "qcom,wcn7850-pmu",
.data = &pwrseq_wcn7850_of_data,
},
+ {
+ .compatible = "qcom,wcn6750-pmu",
+ .data = &pwrseq_wcn6750_of_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match);
diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
index b7938fbb24a5..edae1e843c51 100644
--- a/drivers/power/supply/88pm860x_battery.c
+++ b/drivers/power/supply/88pm860x_battery.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/power_supply.h>
+#include <linux/string_choices.h>
#include <linux/mfd/88pm860x.h>
#include <linux/delay.h>
@@ -503,8 +504,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG);
bat_remove = data & BAT_WU_LOG;
- dev_dbg(info->dev, "battery wake up? %s\n",
- bat_remove != 0 ? "yes" : "no");
+ dev_dbg(info->dev, "battery wake up? %s\n", str_yes_no(bat_remove));
/* restore SOC from RTC domain register */
if (bat_remove == 0) {
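
str_yes_no() above comes from <linux/string_choices.h>; the family maps a boolean to a fixed string pair. A sketch of the helpers used across these patches (the surrounding calls are illustrative):

	dev_dbg(dev, "battery wake up? %s\n", str_yes_no(flag));  /* "yes"/"no" */
	seq_printf(s, "charger is %s\n", str_on_off(flag));       /* "on"/"off" */
	sysfs_emit(buf, "%s\n", str_enabled_disabled(flag));      /* "enabled"/
	                                                           * "disabled" */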
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 9f2eef6787f7..7b18358f194a 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -918,6 +918,15 @@ config FUEL_GAUGE_SC27XX
Say Y here to enable support for fuel gauge with SC27XX
PMIC chips.
+config FUEL_GAUGE_STC3117
+ tristate "STMicroelectronics STC3117 fuel gauge driver"
+ depends on CRC8
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to enable support for fuel gauges based on the
+ STC3117 chip.
+
config CHARGER_UCS1002
tristate "Microchip UCS1002 USB Port Power Controller"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 59c4a9f40d28..b55cc48a4c86 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_CROS_PCHG) += cros_peripheral_charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
+obj-$(CONFIG_FUEL_GAUGE_STC3117) += stc3117_fuel_gauge.o
obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 37039e28fc4b..b00c84fbc33c 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -540,10 +540,9 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
return 0;
}
-static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_btemp_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_btemp *di;
union power_supply_propval ret;
@@ -617,7 +616,7 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- power_supply_for_each_device(psy, ab8500_btemp_get_ext_psy_data);
+ power_supply_for_each_psy(psy, ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 14e1b448bd39..dc6c8b0dd1cf 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -844,10 +844,9 @@ static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
}
}
-static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_chargalg_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_chargalg *di;
union power_supply_propval ret;
@@ -1231,7 +1230,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
int ret;
/* Collect data from all power_supply class devices */
- power_supply_for_each_device(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+ power_supply_for_each_psy(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
ab8500_chargalg_end_of_charge(di);
ab8500_chargalg_check_temp(di);
@@ -1788,13 +1787,12 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
/* Initialize safety timer */
- hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ hrtimer_setup(&di->safety_timer, ab8500_chargalg_safety_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize maintenance timer */
- hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->maintenance_timer.function =
- ab8500_chargalg_maintenance_timer_expired;
+ hrtimer_setup(&di->maintenance_timer, ab8500_chargalg_maintenance_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* Init work for chargalg */
INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index cece8d6753ac..1042d37424f5 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1894,10 +1894,9 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
return ret;
}
-static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_charger_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_charger *di;
union power_supply_propval ret;
@@ -1961,7 +1960,7 @@ static void ab8500_charger_check_vbat_work(struct work_struct *work)
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, check_vbat_work.work);
- power_supply_for_each_device(&di->usb_chg, ab8500_charger_get_ext_psy_data);
+ power_supply_for_each_psy(&di->usb_chg, ab8500_charger_get_ext_psy_data);
/* First run old_vbat is 0. */
if (di->old_vbat == 0)
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 78871a2143de..9dd99722667a 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2174,10 +2174,9 @@ static int ab8500_fg_get_property(struct power_supply *psy,
return 0;
}
-static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_fg_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_fg *di;
struct power_supply_battery_info *bi;
@@ -2402,7 +2401,7 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- power_supply_for_each_device(psy, ab8500_fg_get_ext_psy_data);
+ power_supply_for_each_psy(psy, ab8500_fg_get_ext_psy_data);
}
/**
@@ -2575,7 +2574,7 @@ static ssize_t ab8505_powercut_flagtime_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2598,7 +2597,7 @@ static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2625,7 +2624,7 @@ static ssize_t ab8505_powercut_maxtime_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2649,7 +2648,7 @@ static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2676,7 +2675,7 @@ static ssize_t ab8505_powercut_restart_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2699,7 +2698,7 @@ static ssize_t ab8505_powercut_restart_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2727,7 +2726,7 @@ static ssize_t ab8505_powercut_timer_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2750,7 +2749,7 @@ static ssize_t ab8505_powercut_restart_counter_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2773,7 +2772,7 @@ static ssize_t ab8505_powercut_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2794,7 +2793,7 @@ static ssize_t ab8505_powercut_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2822,7 +2821,7 @@ static ssize_t ab8505_powercut_flag_read(struct device *dev,
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2845,7 +2844,7 @@ static ssize_t ab8505_powercut_debounce_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2868,7 +2867,7 @@ static ssize_t ab8505_powercut_debounce_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2895,7 +2894,7 @@ static ssize_t ab8505_powercut_enable_status_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
diff --git a/drivers/power/supply/apm_power.c b/drivers/power/supply/apm_power.c
index 8ef1b6f1f787..9236e0078578 100644
--- a/drivers/power/supply/apm_power.c
+++ b/drivers/power/supply/apm_power.c
@@ -42,11 +42,11 @@ struct find_bat_param {
int max_energy;
};
-static int __find_main_battery(struct device *dev, void *data)
+static int __find_main_battery(struct power_supply *psy, void *data)
{
struct find_bat_param *bp = (struct find_bat_param *)data;
- bp->bat = dev_get_drvdata(dev);
+ bp->bat = psy;
if (bp->bat->desc->use_for_apm) {
/* nice, we explicitly asked to report this battery. */
@@ -79,7 +79,7 @@ static void find_main_battery(void)
main_battery = NULL;
bp.main = main_battery;
- error = power_supply_for_each_device(&bp, __find_main_battery);
+ error = power_supply_for_each_psy(&bp, __find_main_battery);
if (error) {
main_battery = bp.main;
return;
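
The power_supply_for_each_device() → power_supply_for_each_psy() rename in these hunks also changes the callback signature; a sketch of both shapes (bodies trimmed to a placeholder check):

	/* old: callback receives the device and re-derives the psy */
	static int cb_old(struct device *dev, void *data)
	{
		struct power_supply *ext = dev_get_drvdata(dev);

		return ext ? 0 : -EINVAL;
	}

	/* new: callback receives the psy directly */
	static int cb_new(struct power_supply *ext, void *data)
	{
		return ext ? 0 : -EINVAL;
	}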
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index fa27195f074e..3c3158f31a48 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
/*
* If a fault is detected it must also be cleared; if the
- * condition persists it should reappear (This is an
- * assumption, it's actually not documented). A restart was
- * not sufficient to clear the bit in testing despite the
- * register listed as POR.
+ * condition persists it should reappear. A restart was not
+ * sufficient to clear the bit in testing, despite the register
+ * being listed as POR.
*/
case POWER_SUPPLY_PROP_HEALTH:
ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
case AXP717_BATT_UVLO_2_5V:
val->intval = POWER_SUPPLY_HEALTH_DEAD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UVLO_2_5V,
- AXP717_BATT_UVLO_2_5V);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UVLO_2_5V,
+ AXP717_BATT_UVLO_2_5V);
return 0;
case AXP717_BATT_OVER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_HOT;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_OVER_TEMP,
- AXP717_BATT_OVER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_OVER_TEMP,
+ AXP717_BATT_OVER_TEMP);
return 0;
case AXP717_BATT_UNDER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_COLD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UNDER_TEMP,
- AXP717_BATT_UNDER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UNDER_TEMP,
+ AXP717_BATT_UNDER_TEMP);
return 0;
default:
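
Why regmap_write_bits() rather than regmap_update_bits() for these fault bits: update_bits does a read-modify-write and skips the write when the masked value already matches, which is always the case for a write-1-to-clear bit that reads back as 1; write_bits forces the write. A sketch under that assumption (map/REG are illustrative):

	regmap_update_bits(map, REG, BIT(0), BIT(0)); /* bit reads as 1, write
	                                               * skipped, fault never
	                                               * cleared */
	regmap_write_bits(map, REG, BIT(0), BIT(0));  /* unconditional write,
	                                               * W1C bit actually
	                                               * cleared */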
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 25e28dac900d..22f6a3b71632 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -171,6 +171,7 @@ struct bq2415x_device {
char *name;
int autotimer; /* 1 - if driver automatically reset timer, 0 - not */
int automode; /* 1 - enabled, 0 - disabled; -1 - not supported */
+ int charge_status;
int id;
};
@@ -835,11 +836,13 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (!bq2415x_update_reported_mode(bq, prop.intval))
return NOTIFY_OK;
+ power_supply_changed(bq->charger);
+
/* if automode is not enabled do not tell about reported_mode */
if (bq->automode < 1)
return NOTIFY_OK;
- schedule_delayed_work(&bq->work, 0);
+ mod_delayed_work(system_wq, &bq->work, 0);
return NOTIFY_OK;
}
@@ -889,12 +892,19 @@ static void bq2415x_timer_work(struct work_struct *work)
int ret;
int error;
int boost;
+ int charge;
if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
sysfs_notify(&bq->charger->dev.kobj, NULL, "reported_mode");
bq2415x_set_mode(bq, bq->reported_mode);
}
+ charge = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (bq->charge_status != charge) {
+ power_supply_changed(bq->charger);
+ bq->charge_status = charge;
+ }
+
if (!bq->autotimer)
return;
@@ -1050,7 +1060,7 @@ static ssize_t bq2415x_sysfs_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
int ret;
@@ -1083,7 +1093,7 @@ static ssize_t bq2415x_sysfs_set_timer(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
int ret = 0;
@@ -1104,7 +1114,7 @@ static ssize_t bq2415x_sysfs_show_timer(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
if (bq->timer_error)
@@ -1128,7 +1138,7 @@ static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_mode mode;
int ret = 0;
@@ -1180,7 +1190,7 @@ static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
@@ -1217,7 +1227,7 @@ static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
if (bq->automode < 0)
@@ -1245,7 +1255,7 @@ static ssize_t bq2415x_sysfs_set_registers(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
unsigned int reg;
@@ -1280,7 +1290,7 @@ static ssize_t bq2415x_sysfs_show_registers(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
@@ -1298,7 +1308,7 @@ static ssize_t bq2415x_sysfs_set_limit(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
long val;
int ret;
@@ -1329,7 +1339,7 @@ static ssize_t bq2415x_sysfs_show_limit(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
int ret;
@@ -1357,7 +1367,7 @@ static ssize_t bq2415x_sysfs_set_enable(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
long val;
@@ -1392,7 +1402,7 @@ static ssize_t bq2415x_sysfs_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
int ret;
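
The schedule_delayed_work() → mod_delayed_work() switch above matters when the work is already queued: schedule_delayed_work() is a no-op in that case, while mod_delayed_work() re-arms the timer to the new delay. A sketch:

	schedule_delayed_work(&bq->work, 0);       /* does nothing if already
	                                            * pending */
	mod_delayed_work(system_wq, &bq->work, 0); /* re-arms to run immediately
	                                            * even if queued with a
	                                            * longer delay */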
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index c47f32f152e6..b4ba01744368 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -152,6 +152,7 @@
#define BQ24296_REG_VPRS_PN_MASK (BIT(7) | BIT(6) | BIT(5))
#define BQ24296_REG_VPRS_PN_SHIFT 5
#define BQ24296_REG_VPRS_PN_24296 0x1
+#define BQ24296_REG_VPRS_PN_24297 0x3
#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
@@ -208,6 +209,7 @@ enum bq24190_chip {
BQ24192i,
BQ24196,
BQ24296,
+ BQ24297,
};
/*
@@ -422,7 +424,7 @@ static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
BQ24190_SYSFS_FIELD_RO(watchdog, CTTC, WATCHDOG),
BQ24190_SYSFS_FIELD_RW(en_timer, CTTC, EN_TIMER),
BQ24190_SYSFS_FIELD_RW(chg_timer, CTTC, CHG_TIMER),
- BQ24190_SYSFS_FIELD_RW(jeta_iset, CTTC, JEITA_ISET),
+ BQ24190_SYSFS_FIELD_RW(jeita_iset, CTTC, JEITA_ISET),
BQ24190_SYSFS_FIELD_RW(bat_comp, ICTRC, BAT_COMP),
BQ24190_SYSFS_FIELD_RW(vclamp, ICTRC, VCLAMP),
BQ24190_SYSFS_FIELD_RW(treg, ICTRC, TREG),
@@ -480,7 +482,7 @@ static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
static ssize_t bq24190_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24190_dev_info *bdi = power_supply_get_drvdata(psy);
struct bq24190_sysfs_field_info *info;
ssize_t count;
@@ -510,7 +512,7 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
static ssize_t bq24190_sysfs_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24190_dev_info *bdi = power_supply_get_drvdata(psy);
struct bq24190_sysfs_field_info *info;
int ret;
@@ -1319,6 +1321,7 @@ static int bq24190_charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
ret = bq24190_charger_get_charge_type(bdi, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -1399,6 +1402,7 @@ static int bq24190_charger_set_property(struct power_supply *psy,
ret = bq24190_charger_set_temp_alert_max(bdi, val);
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
ret = bq24190_charger_set_charge_type(bdi, val);
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
@@ -1427,6 +1431,7 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
@@ -1475,6 +1480,7 @@ static void bq24190_charger_external_power_changed(struct power_supply *psy)
static enum power_supply_property bq24190_charger_properties[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -1504,6 +1510,9 @@ static const struct power_supply_desc bq24190_charger_desc = {
.set_property = bq24190_charger_set_property,
.property_is_writeable = bq24190_charger_property_is_writeable,
.external_power_changed = bq24190_charger_external_power_changed,
+ .charge_types = BIT(POWER_SUPPLY_CHARGE_TYPE_NONE) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_TRICKLE) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_FAST),
};
/* Battery power supply property routines */
@@ -1897,6 +1906,7 @@ static int bq24296_check_chip(struct bq24190_dev_info *bdi)
switch (v) {
case BQ24296_REG_VPRS_PN_24296:
+ case BQ24296_REG_VPRS_PN_24297:
break;
default:
dev_err(bdi->dev, "Error unknown model: 0x%02x\n", v);
@@ -2033,6 +2043,17 @@ static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
.get_ntc_status = bq24296_charger_get_ntc_status,
.set_otg_vbus = bq24296_set_otg_vbus,
},
+ [BQ24297] = {
+ .ichg_array_size = BQ24296_CCC_ICHG_VALUES_LEN,
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24296_vbus_desc,
+#endif
+ .check_chip = bq24296_check_chip,
+ .set_chg_config = bq24296_battery_set_chg_config,
+ .ntc_fault_mask = BQ24296_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24296_charger_get_ntc_status,
+ .set_otg_vbus = bq24296_set_otg_vbus,
+ },
};
static int bq24190_probe(struct i2c_client *client)
@@ -2289,6 +2310,7 @@ static const struct i2c_device_id bq24190_i2c_ids[] = {
{ "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
{ "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
{ "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
+ { "bq24297", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24297] },
{ },
};
MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
@@ -2299,6 +2321,7 @@ static const struct of_device_id bq24190_of_match[] = {
{ .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
{ .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
{ .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
+ { .compatible = "ti,bq24297", .data = &bq24190_chip_info_tbl[BQ24297] },
{ },
};
MODULE_DEVICE_TABLE(of, bq24190_of_match);
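
A sketch of what the new POWER_SUPPLY_PROP_CHARGE_TYPES wiring expresses (property and enum names are from the hunk; the condensed desc is illustrative): the supported charge types are advertised once as a bitmask in the desc, while the property itself reuses the existing CHARGE_TYPE get/set paths:

	static const struct power_supply_desc desc = {
		.charge_types = BIT(POWER_SUPPLY_CHARGE_TYPE_NONE) |
				BIT(POWER_SUPPLY_CHARGE_TYPE_TRICKLE) |
				BIT(POWER_SUPPLY_CHARGE_TYPE_FAST),
		/* get/set fall through to the CHARGE_TYPE code paths */
	};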
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 801d0d2c5f2e..1416586f2459 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -759,7 +759,7 @@ static ssize_t bq24257_show_ovp_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
return sysfs_emit(buf, "%u\n", bq24257_vovp_map[bq->init_data.vovp]);
@@ -769,7 +769,7 @@ static ssize_t bq24257_show_in_dpm_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
return sysfs_emit(buf, "%u\n", bq24257_vindpm_map[bq->init_data.vindpm]);
@@ -779,7 +779,7 @@ static ssize_t bq24257_sysfs_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
int ret;
@@ -801,7 +801,7 @@ static ssize_t bq24257_sysfs_set_enable(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
long val;
int ret;
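
The dev_get_drvdata() → dev_to_psy() substitutions in these sysfs handlers all follow one pattern; a sketch (dev_to_psy() is presumably a container_of()-style accessor from the embedded struct device back to its power supply):

	struct power_supply *psy  = dev_get_drvdata(dev); /* old: via drvdata */
	struct power_supply *psy2 = dev_to_psy(dev);      /* new: direct accessor,
	                                                   * no reliance on
	                                                   * drvdata being set */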
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 40c5ac7a1118..90a5bccfc6b9 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -123,6 +123,7 @@ enum bq27xxx_reg_index {
BQ27XXX_DM_BLOCK, /* Data Block */
BQ27XXX_DM_DATA, /* Block Data */
BQ27XXX_DM_CKSUM, /* Block Data Checksum */
+ BQ27XXX_REG_SEDVF, /* End-of-discharge Voltage */
BQ27XXX_REG_MAX, /* sentinel */
};
@@ -159,6 +160,7 @@ static u8
[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SEDVF] = 0x77,
},
bq27010_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -184,6 +186,7 @@ static u8
[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SEDVF] = 0x77,
},
bq2750x_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -579,6 +582,7 @@ static enum power_supply_property bq27000_props[] = {
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
};
static enum power_supply_property bq27010_props[] = {
@@ -599,6 +603,7 @@ static enum power_supply_property bq27010_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
};
#define bq2750x_props bq27510g3_props
@@ -2039,6 +2044,36 @@ static int bq27xxx_battery_voltage(struct bq27xxx_device_info *di,
return 0;
}
+/*
+ * Return the design minimum battery voltage in microvolts,
+ * or < 0 if the read fails.
+ */
+static int bq27xxx_battery_read_dmin_volt(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int volt;
+
+ /* We only have to read the design minimum voltage once */
+ if (di->voltage_min_design > 0) {
+ val->intval = di->voltage_min_design;
+ return 0;
+ }
+
+ volt = bq27xxx_read(di, BQ27XXX_REG_SEDVF, true);
+ if (volt < 0) {
+ dev_err(di->dev, "error reading design min voltage\n");
+ return volt;
+ }
+
+ /* SEDVF = Design EDVF / 8 - 256 */
+ val->intval = volt * 8000 + 2048000;
+
+ /* Save for later reads */
+ di->voltage_min_design = val->intval;
+
+ return 0;
+}
+
static int bq27xxx_simple_value(int value,
union power_supply_propval *val)
{
@@ -2119,8 +2154,10 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
* power_supply_battery_info visible in sysfs.
*/
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
return -EINVAL;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ ret = bq27xxx_battery_read_dmin_volt(di, val);
+ break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
ret = bq27xxx_battery_read_cyct(di, val);
break;
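
A worked example of the SEDVF decoding above (raw value illustrative): the register stores Design EDVF / 8 - 256 in millivolt units, so a raw reading of 130 decodes as

	(130 + 256) * 8       = 3088 mV
	130 * 8000 + 2048000  = 3088000 µV

which is exactly what the volt * 8000 + 2048000 expression computes.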
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index a69faef444c0..c49e0e4d02f7 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/of.h>
#include <linux/thermal.h>
@@ -1088,7 +1089,7 @@ static ssize_t charger_state_show(struct device *dev,
if (!charger->externally_control)
state = regulator_is_enabled(charger->consumer);
- return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(state));
}
static ssize_t charger_externally_control_show(struct device *dev,
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 7781b45a67a7..6625d539d9ae 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
@@ -515,7 +516,7 @@ static void cpcap_charger_vbus_work(struct work_struct *work)
out_err:
cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
dev_err(ddata->dev, "%s could not %s vbus: %i\n", __func__,
- ddata->vbus_enabled ? "enable" : "disable", error);
+ str_enable_disable(ddata->vbus_enabled), error);
}
static int cpcap_charger_set_vbus(struct phy_companion *comparator,
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
index 9b0a7500296b..02d5bdbe2e8d 100644
--- a/drivers/power/supply/cros_charge-control.c
+++ b/drivers/power/supply/cros_charge-control.c
@@ -20,13 +20,6 @@
BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE) | \
BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
-enum CROS_CHCTL_ATTR {
- CROS_CHCTL_ATTR_START_THRESHOLD,
- CROS_CHCTL_ATTR_END_THRESHOLD,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR,
- _CROS_CHCTL_ATTR_COUNT
-};
-
/*
* Semantics of data *returned* from the EC API and Linux sysfs differ
* slightly, also the v1 API can not return any data.
@@ -38,18 +31,13 @@ enum CROS_CHCTL_ATTR {
*/
struct cros_chctl_priv {
+ struct device *dev;
struct cros_ec_device *cros_ec;
struct acpi_battery_hook battery_hook;
struct power_supply *hooked_battery;
u8 cmd_version;
- /* The callbacks need to access this priv structure.
- * As neither the struct device nor power_supply are under the drivers
- * control, embed the attributes within priv to use with container_of().
- */
- struct device_attribute device_attrs[_CROS_CHCTL_ATTR_COUNT];
- struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
- struct attribute_group group;
+ const struct power_supply_ext *psy_ext;
struct mutex lock; /* protects fields below and cros_ec */
enum power_supply_charge_behaviour current_behaviour;
@@ -119,26 +107,39 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
return cros_chctl_send_charge_control_cmd(priv->cros_ec, priv->cmd_version, &req);
}
-static struct cros_chctl_priv *cros_chctl_attr_to_priv(struct attribute *attr,
- enum CROS_CHCTL_ATTR idx)
+static int cros_chctl_psy_ext_get_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
- struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
+ struct cros_chctl_priv *priv = data;
- return container_of(dev_attr, struct cros_chctl_priv, device_attrs[idx]);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ val->intval = priv->current_start_threshold;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ val->intval = priv->current_end_threshold;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ val->intval = priv->current_behaviour;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_priv *priv,
- int is_end_threshold, const char *buf, size_t count)
+static int cros_chctl_psy_ext_set_threshold(struct cros_chctl_priv *priv,
+ enum power_supply_property psp,
+ int val)
{
- int ret, val;
+ int ret;
- ret = kstrtoint(buf, 10, &val);
- if (ret < 0)
- return ret;
if (val < 0 || val > 100)
return -EINVAL;
- if (is_end_threshold) {
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD) {
/* Start threshold is not exposed, use fixed value */
if (priv->cmd_version == 2)
priv->current_start_threshold = val == 100 ? 0 : val;
@@ -158,93 +159,73 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
return ret;
}
- return count;
-}
-
-static ssize_t charge_control_start_threshold_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_START_THRESHOLD);
-
- guard(mutex)(&priv->lock);
- return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
+ return 0;
}
-static ssize_t charge_control_start_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_START_THRESHOLD);
-
- guard(mutex)(&priv->lock);
- return cros_chctl_store_threshold(dev, priv, 0, buf, count);
-}
-static ssize_t charge_control_end_threshold_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int cros_chctl_psy_ext_set_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_END_THRESHOLD);
+ struct cros_chctl_priv *priv = data;
+ int ret;
guard(mutex)(&priv->lock);
- return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
-}
-
-static ssize_t charge_control_end_threshold_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_END_THRESHOLD);
- guard(mutex)(&priv->lock);
- return cros_chctl_store_threshold(dev, priv, 1, buf, count);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return cros_chctl_psy_ext_set_threshold(priv, psp, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ priv->current_behaviour = val->intval;
+ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute *attr, char *buf)
+static int cros_chctl_psy_prop_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp)
{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
-
- guard(mutex)(&priv->lock);
- return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
- priv->current_behaviour, buf);
+ return true;
}
-static ssize_t charge_behaviour_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
- int ret;
-
- ret = power_supply_charge_behaviour_parse(EC_CHARGE_CONTROL_BEHAVIOURS, buf);
- if (ret < 0)
- return ret;
-
- guard(mutex)(&priv->lock);
- priv->current_behaviour = ret;
+#define DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(_name, ...) \
+ static const enum power_supply_property _name ## _props[] = { \
+ __VA_ARGS__, \
+ }; \
+ \
+ static const struct power_supply_ext _name = { \
+ .name = "cros-charge-control", \
+ .properties = _name ## _props, \
+ .num_properties = ARRAY_SIZE(_name ## _props), \
+ .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS, \
+ .get_property = cros_chctl_psy_ext_get_prop, \
+ .set_property = cros_chctl_psy_ext_set_prop, \
+ .property_is_writeable = cros_chctl_psy_prop_is_writeable, \
+ }
- ret = cros_chctl_configure_ec(priv);
- if (ret < 0)
- return ret;
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v1,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR
+);
- return count;
-}
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v2,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD
+);
-static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
-
- if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3)
- return 0;
- else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2)
- return 0;
-
- return attr->mode;
-}
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v3,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD
+);
static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
@@ -254,7 +235,7 @@ static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_batt
return 0;
priv->hooked_battery = battery;
- return device_add_group(&battery->dev, &priv->group);
+ return power_supply_register_extension(battery, priv->psy_ext, priv->dev, priv);
}
static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
@@ -262,7 +243,7 @@ static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_b
struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook);
if (priv->hooked_battery == battery) {
- device_remove_group(&battery->dev, &priv->group);
+ power_supply_unregister_extension(battery, priv->psy_ext);
priv->hooked_battery = NULL;
}
@@ -288,7 +269,6 @@ static int cros_chctl_probe(struct platform_device *pdev)
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct cros_ec_device *cros_ec = ec_dev->ec_dev;
struct cros_chctl_priv *priv;
- size_t i;
int ret;
ret = cros_chctl_fwk_charge_control_versions(cros_ec);
@@ -321,19 +301,15 @@ static int cros_chctl_probe(struct platform_device *pdev)
dev_dbg(dev, "Command version: %u\n", (unsigned int)priv->cmd_version);
+ priv->dev = dev;
priv->cros_ec = cros_ec;
- priv->device_attrs[CROS_CHCTL_ATTR_START_THRESHOLD] =
- (struct device_attribute)__ATTR_RW(charge_control_start_threshold);
- priv->device_attrs[CROS_CHCTL_ATTR_END_THRESHOLD] =
- (struct device_attribute)__ATTR_RW(charge_control_end_threshold);
- priv->device_attrs[CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR] =
- (struct device_attribute)__ATTR_RW(charge_behaviour);
- for (i = 0; i < _CROS_CHCTL_ATTR_COUNT; i++) {
- sysfs_attr_init(&priv->device_attrs[i].attr);
- priv->attributes[i] = &priv->device_attrs[i].attr;
- }
- priv->group.is_visible = cros_chtl_attr_is_visible;
- priv->group.attrs = priv->attributes;
+
+ if (priv->cmd_version == 1)
+ priv->psy_ext = &cros_chctl_psy_ext_v1;
+ else if (priv->cmd_version == 2)
+ priv->psy_ext = &cros_chctl_psy_ext_v2;
+ else
+ priv->psy_ext = &cros_chctl_psy_ext_v3;
priv->battery_hook.name = dev_name(dev);
priv->battery_hook.add_battery = cros_chctl_add_battery;
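
A minimal sketch of the extension API this driver now targets (function and struct names are taken from the hunk; the callbacks and glue are illustrative): an extension bundles extra properties with get/set callbacks and is attached to an existing supply instead of adding raw sysfs groups to its device:

	static const enum power_supply_property my_props[] = {
		POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
	};

	static const struct power_supply_ext my_ext = {
		.name           = "my-extension",
		.properties     = my_props,
		.num_properties = ARRAY_SIZE(my_props),
		.get_property   = my_get_prop, /* hypothetical callbacks */
		.set_property   = my_set_prop,
	};

	/* attach from a battery hook, detach on removal */
	power_supply_register_extension(battery, &my_ext, dev, priv);
	power_supply_unregister_extension(battery, &my_ext);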
diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index 34328f5d556e..ac2e319e9517 100644
--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/string_choices.h>
#include <linux/mfd/da903x.h>
#include <linux/debugfs.h>
@@ -138,7 +139,7 @@ static int bat_debug_show(struct seq_file *s, void *data)
{
struct da9030_charger *charger = s->private;
- seq_printf(s, "charger is %s\n", charger->is_on ? "on" : "off");
+ seq_printf(s, "charger is %s\n", str_on_off(charger->is_on));
if (charger->chdet) {
seq_printf(s, "iset = %dmA, vset = %dmV\n",
charger->mA, charger->mV);
diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 652c1f213af1..4f28ef1bba1a 100644
--- a/drivers/power/supply/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
DA9150_QIF_SD_GAIN_SIZE);
da9150_fg_read_sync_end(fg);
- div = (u64) (sd_gain * shunt_val * 65536ULL);
+ div = 65536ULL * sd_gain * shunt_val;
do_div(div, 1000000);
- res = (u64) (iavg * 1000000ULL);
+ res = 1000000ULL * iavg;
do_div(res, div);
val->intval = (int) res;
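
The da9150 cleanup above relies on integer promotion: with one ULL operand the multiply is already 64-bit, so the explicit (u64) casts added nothing. A sketch (divisor illustrative):

	u64 res = 1000000ULL * iavg; /* promoted to 64-bit by the ULL constant */
	u32 rem = do_div(res, 1000); /* kernel 64-by-32 divide: res /= 1000,
	                              * remainder returned */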
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 7cf4ea06b500..83bdec5a2bda 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -195,22 +195,22 @@ static int w1_ds2760_recall_eeprom(struct device *dev, int addr)
}
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
return w1_ds2760_read(dev, buf, off, count);
}
-static BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
-static struct bin_attribute *w1_ds2760_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2760_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2760_group = {
- .bin_attrs = w1_ds2760_bin_attrs,
+ .bin_attrs_new = w1_ds2760_bin_attrs,
};
static const struct attribute_group *w1_ds2760_groups[] = {
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 1e7f297f6cb1..dd9ac7a32967 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -621,7 +621,7 @@ static ssize_t ds2780_set_pio_pin(struct device *dev,
static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -634,7 +634,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -654,19 +654,19 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2780_param_eeprom_bin_attr = {
+static const struct bin_attribute ds2780_param_eeprom_bin_attr = {
.attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2780_PARAM_EEPROM_SIZE,
- .read = ds2780_read_param_eeprom_bin,
- .write = ds2780_write_param_eeprom_bin,
+ .read_new = ds2780_read_param_eeprom_bin,
+ .write_new = ds2780_write_param_eeprom_bin,
};
static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -679,7 +679,7 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -699,14 +699,14 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2780_user_eeprom_bin_attr = {
+static const struct bin_attribute ds2780_user_eeprom_bin_attr = {
.attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2780_USER_EEPROM_SIZE,
- .read = ds2780_read_user_eeprom_bin,
- .write = ds2780_write_user_eeprom_bin,
+ .read_new = ds2780_read_user_eeprom_bin,
+ .write_new = ds2780_write_user_eeprom_bin,
};
static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled,
@@ -726,7 +726,7 @@ static struct attribute *ds2780_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *ds2780_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const ds2780_sysfs_bin_attrs[] = {
&ds2780_param_eeprom_bin_attr,
&ds2780_user_eeprom_bin_attr,
NULL
@@ -734,7 +734,7 @@ static struct bin_attribute *ds2780_sysfs_bin_attrs[] = {
static const struct attribute_group ds2780_sysfs_group = {
.attrs = ds2780_sysfs_attrs,
- .bin_attrs = ds2780_sysfs_bin_attrs,
+ .bin_attrs_new = ds2780_sysfs_bin_attrs,
};
static const struct attribute_group *ds2780_sysfs_groups[] = {
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index c4f8ccc687f9..8a1f1f9835e0 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -623,7 +623,7 @@ static ssize_t ds2781_set_pio_pin(struct device *dev,
static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -636,7 +636,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -656,19 +656,19 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2781_param_eeprom_bin_attr = {
+static const struct bin_attribute ds2781_param_eeprom_bin_attr = {
.attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2781_PARAM_EEPROM_SIZE,
- .read = ds2781_read_param_eeprom_bin,
- .write = ds2781_write_param_eeprom_bin,
+ .read_new = ds2781_read_param_eeprom_bin,
+ .write_new = ds2781_write_param_eeprom_bin,
};
static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -682,7 +682,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -702,14 +702,14 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2781_user_eeprom_bin_attr = {
+static const struct bin_attribute ds2781_user_eeprom_bin_attr = {
.attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2781_USER_EEPROM_SIZE,
- .read = ds2781_read_user_eeprom_bin,
- .write = ds2781_write_user_eeprom_bin,
+ .read_new = ds2781_read_user_eeprom_bin,
+ .write_new = ds2781_write_user_eeprom_bin,
};
static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2781_get_pmod_enabled,
@@ -729,7 +729,7 @@ static struct attribute *ds2781_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *ds2781_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const ds2781_sysfs_bin_attrs[] = {
&ds2781_param_eeprom_bin_attr,
&ds2781_user_eeprom_bin_attr,
NULL,
@@ -737,7 +737,7 @@ static struct bin_attribute *ds2781_sysfs_bin_attrs[] = {
static const struct attribute_group ds2781_sysfs_group = {
.attrs = ds2781_sysfs_attrs,
- .bin_attrs = ds2781_sysfs_bin_attrs,
+ .bin_attrs_new = ds2781_sysfs_bin_attrs,
};
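
The .read_new/.write_new/.bin_attrs_new churn across these w1/ds27xx hunks is the sysfs constification transition: the *_new fields take const-qualified bin_attribute callbacks and pointers, letting the attribute tables live in rodata. A sketch (names and size are illustrative):

	static ssize_t blob_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count);

	static const BIN_ATTR_RO(blob, BLOB_SIZE);

	static const struct bin_attribute *const my_bin_attrs[] = {
		&bin_attr_blob,
		NULL,
	};

	static const struct attribute_group my_group = {
		.bin_attrs_new = my_bin_attrs, /* const-aware field */
	};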
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index 85aa9c465aa4..cae95d35d398 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -11,6 +11,7 @@
* UEvent sending added by Evgeny Romanov <romanov@neurosoft.ru>
*/
+#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -57,14 +58,12 @@ struct ds278x_info {
struct power_supply_desc battery_desc;
const struct ds278x_battery_ops *ops;
struct delayed_work bat_work;
- int id;
int rsns;
int capacity;
int status; /* State Of Charge */
};
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_lock);
+static DEFINE_IDA(battery_id);
static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val)
{
@@ -312,21 +311,6 @@ static void ds278x_power_supply_init(struct power_supply_desc *battery)
battery->external_power_changed = NULL;
}
-static void ds278x_battery_remove(struct i2c_client *client)
-{
- struct ds278x_info *info = i2c_get_clientdata(client);
- int id = info->id;
-
- power_supply_unregister(info->battery);
- cancel_delayed_work_sync(&info->bat_work);
- kfree(info->battery_desc.name);
- kfree(info);
-
- mutex_lock(&battery_lock);
- idr_remove(&battery_id, id);
- mutex_unlock(&battery_lock);
-}
-
#ifdef CONFIG_PM_SLEEP
static int ds278x_suspend(struct device *dev)
@@ -368,6 +352,13 @@ static const struct ds278x_battery_ops ds278x_ops[] = {
}
};
+static void ds278x_free_ida(void *data)
+{
+ int num = (uintptr_t)data;
+
+ ida_free(&battery_id, num);
+}
+
static int ds278x_battery_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -387,32 +378,27 @@ static int ds278x_battery_probe(struct i2c_client *client)
}
/* Get an ID for this battery */
- mutex_lock(&battery_lock);
- ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&battery_lock);
- if (ret < 0)
- goto fail_id;
- num = ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ret = -ENOMEM;
- goto fail_info;
- }
+ num = ida_alloc(&battery_id, GFP_KERNEL);
+ if (num < 0)
+ return num;
+ ret = devm_add_action_or_reset(&client->dev, ds278x_free_ida, (void *)(uintptr_t)num);
+ if (ret)
+ return ret;
- info->battery_desc.name = kasprintf(GFP_KERNEL, "%s-%d",
- client->name, num);
- if (!info->battery_desc.name) {
- ret = -ENOMEM;
- goto fail_name;
- }
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->battery_desc.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "%s-%d", client->name, num);
+ if (!info->battery_desc.name)
+ return -ENOMEM;
if (id->driver_data == DS2786)
info->rsns = pdata->rsns;
i2c_set_clientdata(client, info);
info->client = client;
- info->id = num;
info->ops = &ds278x_ops[id->driver_data];
ds278x_power_supply_init(&info->battery_desc);
psy_cfg.drv_data = info;
@@ -420,30 +406,20 @@ static int ds278x_battery_probe(struct i2c_client *client)
info->capacity = 100;
info->status = POWER_SUPPLY_STATUS_FULL;
- INIT_DELAYED_WORK(&info->bat_work, ds278x_bat_work);
-
- info->battery = power_supply_register(&client->dev,
- &info->battery_desc, &psy_cfg);
+ info->battery = devm_power_supply_register(&client->dev,
+ &info->battery_desc,
+ &psy_cfg);
if (IS_ERR(info->battery)) {
dev_err(&client->dev, "failed to register battery\n");
- ret = PTR_ERR(info->battery);
- goto fail_register;
- } else {
- schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+ return PTR_ERR(info->battery);
}
- return 0;
+ ret = devm_delayed_work_autocancel(&client->dev, &info->bat_work, ds278x_bat_work);
+ if (ret)
+ return ret;
+ schedule_delayed_work(&info->bat_work, DS278x_DELAY);
-fail_register:
- kfree(info->battery_desc.name);
-fail_name:
- kfree(info);
-fail_info:
- mutex_lock(&battery_lock);
- idr_remove(&battery_id, num);
- mutex_unlock(&battery_lock);
-fail_id:
- return ret;
+ return 0;
}
static const struct i2c_device_id ds278x_id[] = {
@@ -459,7 +435,6 @@ static struct i2c_driver ds278x_battery_driver = {
.pm = &ds278x_battery_pm_ops,
},
.probe = ds278x_battery_probe,
- .remove = ds278x_battery_remove,
.id_table = ds278x_id,
};
module_i2c_driver(ds278x_battery_driver);
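
The ds2782 conversion above trades manual unwinding for device-managed resources; a sketch of the ordering guarantee it relies on (the devm helpers are real kernel APIs; free_ida and work_fn are hypothetical, driver bits trimmed):

	num  = ida_alloc(&battery_id, GFP_KERNEL);
	ret  = devm_add_action_or_reset(dev, free_ida, (void *)(uintptr_t)num);
	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	psy  = devm_power_supply_register(dev, &desc, &cfg);
	ret  = devm_delayed_work_autocancel(dev, &work, work_fn);

	/* On detach, devm releases run in reverse order: the work is cancelled
	 * first, then the psy unregisters, then info is freed, and the IDA
	 * slot is released last -- so no explicit .remove callback is needed.
	 */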
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 6139f736ecbe..46d18ce6a739 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -195,6 +195,8 @@ static int init_charge_current_limit(struct device *dev,
{
int i, len;
u32 cur_limit = U32_MAX;
+ bool set_def_limit;
+ u32 def_limit;
gpio_charger->current_limit_gpios = devm_gpiod_get_array_optional(dev,
"charge-current-limit", GPIOD_OUT_LOW);
@@ -228,6 +230,9 @@ static int init_charge_current_limit(struct device *dev,
if (len < 0)
return len;
+ set_def_limit = !device_property_read_u32(dev,
+ "charge-current-limit-default-microamp",
+ &def_limit);
for (i=0; i < gpio_charger->current_limit_map_size; i++) {
if (gpio_charger->current_limit_map[i].limit_ua > cur_limit) {
dev_err(dev, "charge-current-limit-mapping not sorted by current in descending order\n");
@@ -235,8 +240,16 @@ static int init_charge_current_limit(struct device *dev,
}
cur_limit = gpio_charger->current_limit_map[i].limit_ua;
+ if (set_def_limit && def_limit == cur_limit) {
+ set_charge_current_limit(gpio_charger, cur_limit);
+ return 0;
+ }
}
+ if (set_def_limit)
+ dev_warn(dev, "charge-current-limit-default-microamp %u not listed in charge-current-limit-mapping\n",
+ def_limit);
+
/* default to smallest current limitation for safety reasons */
len = gpio_charger->current_limit_map_size - 1;
set_charge_current_limit(gpio_charger,
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index 82263646ddc6..c448e0ac0dfa 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -7,76 +7,154 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
-#define IP5XXX_SYS_CTL0 0x01
-#define IP5XXX_SYS_CTL0_WLED_DET_EN BIT(4)
-#define IP5XXX_SYS_CTL0_WLED_EN BIT(3)
-#define IP5XXX_SYS_CTL0_BOOST_EN BIT(2)
-#define IP5XXX_SYS_CTL0_CHARGER_EN BIT(1)
-#define IP5XXX_SYS_CTL1 0x02
-#define IP5XXX_SYS_CTL1_LIGHT_SHDN_EN BIT(1)
-#define IP5XXX_SYS_CTL1_LOAD_PWRUP_EN BIT(0)
-#define IP5XXX_SYS_CTL2 0x0c
-#define IP5XXX_SYS_CTL2_LIGHT_SHDN_TH GENMASK(7, 3)
-#define IP5XXX_SYS_CTL3 0x03
-#define IP5XXX_SYS_CTL3_LONG_PRESS_TIME_SEL GENMASK(7, 6)
-#define IP5XXX_SYS_CTL3_BTN_SHDN_EN BIT(5)
-#define IP5XXX_SYS_CTL4 0x04
-#define IP5XXX_SYS_CTL4_SHDN_TIME_SEL GENMASK(7, 6)
-#define IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN BIT(5)
-#define IP5XXX_SYS_CTL5 0x07
-#define IP5XXX_SYS_CTL5_NTC_DIS BIT(6)
-#define IP5XXX_SYS_CTL5_WLED_MODE_SEL BIT(1)
-#define IP5XXX_SYS_CTL5_BTN_SHDN_SEL BIT(0)
-#define IP5XXX_CHG_CTL1 0x22
-#define IP5XXX_CHG_CTL1_BOOST_UVP_SEL GENMASK(3, 2)
-#define IP5XXX_CHG_CTL2 0x24
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL GENMASK(6, 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V (0x0 << 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V (0x1 << 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V (0x2 << 5)
-#define IP5XXX_CHG_CTL2_CONST_VOLT_SEL GENMASK(2, 1)
-#define IP5XXX_CHG_CTL4 0x26
-#define IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN BIT(6)
-#define IP5XXX_CHG_CTL4A 0x25
-#define IP5XXX_CHG_CTL4A_CONST_CUR_SEL GENMASK(4, 0)
-#define IP5XXX_MFP_CTL0 0x51
-#define IP5XXX_MFP_CTL1 0x52
-#define IP5XXX_GPIO_CTL2 0x53
-#define IP5XXX_GPIO_CTL2A 0x54
-#define IP5XXX_GPIO_CTL3 0x55
-#define IP5XXX_READ0 0x71
-#define IP5XXX_READ0_CHG_STAT GENMASK(7, 5)
-#define IP5XXX_READ0_CHG_STAT_IDLE (0x0 << 5)
-#define IP5XXX_READ0_CHG_STAT_TRICKLE (0x1 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_VOLT (0x2 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_CUR (0x3 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP (0x4 << 5)
-#define IP5XXX_READ0_CHG_STAT_FULL (0x5 << 5)
-#define IP5XXX_READ0_CHG_STAT_TIMEOUT (0x6 << 5)
-#define IP5XXX_READ0_CHG_OP BIT(4)
-#define IP5XXX_READ0_CHG_END BIT(3)
-#define IP5XXX_READ0_CONST_VOLT_TIMEOUT BIT(2)
-#define IP5XXX_READ0_CHG_TIMEOUT BIT(1)
-#define IP5XXX_READ0_TRICKLE_TIMEOUT BIT(0)
-#define IP5XXX_READ0_TIMEOUT GENMASK(2, 0)
-#define IP5XXX_READ1 0x72
-#define IP5XXX_READ1_WLED_PRESENT BIT(7)
-#define IP5XXX_READ1_LIGHT_LOAD BIT(6)
-#define IP5XXX_READ1_VIN_OVERVOLT BIT(5)
-#define IP5XXX_READ2 0x77
-#define IP5XXX_READ2_BTN_PRESS BIT(3)
-#define IP5XXX_READ2_BTN_LONG_PRESS BIT(1)
-#define IP5XXX_READ2_BTN_SHORT_PRESS BIT(0)
-#define IP5XXX_BATVADC_DAT0 0xa2
-#define IP5XXX_BATVADC_DAT1 0xa3
-#define IP5XXX_BATIADC_DAT0 0xa4
-#define IP5XXX_BATIADC_DAT1 0xa5
-#define IP5XXX_BATOCV_DAT0 0xa8
-#define IP5XXX_BATOCV_DAT1 0xa9
+#define IP5XXX_BAT_TYPE_4_2V 0x0
+#define IP5XXX_BAT_TYPE_4_3V 0x1
+#define IP5XXX_BAT_TYPE_4_35V 0x2
+#define IP5XXX_BAT_TYPE_4_4V 0x3
+#define IP5XXX_CHG_STAT_IDLE 0x0
+#define IP5XXX_CHG_STAT_TRICKLE 0x1
+#define IP5XXX_CHG_STAT_CONST_VOLT 0x2
+#define IP5XXX_CHG_STAT_CONST_CUR 0x3
+#define IP5XXX_CHG_STAT_CONST_VOLT_STOP 0x4
+#define IP5XXX_CHG_STAT_FULL 0x5
+#define IP5XXX_CHG_STAT_TIMEOUT 0x6
struct ip5xxx {
struct regmap *regmap;
bool initialized;
+ struct {
+ struct {
+ /* Charger enable */
+ struct regmap_field *enable;
+ /* Constant voltage value */
+ struct regmap_field *const_volt_sel;
+ /* Constant current value */
+ struct regmap_field *const_curr_sel;
+ /* Charger status */
+ struct regmap_field *status;
+ /* Charging ended flag */
+ struct regmap_field *chg_end;
+ /* Timeout flags (CV, charge, trickle) */
+ struct regmap_field *timeout;
+ /* Overvoltage limit */
+ struct regmap_field *vin_overvolt;
+ } charger;
+ struct {
+ /* Boost converter enable */
+ struct regmap_field *enable;
+ struct {
+ /* Light load shutdown enable */
+ struct regmap_field *enable;
+ /* Light load shutdown current limit */
+ struct regmap_field *i_limit;
+ } light_load_shutdown;
+ /* Automatic powerup on increased load */
+ struct regmap_field *load_powerup_en;
+ /* Automatic powerup on VIN pull-out */
+ struct regmap_field *vin_pullout_en;
+ /* Undervoltage limit */
+ struct regmap_field *undervolt_limit;
+ /* Light load status flag */
+ struct regmap_field *light_load_status;
+ } boost;
+ struct {
+ /* NTC disable */
+ struct regmap_field *ntc_dis;
+ /* Battery voltage type */
+ struct regmap_field *type;
+ /* Battery voltage autoset from Vset pin */
+ struct regmap_field *vset_en;
+ struct {
+ /* Battery measurement registers */
+ struct ip5xxx_battery_adc_regs {
+ struct regmap_field *low;
+ struct regmap_field *high;
+ } volt, curr, open_volt;
+ } adc;
+ } battery;
+ struct {
+ /* Double/long press shutdown enable */
+ struct regmap_field *shdn_enable;
+ /* WLED activation: double press or long press */
+ struct regmap_field *wled_mode;
+ /* Shutdown activation: double press or long press */
+ struct regmap_field *shdn_mode;
+ /* Long press time */
+ struct regmap_field *long_press_time;
+ /* Button pressed */
+ struct regmap_field *pressed;
+ /* Button long-pressed */
+ struct regmap_field *long_pressed;
+ /* Button short-pressed */
+ struct regmap_field *short_pressed;
+ } btn;
+ struct {
+ /* WLED enable */
+ struct regmap_field *enable;
+ /* WLED detect */
+ struct regmap_field *detect_en;
+ /* WLED present */
+ struct regmap_field *present;
+ } wled;
+ } regs;
+
+ /* Maximum supported battery voltage (via regs.battery.type) */
+ int vbat_max;
+ /* Scaling constants for regs.boost.undervolt_limit */
+ struct {
+ int setpoint;
+ int microvolts_per_bit;
+ } boost_undervolt;
+ /* Scaling constants for regs.charger.const_curr_sel */
+ struct {
+ int setpoint;
+ } const_curr;
+ /* Whether regs.charger.chg_end is inverted */
+ u8 chg_end_inverted;
+};
+
+#define REG_FIELD_UNSUPPORTED { .lsb = 1 }
+/* Register field layout. Unsupported fields are marked as { .lsb = 1 } */
+struct ip5xxx_regfield_config {
+ const struct reg_field charger_enable;
+ const struct reg_field charger_const_volt_sel;
+ const struct reg_field charger_const_curr_sel;
+ const struct reg_field charger_status;
+ const struct reg_field charger_chg_end;
+ const struct reg_field charger_timeout;
+ const struct reg_field charger_vin_overvolt;
+ const struct reg_field boost_enable;
+ const struct reg_field boost_llshdn_enable;
+ const struct reg_field boost_llshdn_i_limit;
+ const struct reg_field boost_load_powerup_en;
+ const struct reg_field boost_vin_pullout_en;
+ const struct reg_field boost_undervolt_limit;
+ const struct reg_field boost_light_load_status;
+ const struct reg_field battery_ntc_dis;
+ const struct reg_field battery_type;
+ const struct reg_field battery_vset_en;
+ const struct reg_field battery_adc_volt_low;
+ const struct reg_field battery_adc_volt_high;
+ const struct reg_field battery_adc_curr_low;
+ const struct reg_field battery_adc_curr_high;
+ const struct reg_field battery_adc_ovolt_low;
+ const struct reg_field battery_adc_ovolt_high;
+ const struct reg_field btn_shdn_enable;
+ const struct reg_field btn_wled_mode;
+ const struct reg_field btn_shdn_mode;
+ const struct reg_field btn_long_press_time;
+ const struct reg_field btn_pressed;
+ const struct reg_field btn_long_pressed;
+ const struct reg_field btn_short_pressed;
+ const struct reg_field wled_enable;
+ const struct reg_field wled_detect_en;
+ const struct reg_field wled_present;
+
+ int vbat_max;
+ int boost_undervolt_setpoint;
+ int boost_undervolt_uv_per_bit;
+ int const_curr_setpoint;
+ u8 chg_end_inverted;
};
/*
@@ -87,24 +165,30 @@ struct ip5xxx {
* 2) Attempt the initialization sequence on each subsequent register access
* until it succeeds.
*/
-static int ip5xxx_read(struct ip5xxx *ip5xxx, unsigned int reg,
+static int ip5xxx_read(struct ip5xxx *ip5xxx, struct regmap_field *field,
unsigned int *val)
{
int ret;
- ret = regmap_read(ip5xxx->regmap, reg, val);
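+ /* A missing regmap_field means this chip variant lacks the register */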
+ if (!field)
+ return -EOPNOTSUPP;
+
+ ret = regmap_field_read(field, val);
if (ret)
ip5xxx->initialized = false;
return ret;
}
-static int ip5xxx_update_bits(struct ip5xxx *ip5xxx, unsigned int reg,
- unsigned int mask, unsigned int val)
+static int ip5xxx_write(struct ip5xxx *ip5xxx, struct regmap_field *field,
+ unsigned int val)
{
int ret;
- ret = regmap_update_bits(ip5xxx->regmap, reg, mask, val);
+ if (!field)
+ return -EOPNOTSUPP;
+
+ ret = regmap_field_write(field, val);
if (ret)
ip5xxx->initialized = false;
@@ -123,28 +207,26 @@ static int ip5xxx_initialize(struct power_supply *psy)
* Disable shutdown under light load.
* Enable power on when under load.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL1,
- IP5XXX_SYS_CTL1_LIGHT_SHDN_EN |
- IP5XXX_SYS_CTL1_LOAD_PWRUP_EN,
- IP5XXX_SYS_CTL1_LOAD_PWRUP_EN);
+ if (ip5xxx->regs.boost.light_load_shutdown.enable) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.light_load_shutdown.enable, 0);
+ if (ret)
+ return ret;
+ }
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.load_powerup_en, 1);
if (ret)
return ret;
/*
* Enable shutdown after a long button press (as configured below).
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL3,
- IP5XXX_SYS_CTL3_BTN_SHDN_EN,
- IP5XXX_SYS_CTL3_BTN_SHDN_EN);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.shdn_enable, 1);
if (ret)
return ret;
/*
* Power on automatically when VIN is removed.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL4,
- IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN,
- IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.vin_pullout_en, 1);
if (ret)
return ret;
@@ -152,12 +234,15 @@ static int ip5xxx_initialize(struct power_supply *psy)
* Enable the NTC.
* Configure the button for two presses => LED, long press => shutdown.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL5,
- IP5XXX_SYS_CTL5_NTC_DIS |
- IP5XXX_SYS_CTL5_WLED_MODE_SEL |
- IP5XXX_SYS_CTL5_BTN_SHDN_SEL,
- IP5XXX_SYS_CTL5_WLED_MODE_SEL |
- IP5XXX_SYS_CTL5_BTN_SHDN_SEL);
+ if (ip5xxx->regs.battery.ntc_dis) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.ntc_dis, 0);
+ if (ret)
+ return ret;
+ }
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.wled_mode, 1);
+ if (ret)
+ return ret;
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.shdn_mode, 1);
if (ret)
return ret;
@@ -186,24 +271,37 @@ static int ip5xxx_battery_get_status(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ if (!ip5xxx->regs.charger.status) {
+ /* Fall back to the Charging Ended bit */
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.chg_end, &rval);
+ if (ret)
+ return ret;
+
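+ /* The bit level that means "charging" differs per chip; chg_end_inverted stores it */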
+ if (rval == ip5xxx->chg_end_inverted)
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+ }
+
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.status, &rval);
if (ret)
return ret;
- switch (rval & IP5XXX_READ0_CHG_STAT) {
- case IP5XXX_READ0_CHG_STAT_IDLE:
+ switch (rval) {
+ case IP5XXX_CHG_STAT_IDLE:
*val = POWER_SUPPLY_STATUS_DISCHARGING;
break;
- case IP5XXX_READ0_CHG_STAT_TRICKLE:
- case IP5XXX_READ0_CHG_STAT_CONST_CUR:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT:
+ case IP5XXX_CHG_STAT_TRICKLE:
+ case IP5XXX_CHG_STAT_CONST_CUR:
+ case IP5XXX_CHG_STAT_CONST_VOLT:
*val = POWER_SUPPLY_STATUS_CHARGING;
break;
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP:
- case IP5XXX_READ0_CHG_STAT_FULL:
+ case IP5XXX_CHG_STAT_CONST_VOLT_STOP:
+ case IP5XXX_CHG_STAT_FULL:
*val = POWER_SUPPLY_STATUS_FULL;
break;
- case IP5XXX_READ0_CHG_STAT_TIMEOUT:
+ case IP5XXX_CHG_STAT_TIMEOUT:
*val = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
default:
@@ -218,22 +316,22 @@ static int ip5xxx_battery_get_charge_type(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.status, &rval);
if (ret)
return ret;
- switch (rval & IP5XXX_READ0_CHG_STAT) {
- case IP5XXX_READ0_CHG_STAT_IDLE:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP:
- case IP5XXX_READ0_CHG_STAT_FULL:
- case IP5XXX_READ0_CHG_STAT_TIMEOUT:
+ switch (rval) {
+ case IP5XXX_CHG_STAT_IDLE:
+ case IP5XXX_CHG_STAT_CONST_VOLT_STOP:
+ case IP5XXX_CHG_STAT_FULL:
+ case IP5XXX_CHG_STAT_TIMEOUT:
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
break;
- case IP5XXX_READ0_CHG_STAT_TRICKLE:
+ case IP5XXX_CHG_STAT_TRICKLE:
*val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
break;
- case IP5XXX_READ0_CHG_STAT_CONST_CUR:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT:
+ case IP5XXX_CHG_STAT_CONST_CUR:
+ case IP5XXX_CHG_STAT_CONST_VOLT:
*val = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
break;
default:
@@ -248,11 +346,11 @@ static int ip5xxx_battery_get_health(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.timeout, &rval);
if (ret)
return ret;
- if (rval & IP5XXX_READ0_TIMEOUT)
+ if (rval)
*val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
else
*val = POWER_SUPPLY_HEALTH_GOOD;
@@ -265,7 +363,7 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL2, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.battery.type, &rval);
if (ret)
return ret;
@@ -273,16 +371,19 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
* It is not clear what this will return if
* IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN is not set...
*/
- switch (rval & IP5XXX_CHG_CTL2_BAT_TYPE_SEL) {
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V:
+ switch (rval) {
+ case IP5XXX_BAT_TYPE_4_2V:
*val = 4200000;
break;
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V:
+ case IP5XXX_BAT_TYPE_4_3V:
*val = 4300000;
break;
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V:
+ case IP5XXX_BAT_TYPE_4_35V:
*val = 4350000;
break;
+ case IP5XXX_BAT_TYPE_4_4V:
+ *val = 4400000;
+ break;
default:
return -EINVAL;
}
@@ -291,16 +392,16 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
}
static int ip5xxx_battery_read_adc(struct ip5xxx *ip5xxx,
- u8 lo_reg, u8 hi_reg, int *val)
+ struct ip5xxx_battery_adc_regs *regs, int *val)
{
unsigned int hi, lo;
int ret;
- ret = ip5xxx_read(ip5xxx, lo_reg, &lo);
+ ret = ip5xxx_read(ip5xxx, regs->low, &lo);
if (ret)
return ret;
- ret = ip5xxx_read(ip5xxx, hi_reg, &hi);
+ ret = ip5xxx_read(ip5xxx, regs->high, &hi);
if (ret)
return ret;
@@ -335,33 +436,35 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
return ip5xxx_battery_get_voltage_max(ip5xxx, &val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATVADC_DAT0,
- IP5XXX_BATVADC_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.volt, &raw);
+ if (ret)
+ return ret;
val->intval = 2600000 + DIV_ROUND_CLOSEST(raw * 26855, 100);
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATOCV_DAT0,
- IP5XXX_BATOCV_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.open_volt, &raw);
+ if (ret)
+ return ret;
val->intval = 2600000 + DIV_ROUND_CLOSEST(raw * 26855, 100);
return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATIADC_DAT0,
- IP5XXX_BATIADC_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.curr, &raw);
+ if (ret)
+ return ret;
val->intval = DIV_ROUND_CLOSEST(raw * 149197, 200);
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL4A, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.const_curr_sel, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL4A_CONST_CUR_SEL;
- val->intval = 100000 * rval;
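+ /* 100 mA per step above the chip's base current (0 on IP51xx, 50 mA on IP5306) */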
+ val->intval = ip5xxx->const_curr.setpoint + 100000 * rval;
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
@@ -373,12 +476,11 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
if (ret)
return ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL2, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.const_volt_sel, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL2_CONST_VOLT_SEL;
- val->intval = vmax + 14000 * (rval >> 1);
+ val->intval = vmax + 14000 * rval;
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
@@ -399,30 +501,36 @@ static int ip5xxx_battery_set_voltage_max(struct ip5xxx *ip5xxx, int val)
unsigned int rval;
int ret;
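+ /* Reject voltages above what this variant's battery-type field can encode */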
+ if (val > ip5xxx->vbat_max)
+ return -EINVAL;
+
switch (val) {
case 4200000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V;
+ rval = IP5XXX_BAT_TYPE_4_2V;
break;
case 4300000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V;
+ rval = IP5XXX_BAT_TYPE_4_3V;
break;
case 4350000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V;
+ rval = IP5XXX_BAT_TYPE_4_35V;
+ break;
+ case 4400000:
+ rval = IP5XXX_BAT_TYPE_4_4V;
break;
default:
return -EINVAL;
}
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL2,
- IP5XXX_CHG_CTL2_BAT_TYPE_SEL, rval);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.type, rval);
if (ret)
return ret;
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL4,
- IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN,
- IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN);
- if (ret)
- return ret;
+ /* Don't try to auto-detect battery type, even if the IC could */
+ if (ip5xxx->regs.battery.vset_en) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.vset_en, 1);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -443,7 +551,7 @@ static int ip5xxx_battery_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
switch (val->intval) {
case POWER_SUPPLY_STATUS_CHARGING:
- rval = IP5XXX_SYS_CTL0_CHARGER_EN;
+ rval = 1;
break;
case POWER_SUPPLY_STATUS_DISCHARGING:
case POWER_SUPPLY_STATUS_NOT_CHARGING:
@@ -452,25 +560,22 @@ static int ip5xxx_battery_set_property(struct power_supply *psy,
default:
return -EINVAL;
}
- return ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL0,
- IP5XXX_SYS_CTL0_CHARGER_EN, rval);
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.enable, rval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
return ip5xxx_battery_set_voltage_max(ip5xxx, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- rval = val->intval / 100000;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL4A,
- IP5XXX_CHG_CTL4A_CONST_CUR_SEL, rval);
+ rval = (val->intval - ip5xxx->const_curr.setpoint) / 100000;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.const_curr_sel, rval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
ret = ip5xxx_battery_get_voltage_max(ip5xxx, &vmax);
if (ret)
return ret;
- rval = ((val->intval - vmax) / 14000) << 1;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL2,
- IP5XXX_CHG_CTL2_CONST_VOLT_SEL, rval);
+ rval = (val->intval - vmax) / 14000;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.const_volt_sel, rval);
default:
return -EINVAL;
@@ -515,20 +620,20 @@ static int ip5xxx_boost_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- ret = ip5xxx_read(ip5xxx, IP5XXX_SYS_CTL0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.boost.enable, &rval);
if (ret)
return ret;
- val->intval = !!(rval & IP5XXX_SYS_CTL0_BOOST_EN);
+ val->intval = !!rval;
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL1, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.boost.undervolt_limit, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL1_BOOST_UVP_SEL;
- val->intval = 4530000 + 100000 * (rval >> 2);
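+ /* IP51xx: 4.53 V base at 100 mV/step; IP5306: 4.45 V base at 50 mV/step */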
+ val->intval = ip5xxx->boost_undervolt.setpoint +
+ ip5xxx->boost_undervolt.microvolts_per_bit * rval;
return 0;
default:
@@ -550,14 +655,12 @@ static int ip5xxx_boost_set_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- rval = val->intval ? IP5XXX_SYS_CTL0_BOOST_EN : 0;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL0,
- IP5XXX_SYS_CTL0_BOOST_EN, rval);
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.boost.enable, !!val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- rval = ((val->intval - 4530000) / 100000) << 2;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL1,
- IP5XXX_CHG_CTL1_BOOST_UVP_SEL, rval);
+ rval = (val->intval - ip5xxx->boost_undervolt.setpoint) /
+ ip5xxx->boost_undervolt.microvolts_per_bit;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.boost.undervolt_limit, rval);
default:
return -EINVAL;
@@ -583,13 +686,152 @@ static const struct power_supply_desc ip5xxx_boost_desc = {
static const struct regmap_config ip5xxx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = IP5XXX_BATOCV_DAT1,
+ .max_register = 0xa9,
+};
+
+static struct ip5xxx_regfield_config ip51xx_fields = {
+ .charger_enable = REG_FIELD(0x01, 1, 1),
+ .charger_const_volt_sel = REG_FIELD(0x24, 1, 2),
+ .charger_const_curr_sel = REG_FIELD(0x25, 0, 4),
+ .charger_status = REG_FIELD(0x71, 5, 7),
+ .charger_chg_end = REG_FIELD(0x71, 3, 3),
+ .charger_timeout = REG_FIELD(0x71, 0, 2),
+ .charger_vin_overvolt = REG_FIELD(0x72, 5, 5),
+ .boost_enable = REG_FIELD(0x01, 2, 2),
+ .boost_llshdn_enable = REG_FIELD(0x02, 1, 1),
+ .boost_llshdn_i_limit = REG_FIELD(0x0c, 3, 7),
+ .boost_load_powerup_en = REG_FIELD(0x02, 0, 0),
+ .boost_vin_pullout_en = REG_FIELD(0x04, 5, 5),
+ .boost_undervolt_limit = REG_FIELD(0x22, 2, 3),
+ .boost_light_load_status = REG_FIELD(0x72, 6, 6),
+ .battery_ntc_dis = REG_FIELD(0x07, 6, 6),
+ .battery_type = REG_FIELD(0x24, 5, 6),
+ .battery_vset_en = REG_FIELD(0x26, 6, 6),
+ .battery_adc_volt_low = REG_FIELD(0xa2, 0, 7),
+ .battery_adc_volt_high = REG_FIELD(0xa3, 0, 5),
+ .battery_adc_curr_low = REG_FIELD(0xa4, 0, 7),
+ .battery_adc_curr_high = REG_FIELD(0xa5, 0, 5),
+ .battery_adc_ovolt_low = REG_FIELD(0xa8, 0, 7),
+ .battery_adc_ovolt_high = REG_FIELD(0xa9, 0, 5),
+ .btn_shdn_enable = REG_FIELD(0x03, 5, 5),
+ .btn_wled_mode = REG_FIELD(0x07, 1, 1),
+ .btn_shdn_mode = REG_FIELD(0x07, 0, 0),
+ .btn_long_press_time = REG_FIELD(0x03, 6, 7),
+ .btn_pressed = REG_FIELD(0x77, 3, 3),
+ .btn_long_pressed = REG_FIELD(0x77, 1, 1),
+ .btn_short_pressed = REG_FIELD(0x77, 0, 0),
+ .wled_enable = REG_FIELD(0x01, 3, 3),
+ .wled_detect_en = REG_FIELD(0x01, 4, 4),
+ .wled_present = REG_FIELD(0x72, 7, 7),
+
+ .vbat_max = 4350000,
+ .boost_undervolt_setpoint = 4530000,
+ .boost_undervolt_uv_per_bit = 100000,
+};
+
+static struct ip5xxx_regfield_config ip5306_fields = {
+ .charger_enable = REG_FIELD(0x00, 4, 4),
+ .charger_const_volt_sel = REG_FIELD(0x22, 0, 1),
+ .charger_const_curr_sel = REG_FIELD(0x24, 0, 4),
+ .charger_status = REG_FIELD_UNSUPPORTED, /* other bits... */
+ .charger_chg_end = REG_FIELD(0x71, 3, 3),
+ .charger_timeout = REG_FIELD_UNSUPPORTED,
+ .charger_vin_overvolt = REG_FIELD_UNSUPPORTED,
+ .boost_enable = REG_FIELD(0x00, 5, 5),
+ .boost_llshdn_enable = REG_FIELD_UNSUPPORTED,
+ .boost_llshdn_i_limit = REG_FIELD_UNSUPPORTED,
+ .boost_load_powerup_en = REG_FIELD(0x00, 2, 2),
+ .boost_vin_pullout_en = REG_FIELD(0x01, 2, 2),
+ .boost_undervolt_limit = REG_FIELD(0x21, 2, 4),
+ .boost_light_load_status = REG_FIELD(0x72, 2, 2),
+ .battery_ntc_dis = REG_FIELD_UNSUPPORTED,
+ .battery_type = REG_FIELD(0x22, 2, 3),
+ .battery_vset_en = REG_FIELD_UNSUPPORTED,
+ .battery_adc_volt_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_volt_high = REG_FIELD_UNSUPPORTED,
+ .battery_adc_curr_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_curr_high = REG_FIELD_UNSUPPORTED,
+ .battery_adc_ovolt_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_ovolt_high = REG_FIELD_UNSUPPORTED,
+ .btn_shdn_enable = REG_FIELD(0x00, 0, 0),
+ .btn_wled_mode = REG_FIELD(0x01, 6, 6),
+ .btn_shdn_mode = REG_FIELD(0x01, 7, 7),
+ .btn_long_press_time = REG_FIELD(0x02, 4, 4), /* +1s */
+ .btn_pressed = REG_FIELD_UNSUPPORTED,
+ /* TODO: double press */
+ .btn_long_pressed = REG_FIELD(0x77, 1, 1),
+ .btn_short_pressed = REG_FIELD(0x77, 0, 0),
+ .wled_enable = REG_FIELD_UNSUPPORTED,
+ .wled_detect_en = REG_FIELD_UNSUPPORTED,
+ .wled_present = REG_FIELD_UNSUPPORTED,
+
+ .vbat_max = 4400000,
+ .boost_undervolt_setpoint = 4450000,
+ .boost_undervolt_uv_per_bit = 50000,
+ .const_curr_setpoint = 50000,
+ .chg_end_inverted = 1,
};
+#define ip5xxx_setup_reg(_field, _reg) \
+ do { \
+ if (likely(cfg->_field.lsb <= cfg->_field.msb)) { \
+ struct regmap_field *_tmp = devm_regmap_field_alloc(dev, \
+ ip5xxx->regmap, cfg->_field); \
+ if (!IS_ERR(_tmp)) \
+ ip5xxx->regs._reg = _tmp; \
+ } \
+ } while (0)
+
+static void ip5xxx_setup_regs(struct device *dev, struct ip5xxx *ip5xxx,
+ const struct ip5xxx_regfield_config *cfg)
+{
+ ip5xxx_setup_reg(charger_enable, charger.enable);
+ ip5xxx_setup_reg(charger_const_volt_sel, charger.const_volt_sel);
+ ip5xxx_setup_reg(charger_const_curr_sel, charger.const_curr_sel);
+ ip5xxx_setup_reg(charger_status, charger.status);
+ ip5xxx_setup_reg(charger_chg_end, charger.chg_end);
+ ip5xxx_setup_reg(charger_timeout, charger.timeout);
+ ip5xxx_setup_reg(charger_vin_overvolt, charger.vin_overvolt);
+ ip5xxx_setup_reg(boost_enable, boost.enable);
+ ip5xxx_setup_reg(boost_llshdn_enable, boost.light_load_shutdown.enable);
+ ip5xxx_setup_reg(boost_llshdn_i_limit, boost.light_load_shutdown.i_limit);
+ ip5xxx_setup_reg(boost_load_powerup_en, boost.load_powerup_en);
+ ip5xxx_setup_reg(boost_vin_pullout_en, boost.vin_pullout_en);
+ ip5xxx_setup_reg(boost_undervolt_limit, boost.undervolt_limit);
+ ip5xxx_setup_reg(boost_light_load_status, boost.light_load_status);
+ ip5xxx_setup_reg(battery_ntc_dis, battery.ntc_dis);
+ ip5xxx_setup_reg(battery_type, battery.type);
+ ip5xxx_setup_reg(battery_vset_en, battery.vset_en);
+ ip5xxx_setup_reg(battery_adc_volt_low, battery.adc.volt.low);
+ ip5xxx_setup_reg(battery_adc_volt_high, battery.adc.volt.high);
+ ip5xxx_setup_reg(battery_adc_curr_low, battery.adc.curr.low);
+ ip5xxx_setup_reg(battery_adc_curr_high, battery.adc.curr.high);
+ ip5xxx_setup_reg(battery_adc_ovolt_low, battery.adc.open_volt.low);
+ ip5xxx_setup_reg(battery_adc_ovolt_high, battery.adc.open_volt.high);
+ ip5xxx_setup_reg(btn_shdn_enable, btn.shdn_enable);
+ ip5xxx_setup_reg(btn_wled_mode, btn.wled_mode);
+ ip5xxx_setup_reg(btn_shdn_mode, btn.shdn_mode);
+ ip5xxx_setup_reg(btn_long_press_time, btn.long_press_time);
+ ip5xxx_setup_reg(btn_pressed, btn.pressed);
+ ip5xxx_setup_reg(btn_long_pressed, btn.long_pressed);
+ ip5xxx_setup_reg(btn_short_pressed, btn.short_pressed);
+ ip5xxx_setup_reg(wled_enable, wled.enable);
+ ip5xxx_setup_reg(wled_detect_en, wled.detect_en);
+ ip5xxx_setup_reg(wled_present, wled.present);
+
+ ip5xxx->vbat_max = cfg->vbat_max;
+ ip5xxx->boost_undervolt.setpoint = cfg->boost_undervolt_setpoint;
+ ip5xxx->boost_undervolt.microvolts_per_bit = cfg->boost_undervolt_uv_per_bit;
+ ip5xxx->const_curr.setpoint = cfg->const_curr_setpoint;
+ ip5xxx->chg_end_inverted = cfg->chg_end_inverted;
+}
+
static int ip5xxx_power_probe(struct i2c_client *client)
{
+ const struct ip5xxx_regfield_config *fields = &ip51xx_fields;
struct power_supply_config psy_cfg = {};
struct device *dev = &client->dev;
+ const struct of_device_id *of_id;
struct power_supply *psy;
struct ip5xxx *ip5xxx;
@@ -601,6 +843,11 @@ static int ip5xxx_power_probe(struct i2c_client *client)
if (IS_ERR(ip5xxx->regmap))
return PTR_ERR(ip5xxx->regmap);
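+ /* Default to the IP51xx register layout when OF match data is absent */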
+ of_id = i2c_of_match_device(dev->driver->of_match_table, client);
+ if (of_id)
+ fields = (const struct ip5xxx_regfield_config *)of_id->data;
+ ip5xxx_setup_regs(dev, ip5xxx, fields);
+
psy_cfg.of_node = dev->of_node;
psy_cfg.drv_data = ip5xxx;
@@ -616,10 +863,11 @@ static int ip5xxx_power_probe(struct i2c_client *client)
}
static const struct of_device_id ip5xxx_power_of_match[] = {
- { .compatible = "injoinic,ip5108" },
- { .compatible = "injoinic,ip5109" },
- { .compatible = "injoinic,ip5207" },
- { .compatible = "injoinic,ip5209" },
+ { .compatible = "injoinic,ip5108", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5109", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5207", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5209", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5306", .data = &ip5306_fields },
{ }
};
MODULE_DEVICE_TABLE(of, ip5xxx_power_of_match);
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index 2e4bc74e1c4a..23eb426295db 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for Analog Devices (Linear Technology) LTC4162-L charger IC.
+ * Driver for Analog Devices (Linear Technology)
+ * LTC4162-L 35V/3.2A Multi-Cell Lithium-Ion Step-Down Battery Charger
+ * LTC4162-F 35V/3.2A Multi-Cell LiFePO4 Step-Down Battery Charger
+ * LTC4162-S 35V/3.2A Lead-Acid Step-Down Battery Charger
+ * LTC4015 35V/3.2A Multichemistry Buck Battery Charger Controller
* Copyright (C) 2020, Topic Embedded Products
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of.h>
@@ -47,6 +52,20 @@
#define LTC4162L_VBAT_FILT 0x47
#define LTC4162L_INPUT_UNDERVOLTAGE_DAC 0x4B
+#define LTC4162L_CHEM_MASK GENMASK(11, 8)
+
+enum ltc4162_chem {
+ ltc4162_lad,
+ ltc4162_l42,
+ ltc4162_l41,
+ ltc4162_l40,
+ ltc4162_fad,
+ ltc4162_ffs,
+ ltc4162_fst,
+ ltc4162_sst = 8,
+ ltc4162_sad,
+};
+
/* Enumeration as in datasheet. Individual bits are mutually exclusive. */
enum ltc4162l_state {
battery_detection = 2048,
@@ -75,10 +94,28 @@ enum ltc4162l_charge_status {
/* Magic number to write to ARM_SHIP_MODE register */
#define LTC4162L_ARM_SHIP_MODE_MAGIC 21325
+struct ltc4162l_info;
+
+struct ltc4162l_chip_info {
+ const char *name;
+ int (*get_vbat)(struct ltc4162l_info *info, unsigned int reg,
+ union power_supply_propval *val);
+ int (*get_vcharge)(struct ltc4162l_info *info, unsigned int reg,
+ union power_supply_propval *val);
+ int (*set_vcharge)(struct ltc4162l_info *info, unsigned int reg,
+ unsigned int value);
+ int (*get_die_temp)(struct ltc4162l_info *info,
+ union power_supply_propval *val);
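+ /* Battery-current resolution in pV/LSB; pV divided by rsnsb (µΩ) yields µA */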
+ unsigned int ibat_resolution_pv;
+ unsigned int vin_resolution_uv;
+ u8 telemetry_mask;
+};
+
struct ltc4162l_info {
struct i2c_client *client;
struct regmap *regmap;
struct power_supply *charger;
+ const struct ltc4162l_chip_info *chip_info;
u32 rsnsb; /* Series resistor that sets charge current, microOhm */
u32 rsnsi; /* Series resistor to measure input current, microOhm */
u8 cell_count; /* Number of connected cells, 0 while unknown */
@@ -108,6 +145,18 @@ static u8 ltc4162l_get_cell_count(struct ltc4162l_info *info)
return val;
};
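+/*
+ * Read the CHEM field from the CHEM_CELLS register. A failed regmap read
+ * returns a negative errno truncated to u8, which lands outside every
+ * known chem range, so callers fall through to their default -EINVAL case.
+ */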
+static u8 ltc4162l_get_chem_type(struct ltc4162l_info *info)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHEM_CELLS_REG, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(LTC4162L_CHEM_MASK, val);
+};
+
/* Convert enum value to POWER_SUPPLY_STATUS value */
static int ltc4162l_state_decode(enum ltc4162l_state value)
{
@@ -223,25 +272,83 @@ static int ltc4162l_get_vbat(struct ltc4162l_info *info,
unsigned int reg,
union power_supply_propval *val)
{
- unsigned int regval;
+ unsigned int regval, chem_type;
int ret;
ret = regmap_read(info->regmap, reg, &regval);
if (ret)
return ret;
- /* cell_count × 192.4μV/LSB */
- regval *= 1924;
- regval *= ltc4162l_get_cell_count(info);
- regval /= 10;
- val->intval = regval;
+ /*
+ * cell_count × scaling factor
+ * The ltc4162-s reports a cell_count of 2 for each group of three
+ * physical (2V) cells, i.e. 2, 4, 6, or 8 for a 6V, 12V, 18V, or 24V
+ * battery. Dividing cell_count by 2 therefore multiplies the scale
+ * factor by 1, 2, 3, or 4 respectively.
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_fst:
+ regval *= 1924;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 10;
+ val->intval = regval;
- return 0;
+ return 0;
+ case ltc4162_sst ... ltc4162_sad:
+ regval *= 3848;
+ regval *= ltc4162l_get_cell_count(info) / 2;
+ regval /= 10;
+ val->intval = regval;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4015_get_vbat(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
+{
+ unsigned int regval, chem_type;
+ int ret;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /*
+ * cell_count × scaling factor
+ * On the ltc4015, the lead-acid fixed and lead-acid programmable
+ * chemistries are encoded as chem values 0x7 and 0x8, hence the case
+ * ranges below are shifted down by one.
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_fst:
+ regval *= 192264;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 1000;
+ val->intval = regval;
+
+ return 0;
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ regval *= 128176;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 1000;
+ val->intval = regval;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ltc4162l_get_ibat(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -249,9 +356,8 @@ static int ltc4162l_get_ibat(struct ltc4162l_info *info,
if (ret)
return ret;
- /* Signed 16-bit number, 1.466μV / RSNSB amperes/LSB. */
ret = (s16)(regval & 0xFFFF);
- val->intval = 100 * mult_frac(ret, 14660, (int)info->rsnsb);
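+ /* Signed 16-bit sample; resolution in pV/LSB divided by rsnsb in µΩ yields µA directly */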
+ val->intval = mult_frac(ret, chip_info->ibat_resolution_pv, info->rsnsb);
return 0;
}
@@ -260,6 +366,7 @@ static int ltc4162l_get_ibat(struct ltc4162l_info *info,
static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -267,8 +374,7 @@ static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
if (ret)
return ret;
- /* 1.649mV/LSB */
- val->intval = regval * 1694;
+ val->intval = regval * chip_info->vin_resolution_uv;
return 0;
}
@@ -276,6 +382,7 @@ static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
static int ltc4162l_get_input_current(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -283,11 +390,9 @@ static int ltc4162l_get_input_current(struct ltc4162l_info *info,
if (ret)
return ret;
- /* Signed 16-bit number, 1.466μV / RSNSI amperes/LSB. */
ret = (s16)(regval & 0xFFFF);
- ret *= 14660;
+ ret *= chip_info->ibat_resolution_pv;
ret /= info->rsnsi;
- ret *= 100;
val->intval = ret;
@@ -305,7 +410,7 @@ static int ltc4162l_get_icharge(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only the lower 5 bits */
+ regval &= GENMASK(5, 0);
/* The charge current servo level: (icharge_dac + 1) × 1mV/RSNSB */
++regval;
@@ -336,7 +441,7 @@ static int ltc4162l_get_vcharge(struct ltc4162l_info *info,
unsigned int reg,
union power_supply_propval *val)
{
- unsigned int regval;
+ unsigned int regval, chem_type;
int ret;
u32 voltage;
@@ -344,41 +449,181 @@ static int ltc4162l_get_vcharge(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only the lower 5 bits */
+ regval &= GENMASK(5, 0);
/*
* charge voltage setting can be computed from
- * cell_count × (vcharge_setting × 12.5mV + 3.8125V)
- * where vcharge_setting ranges from 0 to 31 (4.2V max).
+ * cell_count × (vcharge_setting × a + b)
+ * where vcharge_setting ranges from 0 to c (d).
+ * for ltc4162l: a = 12.5mV, b = 3.8125V, c = 31, d = 4.2Vmax
+ * for ltc4162f: a = 12.5mV, b = 3.4125V, c = 31, d = 3.8Vmax
+ *
+ * for ltc4162s, the charge voltage setting can be computed from
+ * N × (vcharge_setting × 28.571mV + 6.0V)
+ * where N is 1, 2, 3, or 4 for a 6V, 12V, 18V, or 24V battery respectively,
+ * and vcharge_setting ranges from 0 to 31
*/
- voltage = 3812500 + (regval * 12500);
- voltage *= ltc4162l_get_cell_count(info);
- val->intval = voltage;
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ voltage = 3812500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
- return 0;
+ return 0;
+ case ltc4162_fad ... ltc4162_fst:
+ voltage = 3412500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_sst ... ltc4162_sad:
+ voltage = 6000000 + (regval * 28571);
+ voltage *= ltc4162l_get_cell_count(info) / 2;
+ val->intval = voltage;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static int ltc4162l_set_vcharge(struct ltc4162l_info *info,
- unsigned int reg,
- unsigned int value)
+static int ltc4015_get_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
{
- u8 cell_count = ltc4162l_get_cell_count(info);
+ unsigned int regval, chem_type;
+ int ret;
+ u32 voltage;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
- if (!cell_count)
- return -EBUSY; /* Not available yet, try again later */
+ regval &= GENMASK(5, 0);
+ /*
+ * charge voltage setting can be computed from:
+ * cell_count × (vcharge_setting × a + b)
+ * where vcharge_setting ranges from 0 to c (d).
+ * Li-Ion: a = 1/80V, b = 3.8125V, c = 31, d = 4.2Vmax
+ * LiFePO4: a = 1/80V, b = 3.4125V, c = 31, d = 3.8Vmax
+ * Lead Acid: a = 1/105V, b = 2V, c = 35, d = 2.6Vmax
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ voltage = 3812500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_fad ... ltc4162_fst:
+ voltage = 3412500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ voltage = 2000000 + mult_frac(regval, 1000000, 105);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
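+/*
+ * Convert a requested charge voltage (µV for the whole pack) into a
+ * vcharge_setting value: (value / cell_count - base_voltage) / scale_factor,
+ * rejecting anything outside the chip's valid range.
+ */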
+static int ltc4162l_vcharge(unsigned int base_voltage,
+ unsigned int scale_factor,
+ unsigned int range,
+ unsigned int value,
+ u8 cell_count)
+{
value /= cell_count;
- if (value < 3812500)
+ if (value < base_voltage)
return -EINVAL;
- value -= 3812500;
- value /= 12500;
+ value -= base_voltage;
+ value /= scale_factor;
- if (value > 31)
+ if (value > range)
return -EINVAL;
- return regmap_write(info->regmap, reg, value);
+ return value;
+}
+
+static int ltc4162l_set_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ unsigned int chem_type;
+ u8 cell_count;
+ int ret;
+
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(3812500, 12500, 31, value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ case ltc4162_fad ... ltc4162_fst:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(3412500, 12500, 31, value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ case ltc4162_sst ... ltc4162_sad:
+ cell_count = ltc4162l_get_cell_count(info) / 2;
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(6000000, 28571, 31, value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4015_set_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ unsigned int chem_type;
+ u8 cell_count;
+ int ret;
+
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(3812500, 12500, 31, value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ case ltc4162_fad ... ltc4162_fst:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(3412500, 12500, 31, value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ ret = ltc4162l_vcharge(2000000, 1000000 / 105, 35,
+ value, cell_count);
+ if (ret < 0)
+ return ret;
+ return regmap_write(info->regmap, reg, ret);
+ default:
+ return -EINVAL;
+ }
}
static int ltc4162l_get_iin_limit_dac(struct ltc4162l_info *info,
@@ -391,7 +636,7 @@ static int ltc4162l_get_iin_limit_dac(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only 6 bits */
+ regval &= GENMASK(5, 0);
/* (iin_limit_dac + 1) × 500μV / RSNSI */
++regval;
@@ -437,9 +682,30 @@ static int ltc4162l_get_die_temp(struct ltc4162l_info *info,
return 0;
}
+static int ltc4015_get_die_temp(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_DIE_TEMPERATURE, &regval);
+ if (ret)
+ return ret;
+
+ /* (die_temp - 12010) / 45.6°C */
+ ret = (s16)(regval & 0xFFFF);
+ ret -= 12010;
+ ret *= 1000;
+ ret /= 456;
+ val->intval = ret;
+
+ return 0;
+}
+
static int ltc4162l_get_term_current(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -457,10 +723,9 @@ static int ltc4162l_get_term_current(struct ltc4162l_info *info,
if (ret)
return ret;
- /* 1.466μV / RSNSB amperes/LSB */
- regval *= 14660u;
+ regval *= chip_info->ibat_resolution_pv;
regval /= info->rsnsb;
- val->intval = 100 * regval;
+ val->intval = regval;
return 0;
}
@@ -534,10 +799,11 @@ static ssize_t vbat_show(struct device *dev,
{
struct power_supply *psy = to_power_supply(dev);
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
union power_supply_propval val;
int ret;
- ret = ltc4162l_get_vbat(info, LTC4162L_VBAT, &val);
+ ret = chip_info->get_vbat(info, LTC4162L_VBAT, &val);
if (ret)
return ret;
@@ -550,10 +816,11 @@ static ssize_t vbat_avg_show(struct device *dev,
{
struct power_supply *psy = to_power_supply(dev);
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
union power_supply_propval val;
int ret;
- ret = ltc4162l_get_vbat(info, LTC4162L_VBAT_FILT, &val);
+ ret = chip_info->get_vbat(info, LTC4162L_VBAT_FILT, &val);
if (ret)
return ret;
@@ -589,7 +856,8 @@ static ssize_t force_telemetry_show(struct device *dev,
if (ret)
return ret;
- return sysfs_emit(buf, "%u\n", regval & BIT(2) ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", regval &
+ info->chip_info->telemetry_mask ? 1 : 0);
}
static ssize_t force_telemetry_store(struct device *dev,
@@ -607,7 +875,8 @@ static ssize_t force_telemetry_store(struct device *dev,
return ret;
ret = regmap_update_bits(info->regmap, LTC4162L_CONFIG_BITS_REG,
- BIT(2), value ? BIT(2) : 0);
+ info->chip_info->telemetry_mask,
+ value ? info->chip_info->telemetry_mask : 0);
if (ret < 0)
return ret;
@@ -681,6 +950,7 @@ static int ltc4162l_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -702,15 +972,13 @@ static int ltc4162l_get_property(struct power_supply *psy,
return ltc4162l_get_icharge(info,
LTC4162L_CHARGE_CURRENT_SETTING, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- return ltc4162l_get_vcharge(info,
- LTC4162L_VCHARGE_DAC, val);
+ return chip_info->get_vcharge(info, LTC4162L_VCHARGE_DAC, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- return ltc4162l_get_vcharge(info,
- LTC4162L_VCHARGE_SETTING, val);
+ return chip_info->get_vcharge(info, LTC4162L_VCHARGE_SETTING, val);
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
return ltc4162l_get_iin_limit_dac(info, val);
case POWER_SUPPLY_PROP_TEMP:
- return ltc4162l_get_die_temp(info, val);
+ return chip_info->get_die_temp(info, val);
case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
return ltc4162l_get_term_current(info, val);
default:
@@ -772,7 +1040,6 @@ static enum power_supply_property ltc4162l_properties[] = {
};
static const struct power_supply_desc ltc4162l_desc = {
- .name = "ltc4162-l",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = ltc4162l_properties,
.num_properties = ARRAY_SIZE(ltc4162l_properties),
@@ -781,6 +1048,50 @@ static const struct power_supply_desc ltc4162l_desc = {
.property_is_writeable = ltc4162l_property_is_writeable,
};
+static const struct ltc4162l_chip_info ltc4162l_chip_info = {
+ .name = "ltc4162-l",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4162f_chip_info = {
+ .name = "ltc4162-f",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4162s_chip_info = {
+ .name = "ltc4162-s",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4015_chip_info = {
+ .name = "ltc4015",
+ .get_vbat = ltc4015_get_vbat,
+ .get_vcharge = ltc4015_get_vcharge,
+ .set_vcharge = ltc4015_set_vcharge,
+ .get_die_temp = ltc4015_get_die_temp,
+ .ibat_resolution_pv = 1464870,
+ .vin_resolution_uv = 1648,
+ .telemetry_mask = BIT(4),
+};
+
static bool ltc4162l_is_writeable_reg(struct device *dev, unsigned int reg)
{
/* all registers up to this one are writeable */
@@ -825,6 +1136,8 @@ static int ltc4162l_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ltc4162l_info *info;
struct power_supply_config ltc4162l_config = {};
+ struct power_supply_desc *desc;
+ const struct ltc4162l_chip_info *chip_info;
u32 value;
int ret;
@@ -839,6 +1152,12 @@ static int ltc4162l_probe(struct i2c_client *client)
info->client = client;
i2c_set_clientdata(client, info);
+ chip_info = i2c_get_match_data(client);
+ if (!chip_info)
+ return -ENODEV;
+
+ info->chip_info = chip_info;
+
info->regmap = devm_regmap_init_i2c(client, &ltc4162l_regmap_config);
if (IS_ERR(info->regmap)) {
dev_err(dev, "Failed to initialize register map\n");
@@ -870,8 +1189,15 @@ static int ltc4162l_probe(struct i2c_client *client)
ltc4162l_config.drv_data = info;
ltc4162l_config.attr_grp = ltc4162l_attr_groups;
- info->charger = devm_power_supply_register(dev, &ltc4162l_desc,
- &ltc4162l_config);
+ /* Duplicate the default descriptor to set name based on chip_info. */
+ desc = devm_kmemdup(dev, &ltc4162l_desc,
+ sizeof(struct power_supply_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->name = chip_info->name;
+
+ info->charger = devm_power_supply_register(dev, desc, &ltc4162l_config);
if (IS_ERR(info->charger)) {
dev_err(dev, "Failed to register charger\n");
return PTR_ERR(info->charger);
@@ -903,14 +1229,20 @@ static void ltc4162l_alert(struct i2c_client *client,
}
static const struct i2c_device_id ltc4162l_i2c_id_table[] = {
- { "ltc4162-l" },
+ { "ltc4015", (kernel_ulong_t)&ltc4015_chip_info },
+ { "ltc4162-f", (kernel_ulong_t)&ltc4162f_chip_info },
+ { "ltc4162-l", (kernel_ulong_t)&ltc4162l_chip_info },
+ { "ltc4162-s", (kernel_ulong_t)&ltc4162s_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4162l_i2c_id_table);
static const struct of_device_id ltc4162l_of_match[] __maybe_unused = {
- { .compatible = "lltc,ltc4162-l", },
- { },
+ { .compatible = "lltc,ltc4015", .data = &ltc4015_chip_info },
+ { .compatible = "lltc,ltc4162-f", .data = &ltc4162f_chip_info },
+ { .compatible = "lltc,ltc4162-l", .data = &ltc4162l_chip_info },
+ { .compatible = "lltc,ltc4162-s", .data = &ltc4162s_chip_info },
+ { }
};
MODULE_DEVICE_TABLE(of, ltc4162l_of_match);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 496c3e1f2ee6..655b3f25dbd7 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -16,6 +16,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
@@ -52,13 +53,14 @@
#define MAX17042_VMAX_TOLERANCE 50 /* 50 mV */
struct max17042_chip {
- struct i2c_client *client;
+ struct device *dev;
struct regmap *regmap;
struct power_supply *battery;
enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
struct work_struct work;
int init_complete;
+ int irq;
};
static enum power_supply_property max17042_battery_props[] = {
@@ -573,11 +575,11 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
int i;
if (memcmp(data1, data2, size)) {
- dev_err(&chip->client->dev, "%s compare failed\n", __func__);
+ dev_err(chip->dev, "%s compare failed\n", __func__);
for (i = 0; i < size; i++)
- dev_info(&chip->client->dev, "0x%x, 0x%x",
+ dev_info(chip->dev, "0x%x, 0x%x",
data1[i], data2[i]);
- dev_info(&chip->client->dev, "\n");
+ dev_info(chip->dev, "\n");
return -EINVAL;
}
return 0;
@@ -812,14 +814,14 @@ static int max17042_init_chip(struct max17042_chip *chip)
/* write cell characterization data */
ret = max17042_init_model(chip);
if (ret) {
- dev_err(&chip->client->dev, "%s init failed\n",
+ dev_err(chip->dev, "%s init failed\n",
__func__);
return -EIO;
}
ret = max17042_verify_model_lock(chip);
if (ret) {
- dev_err(&chip->client->dev, "%s lock verify failed\n",
+ dev_err(chip->dev, "%s lock verify failed\n",
__func__);
return -EIO;
}
@@ -875,7 +877,7 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
return IRQ_HANDLED;
if ((val & STATUS_SMN_BIT) || (val & STATUS_SMX_BIT)) {
- dev_dbg(&chip->client->dev, "SOC threshold INTR\n");
+ dev_dbg(chip->dev, "SOC threshold INTR\n");
max17042_set_soc_threshold(chip, 1);
}
@@ -907,7 +909,7 @@ static void max17042_init_worker(struct work_struct *work)
static struct max17042_platform_data *
max17042_get_of_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
@@ -949,7 +951,7 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
static struct max17042_platform_data *
max17042_get_default_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
@@ -990,7 +992,7 @@ max17042_get_default_pdata(struct max17042_chip *chip)
static struct max17042_platform_data *
max17042_get_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
#ifdef CONFIG_OF
if (dev->of_node)
@@ -1003,6 +1005,7 @@ max17042_get_pdata(struct max17042_chip *chip)
}
static const struct regmap_config max17042_regmap_config = {
+ .name = "max17042",
.reg_bits = 8,
.val_bits = 16,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
@@ -1029,14 +1032,12 @@ static const struct power_supply_desc max17042_no_current_sense_psy_desc = {
.num_properties = ARRAY_SIZE(max17042_battery_props) - 2,
};
-static int max17042_probe(struct i2c_client *client)
+static int max17042_probe(struct i2c_client *client, struct device *dev, int irq,
+ enum max170xx_chip_type chip_type)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct i2c_adapter *adapter = client->adapter;
const struct power_supply_desc *max17042_desc = &max17042_psy_desc;
struct power_supply_config psy_cfg = {};
- const struct acpi_device_id *acpi_id = NULL;
- struct device *dev = &client->dev;
struct max17042_chip *chip;
int ret;
int i;
@@ -1045,33 +1046,25 @@ static int max17042_probe(struct i2c_client *client)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->client = client;
- if (id) {
- chip->chip_type = id->driver_data;
- } else {
- acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!acpi_id)
- return -ENODEV;
-
- chip->chip_type = acpi_id->driver_data;
- }
+ chip->dev = dev;
+ chip->chip_type = chip_type;
chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config);
if (IS_ERR(chip->regmap)) {
- dev_err(&client->dev, "Failed to initialize regmap\n");
+ dev_err(dev, "Failed to initialize regmap\n");
return -EINVAL;
}
chip->pdata = max17042_get_pdata(chip);
if (!chip->pdata) {
- dev_err(&client->dev, "no platform data provided\n");
+ dev_err(dev, "no platform data provided\n");
return -EINVAL;
}
- i2c_set_clientdata(client, chip);
+ dev_set_drvdata(dev, chip);
psy_cfg.drv_data = chip;
psy_cfg.of_node = dev->of_node;
@@ -1095,24 +1088,17 @@ static int max17042_probe(struct i2c_client *client)
regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007);
}
- chip->battery = devm_power_supply_register(&client->dev, max17042_desc,
+ chip->battery = devm_power_supply_register(dev, max17042_desc,
&psy_cfg);
if (IS_ERR(chip->battery)) {
- dev_err(&client->dev, "failed: power supply register\n");
+ dev_err(dev, "failed: power supply register\n");
return PTR_ERR(chip->battery);
}
- if (client->irq) {
- unsigned int flags = IRQF_ONESHOT;
-
- /*
- * On ACPI systems the IRQ may be handled by ACPI-event code,
- * so we need to share (if the ACPI code is willing to share).
- */
- if (acpi_id)
- flags |= IRQF_SHARED | IRQF_PROBE_SHARED;
+ if (irq) {
+ unsigned int flags = IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED;
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = devm_request_threaded_irq(dev, irq,
NULL,
max17042_thread_handler, flags,
chip->battery->desc->name,
@@ -1123,18 +1109,20 @@ static int max17042_probe(struct i2c_client *client)
CFG_ALRT_BIT_ENBL);
max17042_set_soc_threshold(chip, 1);
} else {
- client->irq = 0;
+ irq = 0;
if (ret != -EBUSY)
- dev_err(&client->dev, "Failed to get IRQ\n");
+ dev_err(dev, "Failed to get IRQ\n");
}
}
/* Not able to update the charge threshold when exceeded? -> disable */
- if (!client->irq)
+ if (!irq)
regmap_write(chip->regmap, MAX17042_SALRT_Th, 0xff00);
+ chip->irq = irq;
+
regmap_read(chip->regmap, MAX17042_STATUS, &val);
if (val & STATUS_POR_BIT) {
- ret = devm_work_autocancel(&client->dev, &chip->work,
+ ret = devm_work_autocancel(dev, &chip->work,
max17042_init_worker);
if (ret)
return ret;
@@ -1146,6 +1134,44 @@ static int max17042_probe(struct i2c_client *client)
return 0;
}
+static int max17042_i2c_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct acpi_device_id *acpi_id = NULL;
+ struct device *dev = &client->dev;
+ enum max170xx_chip_type chip_type;
+
+ if (id) {
+ chip_type = id->driver_data;
+ } else {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ chip_type = acpi_id->driver_data;
+ }
+
+ return max17042_probe(client, dev, client->irq, chip_type);
+}
+
+static int max17042_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct i2c_client *i2c;
+ const struct platform_device_id *id;
+ int irq;
+
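+ /* This platform device is an MFD cell; its parent is expected to be the PMIC's I2C client */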
+ i2c = to_i2c_client(pdev->dev.parent);
+ if (!i2c)
+ return -EINVAL;
+
+ dev->of_node = dev->parent->of_node;
+ id = platform_get_device_id(pdev);
+ irq = platform_get_irq(pdev, 0);
+
+ return max17042_probe(i2c, dev, irq, id->driver_data);
+}
+
#ifdef CONFIG_PM_SLEEP
static int max17042_suspend(struct device *dev)
{
@@ -1155,9 +1181,9 @@ static int max17042_suspend(struct device *dev)
* disable the irq and enable irq_wake
* capability to the interrupt line.
*/
- if (chip->client->irq) {
- disable_irq(chip->client->irq);
- enable_irq_wake(chip->client->irq);
+ if (chip->irq) {
+ disable_irq(chip->irq);
+ enable_irq_wake(chip->irq);
}
return 0;
@@ -1167,9 +1193,9 @@ static int max17042_resume(struct device *dev)
{
struct max17042_chip *chip = dev_get_drvdata(dev);
- if (chip->client->irq) {
- disable_irq_wake(chip->client->irq);
- enable_irq(chip->client->irq);
+ if (chip->irq) {
+ disable_irq_wake(chip->irq);
+ enable_irq(chip->irq);
/* re-program the SOC thresholds to 1% change */
max17042_set_soc_threshold(chip, 1);
}
@@ -1190,12 +1216,28 @@ MODULE_DEVICE_TABLE(acpi, max17042_acpi_match);
#endif
#ifdef CONFIG_OF
-static const struct of_device_id max17042_dt_match[] = {
- { .compatible = "maxim,max17042" },
- { .compatible = "maxim,max17047" },
- { .compatible = "maxim,max17050" },
- { .compatible = "maxim,max17055" },
- { .compatible = "maxim,max77849-battery" },
+/*
+ * The device may be instantiated through a parent MFD device, in which
+ * case device matching is done through platform_device_id.
+ *
+ * However, if the device's DT node contains a matching compatible and the
+ * driver is built as a module, then *module* matching will be done through
+ * DT aliases. This requires an of_device_id table. At the same time, this
+ * does not change the actual *device* matching, so do not add .of_match_table.
+ */
+static const struct of_device_id max17042_dt_match[] __used = {
+ { .compatible = "maxim,max17042",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17042 },
+ { .compatible = "maxim,max17047",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
+ { .compatible = "maxim,max17050",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17050 },
+ { .compatible = "maxim,max17055",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17055 },
+ { .compatible = "maxim,max77705-battery",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
+ { .compatible = "maxim,max77849-battery",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1211,6 +1253,17 @@ static const struct i2c_device_id max17042_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
+static const struct platform_device_id max17042_platform_id[] = {
+ { "max17042", MAXIM_DEVICE_TYPE_MAX17042 },
+ { "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
+ { "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
+ { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
+ { "max77705-battery", MAXIM_DEVICE_TYPE_MAX17047 },
+ { "max77849-battery", MAXIM_DEVICE_TYPE_MAX17047 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max17042_platform_id);
+
static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
@@ -1218,10 +1271,44 @@ static struct i2c_driver max17042_i2c_driver = {
.of_match_table = of_match_ptr(max17042_dt_match),
.pm = &max17042_pm_ops,
},
- .probe = max17042_probe,
+ .probe = max17042_i2c_probe,
.id_table = max17042_id,
};
-module_i2c_driver(max17042_i2c_driver);
+
+static struct platform_driver max17042_platform_driver = {
+ .driver = {
+ .name = "max17042",
+ .acpi_match_table = ACPI_PTR(max17042_acpi_match),
+ .pm = &max17042_pm_ops,
+ },
+ .probe = max17042_platform_probe,
+ .id_table = max17042_platform_id,
+};
+
+static int __init max17042_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&max17042_platform_driver);
+ if (ret)
+ return ret;
+
+ ret = i2c_add_driver(&max17042_i2c_driver);
+ if (ret) {
+ platform_driver_unregister(&max17042_platform_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(max17042_init);
+
+static void __exit max17042_exit(void)
+{
+ i2c_del_driver(&max17042_i2c_driver);
+ platform_driver_unregister(&max17042_platform_driver);
+}
+module_exit(max17042_exit);
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
index 33105419e242..11580e414713 100644
--- a/drivers/power/supply/max1720x_battery.c
+++ b/drivers/power/supply/max1720x_battery.c
@@ -16,6 +16,11 @@
#include <linux/unaligned.h>
+/* SBS compliant registers */
+#define MAX172XX_TEMP1 0x34
+#define MAX172XX_INT_TEMP 0x35
+#define MAX172XX_TEMP2 0x3B
+
/* Nonvolatile registers */
#define MAX1720X_NXTABLE0 0x80
#define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */
@@ -29,6 +34,7 @@
#define MAX172XX_TEMP 0x08 /* Temperature */
#define MAX172XX_CURRENT 0x0A /* Actual current */
#define MAX172XX_AVG_CURRENT 0x0B /* Average current */
+#define MAX172XX_FULL_CAP 0x10 /* Calculated full capacity */
#define MAX172XX_TTE 0x11 /* Time to empty */
#define MAX172XX_AVG_TA 0x16 /* Average temperature */
#define MAX172XX_CYCLES 0x17
@@ -112,11 +118,15 @@ static const struct regmap_config max1720x_regmap_cfg = {
};
static const struct regmap_range max1720x_nvmem_allow[] = {
+ regmap_reg_range(MAX172XX_TEMP1, MAX172XX_INT_TEMP),
+ regmap_reg_range(MAX172XX_TEMP2, MAX172XX_TEMP2),
regmap_reg_range(MAX1720X_NXTABLE0, MAX1720X_NDEVICE_NAME4),
};
static const struct regmap_range max1720x_nvmem_deny[] = {
- regmap_reg_range(0x00, 0x7F),
+ regmap_reg_range(0x00, 0x33),
+ regmap_reg_range(0x36, 0x3A),
+ regmap_reg_range(0x3C, 0x7F),
regmap_reg_range(0xE0, 0xFF),
};
@@ -250,6 +260,7 @@ static const enum power_supply_property max1720x_battery_props[] = {
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
@@ -362,6 +373,10 @@ static int max1720x_battery_get_property(struct power_supply *psy,
ret = regmap_read(info->regmap, MAX172XX_AVG_CURRENT, &reg_val);
val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = regmap_read(info->regmap, MAX172XX_FULL_CAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
ret = regmap_read(info->regmap, MAX172XX_DEV_NAME, &reg_val);
reg_val = FIELD_GET(MAX172XX_DEV_NAME_TYPE_MASK, reg_val);
@@ -382,6 +397,54 @@ static int max1720x_battery_get_property(struct power_supply *psy,
return ret;
}
+static int max1720x_read_temp(struct device *dev, u8 reg, char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct max1720x_device_info *info = power_supply_get_drvdata(psy);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(info->regmap_nv, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The register reports temperature with an LSb of 0.1 degrees Celsius,
+ * offset from absolute zero (-273 C / 0 K), so subtracting 2730 yields
+ * deci-degrees Celsius.
+ */
+ return sysfs_emit(buf, "%d\n", val - 2730);
+}
+
+static ssize_t temp_ain1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_TEMP1, buf);
+}
+
+static ssize_t temp_ain2_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_TEMP2, buf);
+}
+
+static ssize_t temp_int_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_INT_TEMP, buf);
+}
+
+static DEVICE_ATTR_RO(temp_ain1);
+static DEVICE_ATTR_RO(temp_ain2);
+static DEVICE_ATTR_RO(temp_int);
+
+static struct attribute *max1720x_attrs[] = {
+ &dev_attr_temp_ain1.attr,
+ &dev_attr_temp_ain2.attr,
+ &dev_attr_temp_int.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(max1720x);
+
static
int max1720x_nvmem_reg_read(void *priv, unsigned int off, void *val, size_t len)
{
@@ -482,6 +545,7 @@ static int max1720x_probe(struct i2c_client *client)
psy_cfg.drv_data = info;
psy_cfg.fwnode = dev_fwnode(dev);
+ psy_cfg.attr_grp = max1720x_groups;
i2c_set_clientdata(client, info);
info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg);
if (IS_ERR(info->regmap))
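Since the attribute group is attached through psy_cfg.attr_grp, the new temp_ain1/temp_ain2/temp_int files appear under the registered power-supply device. A hedged userspace sketch of consuming one of them (the supply name "max1720x" and exact path are assumptions):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Path is an assumption; adjust to the actual power-supply name. */
	FILE *f = fopen("/sys/class/power_supply/max1720x/temp_ain1", "r");
	int decic;

	if (!f || fscanf(f, "%d", &decic) != 1)
		return 1;
	fclose(f);
	/* The attribute reports 0.1 C units, e.g. 251 -> 25.1 C */
	printf("AIN1: %d.%d C\n", decic / 10, abs(decic) % 10);
	return 0;
}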
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index 5bcfaeeda3db..4adf2acc2779 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -90,7 +90,7 @@ static int mm8013_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct mm8013_chip *chip = psy->drv_data;
+ struct mm8013_chip *chip = power_supply_get_drvdata(psy);
int ret = 0;
u32 regval;
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 9f60094a5599..849f63e89ba0 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -527,7 +527,7 @@ static enum power_supply_property olpc_xo15_bat_props[] = {
#define EEPROM_SIZE (EEPROM_END - EEPROM_START)
static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
uint8_t ec_byte;
int ret;
@@ -547,13 +547,13 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute olpc_bat_eeprom = {
+static const struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
},
.size = EEPROM_SIZE,
- .read = olpc_bat_eeprom_read,
+ .read_new = olpc_bat_eeprom_read,
};
/* Allow userspace to see the specific error value pulled from the EC */
@@ -584,15 +584,14 @@ static struct attribute *olpc_bat_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *olpc_bat_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const olpc_bat_sysfs_bin_attrs[] = {
&olpc_bat_eeprom,
NULL
};
static const struct attribute_group olpc_bat_sysfs_group = {
.attrs = olpc_bat_sysfs_attrs,
- .bin_attrs = olpc_bat_sysfs_bin_attrs,
-
+ .bin_attrs_new = olpc_bat_sysfs_bin_attrs,
};
static const struct attribute_group *olpc_bat_sysfs_groups[] = {
diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h
index 7434a6f24775..8f6a2d44b996 100644
--- a/drivers/power/supply/power_supply.h
+++ b/drivers/power/supply/power_supply.h
@@ -9,24 +9,55 @@
* Modified: 2004, Oct Szabolcs Gyurko
*/
+#include <linux/lockdep.h>
+
struct device;
struct device_type;
struct power_supply;
extern int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp);
+extern bool power_supply_has_property(struct power_supply *psy,
+ enum power_supply_property psp);
+extern bool power_supply_ext_has_property(const struct power_supply_ext *ext,
+ enum power_supply_property psp);
+
+struct power_supply_ext_registration {
+ struct list_head list_head;
+ const struct power_supply_ext *ext;
+ struct device *dev;
+ void *data;
+};
+
+/* Make sure that the macro expands to a single statement */
+#define power_supply_for_each_extension(pos, psy) \
+ if ( ({ lockdep_assert_held(&(psy)->extensions_sem); 0; }) ) \
+ ; \
+ else \
+ list_for_each_entry(pos, &(psy)->extensions, list_head)
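The if/else wrapper makes the iterator behave as a single statement (so it nests safely under an unbraced if/else), while the `({ ...; 0; })` statement expression asserts the lock without changing control flow. A hedged caller sketch, using the guard() helper from linux/cleanup.h that the core code also uses:

/* Sketch only: count registered extensions under the reader lock,
 * which satisfies the lockdep_assert_held() inside the iterator.
 */
static int power_supply_count_extensions(struct power_supply *psy)
{
	struct power_supply_ext_registration *reg;
	int n = 0;

	guard(rwsem_read)(&psy->extensions_sem);
	power_supply_for_each_extension(reg, psy)
		n++;
	return n;
}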
#ifdef CONFIG_SYSFS
extern void __init power_supply_init_attrs(void);
extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env);
extern const struct attribute_group *power_supply_attr_groups[];
+extern int power_supply_sysfs_add_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev);
+extern void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext);
#else
static inline void power_supply_init_attrs(void) {}
#define power_supply_attr_groups NULL
#define power_supply_uevent NULL
+static inline int power_supply_sysfs_add_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev)
+{ return 0; }
+static inline void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext) {}
#endif /* CONFIG_SYSFS */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 16085eff0084..76c340b38015 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -66,21 +66,19 @@ static bool __power_supply_is_supplied_by(struct power_supply *supplier,
return false;
}
-static int __power_supply_changed_work(struct device *dev, void *data)
+static int __power_supply_changed_work(struct power_supply *pst, void *data)
{
struct power_supply *psy = data;
- struct power_supply *pst = dev_get_drvdata(dev);
- if (__power_supply_is_supplied_by(psy, pst)) {
- if (pst->desc->external_power_changed)
- pst->desc->external_power_changed(pst);
- }
+ if (__power_supply_is_supplied_by(psy, pst))
+ power_supply_external_power_changed(pst);
return 0;
}
static void power_supply_changed_work(struct work_struct *work)
{
+ int ret;
unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
@@ -88,6 +86,16 @@ static void power_supply_changed_work(struct work_struct *work)
dev_dbg(&psy->dev, "%s\n", __func__);
spin_lock_irqsave(&psy->changed_lock, flags);
+
+ if (unlikely(psy->update_groups)) {
+ psy->update_groups = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+ ret = sysfs_update_groups(&psy->dev.kobj, power_supply_dev_type.groups);
+ if (ret)
+ dev_warn(&psy->dev, "failed to update sysfs groups: %pe\n", ERR_PTR(ret));
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+
/*
* Check 'changed' here to avoid issues due to race between
* power_supply_changed() and this routine. In worst case
@@ -98,7 +106,7 @@ static void power_supply_changed_work(struct work_struct *work)
if (likely(psy->changed)) {
psy->changed = false;
spin_unlock_irqrestore(&psy->changed_lock, flags);
- power_supply_for_each_device(psy, __power_supply_changed_work);
+ power_supply_for_each_psy(psy, __power_supply_changed_work);
power_supply_update_leds(psy);
blocking_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
@@ -116,11 +124,29 @@ static void power_supply_changed_work(struct work_struct *work)
spin_unlock_irqrestore(&psy->changed_lock, flags);
}
-int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data))
+struct psy_for_each_psy_cb_data {
+ int (*fn)(struct power_supply *psy, void *data);
+ void *data;
+};
+
+static int psy_for_each_psy_cb(struct device *dev, void *data)
{
- return class_for_each_device(&power_supply_class, NULL, data, fn);
+ struct psy_for_each_psy_cb_data *cb_data = data;
+ struct power_supply *psy = dev_to_psy(dev);
+
+ return cb_data->fn(psy, cb_data->data);
}
-EXPORT_SYMBOL_GPL(power_supply_for_each_device);
+
+int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data))
+{
+ struct psy_for_each_psy_cb_data cb_data = {
+ .fn = fn,
+ .data = data,
+ };
+
+ return class_for_each_device(&power_supply_class, NULL, &cb_data, psy_for_each_psy_cb);
+}
+EXPORT_SYMBOL_GPL(power_supply_for_each_psy);
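power_supply_for_each_psy() trades the raw struct device callback for a typed one; callers no longer need dev_get_drvdata(). A minimal hedged caller sketch (function name hypothetical):

/* Sketch only: log every registered supply's name. */
static int __log_psy_name(struct power_supply *psy, void *data)
{
	pr_info("power supply: %s\n", psy->desc->name);
	return 0;	/* non-zero would stop the iteration */
}

/* somewhere in driver code: */
power_supply_for_each_psy(NULL, __log_psy_name);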
void power_supply_changed(struct power_supply *psy)
{
@@ -166,11 +192,10 @@ static void power_supply_deferred_register_work(struct work_struct *work)
}
#ifdef CONFIG_OF
-static int __power_supply_populate_supplied_from(struct device *dev,
+static int __power_supply_populate_supplied_from(struct power_supply *epsy,
void *data)
{
struct power_supply *psy = data;
- struct power_supply *epsy = dev_get_drvdata(dev);
struct device_node *np;
int i = 0;
@@ -197,20 +222,19 @@ static int power_supply_populate_supplied_from(struct power_supply *psy)
{
int error;
- error = power_supply_for_each_device(psy, __power_supply_populate_supplied_from);
+ error = power_supply_for_each_psy(psy, __power_supply_populate_supplied_from);
dev_dbg(&psy->dev, "%s %d\n", __func__, error);
return error;
}
-static int __power_supply_find_supply_from_node(struct device *dev,
+static int __power_supply_find_supply_from_node(struct power_supply *epsy,
void *data)
{
struct device_node *np = data;
- struct power_supply *epsy = dev_get_drvdata(dev);
- /* returning non-zero breaks out of power_supply_for_each_device loop */
+ /* returning non-zero breaks out of power_supply_for_each_psy loop */
if (epsy->of_node == np)
return 1;
@@ -222,16 +246,16 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
int error;
/*
- * power_supply_for_each_device() either returns its own errors or values
+ * power_supply_for_each_psy() either returns its own errors or values
* returned by __power_supply_find_supply_from_node().
*
* __power_supply_find_supply_from_node() will return 0 (no match)
* or 1 (match).
*
- * We return 0 if power_supply_for_each_device() returned 1, -EPROBE_DEFER if
+ * We return 0 if power_supply_for_each_psy() returned 1, -EPROBE_DEFER if
* it returned 0, or error as returned by it.
*/
- error = power_supply_for_each_device(supply_node, __power_supply_find_supply_from_node);
+ error = power_supply_for_each_psy(supply_node, __power_supply_find_supply_from_node);
return error ? (error == 1 ? 0 : error) : -EPROBE_DEFER;
}
@@ -316,10 +340,9 @@ struct psy_am_i_supplied_data {
unsigned int count;
};
-static int __power_supply_am_i_supplied(struct device *dev, void *_data)
+static int __power_supply_am_i_supplied(struct power_supply *epsy, void *_data)
{
union power_supply_propval ret = {0,};
- struct power_supply *epsy = dev_get_drvdata(dev);
struct psy_am_i_supplied_data *data = _data;
if (__power_supply_is_supplied_by(epsy, data->psy)) {
@@ -337,7 +360,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
struct psy_am_i_supplied_data data = { psy, 0 };
int error;
- error = power_supply_for_each_device(&data, __power_supply_am_i_supplied);
+ error = power_supply_for_each_psy(&data, __power_supply_am_i_supplied);
dev_dbg(&psy->dev, "%s count %u err %d\n", __func__, data.count, error);
@@ -348,10 +371,9 @@ int power_supply_am_i_supplied(struct power_supply *psy)
}
EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
-static int __power_supply_is_system_supplied(struct device *dev, void *data)
+static int __power_supply_is_system_supplied(struct power_supply *psy, void *data)
{
union power_supply_propval ret = {0,};
- struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
@@ -372,7 +394,7 @@ int power_supply_is_system_supplied(void)
int error;
unsigned int count = 0;
- error = power_supply_for_each_device(&count, __power_supply_is_system_supplied);
+ error = power_supply_for_each_psy(&count, __power_supply_is_system_supplied);
/*
* If no system scope power class device was found at all, most probably we
@@ -391,9 +413,8 @@ struct psy_get_supplier_prop_data {
union power_supply_propval *val;
};
-static int __power_supply_get_supplier_property(struct device *dev, void *_data)
+static int __power_supply_get_supplier_property(struct power_supply *epsy, void *_data)
{
- struct power_supply *epsy = dev_get_drvdata(dev);
struct psy_get_supplier_prop_data *data = _data;
if (__power_supply_is_supplied_by(epsy, data->psy))
@@ -418,7 +439,7 @@ int power_supply_get_property_from_supplier(struct power_supply *psy,
* This function is not intended for use with a supply with multiple
* suppliers, we simply pick the first supply to report the psp.
*/
- ret = power_supply_for_each_device(&data, __power_supply_get_supplier_property);
+ ret = power_supply_for_each_psy(&data, __power_supply_get_supplier_property);
if (ret < 0)
return ret;
if (ret == 0)
@@ -444,7 +465,7 @@ EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
return strcmp(psy->desc->name, name) == 0;
}
@@ -467,7 +488,7 @@ struct power_supply *power_supply_get_by_name(const char *name)
power_supply_match_device_by_name);
if (dev) {
- psy = dev_get_drvdata(dev);
+ psy = dev_to_psy(dev);
atomic_inc(&psy->use_cnt);
}
@@ -524,7 +545,7 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
of_node_put(power_supply_np);
if (dev) {
- psy = dev_get_drvdata(dev);
+ psy = dev_to_psy(dev);
atomic_inc(&psy->use_cnt);
}
@@ -1180,8 +1201,8 @@ bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
}
EXPORT_SYMBOL_GPL(power_supply_battery_bti_in_range);
-static bool psy_has_property(const struct power_supply_desc *psy_desc,
- enum power_supply_property psp)
+static bool psy_desc_has_property(const struct power_supply_desc *psy_desc,
+ enum power_supply_property psp)
{
bool found = false;
int i;
@@ -1196,17 +1217,57 @@ static bool psy_has_property(const struct power_supply_desc *psy_desc,
return found;
}
+bool power_supply_ext_has_property(const struct power_supply_ext *psy_ext,
+ enum power_supply_property psp)
+{
+ int i;
+
+ for (i = 0; i < psy_ext->num_properties; i++)
+ if (psy_ext->properties[i] == psp)
+ return true;
+
+ return false;
+}
+
+bool power_supply_has_property(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ struct power_supply_ext_registration *reg;
+
+ if (psy_desc_has_property(psy->desc, psp))
+ return true;
+
+ if (power_supply_battery_info_has_prop(psy->battery_info, psp))
+ return true;
+
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp))
+ return true;
+ }
+
+ return false;
+}
+
int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct power_supply_ext_registration *reg;
+
if (atomic_read(&psy->use_cnt) <= 0) {
if (!psy->initialized)
return -EAGAIN;
return -ENODEV;
}
- if (psy_has_property(psy->desc, psp))
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp))
+ return reg->ext->get_property(psy, reg->ext, reg->data, psp, val);
+ }
+ }
+
+ if (psy_desc_has_property(psy->desc, psp))
return psy->desc->get_property(psy, psp, val);
else if (power_supply_battery_info_has_prop(psy->battery_info, psp))
return power_supply_battery_info_get_prop(psy->battery_info, psp, val);
@@ -1219,7 +1280,24 @@ int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
- if (atomic_read(&psy->use_cnt) <= 0 || !psy->desc->set_property)
+ struct power_supply_ext_registration *reg;
+
+ if (atomic_read(&psy->use_cnt) <= 0)
+ return -ENODEV;
+
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (reg->ext->set_property)
+ return reg->ext->set_property(psy, reg->ext, reg->data,
+ psp, val);
+ else
+ return -ENODEV;
+ }
+ }
+ }
+
+ if (!psy->desc->set_property)
return -ENODEV;
return psy->desc->set_property(psy, psp, val);
@@ -1229,7 +1307,22 @@ EXPORT_SYMBOL_GPL(power_supply_set_property);
int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp);
+ struct power_supply_ext_registration *reg;
+
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (reg->ext->property_is_writeable)
+ return reg->ext->property_is_writeable(psy, reg->ext,
+ reg->data, psp);
+ else
+ return 0;
+ }
+ }
+
+ if (!psy->desc->property_is_writeable)
+ return 0;
+
+ return psy->desc->property_is_writeable(psy, psp);
}
void power_supply_external_power_changed(struct power_supply *psy)
@@ -1248,6 +1341,88 @@ int power_supply_powers(struct power_supply *psy, struct device *dev)
}
EXPORT_SYMBOL_GPL(power_supply_powers);
+static int power_supply_update_sysfs_and_hwmon(struct power_supply *psy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->update_groups = true;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+
+ power_supply_changed(psy);
+
+ power_supply_remove_hwmon_sysfs(psy);
+ return power_supply_add_hwmon_sysfs(psy);
+}
+
+int power_supply_register_extension(struct power_supply *psy, const struct power_supply_ext *ext,
+ struct device *dev, void *data)
+{
+ struct power_supply_ext_registration *reg;
+ size_t i;
+ int ret;
+
+ if (!psy || !dev || !ext || !ext->name || !ext->properties || !ext->num_properties)
+ return -EINVAL;
+
+ guard(rwsem_write)(&psy->extensions_sem);
+
+ power_supply_for_each_extension(reg, psy)
+ if (strcmp(ext->name, reg->ext->name) == 0)
+ return -EEXIST;
+
+ for (i = 0; i < ext->num_properties; i++)
+ if (power_supply_has_property(psy, ext->properties[i]))
+ return -EEXIST;
+
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->ext = ext;
+ reg->dev = dev;
+ reg->data = data;
+ list_add(&reg->list_head, &psy->extensions);
+
+ ret = power_supply_sysfs_add_extension(psy, ext, dev);
+ if (ret)
+ goto sysfs_add_failed;
+
+ ret = power_supply_update_sysfs_and_hwmon(psy);
+ if (ret)
+ goto sysfs_hwmon_failed;
+
+ return 0;
+
+sysfs_hwmon_failed:
+ power_supply_sysfs_remove_extension(psy, ext);
+sysfs_add_failed:
+ list_del(&reg->list_head);
+ kfree(reg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(power_supply_register_extension);
+
+void power_supply_unregister_extension(struct power_supply *psy, const struct power_supply_ext *ext)
+{
+ struct power_supply_ext_registration *reg;
+
+ guard(rwsem_write)(&psy->extensions_sem);
+
+ power_supply_for_each_extension(reg, psy) {
+ if (reg->ext == ext) {
+ list_del(&reg->list_head);
+ power_supply_sysfs_remove_extension(psy, ext);
+ kfree(reg);
+ power_supply_update_sysfs_and_hwmon(psy);
+ return;
+ }
+ }
+
+ dev_warn(&psy->dev, "Trying to unregister invalid extension\n");
+}
+EXPORT_SYMBOL_GPL(power_supply_unregister_extension);
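A hedged consumer-side sketch of the extension lifetime (my_ext and my_data are hypothetical; test_power.c further down in this patch carries the in-tree example):

/* Sketch only: register on probe, unregister on remove. */
ret = power_supply_register_extension(psy, &my_ext, dev, my_data);
if (ret)
	return ret;	/* -EEXIST on name/property clash, -ENOMEM, ... */

/* ... extension properties now show up in sysfs, hwmon and uevents ... */

power_supply_unregister_extension(psy, &my_ext);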
+
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = to_power_supply(dev);
@@ -1300,7 +1475,7 @@ static int psy_register_thermal(struct power_supply *psy)
return 0;
/* Register battery zone device psy reports temperature */
- if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_TEMP)) {
+ if (psy_desc_has_property(psy->desc, POWER_SUPPLY_PROP_TEMP)) {
/* Prefer our hwmon device and avoid duplicates */
struct thermal_zone_params tzp = {
.no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
@@ -1402,6 +1577,9 @@ __power_supply_register(struct device *parent,
}
spin_lock_init(&psy->changed_lock);
+ init_rwsem(&psy->extensions_sem);
+ INIT_LIST_HEAD(&psy->extensions);
+
rc = device_add(dev);
if (rc)
goto device_add_failed;
@@ -1418,9 +1596,11 @@ __power_supply_register(struct device *parent,
if (rc)
goto create_triggers_failed;
- rc = power_supply_add_hwmon_sysfs(psy);
- if (rc)
- goto add_hwmon_sysfs_failed;
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ rc = power_supply_add_hwmon_sysfs(psy);
+ if (rc)
+ goto add_hwmon_sysfs_failed;
+ }
/*
* Update use_cnt after any uevents (most notably from device_add()).
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index 01be04903d7d..95245e6a6baa 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -349,9 +349,28 @@ static const struct hwmon_chip_info power_supply_hwmon_chip_info = {
.info = power_supply_hwmon_info,
};
+static const enum power_supply_property power_supply_hwmon_props[] = {
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+ POWER_SUPPLY_PROP_TEMP_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
int power_supply_add_hwmon_sysfs(struct power_supply *psy)
{
- const struct power_supply_desc *desc = psy->desc;
struct power_supply_hwmon *psyhw;
struct device *dev = &psy->dev;
struct device *hwmon;
@@ -377,32 +396,11 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
goto error;
}
- for (i = 0; i < desc->num_properties; i++) {
- const enum power_supply_property prop = desc->properties[i];
-
- switch (prop) {
- case POWER_SUPPLY_PROP_CURRENT_AVG:
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- case POWER_SUPPLY_PROP_POWER_AVG:
- case POWER_SUPPLY_PROP_POWER_NOW:
- case POWER_SUPPLY_PROP_TEMP:
- case POWER_SUPPLY_PROP_TEMP_MAX:
- case POWER_SUPPLY_PROP_TEMP_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ for (i = 0; i < ARRAY_SIZE(power_supply_hwmon_props); i++) {
+ const enum power_supply_property prop = power_supply_hwmon_props[i];
+
+ if (power_supply_has_property(psy, prop))
set_bit(prop, psyhw->props);
- break;
- default:
- break;
- }
}
name = psy->desc->name;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 571de43fcca9..edb058c19c9c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -99,6 +99,7 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
[POWER_SUPPLY_HEALTH_OVERHEAT] = "Overheat",
[POWER_SUPPLY_HEALTH_DEAD] = "Dead",
[POWER_SUPPLY_HEALTH_OVERVOLTAGE] = "Over voltage",
+ [POWER_SUPPLY_HEALTH_UNDERVOLTAGE] = "Under voltage",
[POWER_SUPPLY_HEALTH_UNSPEC_FAILURE] = "Unspecified failure",
[POWER_SUPPLY_HEALTH_COLD] = "Cold",
[POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE] = "Watchdog timer expire",
@@ -182,6 +183,8 @@ static struct power_supply_attr power_supply_attrs[] __ro_after_init = {
POWER_SUPPLY_ATTR(CHARGE_CONTROL_START_THRESHOLD),
POWER_SUPPLY_ATTR(CHARGE_CONTROL_END_THRESHOLD),
POWER_SUPPLY_ENUM_ATTR(CHARGE_BEHAVIOUR),
+ /* "charge_types" reuses the enum value texts of "charge_type", which lacks the trailing 's' */
+ _POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPES, POWER_SUPPLY_CHARGE_TYPE_TEXT),
POWER_SUPPLY_ATTR(INPUT_CURRENT_LIMIT),
POWER_SUPPLY_ATTR(INPUT_VOLTAGE_LIMIT),
POWER_SUPPLY_ATTR(INPUT_POWER_LIMIT),
@@ -237,23 +240,52 @@ static enum power_supply_property dev_attr_psp(struct device_attribute *attr)
return to_ps_attr(attr) - power_supply_attrs;
}
+static void power_supply_escape_spaces(const char *str, char *buf, size_t bufsize)
+{
+ strscpy(buf, str, bufsize);
+ strreplace(buf, ' ', '_');
+}
+
+static int power_supply_match_string(const char * const *array, size_t n, const char *s)
+{
+ int ret;
+
+ /* First try an exact match */
+ ret = __sysfs_match_string(array, n, s);
+ if (ret >= 0)
+ return ret;
+
+ /* Second round, try matching with spaces replaced by '_' */
+ for (size_t i = 0; i < n; i++) {
+ char buf[32];
+
+ power_supply_escape_spaces(array[i], buf, sizeof(buf));
+ if (sysfs_streq(buf, s))
+ return i;
+ }
+
+ return -EINVAL;
+}
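A hedged illustration of the two-pass matching (labels are made up): both spellings resolve to the same index, so the escaped form shown by sysfs can be written back verbatim.

/* Sketch only. */
static const char * const demo_labels[] = { "Good", "Over voltage" };

static void demo_match(void)
{
	/* exact match -> 1 */
	WARN_ON(power_supply_match_string(demo_labels,
					  ARRAY_SIZE(demo_labels),
					  "Over voltage") != 1);
	/* spaces replaced by '_' also match -> 1 */
	WARN_ON(power_supply_match_string(demo_labels,
					  ARRAY_SIZE(demo_labels),
					  "Over_voltage") != 1);
}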
+
static ssize_t power_supply_show_enum_with_available(
struct device *dev, const char * const labels[], int label_count,
unsigned int available_values, int value, char *buf)
{
bool match = false, available, active;
+ char escaped_label[32];
ssize_t count = 0;
int i;
for (i = 0; i < label_count; i++) {
available = available_values & BIT(i);
active = i == value;
+ power_supply_escape_spaces(labels[i], escaped_label, sizeof(escaped_label));
if (available && active) {
- count += sysfs_emit_at(buf, count, "[%s] ", labels[i]);
+ count += sysfs_emit_at(buf, count, "[%s] ", escaped_label);
match = true;
} else if (available) {
- count += sysfs_emit_at(buf, count, "%s ", labels[i]);
+ count += sysfs_emit_at(buf, count, "%s ", escaped_label);
}
}
@@ -268,11 +300,34 @@ static ssize_t power_supply_show_enum_with_available(
return count;
}
-static ssize_t power_supply_show_property(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
+static ssize_t power_supply_show_charge_behaviour(struct device *dev,
+ struct power_supply *psy,
+ union power_supply_propval *value,
+ char *buf)
+{
+ struct power_supply_ext_registration *reg;
+
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR))
+ return power_supply_charge_behaviour_show(dev,
+ reg->ext->charge_behaviours,
+ value->intval, buf);
+ }
+ }
+
+ return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+ value->intval, buf);
+}
+
+static ssize_t power_supply_format_property(struct device *dev,
+ bool uevent,
+ struct device_attribute *attr,
+ char *buf)
+{
ssize_t ret;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
const struct power_supply_attr *ps_attr = to_ps_attr(attr);
enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
@@ -287,7 +342,7 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg_ratelimited(dev,
"driver has no data for `%s' property\n",
attr->attr.name);
- else if (ret != -ENODEV && ret != -EAGAIN)
+ else if (ret != -ENODEV && ret != -EAGAIN && ret != -EINVAL)
dev_err_ratelimited(dev,
"driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
@@ -303,13 +358,21 @@ static ssize_t power_supply_show_property(struct device *dev,
psy->desc->usb_types, value.intval, buf);
break;
case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
- value.intval, buf);
+ if (uevent) /* no possible values in uevents */
+ goto default_format;
+ ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
+ if (uevent) /* no possible values in uevents */
+ goto default_format;
+ ret = power_supply_charge_types_show(dev, psy->desc->charge_types,
+ value.intval, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sysfs_emit(buf, "%s\n", value.strval);
break;
default:
+default_format:
if (ps_attr->text_values_len > 0 &&
value.intval < ps_attr->text_values_len && value.intval >= 0) {
ret = sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
@@ -321,19 +384,26 @@ static ssize_t power_supply_show_property(struct device *dev,
return ret;
}
+static ssize_t power_supply_show_property(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return power_supply_format_property(dev, false, attr, buf);
+}
+
static ssize_t power_supply_store_property(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count) {
ssize_t ret;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
const struct power_supply_attr *ps_attr = to_ps_attr(attr);
enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
ret = -EINVAL;
if (ps_attr->text_values_len > 0) {
- ret = __sysfs_match_string(ps_attr->text_values,
- ps_attr->text_values_len, buf);
+ ret = power_supply_match_string(ps_attr->text_values,
+ ps_attr->text_values_len, buf);
}
/*
@@ -364,9 +434,8 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
int attrno)
{
struct device *dev = kobj_to_dev(kobj);
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
- int i;
if (!power_supply_attrs[attrno].prop_name)
return 0;
@@ -374,19 +443,13 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
if (attrno == POWER_SUPPLY_PROP_TYPE)
return mode;
- for (i = 0; i < psy->desc->num_properties; i++) {
- int property = psy->desc->properties[i];
-
- if (property == attrno) {
- if (power_supply_property_is_writeable(psy, property) > 0)
- mode |= S_IWUSR;
+ guard(rwsem_read)(&psy->extensions_sem);
- return mode;
- }
- }
-
- if (power_supply_battery_info_has_prop(psy->battery_info, attrno))
+ if (power_supply_has_property(psy, attrno)) {
+ if (power_supply_property_is_writeable(psy, attrno) > 0)
+ mode |= S_IWUSR;
return mode;
+ }
return 0;
}
@@ -396,8 +459,18 @@ static const struct attribute_group power_supply_attr_group = {
.is_visible = power_supply_attr_is_visible,
};
+static struct attribute *power_supply_extension_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group power_supply_extension_group = {
+ .name = "extensions",
+ .attrs = power_supply_extension_attrs,
+};
+
const struct attribute_group *power_supply_attr_groups[] = {
&power_supply_attr_group,
+ &power_supply_extension_group,
NULL
};
@@ -437,8 +510,8 @@ static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env
pwr_attr = &power_supply_attrs[prop];
dev_attr = &pwr_attr->dev_attr;
- ret = power_supply_show_property((struct device *)dev, dev_attr, prop_buf);
- if (ret == -ENODEV || ret == -ENODATA) {
+ ret = power_supply_format_property((struct device *)dev, true, dev_attr, prop_buf);
+ if (ret == -ENODEV || ret == -ENODATA || ret == -EINVAL) {
/*
* When a battery is absent, we expect -ENODEV. Don't abort;
* send the uevent with at least the PRESENT=0 property
@@ -459,11 +532,7 @@ static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env
int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- const struct power_supply *psy = dev_get_drvdata(dev);
- const enum power_supply_property *battery_props =
- power_supply_battery_info_properties;
- unsigned long psy_drv_properties[POWER_SUPPLY_ATTR_CNT /
- sizeof(unsigned long) + 1] = {0};
+ const struct power_supply *psy = dev_to_psy(dev);
int ret = 0, j;
char *prop_buf;
@@ -491,22 +560,8 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (ret)
goto out;
- for (j = 0; j < psy->desc->num_properties; j++) {
- set_bit(psy->desc->properties[j], psy_drv_properties);
- ret = add_prop_uevent(dev, env, psy->desc->properties[j],
- prop_buf);
- if (ret)
- goto out;
- }
-
- for (j = 0; j < power_supply_battery_info_properties_size; j++) {
- if (test_bit(battery_props[j], psy_drv_properties))
- continue;
- if (!power_supply_battery_info_has_prop(psy->battery_info,
- battery_props[j]))
- continue;
- ret = add_prop_uevent(dev, env, battery_props[j],
- prop_buf);
+ for (j = 0; j < POWER_SUPPLY_ATTR_CNT; j++) {
+ ret = add_prop_uevent(dev, env, j, prop_buf);
if (ret)
goto out;
}
@@ -542,3 +597,44 @@ int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const
return -EINVAL;
}
EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_parse);
+
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ return power_supply_show_enum_with_available(
+ dev, POWER_SUPPLY_CHARGE_TYPE_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_CHARGE_TYPE_TEXT),
+ available_types, current_type, buf);
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_types_show);
+
+int power_supply_charge_types_parse(unsigned int available_types, const char *buf)
+{
+ int i = power_supply_match_string(POWER_SUPPLY_CHARGE_TYPE_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_CHARGE_TYPE_TEXT),
+ buf);
+
+ if (i < 0)
+ return i;
+
+ if (available_types & BIT(i))
+ return i;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_types_parse);
+
+int power_supply_sysfs_add_extension(struct power_supply *psy, const struct power_supply_ext *ext,
+ struct device *dev)
+{
+ return sysfs_add_link_to_group(&psy->dev.kobj, power_supply_extension_group.name,
+ &dev->kobj, ext->name);
+}
+
+void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext)
+{
+ sysfs_remove_link_from_group(&psy->dev.kobj, power_supply_extension_group.name, ext->name);
+}
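These link helpers give each extension a discoverable handle under the supply. Assuming a supply named BAT0 extended by the "test_power" extension, the resulting layout would be roughly (a sketch, not verified output):

/sys/class/power_supply/BAT0/extensions/test_power -> (link to the extending device)
/sys/class/power_supply/BAT0/manufacture_year        (property served by the extension)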
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index a6c204c08232..6f3d0413b1c1 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -21,6 +21,7 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string_choices.h>
enum {
REG_MANUFACTURER_DATA,
@@ -320,8 +321,8 @@ static int sbs_update_presence(struct sbs_info *chip, bool is_present)
client->flags &= ~I2C_CLIENT_PEC;
}
- dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ?
- "enabled" : "disabled");
+ dev_dbg(&client->dev, "PEC: %s\n",
+ str_enabled_disabled(client->flags & I2C_CLIENT_PEC));
if (!chip->is_present && is_present && !chip->charger_broadcasts)
sbs_disable_charger_broadcasts(chip);
diff --git a/drivers/power/supply/stc3117_fuel_gauge.c b/drivers/power/supply/stc3117_fuel_gauge.c
new file mode 100644
index 000000000000..a1bc5970370a
--- /dev/null
+++ b/drivers/power/supply/stc3117_fuel_gauge.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stc3117_fuel_gauge.c - STMicroelectronics STC3117 Fuel Gauge Driver
+ *
+ * Copyright (c) 2024 Silicon Signals Pvt Ltd.
+ * Author: Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>
+ * Bhavin Sharma <bhavin.sharma@siliconsignals.io>
+ */
+
+#include <linux/crc8.h>
+#include <linux/devm-helpers.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+#define STC3117_ADDR_MODE 0x00
+#define STC3117_ADDR_CTRL 0x01
+#define STC3117_ADDR_SOC_L 0x02
+#define STC3117_ADDR_SOC_H 0x03
+#define STC3117_ADDR_COUNTER_L 0x04
+#define STC3117_ADDR_COUNTER_H 0x05
+#define STC3117_ADDR_CURRENT_L 0x06
+#define STC3117_ADDR_CURRENT_H 0x07
+#define STC3117_ADDR_VOLTAGE_L 0x08
+#define STC3117_ADDR_VOLTAGE_H 0x09
+#define STC3117_ADDR_TEMPERATURE 0x0A
+#define STC3117_ADDR_AVG_CURRENT_L 0x0B
+#define STC3117_ADDR_AVG_CURRENT_H 0x0C
+#define STC3117_ADDR_OCV_L 0x0D
+#define STC3117_ADDR_OCV_H 0x0E
+#define STC3117_ADDR_CC_CNF_L 0x0F
+#define STC3117_ADDR_CC_CNF_H 0x10
+#define STC3117_ADDR_VM_CNF_L 0x11
+#define STC3117_ADDR_VM_CNF_H 0x12
+#define STC3117_ADDR_ALARM_SOC 0x13
+#define STC3117_ADDR_ALARM_VOLTAGE 0x14
+#define STC3117_ADDR_ID 0x18
+#define STC3117_ADDR_CC_ADJ_L 0x1B
+#define STC3117_ADDR_CC_ADJ_H 0x1C
+#define STC3117_ADDR_VM_ADJ_L 0x1D
+#define STC3117_ADDR_VM_ADJ_H 0x1E
+#define STC3117_ADDR_RAM 0x20
+#define STC3117_ADDR_OCV_TABLE 0x30
+#define STC3117_ADDR_SOC_TABLE 0x30
+
+/* Bit mask definition */
+#define STC3117_ID 0x16
+#define STC3117_MIXED_MODE 0x00
+#define STC3117_VMODE BIT(0)
+#define STC3117_GG_RUN BIT(4)
+#define STC3117_CC_MODE BIT(5)
+#define STC3117_BATFAIL BIT(3)
+#define STC3117_PORDET BIT(4)
+#define STC3117_RAM_SIZE 16
+#define STC3117_OCV_TABLE_SIZE 16
+#define STC3117_RAM_TESTWORD 0x53A9
+#define STC3117_SOFT_RESET 0x11
+#define STC3117_NOMINAL_CAPACITY 2600
+
+#define VOLTAGE_LSB_VALUE 9011
+#define CURRENT_LSB_VALUE 24084
+#define APP_CUTOFF_VOLTAGE 2500
+#define MAX_HRSOC 51200
+#define MAX_SOC 1000
+#define CHG_MIN_CURRENT 200
+#define CHG_END_CURRENT 20
+#define APP_MIN_CURRENT (-5)
+#define BATTERY_FULL 95
+#define CRC8_POLYNOMIAL 0x07
+#define CRC8_INIT 0x00
+
+DECLARE_CRC8_TABLE(stc3117_crc_table);
+
+enum stc3117_state {
+ STC3117_INIT,
+ STC3117_RUNNING,
+ STC3117_POWERDN,
+};
+
+/* Default OCV curve for a Li-ion battery */
+static const int ocv_value[16] = {
+ 3400, 3582, 3669, 3676, 3699, 3737, 3757, 3774,
+ 3804, 3844, 3936, 3984, 4028, 4131, 4246, 4320
+};
+
+union stc3117_internal_ram {
+ u8 ram_bytes[STC3117_RAM_SIZE];
+ struct {
+ u16 testword; /* 0-1 Bytes */
+ u16 hrsoc; /* 2-3 Bytes */
+ u16 cc_cnf; /* 4-5 Bytes */
+ u16 vm_cnf; /* 6-7 Bytes */
+ u8 soc; /* 8 Byte */
+ u8 state; /* 9 Byte */
+ u8 unused[5]; /* 10-14 Bytes */
+ u8 crc; /* 15 Byte */
+ } reg;
+};
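The last RAM byte mirrors a CRC8 over the first fifteen, so a CRC8 over all sixteen bytes is zero exactly when the stored CRC matches. A hedged sketch of the validity check that stc3117_init() and stc3117_task() perform inline:

/* Sketch only: factored-out form of the in-driver RAM mirror check. */
static bool stc3117_ram_valid(const union stc3117_internal_ram *ram)
{
	return ram->reg.testword == STC3117_RAM_TESTWORD &&
	       crc8(stc3117_crc_table, ram->ram_bytes,
		    STC3117_RAM_SIZE, CRC8_INIT) == 0;
}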
+
+struct stc3117_battery_info {
+ int voltage_min_mv;
+ int voltage_max_mv;
+ int battery_capacity_mah;
+ int sense_resistor;
+};
+
+struct stc3117_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct delayed_work update_work;
+ struct power_supply *battery;
+ union stc3117_internal_ram ram_data;
+ struct stc3117_battery_info battery_info;
+
+ u8 soc_tab[16];
+ int cc_cnf;
+ int vm_cnf;
+ int cc_adj;
+ int vm_adj;
+ int avg_current;
+ int avg_voltage;
+ int batt_current;
+ int voltage;
+ int temp;
+ int soc;
+ int ocv;
+ int hrsoc;
+ int presence;
+};
+
+static int stc3117_convert(int value, int factor)
+{
+ value = (value * factor) / 4096;
+ return value * 1000;
+}
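To make the fixed-point scaling concrete, a hedged worked example with an assumed raw reading:

/*
 * Worked example (raw value assumed): a voltage reading of 1700 LSBs
 * gives 1700 * 9011 / 4096 = 3739 (mV), returned as 3739000 uV,
 * i.e. ~3.74 V. Current readings instead pass
 * CURRENT_LSB_VALUE / sense_resistor as the factor.
 */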
+
+static int stc3117_get_battery_data(struct stc3117_data *data)
+{
+ u8 reg_list[16];
+ u8 data_adjust[4];
+ int value, mode, ret;
+
+ ret = regmap_bulk_read(data->regmap, STC3117_ADDR_MODE,
+ reg_list, sizeof(reg_list));
+ if (ret)
+ return ret;
+
+ /* soc */
+ value = (reg_list[3] << 8) + reg_list[2];
+ data->hrsoc = value;
+ data->soc = (value * 10 + 256) / 512;
+
+ /* current in uA*/
+ value = (reg_list[7] << 8) + reg_list[6];
+ data->batt_current = stc3117_convert(value,
+ CURRENT_LSB_VALUE / data->battery_info.sense_resistor);
+
+ /* voltage in uV */
+ value = (reg_list[9] << 8) + reg_list[8];
+ data->voltage = stc3117_convert(value, VOLTAGE_LSB_VALUE);
+
+ /* temp in 1/10 °C */
+ data->temp = reg_list[10] * 10;
+
+ /* Avg current in uA */
+ value = (reg_list[12] << 8) + reg_list[11];
+ ret = regmap_read(data->regmap, STC3117_ADDR_MODE, &mode);
+ if (ret)
+ return ret;
+ if (!(mode & STC3117_VMODE)) {
+ value = stc3117_convert(value,
+ CURRENT_LSB_VALUE / data->battery_info.sense_resistor);
+ value = value / 4;
+ } else {
+ value = stc3117_convert(value, 36 * STC3117_NOMINAL_CAPACITY);
+ }
+ data->avg_current = value;
+
+ /* ocv in uV */
+ value = (reg_list[14] << 8) + reg_list[13];
+ value = stc3117_convert(value, VOLTAGE_LSB_VALUE);
+ value = (value + 2) / 4;
+ data->ocv = value;
+
+ /* CC & VM adjustment counters */
+ ret = regmap_bulk_read(data->regmap, STC3117_ADDR_CC_ADJ_L,
+ data_adjust, sizeof(data_adjust));
+ if (ret)
+ return ret;
+ value = (data_adjust[1] << 8) + data_adjust[0];
+ data->cc_adj = value;
+
+ value = (data_adjust[3] << 8) + data_adjust[2];
+ data->vm_adj = value;
+
+ return 0;
+}
+
+static int ram_write(struct stc3117_data *data)
+{
+ return regmap_bulk_write(data->regmap, STC3117_ADDR_RAM,
+ data->ram_data.ram_bytes, STC3117_RAM_SIZE);
+}
+
+static int ram_read(struct stc3117_data *data)
+{
+ return regmap_bulk_read(data->regmap, STC3117_ADDR_RAM,
+ data->ram_data.ram_bytes, STC3117_RAM_SIZE);
+}
+
+static int stc3117_set_para(struct stc3117_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, STC3117_ADDR_MODE, STC3117_VMODE);
+
+ for (int i = 0; i < STC3117_OCV_TABLE_SIZE; i++)
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_TABLE + i,
+ ocv_value[i] * 100 / 55);
+ if (data->soc_tab[1] != 0)
+ ret |= regmap_bulk_write(data->regmap, STC3117_ADDR_SOC_TABLE,
+ data->soc_tab, STC3117_OCV_TABLE_SIZE);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CC_CNF_H,
+ (data->ram_data.reg.cc_cnf >> 8) & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CC_CNF_L,
+ data->ram_data.reg.cc_cnf & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_VM_CNF_H,
+ (data->ram_data.reg.vm_cnf >> 8) & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_VM_CNF_L,
+ data->ram_data.reg.vm_cnf & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CTRL, 0x03);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_MODE,
+ STC3117_MIXED_MODE | STC3117_GG_RUN);
+
+ return ret;
+}
+
+static int stc3117_init(struct stc3117_data *data)
+{
+ int id, ret;
+ int ctrl;
+ int ocv_m, ocv_l;
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_ID, &id);
+ if (ret)
+ return ret;
+ if (id != STC3117_ID)
+ return -EINVAL;
+
+ data->cc_cnf = (data->battery_info.battery_capacity_mah *
+ data->battery_info.sense_resistor * 250 + 6194) / 12389;
+ data->vm_cnf = (data->battery_info.battery_capacity_mah
+ * 200 * 50 + 24444) / 48889;
+
+ /* Battery has not been removed */
+ data->presence = 1;
+
+ /* Read RAM data */
+ ret = ram_read(data);
+ if (ret)
+ return ret;
+
+ if (data->ram_data.reg.testword != STC3117_RAM_TESTWORD ||
+ (crc8(stc3117_crc_table, data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE, CRC8_INIT)) != 0) {
+ data->ram_data.reg.testword = STC3117_RAM_TESTWORD;
+ data->ram_data.reg.cc_cnf = data->cc_cnf;
+ data->ram_data.reg.vm_cnf = data->vm_cnf;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap, STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_read(data->regmap, STC3117_ADDR_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ if ((ctrl & STC3117_BATFAIL) != 0 ||
+ (ctrl & STC3117_PORDET) != 0) {
+ ret = regmap_read(data->regmap,
+ STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap,
+ STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap,
+ STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap,
+ STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ } else {
+ ret = stc3117_set_para(data);
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_H,
+ (data->ram_data.reg.hrsoc >> 8 & 0xFF));
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_L,
+ (data->ram_data.reg.hrsoc & 0xFF));
+ if (ret)
+ return ret;
+ }
+ }
+
+ data->ram_data.reg.state = STC3117_INIT;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+ ret = ram_write(data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int stc3117_task(struct stc3117_data *data)
+{
+ int id, mode, ret;
+ int count_l, count_m;
+ int ocv_l, ocv_m;
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_ID, &id);
+ if (ret)
+ return ret;
+ if (id != STC3117_ID) {
+ data->presence = 0;
+ return -EINVAL;
+ }
+
+ ret = stc3117_get_battery_data(data);
+ if (ret)
+ return ret;
+
+ /* Read RAM data */
+ ret = ram_read(data);
+ if (ret)
+ return ret;
+
+ if (data->ram_data.reg.testword != STC3117_RAM_TESTWORD ||
+ (crc8(stc3117_crc_table, data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE, CRC8_INIT) != 0)) {
+ data->ram_data.reg.testword = STC3117_RAM_TESTWORD;
+ data->ram_data.reg.cc_cnf = data->cc_cnf;
+ data->ram_data.reg.vm_cnf = data->vm_cnf;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+ data->ram_data.reg.state = STC3117_INIT;
+ }
+
+ /* check battery presence status */
+ ret = regmap_read(data->regmap, STC3117_ADDR_CTRL, &mode);
+ if (ret)
+ return ret;
+ if ((mode & STC3117_BATFAIL) != 0) {
+ data->presence = 0;
+ data->ram_data.reg.testword = 0;
+ data->ram_data.reg.state = STC3117_INIT;
+ ret = ram_write(data);
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CTRL, STC3117_PORDET);
+ if (ret)
+ return ret;
+ }
+
+ data->presence = 1;
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_MODE, &mode);
+ if (ret)
+ return ret;
+ if ((mode & STC3117_GG_RUN) == 0) {
+ if (data->ram_data.reg.state > STC3117_INIT) {
+ ret = stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_H,
+ (data->ram_data.reg.hrsoc >> 8 & 0xFF));
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_L,
+ (data->ram_data.reg.hrsoc & 0xFF));
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_read(data->regmap, STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap, STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ }
+ data->ram_data.reg.state = STC3117_INIT;
+ }
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_COUNTER_L, &count_l);
+ ret |= regmap_read(data->regmap, STC3117_ADDR_COUNTER_H, &count_m);
+ if (ret)
+ return ret;
+
+ count_m = (count_m << 8) + count_l;
+
+ /* In INIT state, wait until batt_current and temperature values are available */
+ if (data->ram_data.reg.state == STC3117_INIT && count_m > 4) {
+ data->avg_voltage = data->voltage;
+ data->avg_current = data->batt_current;
+ data->ram_data.reg.state = STC3117_RUNNING;
+ }
+
+ if (data->ram_data.reg.state != STC3117_RUNNING) {
+ data->batt_current = -ENODATA;
+ data->temp = -ENODATA;
+ } else {
+ /* data->voltage is in uV, the cutoff threshold in mV */
+ if (data->voltage / 1000 < APP_CUTOFF_VOLTAGE)
+ data->soc = -ENODATA;
+
+ if (mode & STC3117_VMODE) {
+ data->avg_current = -ENODATA;
+ data->batt_current = -ENODATA;
+ }
+ }
+
+ data->ram_data.reg.hrsoc = data->hrsoc;
+ data->ram_data.reg.soc = (data->soc + 5) / 10;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+
+ ret = ram_write(data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void fuel_gauge_update_work(struct work_struct *work)
+{
+ struct stc3117_data *data =
+ container_of(work, struct stc3117_data, update_work.work);
+
+ stc3117_task(data);
+
+ /* Schedule the work to run again in 2 seconds */
+ schedule_delayed_work(&data->update_work, msecs_to_jiffies(2000));
+}
+
+static int stc3117_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct stc3117_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (data->soc > BATTERY_FULL)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (data->batt_current < 0)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->batt_current > 0)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = data->voltage;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = data->batt_current;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = data->ocv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = data->avg_current;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = data->soc;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = data->temp;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = data->presence;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property stc3117_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+static const struct power_supply_desc stc3117_battery_desc = {
+ .name = "stc3117-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = stc3117_get_property,
+ .properties = stc3117_battery_props,
+ .num_properties = ARRAY_SIZE(stc3117_battery_props),
+};
+
+static const struct regmap_config stc3117_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int stc3117_probe(struct i2c_client *client)
+{
+ struct stc3117_data *data;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply_battery_info *info;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->regmap = devm_regmap_init_i2c(client, &stc3117_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ psy_cfg.drv_data = data;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
+
+ crc8_populate_msb(stc3117_crc_table, CRC8_POLYNOMIAL);
+
+ data->battery = devm_power_supply_register(&client->dev,
+ &stc3117_battery_desc, &psy_cfg);
+ if (IS_ERR(data->battery))
+ return dev_err_probe(&client->dev, PTR_ERR(data->battery),
+ "failed to register battery\n");
+
+ ret = device_property_read_u32(&client->dev, "shunt-resistor-micro-ohms",
+ &data->battery_info.sense_resistor);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to get shunt-resistor-micro-ohms\n");
+ data->battery_info.sense_resistor = data->battery_info.sense_resistor / 1000;
+
+ ret = power_supply_get_battery_info(data->battery, &info);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to get battery information\n");
+
+ data->battery_info.battery_capacity_mah = info->charge_full_design_uah / 1000;
+ data->battery_info.voltage_min_mv = info->voltage_min_design_uv / 1000;
+ data->battery_info.voltage_max_mv = info->voltage_max_design_uv / 1000;
+
+ ret = stc3117_init(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to initialize of stc3117\n");
+
+ ret = devm_delayed_work_autocancel(&client->dev, &data->update_work,
+ fuel_gauge_update_work);
+ if (ret)
+ return ret;
+
+ schedule_delayed_work(&data->update_work, 0);
+
+ return 0;
+}
+
+static const struct i2c_device_id stc3117_id[] = {
+ { "stc3117", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, stc3117_id);
+
+static const struct of_device_id stc3117_of_match[] = {
+ { .compatible = "st,stc3117" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stc3117_of_match);
+
+static struct i2c_driver stc3117_i2c_driver = {
+ .driver = {
+ .name = "stc3117_i2c_driver",
+ .of_match_table = stc3117_of_match,
+ },
+ .probe = stc3117_probe,
+ .id_table = stc3117_id,
+};
+
+module_i2c_driver(stc3117_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>");
+MODULE_AUTHOR("Bhavin Sharma <bhavin.sharma@siliconsignals.io>");
+MODULE_DESCRIPTION("STC3117 Fuel Gauge Driver");
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index ebd1edde28f1..c759add4df49 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -667,7 +667,7 @@ out:
static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
int status;
@@ -681,7 +681,7 @@ static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, cha
static ssize_t alarm_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
unsigned long value;
int status;
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 442ceb7795e1..2a975a110f48 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -37,6 +37,7 @@ static int battery_charge_counter = -1000;
static int battery_current = -1600;
static enum power_supply_charge_behaviour battery_charge_behaviour =
POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+static bool battery_extension;
static bool module_initialized;
@@ -238,6 +239,87 @@ static const struct power_supply_config test_power_configs[] = {
},
};
+static int test_power_battery_extmanufacture_year = 1234;
+static int test_power_battery_exttemp_max = 1000;
+static const enum power_supply_property test_power_battery_extprops[] = {
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+};
+
+static int test_power_battery_extget_property(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ val->intval = test_power_battery_extmanufacture_year;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ val->intval = test_power_battery_exttemp_max;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int test_power_battery_extset_property(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ test_power_battery_extmanufacture_year = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ test_power_battery_exttemp_max = val->intval;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int test_power_battery_extproperty_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp)
+{
+ return true;
+}
+
+static const struct power_supply_ext test_power_battery_ext = {
+ .name = "test_power",
+ .properties = test_power_battery_extprops,
+ .num_properties = ARRAY_SIZE(test_power_battery_extprops),
+ .get_property = test_power_battery_extget_property,
+ .set_property = test_power_battery_extset_property,
+ .property_is_writeable = test_power_battery_extproperty_is_writeable,
+};
+
+static void test_power_configure_battery_extension(bool enable)
+{
+ struct power_supply *psy;
+
+ psy = test_power_supplies[TEST_BATTERY];
+
+ if (enable) {
+ if (power_supply_register_extension(psy, &test_power_battery_ext, &psy->dev,
+ NULL)) {
+ pr_err("registering battery extension failed\n");
+ return;
+ }
+ } else {
+ power_supply_unregister_extension(psy, &test_power_battery_ext);
+ }
+
+ battery_extension = enable;
+}
+
static int __init test_power_init(void)
{
int i;
@@ -258,6 +340,8 @@ static int __init test_power_init(void)
}
}
+ test_power_configure_battery_extension(true);
+
module_initialized = true;
return 0;
failed:
@@ -524,6 +608,26 @@ static int param_set_battery_current(const char *key,
#define param_get_battery_current param_get_int
+static int param_set_battery_extension(const char *key,
+ const struct kernel_param *kp)
+{
+ bool prev_battery_extension;
+ int ret;
+
+ prev_battery_extension = battery_extension;
+
+ ret = param_set_bool(key, kp);
+ if (ret)
+ return ret;
+
+ if (prev_battery_extension != battery_extension)
+ test_power_configure_battery_extension(battery_extension);
+
+ return 0;
+}
+
+#define param_get_battery_extension param_get_bool
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -574,6 +678,11 @@ static const struct kernel_param_ops param_ops_battery_current = {
.get = param_get_battery_current,
};
+static const struct kernel_param_ops param_ops_battery_extension = {
+ .set = param_set_battery_extension,
+ .get = param_get_battery_extension,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -584,6 +693,7 @@ static const struct kernel_param_ops param_ops_battery_current = {
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
#define param_check_battery_current(name, p) __param_check(name, p, void);
+#define param_check_battery_extension(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
@@ -621,6 +731,9 @@ MODULE_PARM_DESC(battery_charge_counter,
module_param(battery_current, battery_current, 0644);
MODULE_PARM_DESC(battery_current, "battery current (milliampere)");
+module_param(battery_extension, battery_extension, 0644);
+MODULE_PARM_DESC(battery_extension, "battery extension");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index ccc5c4d2e230..38e23bdd4603 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -287,7 +287,6 @@ out:
static enum power_supply_property ug3105_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -316,9 +315,6 @@ static int ug3105_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = chip->info->technology;
- break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
break;
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 04c212953ded..5ad7cc438068 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -339,8 +339,7 @@ struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
return NULL;
cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
- hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ii_dev->timer.function = idle_inject_timer_fn;
+ hrtimer_setup(&ii_dev->timer, idle_inject_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->latency_us = UINT_MAX;
ii_dev->update = update;
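hrtimer_setup() folds the old two-step initialization into a single call that takes the callback up front; the same conversion repeats throughout this series. A sketch of the pattern:

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* do the periodic work */
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct hrtimer *t)
{
	/* before: hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *         t->function = my_timer_fn;
	 */
	hrtimer_setup(t, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}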
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5e793b80fd6b..5ab3feb29686 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1265,6 +1265,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &rapl_defaults_core),
@@ -1273,7 +1274,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &rapl_defaults_ann),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
@@ -2063,8 +2064,7 @@ int rapl_package_add_pmu(struct rapl_package *rp)
raw_spin_lock_init(&data->lock);
INIT_LIST_HEAD(&data->active_list);
data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = rapl_hrtimer_handle;
+ hrtimer_setup(&data->hrtimer, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return rapl_pmu_update(rp);
}
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 52c32dcbf7d8..4112a0097338 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type(
dev_set_name(&control_type->dev, "%s", name);
result = device_register(&control_type->dev);
if (result) {
- if (control_type->allocated)
- kfree(control_type);
+ put_device(&control_type->dev);
return ERR_PTR(result);
}
idr_init(&control_type->idr);
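The fix follows the usual struct device lifetime rule: once device_register() has been called, the embedded kobject owns the containing allocation, so even on failure the reference must be dropped with put_device() (which ends in the release callback) rather than freed directly. Sketch of the rule:

	err = device_register(&obj->dev);
	if (err) {
		/*
		 * Do NOT kfree(obj) here: device_register() initialized
		 * the kobject, so the memory is freed by obj->dev.release()
		 * once the last reference is gone.
		 */
		put_device(&obj->dev);
		return ERR_PTR(err);
	}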
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index ceaf65cc1f1d..0aea394d4e4d 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -6,6 +6,7 @@
pps_core-y := pps.o kapi.o sysfs.o
pps_core-$(CONFIG_NTP_PPS) += kc.o
obj-$(CONFIG_PPS) := pps_core.o
-obj-y += clients/ generators/
+obj-y += clients/
+obj-$(CONFIG_PPS_GENERATOR) += generators/
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 634c3b2f8c26..75c1bae30a7c 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -52,7 +52,9 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
info = data;
- rising_edge = gpiod_get_value(info->gpio_pin);
+ /* Small trick to bypass the check on edge's direction when capture_clear is unset */
+ rising_edge = info->capture_clear ?
+ gpiod_get_value(info->gpio_pin) : !info->assert_falling_edge;
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
pps_event(info->pps, &ts, PPS_CAPTUREASSERT, data);
@@ -60,6 +62,8 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
((rising_edge && info->assert_falling_edge) ||
(!rising_edge && !info->assert_falling_edge)))
pps_event(info->pps, &ts, PPS_CAPTURECLEAR, data);
+ else
+ dev_warn_ratelimited(&info->pps->dev, "IRQ did not trigger any PPS event\n");
return IRQ_HANDLED;
}
@@ -214,8 +218,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
- data->irq);
+ dev_dbg(&data->pps->dev, "Registered IRQ %d as PPS source\n",
+ data->irq);
return 0;
}
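With the change above, the handler only samples the GPIO when capture_clear is set (both edges matter); otherwise it synthesizes the value that makes the PPS_CAPTUREASSERT branch match, avoiding a read of a line that may already have changed state. The decision, factored out as a sketch:

static bool pps_edge_is_rising(bool capture_clear, bool assert_falling_edge,
			       struct gpio_desc *pin)
{
	if (capture_clear)
		return gpiod_get_value(pin);	/* real edge direction */
	/* force the value that satisfies the CAPTUREASSERT condition */
	return !assert_falling_edge;
}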
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index d33106bd7a29..2f465549b843 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -56,7 +56,7 @@ static struct pps_source_info pps_ktimer_info = {
static void __exit pps_ktimer_exit(void)
{
- dev_info(pps->dev, "ktimer PPS source unregistered\n");
+ dev_dbg(&pps->dev, "ktimer PPS source unregistered\n");
del_timer_sync(&ktimer);
pps_unregister_source(pps);
@@ -74,7 +74,7 @@ static int __init pps_ktimer_init(void)
timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
- dev_info(pps->dev, "ktimer PPS source registered\n");
+ dev_dbg(&pps->dev, "ktimer PPS source registered\n");
return 0;
}
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 443d6bae19d1..fa5660f3c4b7 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -32,7 +32,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, bool active)
pps_event(pps, &ts, active ? PPS_CAPTUREASSERT :
PPS_CAPTURECLEAR, NULL);
- dev_dbg(pps->dev, "PPS %s at %lu\n",
+ dev_dbg(&pps->dev, "PPS %s at %lu\n",
active ? "assert" : "clear", jiffies);
}
@@ -69,7 +69,7 @@ static int pps_tty_open(struct tty_struct *tty)
goto err_unregister;
}
- dev_info(pps->dev, "source \"%s\" added\n", info.path);
+ dev_dbg(&pps->dev, "source \"%s\" added\n", info.path);
return 0;
@@ -89,7 +89,7 @@ static void pps_tty_close(struct tty_struct *tty)
if (WARN_ON(!pps))
return;
- dev_info(pps->dev, "removed\n");
+ dev_info(&pps->dev, "removed\n");
pps_unregister_source(pps);
}
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index abaffb4e1c1c..24db06750297 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -81,7 +81,7 @@ static void parport_irq(void *handle)
/* check the signal (no signal means the pulse is lost this time) */
if (!signal_is_set(port)) {
local_irq_restore(flags);
- dev_err(dev->pps->dev, "lost the signal\n");
+ dev_err(&dev->pps->dev, "lost the signal\n");
goto out_assert;
}
@@ -98,7 +98,7 @@ static void parport_irq(void *handle)
/* timeout */
dev->cw_err++;
if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
- dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+ dev_err(&dev->pps->dev, "disabled clear edge capture after %d"
" timeouts\n", dev->cw_err);
dev->cw = 0;
dev->cw_err = 0;
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index d615e640fcad..cd94bf3bfaf2 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -3,7 +3,25 @@
# PPS generators configuration
#
-comment "PPS generators support"
+menuconfig PPS_GENERATOR
+ tristate "PPS generators support"
+ help
+ PPS generators are special hardware which are able to produce PPS
+ (Pulse Per Second) signals.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pps_gen_core.
+
+if PPS_GENERATOR
+
+config PPS_GENERATOR_DUMMY
+ tristate "Dummy PPS generator (Testing generator, use for debug)"
+ help
+ If you say yes here you get support for a PPS debugging generator
+ (which generates no PPS signal at all).
+
+ This driver can also be built as a module. If so, the module
+ will be called pps_gen-dummy.
config PPS_GENERATOR_PARPORT
tristate "Parallel port PPS signal generator"
@@ -12,3 +30,5 @@ config PPS_GENERATOR_PARPORT
If you say yes here you get support for a PPS signal generator which
utilizes STROBE pin of a parallel port to send PPS signals. It uses
parport abstraction layer and hrtimers to precisely control the signal.
+
+endif # PPS_GENERATOR
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index 2589fd0f2481..dc1aa5a4688b 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -3,6 +3,10 @@
# Makefile for PPS generators.
#
+pps_gen_core-y := pps_gen.o sysfs.o
+obj-$(CONFIG_PPS_GENERATOR) := pps_gen_core.o
+
+obj-$(CONFIG_PPS_GENERATOR_DUMMY) += pps_gen-dummy.o
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/generators/pps_gen-dummy.c b/drivers/pps/generators/pps_gen-dummy.c
new file mode 100644
index 000000000000..b284c200cbe5
--- /dev/null
+++ b/drivers/pps/generators/pps_gen-dummy.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS dummy generator
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/random.h>
+#include <linux/pps_gen_kernel.h>
+
+static struct pps_gen_device *pps_gen;
+static struct timer_list ktimer;
+
+static unsigned int get_random_delay(void)
+{
+ unsigned int delay = get_random_u8() & 0x0f;
+
+ return (delay + 1) * HZ;
+}
+
+/*
+ * The kernel timer
+ */
+
+static void pps_gen_ktimer_event(struct timer_list *unused)
+{
+ pps_gen_event(pps_gen, PPS_GEN_EVENT_MISSEDPULSE, NULL);
+}
+
+/*
+ * PPS Generator methods
+ */
+
+static int pps_gen_dummy_get_time(struct pps_gen_device *pps_gen,
+ struct timespec64 *time)
+{
+ struct system_time_snapshot snap;
+
+ ktime_get_snapshot(&snap);
+ *time = ktime_to_timespec64(snap.real);
+
+ return 0;
+}
+
+static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
+{
+ if (enable)
+ mod_timer(&ktimer, jiffies + get_random_delay());
+ else
+ del_timer_sync(&ktimer);
+
+ return 0;
+}
+
+/*
+ * The PPS info struct
+ */
+
+static struct pps_gen_source_info pps_gen_dummy_info = {
+ .use_system_clock = true,
+ .get_time = pps_gen_dummy_get_time,
+ .enable = pps_gen_dummy_enable,
+};
+
+/*
+ * Module stuff
+ */
+
+static void __exit pps_gen_dummy_exit(void)
+{
+ del_timer_sync(&ktimer);
+ pps_gen_unregister_source(pps_gen);
+}
+
+static int __init pps_gen_dummy_init(void)
+{
+ pps_gen = pps_gen_register_source(&pps_gen_dummy_info);
+ if (IS_ERR(pps_gen))
+ return PTR_ERR(pps_gen);
+
+ timer_setup(&ktimer, pps_gen_ktimer_event, 0);
+
+ return 0;
+}
+
+module_init(pps_gen_dummy_init);
+module_exit(pps_gen_dummy_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@enneenne.com>");
+MODULE_DESCRIPTION("LinuxPPS dummy generator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/pps_gen.c b/drivers/pps/generators/pps_gen.c
new file mode 100644
index 000000000000..ca592f1736f4
--- /dev/null
+++ b/drivers/pps/generators/pps_gen.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS generators core file
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/pps_gen_kernel.h>
+#include <linux/slab.h>
+
+/*
+ * Local variables
+ */
+
+static dev_t pps_gen_devt;
+static struct class *pps_gen_class;
+
+static DEFINE_IDA(pps_gen_ida);
+
+/*
+ * Char device methods
+ */
+
+static __poll_t pps_gen_cdev_poll(struct file *file, poll_table *wait)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ poll_wait(file, &pps_gen->queue, wait);
+ return EPOLLIN | EPOLLRDNORM;
+}
+
+static int pps_gen_cdev_fasync(int fd, struct file *file, int on)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ return fasync_helper(fd, file, on, &pps_gen->async_queue);
+}
+
+static long pps_gen_cdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+ void __user *uarg = (void __user *) arg;
+ unsigned int __user *uiuarg = (unsigned int __user *) arg;
+ unsigned int status;
+ int ret;
+
+ switch (cmd) {
+ case PPS_GEN_SETENABLE:
+ dev_dbg(pps_gen->dev, "PPS_GEN_SETENABLE\n");
+
+ ret = get_user(status, uiuarg);
+ if (ret)
+ return -EFAULT;
+
+ ret = pps_gen->info.enable(pps_gen, status);
+ if (ret)
+ return ret;
+ pps_gen->enabled = status;
+
+ break;
+
+ case PPS_GEN_USESYSTEMCLOCK:
+ dev_dbg(pps_gen->dev, "PPS_GEN_USESYSTEMCLOCK\n");
+
+ ret = put_user(pps_gen->info.use_system_clock, uiuarg);
+ if (ret)
+ return -EFAULT;
+
+ break;
+
+ case PPS_GEN_FETCHEVENT: {
+ struct pps_gen_event info;
+ unsigned int ev = pps_gen->last_ev;
+
+ dev_dbg(pps_gen->dev, "PPS_GEN_FETCHEVENT\n");
+
+ ret = wait_event_interruptible(pps_gen->queue,
+ ev != pps_gen->last_ev);
+ if (ret == -ERESTARTSYS) {
+ dev_dbg(pps_gen->dev, "pending signal caught\n");
+ return -EINTR;
+ }
+
+ spin_lock_irq(&pps_gen->lock);
+ info.sequence = pps_gen->sequence;
+ info.event = pps_gen->event;
+ spin_unlock_irq(&pps_gen->lock);
+
+ ret = copy_to_user(uarg, &info, sizeof(struct pps_gen_event));
+ if (ret)
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int pps_gen_cdev_open(struct inode *inode, struct file *file)
+{
+ struct pps_gen_device *pps_gen = container_of(inode->i_cdev,
+ struct pps_gen_device, cdev);
+
+ get_device(pps_gen->dev);
+ file->private_data = pps_gen;
+ return 0;
+}
+
+static int pps_gen_cdev_release(struct inode *inode, struct file *file)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ put_device(pps_gen->dev);
+ return 0;
+}
+
+/*
+ * Char device stuff
+ */
+
+static const struct file_operations pps_gen_cdev_fops = {
+ .owner = THIS_MODULE,
+ .poll = pps_gen_cdev_poll,
+ .fasync = pps_gen_cdev_fasync,
+ .unlocked_ioctl = pps_gen_cdev_ioctl,
+ .open = pps_gen_cdev_open,
+ .release = pps_gen_cdev_release,
+};
+
+static void pps_gen_device_destruct(struct device *dev)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+
+ cdev_del(&pps_gen->cdev);
+
+ pr_debug("deallocating pps-gen%d\n", pps_gen->id);
+ ida_free(&pps_gen_ida, pps_gen->id);
+
+ kfree(dev);
+ kfree(pps_gen);
+}
+
+static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
+{
+ int err;
+ dev_t devt;
+
+ err = ida_alloc_max(&pps_gen_ida, PPS_GEN_MAX_SOURCES - 1, GFP_KERNEL);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ pr_err("too many PPS sources in the system\n");
+ err = -EBUSY;
+ }
+ return err;
+ }
+ pps_gen->id = err;
+
+ devt = MKDEV(MAJOR(pps_gen_devt), pps_gen->id);
+
+ cdev_init(&pps_gen->cdev, &pps_gen_cdev_fops);
+ pps_gen->cdev.owner = pps_gen->info.owner;
+
+ err = cdev_add(&pps_gen->cdev, devt, 1);
+ if (err) {
+ pr_err("failed to add char device %d:%d\n",
+ MAJOR(pps_gen_devt), pps_gen->id);
+ goto free_ida;
+ }
+ pps_gen->dev = device_create(pps_gen_class, pps_gen->info.parent, devt,
+ pps_gen, "pps-gen%d", pps_gen->id);
+ if (IS_ERR(pps_gen->dev)) {
+ err = PTR_ERR(pps_gen->dev);
+ goto del_cdev;
+ }
+ pps_gen->dev->release = pps_gen_device_destruct;
+ dev_set_drvdata(pps_gen->dev, pps_gen);
+
+ pr_debug("generator got cdev (%d:%d)\n",
+ MAJOR(pps_gen_devt), pps_gen->id);
+
+ return 0;
+
+del_cdev:
+ cdev_del(&pps_gen->cdev);
+free_ida:
+ ida_free(&pps_gen_ida, pps_gen->id);
+ return err;
+}
+
+static void pps_gen_unregister_cdev(struct pps_gen_device *pps_gen)
+{
+ pr_debug("unregistering pps-gen%d\n", pps_gen->id);
+ device_destroy(pps_gen_class, pps_gen->dev->devt);
+}
+
+/*
+ * Exported functions
+ */
+
+/**
+ * pps_gen_register_source() - add a PPS generator in the system
+ * @info: the PPS generator info struct
+ *
+ * This function is used to register a new PPS generator in the system.
+ * When it returns successfully the new generator is up and running, and
+ * it can be managed from userspace.
+ *
+ * Return: the PPS generator device in case of success, and ERR_PTR(errno)
+ * otherwise.
+ */
+struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
+{
+ struct pps_gen_device *pps_gen;
+ int err;
+
+ pps_gen = kzalloc(sizeof(struct pps_gen_device), GFP_KERNEL);
+ if (pps_gen == NULL) {
+ err = -ENOMEM;
+ goto pps_gen_register_source_exit;
+ }
+ pps_gen->info = *info;
+ pps_gen->enabled = false;
+
+ init_waitqueue_head(&pps_gen->queue);
+ spin_lock_init(&pps_gen->lock);
+
+ /* Create the char device */
+ err = pps_gen_register_cdev(pps_gen);
+ if (err < 0) {
+ pr_err(" unable to create char device\n");
+ goto kfree_pps_gen;
+ }
+
+ return pps_gen;
+
+kfree_pps_gen:
+ kfree(pps_gen);
+
+pps_gen_register_source_exit:
+ pr_err("unable to register generator\n");
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(pps_gen_register_source);
+
+/**
+ * pps_gen_unregister_source() - remove a PPS generator from the system
+ * @pps_gen: the PPS generator device to be removed
+ *
+ * This function is used to deregister a PPS generator from the system. When
+ * called, it disables the generator so no pulses are generated anymore.
+ */
+void pps_gen_unregister_source(struct pps_gen_device *pps_gen)
+{
+ pps_gen_unregister_cdev(pps_gen);
+}
+EXPORT_SYMBOL(pps_gen_unregister_source);
+
+/* pps_gen_event - register a PPS generator event into the system
+ * @pps_gen: the PPS generator device
+ * @event: the event type
+ * @data: userdef pointer
+ *
+ * This function is used by each PPS generator in order to register a new
+ * PPS event into the system (it's usually called inside an IRQ handler).
+ */
+void pps_gen_event(struct pps_gen_device *pps_gen,
+ unsigned int event, void *data)
+{
+ unsigned long flags;
+
+ dev_dbg(pps_gen->dev, "PPS generator event %u\n", event);
+
+ spin_lock_irqsave(&pps_gen->lock, flags);
+
+ pps_gen->event = event;
+ pps_gen->sequence++;
+
+ pps_gen->last_ev++;
+ wake_up_interruptible_all(&pps_gen->queue);
+ kill_fasync(&pps_gen->async_queue, SIGIO, POLL_IN);
+
+ spin_unlock_irqrestore(&pps_gen->lock, flags);
+}
+EXPORT_SYMBOL(pps_gen_event);
+
+/*
+ * Module stuff
+ */
+
+static void __exit pps_gen_exit(void)
+{
+ class_destroy(pps_gen_class);
+ unregister_chrdev_region(pps_gen_devt, PPS_GEN_MAX_SOURCES);
+}
+
+static int __init pps_gen_init(void)
+{
+ int err;
+
+ pps_gen_class = class_create("pps-gen");
+ if (IS_ERR(pps_gen_class)) {
+ pr_err("failed to allocate class\n");
+ return PTR_ERR(pps_gen_class);
+ }
+ pps_gen_class->dev_groups = pps_gen_groups;
+
+ err = alloc_chrdev_region(&pps_gen_devt, 0,
+ PPS_GEN_MAX_SOURCES, "pps-gen");
+ if (err < 0) {
+ pr_err("failed to allocate char device region\n");
+ goto remove_class;
+ }
+
+ return 0;
+
+remove_class:
+ class_destroy(pps_gen_class);
+ return err;
+}
+
+subsys_initcall(pps_gen_init);
+module_exit(pps_gen_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@enneenne.com>");
+MODULE_DESCRIPTION("LinuxPPS generators support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index d46eed159495..f5eeb4dd01ad 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -208,8 +208,7 @@ static void parport_attach(struct parport *port)
calibrate_port(&device);
- hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- device.timer.function = hrtimer_event;
+ hrtimer_setup(&device.timer, hrtimer_event, CLOCK_REALTIME, HRTIMER_MODE_ABS);
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
diff --git a/drivers/pps/generators/sysfs.c b/drivers/pps/generators/sysfs.c
new file mode 100644
index 000000000000..faf8b1c6d202
--- /dev/null
+++ b/drivers/pps/generators/sysfs.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS generators sysfs support
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/pps_gen_kernel.h>
+
+/*
+ * Attribute functions
+ */
+
+static ssize_t system_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", pps_gen->info.use_system_clock);
+}
+static DEVICE_ATTR_RO(system);
+
+static ssize_t time_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+ struct timespec64 time;
+ int ret;
+
+ ret = pps_gen->info.get_time(pps_gen, &time);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu %09lu\n", time.tv_sec, time.tv_nsec);
+}
+static DEVICE_ATTR_RO(time);
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+ bool status;
+ int ret;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ ret = pps_gen->info.enable(pps_gen, status);
+ if (ret)
+ return ret;
+ pps_gen->enabled = status;
+
+ return count;
+}
+static DEVICE_ATTR_WO(enable);
+
+static struct attribute *pps_gen_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_time.attr,
+ &dev_attr_system.attr,
+ NULL,
+};
+
+static const struct attribute_group pps_gen_group = {
+ .attrs = pps_gen_attrs,
+};
+
+const struct attribute_group *pps_gen_groups[] = {
+ &pps_gen_group,
+ NULL,
+};
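The same information is also reachable without ioctls; reading the time attribute, for instance (sysfs path assumed from the class and device names used above):

#include <stdio.h>

int main(void)
{
	unsigned long long sec;
	unsigned long nsec;
	FILE *f = fopen("/sys/class/pps-gen/pps-gen0/time", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu %lu", &sec, &nsec) == 2)
		printf("generator clock: %llu.%09lu\n", sec, nsec);
	fclose(f);
	return 0;
}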
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index d9d566f70ed1..92d1b62ea239 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -41,7 +41,7 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
static void pps_echo_client_default(struct pps_device *pps, int event,
void *data)
{
- dev_info(pps->dev, "echo %s %s\n",
+ dev_info(&pps->dev, "echo %s %s\n",
event & PPS_CAPTUREASSERT ? "assert" : "",
event & PPS_CAPTURECLEAR ? "clear" : "");
}
@@ -112,7 +112,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
goto kfree_pps;
}
- dev_info(pps->dev, "new PPS source %s\n", info->name);
+ dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
return pps;
@@ -166,7 +166,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
- dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
+ dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
timespec_to_pps_ktime(&ts_real, ts->ts_real);
@@ -188,7 +188,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* Save the time stamp */
pps->assert_tu = ts_real;
pps->assert_sequence++;
- dev_dbg(pps->dev, "capture assert seq #%u\n",
+ dev_dbg(&pps->dev, "capture assert seq #%u\n",
pps->assert_sequence);
captured = ~0;
@@ -202,7 +202,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* Save the time stamp */
pps->clear_tu = ts_real;
pps->clear_sequence++;
- dev_dbg(pps->dev, "capture clear seq #%u\n",
+ dev_dbg(&pps->dev, "capture clear seq #%u\n",
pps->clear_sequence);
captured = ~0;
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
index 50dc59af45be..fbd23295afd7 100644
--- a/drivers/pps/kc.c
+++ b/drivers/pps/kc.c
@@ -43,11 +43,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
pps_kc_hardpps_mode = 0;
pps_kc_hardpps_dev = NULL;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "unbound kernel"
+ dev_info(&pps->dev, "unbound kernel"
" consumer\n");
} else {
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_err(pps->dev, "selected kernel consumer"
+ dev_err(&pps->dev, "selected kernel consumer"
" is not bound\n");
return -EINVAL;
}
@@ -57,11 +57,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
pps_kc_hardpps_mode = bind_args->edge;
pps_kc_hardpps_dev = pps;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "bound kernel consumer: "
+ dev_info(&pps->dev, "bound kernel consumer: "
"edge=0x%x\n", bind_args->edge);
} else {
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_err(pps->dev, "another kernel consumer"
+ dev_err(&pps->dev, "another kernel consumer"
" is already bound\n");
return -EINVAL;
}
@@ -83,7 +83,7 @@ void pps_kc_remove(struct pps_device *pps)
pps_kc_hardpps_mode = 0;
pps_kc_hardpps_dev = NULL;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "unbound kernel consumer"
+ dev_info(&pps->dev, "unbound kernel consumer"
" on device removal\n");
} else
spin_unlock_irq(&pps_kc_hardpps_lock);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 25d47907db17..6a02245ea35f 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -25,7 +25,7 @@
* Local variables
*/
-static dev_t pps_devt;
+static int pps_major;
static struct class *pps_class;
static DEFINE_MUTEX(pps_idr_lock);
@@ -62,7 +62,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
else {
unsigned long ticks;
- dev_dbg(pps->dev, "timeout %lld.%09d\n",
+ dev_dbg(&pps->dev, "timeout %lld.%09d\n",
(long long) fdata->timeout.sec,
fdata->timeout.nsec);
ticks = fdata->timeout.sec * HZ;
@@ -80,7 +80,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
/* Check for pending signals */
if (err == -ERESTARTSYS) {
- dev_dbg(pps->dev, "pending signal caught\n");
+ dev_dbg(&pps->dev, "pending signal caught\n");
return -EINTR;
}
@@ -98,7 +98,7 @@ static long pps_cdev_ioctl(struct file *file,
switch (cmd) {
case PPS_GETPARAMS:
- dev_dbg(pps->dev, "PPS_GETPARAMS\n");
+ dev_dbg(&pps->dev, "PPS_GETPARAMS\n");
spin_lock_irq(&pps->lock);
@@ -114,7 +114,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_SETPARAMS:
- dev_dbg(pps->dev, "PPS_SETPARAMS\n");
+ dev_dbg(&pps->dev, "PPS_SETPARAMS\n");
/* Check the capabilities */
if (!capable(CAP_SYS_TIME))
@@ -124,14 +124,14 @@ static long pps_cdev_ioctl(struct file *file,
if (err)
return -EFAULT;
if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
- dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
+ dev_dbg(&pps->dev, "capture mode unspecified (%x)\n",
params.mode);
return -EINVAL;
}
/* Check for supported capabilities */
if ((params.mode & ~pps->info.mode) != 0) {
- dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
+ dev_dbg(&pps->dev, "unsupported capabilities (%x)\n",
params.mode);
return -EINVAL;
}
@@ -144,7 +144,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Restore the read only parameters */
if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
/* section 3.3 of RFC 2783 interpreted */
- dev_dbg(pps->dev, "time format unspecified (%x)\n",
+ dev_dbg(&pps->dev, "time format unspecified (%x)\n",
params.mode);
pps->params.mode |= PPS_TSFMT_TSPEC;
}
@@ -165,7 +165,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_GETCAP:
- dev_dbg(pps->dev, "PPS_GETCAP\n");
+ dev_dbg(&pps->dev, "PPS_GETCAP\n");
err = put_user(pps->info.mode, iuarg);
if (err)
@@ -176,7 +176,7 @@ static long pps_cdev_ioctl(struct file *file,
case PPS_FETCH: {
struct pps_fdata fdata;
- dev_dbg(pps->dev, "PPS_FETCH\n");
+ dev_dbg(&pps->dev, "PPS_FETCH\n");
err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
if (err)
@@ -206,7 +206,7 @@ static long pps_cdev_ioctl(struct file *file,
case PPS_KC_BIND: {
struct pps_bind_args bind_args;
- dev_dbg(pps->dev, "PPS_KC_BIND\n");
+ dev_dbg(&pps->dev, "PPS_KC_BIND\n");
/* Check the capabilities */
if (!capable(CAP_SYS_TIME))
@@ -218,7 +218,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Check for supported capabilities */
if ((bind_args.edge & ~pps->info.mode) != 0) {
- dev_err(pps->dev, "unsupported capabilities (%x)\n",
+ dev_err(&pps->dev, "unsupported capabilities (%x)\n",
bind_args.edge);
return -EINVAL;
}
@@ -227,7 +227,7 @@ static long pps_cdev_ioctl(struct file *file,
if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
bind_args.consumer != PPS_KC_HARDPPS) {
- dev_err(pps->dev, "invalid kernel consumer bind"
+ dev_err(&pps->dev, "invalid kernel consumer bind"
" parameters (%x)\n", bind_args.edge);
return -EINVAL;
}
@@ -259,7 +259,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
struct pps_fdata fdata;
int err;
- dev_dbg(pps->dev, "PPS_FETCH\n");
+ dev_dbg(&pps->dev, "PPS_FETCH\n");
err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
if (err)
@@ -296,20 +296,36 @@ static long pps_cdev_compat_ioctl(struct file *file,
#define pps_cdev_compat_ioctl NULL
#endif
+static struct pps_device *pps_idr_get(unsigned long id)
+{
+ struct pps_device *pps;
+
+ mutex_lock(&pps_idr_lock);
+ pps = idr_find(&pps_idr, id);
+ if (pps)
+ get_device(&pps->dev);
+
+ mutex_unlock(&pps_idr_lock);
+ return pps;
+}
+
static int pps_cdev_open(struct inode *inode, struct file *file)
{
- struct pps_device *pps = container_of(inode->i_cdev,
- struct pps_device, cdev);
+ struct pps_device *pps = pps_idr_get(iminor(inode));
+
+ if (!pps)
+ return -ENODEV;
+
file->private_data = pps;
- kobject_get(&pps->dev->kobj);
return 0;
}
static int pps_cdev_release(struct inode *inode, struct file *file)
{
- struct pps_device *pps = container_of(inode->i_cdev,
- struct pps_device, cdev);
- kobject_put(&pps->dev->kobj);
+ struct pps_device *pps = file->private_data;
+
+ WARN_ON(pps->id != iminor(inode));
+ put_device(&pps->dev);
return 0;
}
@@ -331,22 +347,13 @@ static void pps_device_destruct(struct device *dev)
{
struct pps_device *pps = dev_get_drvdata(dev);
- cdev_del(&pps->cdev);
-
- /* Now we can release the ID for re-use */
pr_debug("deallocating pps%d\n", pps->id);
- mutex_lock(&pps_idr_lock);
- idr_remove(&pps_idr, pps->id);
- mutex_unlock(&pps_idr_lock);
-
- kfree(dev);
kfree(pps);
}
int pps_register_cdev(struct pps_device *pps)
{
int err;
- dev_t devt;
mutex_lock(&pps_idr_lock);
/*
@@ -363,40 +370,29 @@ int pps_register_cdev(struct pps_device *pps)
goto out_unlock;
}
pps->id = err;
- mutex_unlock(&pps_idr_lock);
-
- devt = MKDEV(MAJOR(pps_devt), pps->id);
-
- cdev_init(&pps->cdev, &pps_cdev_fops);
- pps->cdev.owner = pps->info.owner;
- err = cdev_add(&pps->cdev, devt, 1);
- if (err) {
- pr_err("%s: failed to add char device %d:%d\n",
- pps->info.name, MAJOR(pps_devt), pps->id);
+ pps->dev.class = pps_class;
+ pps->dev.parent = pps->info.dev;
+ pps->dev.devt = MKDEV(pps_major, pps->id);
+ dev_set_drvdata(&pps->dev, pps);
+ dev_set_name(&pps->dev, "pps%d", pps->id);
+ err = device_register(&pps->dev);
+ if (err)
goto free_idr;
- }
- pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
- "pps%d", pps->id);
- if (IS_ERR(pps->dev)) {
- err = PTR_ERR(pps->dev);
- goto del_cdev;
- }
/* Override the release function with our own */
- pps->dev->release = pps_device_destruct;
+ pps->dev.release = pps_device_destruct;
- pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
- MAJOR(pps_devt), pps->id);
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
+ pps->id);
+ get_device(&pps->dev);
+ mutex_unlock(&pps_idr_lock);
return 0;
-del_cdev:
- cdev_del(&pps->cdev);
-
free_idr:
- mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
+ put_device(&pps->dev);
out_unlock:
mutex_unlock(&pps_idr_lock);
return err;
@@ -406,7 +402,13 @@ void pps_unregister_cdev(struct pps_device *pps)
{
pr_debug("unregistering pps%d\n", pps->id);
pps->lookup_cookie = NULL;
- device_destroy(pps_class, pps->dev->devt);
+ device_destroy(pps_class, pps->dev.devt);
+
+ /* Now we can release the ID for re-use */
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ put_device(&pps->dev);
+ mutex_unlock(&pps_idr_lock);
}
/*
@@ -426,6 +428,11 @@ void pps_unregister_cdev(struct pps_device *pps)
* so that it will not be used again, even if the pps device cannot
* be removed from the idr due to pending references holding the minor
* number in use.
+ *
+ * Since pps_idr holds a reference to the device, the returned
+ * pps_device is guaranteed to be valid until pps_unregister_cdev() is
+ * called on it. But after calling pps_unregister_cdev(), it may be
+ * freed at any time.
*/
struct pps_device *pps_lookup_dev(void const *cookie)
{
@@ -448,13 +455,11 @@ EXPORT_SYMBOL(pps_lookup_dev);
static void __exit pps_exit(void)
{
class_destroy(pps_class);
- unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
+ __unregister_chrdev(pps_major, 0, PPS_MAX_SOURCES, "pps");
}
static int __init pps_init(void)
{
- int err;
-
pps_class = class_create("pps");
if (IS_ERR(pps_class)) {
pr_err("failed to allocate class\n");
@@ -462,8 +467,9 @@ static int __init pps_init(void)
}
pps_class->dev_groups = pps_groups;
- err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
- if (err < 0) {
+ pps_major = __register_chrdev(0, 0, PPS_MAX_SOURCES, "pps",
+ &pps_cdev_fops);
+ if (pps_major < 0) {
pr_err("failed to allocate char device region\n");
goto remove_class;
}
@@ -476,8 +482,7 @@ static int __init pps_init(void)
remove_class:
class_destroy(pps_class);
-
- return err;
+ return pps_major;
}
subsys_initcall(pps_init);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index ea96a14d72d1..bf6468c56419 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
@@ -176,6 +177,9 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
struct timespec64 ts;
int enable, err = 0;
+ if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
+ arg = (unsigned long)compat_ptr(arg);
+
tsevq = pccontext->private_clkdata;
switch (cmd) {
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 77a36e7bddd5..35a5994bf64f 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -217,6 +217,11 @@ static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
return info->gettime64(info, ts);
}
+static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
static void ptp_aux_kworker(struct kthread_work *work)
{
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
@@ -294,9 +299,12 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->info->getcrosscycles = ptp->info->getcrosststamp;
}
+ if (!ptp->info->enable)
+ ptp->info->enable = ptp_enable;
+
if (ptp->info->do_aux_work) {
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
- ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+ ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
if (IS_ERR(ptp->kworker)) {
err = PTR_ERR(ptp->kworker);
pr_err("failed to create ptp aux_worker %d\n", err);
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5feecaadde8e..b651087f426f 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3692,7 +3692,7 @@ DEVICE_FREQ_GROUP(freq4, 3);
static ssize_t
disciplining_config_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3727,7 +3727,7 @@ out:
static ssize_t
disciplining_config_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3750,11 +3750,11 @@ disciplining_config_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
+static const BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
static ssize_t
temperature_table_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3789,7 +3789,7 @@ out:
static ssize_t
temperature_table_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3812,7 +3812,7 @@ temperature_table_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
+static const BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
@@ -3867,7 +3867,7 @@ static struct attribute *art_timecard_attrs[] = {
NULL,
};
-static struct bin_attribute *bin_art_timecard_attrs[] = {
+static const struct bin_attribute *const bin_art_timecard_attrs[] = {
&bin_attr_disciplining_config,
&bin_attr_temperature_table,
NULL,
@@ -3875,7 +3875,7 @@ static struct bin_attribute *bin_art_timecard_attrs[] = {
static const struct attribute_group art_timecard_group = {
.attrs = art_timecard_attrs,
- .bin_attrs = bin_art_timecard_attrs,
+ .bin_attrs_new = bin_art_timecard_attrs,
};
static const struct ocp_attr_group art_timecard_groups[] = {
@@ -4420,7 +4420,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
pps = pps_lookup_dev(bp->ptp);
if (pps)
- ptp_ocp_symlink(bp, pps->dev, "pps");
+ ptp_ocp_symlink(bp, &pps->dev, "pps");
ptp_ocp_debugfs_add_device(bp);
diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c
index 0a2cfc8ad3c5..b3a83b03d9c1 100644
--- a/drivers/ptp/ptp_vmclock.c
+++ b/drivers/ptp/ptp_vmclock.c
@@ -414,16 +414,16 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
}
static const struct file_operations vmclock_miscdev_fops = {
+ .owner = THIS_MODULE,
.mmap = vmclock_miscdev_mmap,
.read = vmclock_miscdev_read,
};
/* module operations */
-static void vmclock_remove(struct platform_device *pdev)
+static void vmclock_remove(void *data)
{
- struct device *dev = &pdev->dev;
- struct vmclock_state *st = dev_get_drvdata(dev);
+ struct vmclock_state *st = data;
if (st->ptp_clock)
ptp_clock_unregister(st->ptp_clock);
@@ -506,14 +506,13 @@ static int vmclock_probe(struct platform_device *pdev)
if (ret) {
dev_info(dev, "Failed to obtain physical address: %d\n", ret);
- goto out;
+ return ret;
}
if (resource_size(&st->res) < VMCLOCK_MIN_SIZE) {
dev_info(dev, "Region too small (0x%llx)\n",
resource_size(&st->res));
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
st->clk = devm_memremap(dev, st->res.start, resource_size(&st->res),
MEMREMAP_WB | MEMREMAP_DEC);
@@ -521,31 +520,34 @@ static int vmclock_probe(struct platform_device *pdev)
ret = PTR_ERR(st->clk);
dev_info(dev, "failed to map shared memory\n");
st->clk = NULL;
- goto out;
+ return ret;
}
if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC ||
le32_to_cpu(st->clk->size) > resource_size(&st->res) ||
le16_to_cpu(st->clk->version) != 1) {
dev_info(dev, "vmclock magic fields invalid\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = ida_alloc(&vmclock_ida, GFP_KERNEL);
if (ret < 0)
- goto out;
+ return ret;
st->index = ret;
ret = devm_add_action_or_reset(&pdev->dev, vmclock_put_idx, st);
if (ret)
- goto out;
+ return ret;
st->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "vmclock%d", st->index);
- if (!st->name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!st->name)
+ return -ENOMEM;
+
+ st->miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, st);
+ if (ret)
+ return ret;
/*
* If the structure is big enough, it can be mapped to userspace.
@@ -554,13 +556,12 @@ static int vmclock_probe(struct platform_device *pdev)
* cross that bridge if/when we come to it.
*/
if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) {
- st->miscdev.minor = MISC_DYNAMIC_MINOR;
st->miscdev.fops = &vmclock_miscdev_fops;
st->miscdev.name = st->name;
ret = misc_register(&st->miscdev);
if (ret)
- goto out;
+ return ret;
}
/* If there is valid clock information, register a PTP clock */
@@ -570,16 +571,14 @@ static int vmclock_probe(struct platform_device *pdev)
if (IS_ERR(st->ptp_clock)) {
ret = PTR_ERR(st->ptp_clock);
st->ptp_clock = NULL;
- vmclock_remove(pdev);
- goto out;
+ return ret;
}
}
if (!st->miscdev.minor && !st->ptp_clock) {
/* Neither miscdev nor PTP registered */
dev_info(dev, "vmclock: Neither miscdev nor PTP available; not registering\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
dev_info(dev, "%s: registered %s%s%s\n", st->name,
@@ -587,10 +586,7 @@ static int vmclock_probe(struct platform_device *pdev)
(st->miscdev.minor && st->ptp_clock) ? ", " : "",
st->ptp_clock ? "PTP" : "");
- dev_set_drvdata(dev, st);
-
- out:
- return ret;
+ return 0;
}
static const struct acpi_device_id vmclock_acpi_ids[] = {
@@ -601,7 +597,6 @@ MODULE_DEVICE_TABLE(acpi, vmclock_acpi_ids);
static struct platform_driver vmclock_platform_driver = {
.probe = vmclock_probe,
- .remove = vmclock_remove,
.driver = {
.name = "vmclock",
.acpi_match_table = vmclock_acpi_ids,
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 675b252d9c8c..ccd54c089bab 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -242,6 +242,9 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip))
+ return -EOPNOTSUPP;
+
if (!pwm_wf_valid(wf))
return -EINVAL;
@@ -294,6 +297,9 @@ int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip) || !ops->read_waveform)
+ return -EOPNOTSUPP;
+
guard(pwmchip)(chip);
if (!chip->operational)
@@ -320,6 +326,9 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip))
+ return -EOPNOTSUPP;
+
if (!pwm_wf_valid(wf))
return -EINVAL;
@@ -592,7 +601,7 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
state->usage_power == pwm->state.usage_power)
return 0;
- if (ops->write_waveform) {
+ if (pwmchip_supports_waveform(chip)) {
struct pwm_waveform wf;
char wfhw[WFHWSIZE];
@@ -746,7 +755,7 @@ int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
if (!chip->operational)
return -ENODEV;
- if (ops->read_waveform) {
+ if (pwmchip_supports_waveform(chip) && ops->read_waveform) {
char wfhw[WFHWSIZE];
struct pwm_waveform wf;
@@ -1276,7 +1285,7 @@ static int pwm_export_child(struct device *pwmchip_dev, struct pwm_device *pwm)
return 0;
}
-static int pwm_unexport_match(struct device *pwm_dev, void *data)
+static int pwm_unexport_match(struct device *pwm_dev, const void *data)
{
return pwm_from_dev(pwm_dev) == data;
}
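For consumers, the new guards mean waveform support can be probed by the return code alone; -EOPNOTSUPP now reliably indicates a chip without waveform callbacks. A sketch of the calling pattern:

	struct pwm_waveform wf = {
		.period_length_ns = 1000000,	/* 1 ms, example values */
		.duty_length_ns   = 250000,
	};
	int err = pwm_round_waveform_might_sleep(pwm, &wf);

	if (err == -EOPNOTSUPP) {
		/* no waveform callbacks: fall back to pwm_apply_*() */
	} else if (err < 0) {
		return err;
	}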
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index fb3eadf6fbc4..b6c16139ce4a 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -66,20 +66,16 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
pci_set_master(pci);
- ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret)
- return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
-
info = (const struct dwc_pwm_info *)id->driver_data;
ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
- /*
- * No need to check for pcim_iomap_table() failure,
- * pcim_iomap_regions() already does it for us.
- */
- ddata->io_base = pcim_iomap_table(pci)[0];
+ ddata->io_base = pcim_iomap_region(pci, 0, "pwm-dwc");
+ if (IS_ERR(ddata->io_base))
+ return dev_err_probe(dev, PTR_ERR(ddata->io_base),
+ "Failed to request / iomap PCI BAR\n");
+
ddata->info = info;
for (idx = 0; idx < ddata->info->nr; idx++) {
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index ddc2a4ca90fd..ae25d9321d75 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -18,6 +18,7 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct pwm_lpss_boardinfo *info;
+ void __iomem *io_base;
struct pwm_chip *chip;
int err;
@@ -25,12 +26,12 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
if (err < 0)
return err;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (err)
- return err;
+ io_base = pcim_iomap_region(pdev, 0, "pwm-lpss");
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- chip = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
+ chip = devm_pwm_lpss_probe(&pdev->dev, io_base, info);
if (IS_ERR(chip))
return PTR_ERR(chip);
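pcim_iomap_region() requests and maps a single BAR and hands back the mapping (or an ERR_PTR), replacing the pcim_iomap_regions()/pcim_iomap_table() pair and its table-index juggling. The conversion pattern, as a sketch:

	void __iomem *base;

	base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
	if (IS_ERR(base))
		return dev_err_probe(&pdev->dev, PTR_ERR(base),
				     "Failed to request / iomap BAR 0\n");
	/* base is device-managed: no explicit unmap needed on error paths */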
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index c1f2287b8e97..12821b4bbf97 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -327,7 +327,7 @@ static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *
* mchp_core_pwm_calc_period().
* The period is locked and we cannot change this, so we abort.
*/
- if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
+ if (hw_period_steps > MCHPCOREPWM_PERIOD_STEPS_MAX)
return -EINVAL;
prescale = hw_prescale;
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 989731256f50..5832dce8ed9d 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -167,8 +167,12 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
/* Keep PWM counter clock refcount in sync with PWM initial state */
- if (state->enabled)
- clk_enable(priv->clk);
+ if (state->enabled) {
+ int ret = clk_enable(priv->clk);
+
+ if (ret)
+ return ret;
+ }
regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
presc = FIELD_GET(STM32_LPTIM_PRESC, val);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 17e591f61efb..a59de4de18b6 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -858,8 +858,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
chip->ops = &stm32pwm_ops;
/* Initialize clock refcount to number of enabled PWM channels. */
- for (i = 0; i < num_enabled; i++)
- clk_enable(priv->clk);
+ for (i = 0; i < num_enabled; i++) {
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 27afbb9d544b..cbf531d0ba68 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
err = rio_add_net(net);
if (err) {
rmcd_debug(RDEV, "failed to register net, err=%d", err);
- kfree(net);
+ put_device(&net->dev);
+ mport->net = NULL;
goto cleanup;
}
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index fdcf742b2adb..c12941f71e2c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
dev_set_name(&net->dev, "rnet_%d", net->id);
net->dev.parent = &mport->dev;
net->dev.release = rio_scan_release_dev;
- rio_add_net(net);
+ if (rio_add_net(net)) {
+ put_device(&net->dev);
+ net = NULL;
+ }
}
return net;
diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig
index 551680073e43..6e03942cd7da 100644
--- a/drivers/ras/amd/atl/Kconfig
+++ b/drivers/ras/amd/atl/Kconfig
@@ -10,6 +10,7 @@
config AMD_ATL
tristate "AMD Address Translation Library"
depends on AMD_NB && X86_64 && RAS
+ depends on AMD_NODE
depends on MEMORY_FAILURE
default N
help
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 143d04c779a8..f9be26d25348 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -18,6 +18,7 @@
#include <linux/ras.h>
#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
#include "reg_fields.h"
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 9876cc05867e..3a9d772491a8 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -5,12 +5,7 @@
/*
* This version of the "BD86801 scalable PMIC"'s driver supports only very
* basic set of the PMIC features. Most notably, there is no support for
- * the ERRB interrupt and the configurations which should be done when the
- * PMIC is in STBY mode.
- *
- * Supporting the ERRB interrupt would require dropping the regmap-IRQ
- * usage or working around (or accepting a presense of) a naming conflict
- * in debugFS IRQs.
+ * the configurations which should be done when the PMIC is in STBY mode.
*
* Being able to reliably do the configurations like changing the
* regulator safety limits (like limits for the over/under -voltages, over
@@ -22,16 +17,14 @@
* be the need to configure these safety limits. Hence it's not simple to
* come up with a generic solution.
*
- * Users who require the ERRB handling and STBY state configurations can
- * have a look at the original RFC:
+ * Users who require the STBY state configurations can have a look at the
+ * original RFC:
* https://lore.kernel.org/all/cover.1712920132.git.mazziesaccount@gmail.com/
- * which implements a workaround to debugFS naming conflict and some of
- * the safety limit configurations - but leaves the state change handling
- * and synchronization to be implemented.
+ * which implements some of the safety limit configurations - but leaves the
+ * state change handling and synchronization to be implemented.
*
* It would be great to hear (and receive a patch!) if you implement the
- * STBY configuration support or a proper fix to the debugFS naming
- * conflict in your downstream driver ;)
+ * STBY configuration support in your downstream driver ;)
*/
#include <linux/cleanup.h>
@@ -728,6 +721,95 @@ static int initialize_pmic_data(struct device *dev,
return 0;
}
+static int bd96801_map_event_all(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int i;
+
+ for (i = 0; i < rid->num_states; i++) {
+ rid->states[i].notifs = REGULATOR_EVENT_FAIL;
+ rid->states[i].errors = REGULATOR_ERROR_FAIL;
+ *dev_mask |= BIT(i);
+ }
+
+ return 0;
+}
+
+static int bd96801_rdev_errb_irqs(struct platform_device *pdev,
+ struct regulator_dev *rdev)
+{
+ int i;
+ void *retp;
+ static const char * const single_out_errb_irqs[] = {
+ "bd96801-%s-pvin-err", "bd96801-%s-ovp-err",
+ "bd96801-%s-uvp-err", "bd96801-%s-shdn-err",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(single_out_errb_irqs); i++) {
+ struct regulator_irq_desc id = {
+ .map_event = bd96801_map_event_all,
+ .irq_off_ms = 1000,
+ };
+ struct regulator_dev *rdev_arr[1];
+ char tmp[255];
+ int irq;
+
+ snprintf(tmp, 255, single_out_errb_irqs[i], rdev->desc->name);
+ tmp[254] = 0;
+ id.name = tmp;
+
+ irq = platform_get_irq_byname(pdev, tmp);
+ if (irq < 0)
+ continue;
+
+ rdev_arr[0] = rdev;
+ retp = devm_regulator_irq_helper(&pdev->dev, &id, irq, 0,
+ REGULATOR_ERROR_FAIL, NULL,
+ rdev_arr, 1);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+
+ }
+ return 0;
+}
+
+static int bd96801_global_errb_irqs(struct platform_device *pdev,
+ struct regulator_dev **rdev, int num_rdev)
+{
+ int i, num_irqs;
+ void *retp;
+ static const char * const global_errb_irqs[] = {
+ "bd96801-otp-err", "bd96801-dbist-err", "bd96801-eep-err",
+ "bd96801-abist-err", "bd96801-prstb-err", "bd96801-drmoserr1",
+ "bd96801-drmoserr2", "bd96801-slave-err", "bd96801-vref-err",
+ "bd96801-tsd", "bd96801-uvlo-err", "bd96801-ovlo-err",
+ "bd96801-osc-err", "bd96801-pon-err", "bd96801-poff-err",
+ "bd96801-cmd-shdn-err", "bd96801-int-shdn-err"
+ };
+
+ num_irqs = ARRAY_SIZE(global_errb_irqs);
+ for (i = 0; i < num_irqs; i++) {
+ int irq;
+ struct regulator_irq_desc id = {
+ .name = global_errb_irqs[i],
+ .map_event = bd96801_map_event_all,
+ .irq_off_ms = 1000,
+ };
+
+ irq = platform_get_irq_byname(pdev, global_errb_irqs[i]);
+ if (irq < 0)
+ continue;
+
+ retp = devm_regulator_irq_helper(&pdev->dev, &id, irq, 0,
+ REGULATOR_ERROR_FAIL, NULL,
+ rdev, num_rdev);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+ }
+
+ return 0;
+}
+
static int bd96801_rdev_intb_irqs(struct platform_device *pdev,
struct bd96801_pmic_data *pdata,
struct bd96801_irqinfo *iinfo,
@@ -783,11 +865,10 @@ static int bd96801_rdev_intb_irqs(struct platform_device *pdev,
return 0;
}
-
-
static int bd96801_probe(struct platform_device *pdev)
{
struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
+ struct regulator_dev *all_rdevs[BD96801_NUM_REGULATORS];
struct bd96801_regulator_data *rdesc;
struct regulator_config config = {};
int ldo_errs_arr[BD96801_NUM_LDOS];
@@ -795,6 +876,7 @@ static int bd96801_probe(struct platform_device *pdev)
int temp_notif_ldos = 0;
struct device *parent;
int i, ret;
+ bool use_errb;
void *retp;
parent = pdev->dev.parent;
@@ -819,6 +901,13 @@ static int bd96801_probe(struct platform_device *pdev)
config.regmap = pdata->regmap;
config.dev = parent;
+ ret = of_property_match_string(pdev->dev.parent->of_node,
+ "interrupt-names", "errb");
+ if (ret < 0)
+ use_errb = false;
+ else
+ use_errb = true;
+
ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
BD96801_NUM_REGULATORS);
if (ret)
@@ -837,6 +926,7 @@ static int bd96801_probe(struct platform_device *pdev)
rdesc[i].desc.name);
return PTR_ERR(rdev);
}
+ all_rdevs[i] = rdev;
/*
* LDOs don't have own temperature monitoring. If temperature
* notification was requested for this LDO from DT then we will
@@ -856,6 +946,12 @@ static int bd96801_probe(struct platform_device *pdev)
if (ret)
return ret;
}
+ /* Register per regulator ERRB notifiers */
+ if (use_errb) {
+ ret = bd96801_rdev_errb_irqs(pdev, rdev);
+ if (ret)
+ return ret;
+ }
}
if (temp_notif_ldos) {
int irq;
@@ -877,6 +973,10 @@ static int bd96801_probe(struct platform_device *pdev)
return PTR_ERR(retp);
}
+ if (use_errb)
+ return bd96801_global_errb_irqs(pdev, all_rdevs,
+ ARRAY_SIZE(all_rdevs));
+
return 0;
}
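
For readers following the new ERRB wiring, here is a minimal sketch of the same pattern in isolation. It assumes only the devm_regulator_irq_helper() call signature and the map_event semantics visible in the hunk above; the IRQ name and function names are illustrative, not part of the driver:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

/* Mark every monitored regulator as failed when the IRQ fires. */
static int example_map_event(int irq, struct regulator_irq_data *rid,
			     unsigned long *dev_mask)
{
	int i;

	for (i = 0; i < rid->num_states; i++) {
		rid->states[i].notifs = REGULATOR_EVENT_FAIL;
		rid->states[i].errors = REGULATOR_ERROR_FAIL;
		*dev_mask |= BIT(i);
	}

	return 0;
}

static int example_register_errb(struct platform_device *pdev,
				 struct regulator_dev *rdev)
{
	struct regulator_irq_desc id = {
		.name = "example-errb",		/* illustrative IRQ name */
		.map_event = example_map_event,
		.irq_off_ms = 1000,
	};
	struct regulator_dev *rdev_arr[1] = { rdev };
	void *helper;
	int irq;

	irq = platform_get_irq_byname(pdev, id.name);
	if (irq < 0)
		return 0;	/* treat the IRQ as optional in this sketch */

	helper = devm_regulator_irq_helper(&pdev->dev, &id, irq, 0,
					   REGULATOR_ERROR_FAIL, NULL,
					   rdev_arr, 1);
	return PTR_ERR_OR_ZERO(helper);
}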
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 8cb948a91e60..00a7f3617cd8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -917,6 +917,26 @@ static ssize_t bypass_show(struct device *dev,
}
static DEVICE_ATTR_RO(bypass);
+static ssize_t power_budget_milliwatt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", rdev->constraints->pw_budget_mW);
+}
+static DEVICE_ATTR_RO(power_budget_milliwatt);
+
+static ssize_t power_requested_milliwatt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", rdev->pw_requested_mW);
+}
+static DEVICE_ATTR_RO(power_requested_milliwatt);
+
#define REGULATOR_ERROR_ATTR(name, bit) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
@@ -1149,6 +1169,10 @@ static void print_constraints_debug(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += scnprintf(buf + count, len - count, "standby ");
+ if (constraints->pw_budget_mW)
+ count += scnprintf(buf + count, len - count, "%d mW budget ",
+ constraints->pw_budget_mW);
+
if (!count)
count = scnprintf(buf, len, "no parameters");
else
@@ -1627,6 +1651,9 @@ static int set_machine_constraints(struct regulator_dev *rdev)
rdev->last_off = ktime_get();
}
+ if (!rdev->constraints->pw_budget_mW)
+ rdev->constraints->pw_budget_mW = INT_MAX;
+
print_constraints(rdev);
return 0;
}
@@ -1803,12 +1830,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
+static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
+ struct device *dev)
+{
+ int err = 0;
+
+ if (dev) {
+ regulator->dev = dev;
+
+ /* Add a link to the device sysfs entry */
+ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
+ regulator->supply_name);
+ if (err) {
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
+ /* non-fatal */
+ }
+ }
+
+ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
+
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
+}
+
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
- int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1841,38 +1905,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
- if (dev) {
- regulator->dev = dev;
-
- /* Add a link to the device sysfs entry */
- err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- supply_name);
- if (err) {
- rdev_dbg(rdev, "could not add device link %s: %pe\n",
- dev->kobj.name, ERR_PTR(err));
- /* non-fatal */
- }
- }
-
- if (err != -EEXIST) {
- regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs)) {
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
- regulator->debugfs = NULL;
- }
- }
-
- if (regulator->debugfs) {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
- }
-
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -1936,6 +1968,20 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
return dev ? dev_to_rdev(dev) : NULL;
}
+static struct regulator_dev *regulator_dt_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r = NULL;
+
+ if (dev_of_node(dev)) {
+ r = of_regulator_dev_lookup(dev, dev_of_node(dev), supply);
+ if (PTR_ERR(r) == -ENODEV)
+ r = NULL;
+ }
+
+ return r;
+}
+
/**
* regulator_dev_lookup - lookup a regulator device.
* @dev: device for regulator "consumer".
@@ -1960,16 +2006,9 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
regulator_supply_alias(&dev, &supply);
/* first do a dt based lookup */
- if (dev_of_node(dev)) {
- r = of_regulator_dev_lookup(dev, dev_of_node(dev), supply);
- if (!IS_ERR(r))
- return r;
- if (PTR_ERR(r) == -EPROBE_DEFER)
- return r;
-
- if (PTR_ERR(r) == -ENODEV)
- r = NULL;
- }
+ r = regulator_dt_lookup(dev, supply);
+ if (r)
+ return r;
/* if not found, try doing it non-dt way */
if (dev)
@@ -2015,7 +2054,17 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (rdev->supply)
return 0;
- r = regulator_dev_lookup(dev, rdev->supply_name);
+ /*
+ * First do a DT based lookup on the node described in the virtual
+ * device.
+ */
+ r = regulator_dt_lookup(&rdev->dev, rdev->supply_name);
+
+ /*
+ * If the regulator was not found, fall back to the usual search path
+ * in the parent device.
+ */
+ if (!r)
+ r = regulator_dev_lookup(dev, rdev->supply_name);
+
if (IS_ERR(r)) {
ret = PTR_ERR(r);
@@ -2025,6 +2074,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
@@ -2042,6 +2095,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
goto out;
}
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
}
@@ -2089,6 +2146,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
+ /* rdev->supply was created in set_supply() */
+ link_and_create_debugfs(rdev->supply, r, &rdev->dev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2167,8 +2227,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
+ if (!rdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
get_device(&rdev->dev);
break;
@@ -2227,6 +2289,8 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
return regulator;
}
+ link_and_create_debugfs(regulator, rdev, dev);
+
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
@@ -4585,6 +4649,87 @@ int regulator_get_current_limit(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_get_current_limit);
/**
+ * regulator_get_unclaimed_power_budget - get regulator unclaimed power budget
+ * @regulator: regulator source
+ *
+ * Return: Unclaimed power budget of the regulator in mW.
+ */
+int regulator_get_unclaimed_power_budget(struct regulator *regulator)
+{
+ return regulator->rdev->constraints->pw_budget_mW -
+ regulator->rdev->pw_requested_mW;
+}
+EXPORT_SYMBOL_GPL(regulator_get_unclaimed_power_budget);
+
+/**
+ * regulator_request_power_budget - request power budget on a regulator
+ * @regulator: regulator source
+ * @pw_req: Power requested, in mW
+ *
+ * Return: 0 on success or a negative error number on failure.
+ */
+int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret = 0, pw_tot_req;
+
+ regulator_lock(rdev);
+ if (rdev->supply) {
+ ret = regulator_request_power_budget(rdev->supply, pw_req);
+ if (ret < 0)
+ goto out;
+ }
+
+ pw_tot_req = rdev->pw_requested_mW + pw_req;
+ if (pw_tot_req > rdev->constraints->pw_budget_mW) {
+ rdev_warn(rdev, "power requested %d mW out of budget %d mW",
+ pw_req,
+ rdev->constraints->pw_budget_mW - rdev->pw_requested_mW);
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_OVER_CURRENT_WARN,
+ NULL);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ rdev->pw_requested_mW = pw_tot_req;
+out:
+ regulator_unlock(rdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_request_power_budget);
+
+/**
+ * regulator_free_power_budget - free power budget on a regulator
+ * @regulator: regulator source
+ * @pw: Power to be released, in mW.
+ */
+void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int pw_tot_req;
+
+ regulator_lock(rdev);
+ if (rdev->supply)
+ regulator_free_power_budget(rdev->supply, pw);
+
+ pw_tot_req = rdev->pw_requested_mW - pw;
+ if (pw_tot_req >= 0)
+ rdev->pw_requested_mW = pw_tot_req;
+ else
+ rdev_warn(rdev,
+ "too much power freed %d mW (already requested %d mW)",
+ pw, rdev->pw_requested_mW);
+
+ regulator_unlock(rdev);
+}
+EXPORT_SYMBOL_GPL(regulator_free_power_budget);
+
+/**
* regulator_set_mode - set regulator operating mode
* @regulator: regulator source
* @mode: operating mode - one of the REGULATOR_MODE constants
@@ -4908,7 +5053,7 @@ int _regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].supply, get_type);
if (IS_ERR(consumers[i].consumer)) {
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
- "Failed to get supply '%s'",
+ "Failed to get supply '%s'\n",
consumers[i].supply);
consumers[i].consumer = NULL;
goto err;
@@ -5222,6 +5367,8 @@ static struct attribute *regulator_dev_attrs[] = {
&dev_attr_suspend_standby_mode.attr,
&dev_attr_suspend_mem_mode.attr,
&dev_attr_suspend_disk_mode.attr,
+ &dev_attr_power_budget_milliwatt.attr,
+ &dev_attr_power_requested_milliwatt.attr,
NULL
};
@@ -5303,6 +5450,10 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
attr == &dev_attr_suspend_disk_mode.attr)
return ops->set_suspend_mode ? mode : 0;
+ if (attr == &dev_attr_power_budget_milliwatt.attr ||
+ attr == &dev_attr_power_requested_milliwatt.attr)
+ return rdev->constraints->pw_budget_mW != INT_MAX ? mode : 0;
+
return mode;
}
@@ -5643,43 +5794,36 @@ regulator_register(struct device *dev,
goto clean;
}
- if (config->init_data) {
- /*
- * Providing of_match means the framework is expected to parse
- * DT to get the init_data. This would conflict with provided
- * init_data, if set. Warn if it happens.
- */
- if (regulator_desc->of_match)
- dev_warn(dev, "Using provided init data - OF match ignored\n");
+ /*
+ * Device tree may supply init_data for this regulator. If it does,
+ * the init_data passed in via config is completely ignored.
+ */
+ init_data = regulator_of_get_init_data(dev, regulator_desc, config,
+ &rdev->dev.of_node);
+ /*
+ * Sometimes not all resources are probed already, so we need to take
+ * that into account. This happens most of the time when the ena_gpiod
+ * comes from a GPIO expander or something similar.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto clean;
+ }
+
+ /*
+ * We need to keep track of any GPIO descriptor coming from the
+ * device tree until we have handed it over to the core. If the
+ * config that was passed in to this function DOES NOT contain
+ * a descriptor, and the config after this call DOES contain
+ * a descriptor, we definitely got one from parsing the device
+ * tree.
+ */
+ if (!cfg->ena_gpiod && config->ena_gpiod)
+ dangling_of_gpiod = true;
+ if (!init_data) {
init_data = config->init_data;
rdev->dev.of_node = of_node_get(config->of_node);
-
- } else {
- init_data = regulator_of_get_init_data(dev, regulator_desc,
- config,
- &rdev->dev.of_node);
-
- /*
- * Sometimes not all resources are probed already so we need to
- * take that into account. This happens most the time if the
- * ena_gpiod comes from a gpio extender or something else.
- */
- if (PTR_ERR(init_data) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto clean;
- }
-
- /*
- * We need to keep track of any GPIO descriptor coming from the
- * device tree until we have handled it over to the core. If the
- * config that was passed in to this function DOES NOT contain a
- * descriptor, and the config after this call DOES contain a
- * descriptor, we definitely got one from parsing the device
- * tree.
- */
- if (!cfg->ena_gpiod && config->ena_gpiod)
- dangling_of_gpiod = true;
}
ww_mutex_init(&rdev->mutex, &regulator_ww_class);
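
The power budget additions above are easiest to read from the consumer side. A hedged usage sketch follows; it assumes only the three helpers exported in this hunk, and the "vdd" supply name plus the enclosing driver are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_enable_feature(struct device *dev)
{
	struct regulator *vdd;
	int ret;

	vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	dev_dbg(dev, "unclaimed budget: %d mW\n",
		regulator_get_unclaimed_power_budget(vdd));

	/* Claim 500 mW; fails with -ERANGE once the budget is exhausted. */
	ret = regulator_request_power_budget(vdd, 500);
	if (ret)
		return ret;

	/* ... power-hungry work ... */

	regulator_free_power_budget(vdd, 500);
	return 0;
}

Note that the request walks up through rdev->supply, so a claim against a child regulator also reserves budget on each of its parents.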
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 5b9b9e4e762d..9f59889129ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index e5b4b93c07e3..011088c57891 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -125,6 +125,9 @@ static int of_get_regulation_constraints(struct device *dev,
if (constraints->min_uA != constraints->max_uA)
constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+ if (!of_property_read_u32(np, "regulator-power-budget-milliwatt", &pval))
+ constraints->pw_budget_mW = pval;
+
constraints->boot_on = of_property_read_bool(np, "regulator-boot-on");
constraints->always_on = of_property_read_bool(np, "regulator-always-on");
if (!constraints->always_on) /* status change should be possible. */
@@ -446,7 +449,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
"failed to parse DT for regulator %pOFn\n",
child);
of_node_put(child);
- return -EINVAL;
+ goto err_put;
}
match->of_node = of_node_get(child);
count++;
@@ -455,6 +458,18 @@ int of_regulator_match(struct device *dev, struct device_node *node,
}
return count;
+
+err_put:
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+
+ match->init_data = NULL;
+ if (match->of_node) {
+ of_node_put(match->of_node);
+ match->of_node = NULL;
+ }
+ }
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_regulator_match);
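
For context, the err_put unwind changes the contract of of_regulator_match(): on failure the helper now drops its own of_node references and clears init_data, so callers need no cleanup of their own. A minimal caller sketch, with illustrative names and assuming only the signature used elsewhere in this diff:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>

static struct of_regulator_match example_matches[] = {
	{ .name = "ldo1" },
	{ .name = "ldo2" },
};

static int example_parse(struct device *dev)
{
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(dev->of_node, "regulators");
	if (!np)
		return -ENODEV;

	ret = of_regulator_match(dev, np, example_matches,
				 ARRAY_SIZE(example_matches));
	of_node_put(np);

	/* No unwind needed: the helper cleans up after itself on failure. */
	return ret < 0 ? ret : 0;
}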
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 9714afe347dc..faa6b79c27d7 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -247,6 +247,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -272,6 +273,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ_STBYREQ,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -301,6 +303,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK3OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK3CTRL,
.enable_mask = BUCK3_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.ramp_reg = PCA9450_REG_BUCK3CTRL,
.ramp_mask = BUCK3_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -330,6 +333,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -348,6 +352,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -366,6 +371,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -481,6 +487,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.ramp_reg = PCA9450_REG_BUCK1CTRL,
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -510,6 +517,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ_STBYREQ,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -539,6 +547,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -557,6 +566,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -575,6 +585,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
.owner = THIS_MODULE,
},
},
@@ -806,6 +817,24 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = {
},
{
.desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO3,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO3CTRL,
+ .vsel_mask = LDO3OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO3CTRL,
+ .enable_mask = LDO3_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
.name = "ldo4",
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
@@ -905,6 +934,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators);
break;
case PCA9450_TYPE_PCA9451A:
+ case PCA9450_TYPE_PCA9452:
regulator_desc = pca9451a_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators);
break;
@@ -921,25 +951,21 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->regmap = devm_regmap_init_i2c(i2c,
&pca9450_regmap_config);
- if (IS_ERR(pca9450->regmap)) {
- dev_err(&i2c->dev, "regmap initialization failed\n");
- return PTR_ERR(pca9450->regmap);
- }
+ if (IS_ERR(pca9450->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->regmap),
+ "regmap initialization failed\n");
ret = regmap_read(pca9450->regmap, PCA9450_REG_DEV_ID, &device_id);
- if (ret) {
- dev_err(&i2c->dev, "Read device id error\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Read device id error\n");
/* Check that your board and DT match the right PMIC */
if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) ||
((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC) ||
- ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9451A)) {
- dev_err(&i2c->dev, "Device id(%x) mismatched\n",
- device_id >> 4);
- return -EINVAL;
- }
+ ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9451A) ||
+ ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9452))
+ return dev_err_probe(&i2c->dev, -EINVAL,
+ "Device id(%x) mismatched\n", device_id >> 4);
for (i = 0; i < pca9450->rcnt; i++) {
const struct regulator_desc *desc;
@@ -949,17 +975,16 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
r = &regulator_desc[i];
desc = &r->desc;
+ if (type == PCA9450_TYPE_PCA9451A && !strcmp(desc->name, "ldo3"))
+ continue;
+
config.regmap = pca9450->regmap;
config.dev = pca9450->dev;
rdev = devm_regulator_register(pca9450->dev, desc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(pca9450->dev,
- "Failed to register regulator(%s): %d\n",
- desc->name, ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(pca9450->dev, PTR_ERR(rdev),
+ "Failed to register regulator(%s)\n", desc->name);
}
if (pca9450->irq) {
@@ -967,29 +992,24 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450_irq_handler,
(IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
"pca9450-irq", pca9450);
- if (ret != 0) {
- dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
- pca9450->irq);
- return ret;
- }
+ if (ret != 0)
+ return dev_err_probe(pca9450->dev, ret, "Failed to request IRQ: %d\n",
+ pca9450->irq);
+
/* Unmask all interrupts except PWRON/WDOG/RSVD */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
IRQ_THERM_105 | IRQ_THERM_125,
IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
- if (ret) {
- dev_err(&i2c->dev, "Unmask irq error\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
}
/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
BUCK123_PRESET_EN);
- if (ret) {
- dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to clear PRESET_EN bit\n");
if (of_property_read_bool(i2c->dev.of_node, "nxp,wdog_b-warm-reset"))
reset_ctrl = WDOG_B_CFG_WARM;
@@ -999,20 +1019,16 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
/* Set reset behavior on assertion of WDOG_B signal */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
WDOG_B_CFG_MASK, reset_ctrl);
- if (ret) {
- dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to set WDOG_B reset behavior\n");
if (of_property_read_bool(i2c->dev.of_node, "nxp,i2c-lt-enable")) {
/* Enable I2C Level Translator */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_CONFIG2,
I2C_LT_MASK, I2C_LT_ON_STANDBY_RUN);
- if (ret) {
- dev_err(&i2c->dev,
- "Failed to enable I2C level translator\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret,
+ "Failed to enable I2C level translator\n");
}
/*
@@ -1022,10 +1038,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
*/
pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
- if (IS_ERR(pca9450->sd_vsel_gpio)) {
- dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
- return PTR_ERR(pca9450->sd_vsel_gpio);
- }
+ if (IS_ERR(pca9450->sd_vsel_gpio))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->sd_vsel_gpio),
+ "Failed to get SD_VSEL GPIO\n");
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
@@ -1051,6 +1066,10 @@ static const struct of_device_id pca9450_of_match[] = {
.compatible = "nxp,pca9451a",
.data = (void *)PCA9450_TYPE_PCA9451A,
},
+ {
+ .compatible = "nxp,pca9452",
+ .data = (void *)PCA9450_TYPE_PCA9452,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, pca9450_of_match);
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 5925fa7a9a06..9cde7181b0f0 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -27,6 +27,11 @@
#define RTQ2208_REG_LDO1_CFG 0xB1
#define RTQ2208_REG_LDO2_CFG 0xC1
#define RTQ2208_REG_LDO_DVS_CTRL 0xD0
+#define RTQ2208_REG_HIDDEN_BUCKPH 0x55
+#define RTQ2208_REG_HIDDEN_LDOCFG0 0x8F
+#define RTQ2208_REG_HIDDEN_LDOCFG1 0x96
+#define RTQ2208_REG_HIDDEN0 0xFE
+#define RTQ2208_REG_HIDDEN1 0xFF
/* Mask */
#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
@@ -45,6 +50,11 @@
#define RTQ2208_LDO1_VOSEL_SD_MASK BIT(5)
#define RTQ2208_LDO2_DISCHG_EN_MASK BIT(6)
#define RTQ2208_LDO2_VOSEL_SD_MASK BIT(7)
+#define RTQ2208_MASK_BUCKPH_GROUP1 GENMASK(6, 4)
+#define RTQ2208_MASK_BUCKPH_GROUP2 GENMASK(2, 0)
+#define RTQ2208_MASK_LDO2_OPT0 BIT(7)
+#define RTQ2208_MASK_LDO2_OPT1 BIT(6)
+#define RTQ2208_MASK_LDO1_FIXED BIT(6)
/* Size */
#define RTQ2208_VOUT_MAXNUM 256
@@ -245,11 +255,6 @@ static const unsigned int rtq2208_ldo_volt_table[] = {
3300000,
};
-static struct of_regulator_match rtq2208_ldo_match[] = {
- {.name = "ldo2", },
- {.name = "ldo1", },
-};
-
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -344,59 +349,6 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
-{
- struct device_node *np;
- struct of_regulator_match *match;
- struct regulator_desc *desc;
- struct regulator_init_data *init_data;
- u32 fixed_uV;
- int ret, i;
-
- if (!dev->of_node)
- return -ENODEV;
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np)
- np = dev->of_node;
-
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
-
- of_node_put(np);
-
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
- match = rtq2208_ldo_match + i;
- init_data = match->init_data;
- desc = (struct regulator_desc *)match->desc;
-
- if (!init_data || !desc)
- continue;
-
- /* specify working fixed voltage if the propery exists */
- ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
-
- if (!ret) {
- if (fixed_uV != init_data->constraints.min_uV ||
- fixed_uV != init_data->constraints.max_uV)
- return -EINVAL;
- desc->n_voltages = 1;
- desc->fixed_uV = fixed_uV;
- desc->fixed_uV = init_data->constraints.min_uV;
- desc->ops = &rtq2208_regulator_ldo_fix_ops;
- } else {
- desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
- desc->volt_table = rtq2208_ldo_volt_table;
- desc->ops = &rtq2208_regulator_ldo_adj_ops;
- }
- }
-
- return 0;
-}
-
-
#define BUCK_INFO(_name, _id) \
{ \
.name = _name, \
@@ -424,9 +376,11 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
struct regulator_desc *desc;
+ unsigned int fixed_uV;
static const struct {
char *name;
int base;
@@ -462,7 +416,8 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
- if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ switch (idx) {
+ case RTQ2208_BUCK_B ... RTQ2208_BUCK_E:
/* init buck desc */
desc->ops = &rtq2208_regulator_buck_ops;
desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
@@ -480,7 +435,19 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
- } else {
+ break;
+ default:
+ fixed_uV = idx == RTQ2208_LDO2 ? ldo2_fixed : ldo1_fixed;
+ if (fixed_uV) {
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
+
/* init ldo desc */
desc->active_discharge_reg = RTQ2208_REG_LDO_DVS_CTRL;
desc->active_discharge_on = curr_info->dis_on;
@@ -490,13 +457,15 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = curr_info->base;
rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ break;
}
}
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
- struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
- int mtp_sel, i, idx, ret;
+ int mtp_sel, i, idx;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -508,43 +477,101 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
-
- /* init ldo dvs ability */
- if (idx >= RTQ2208_LDO2)
- rtq2208_ldo_match[idx - RTQ2208_LDO2].desc = &rdesc[i]->desc;
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, ldo1_fixed, ldo2_fixed);
}
- /* init ldo fixed_uV */
- ret = rtq2208_of_get_ldo_dvs_ability(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
-
return 0;
}
-/** different slave address corresponds different used bucks
- * slave address 0x10: BUCK[BCA FGE]
- * slave address 0x20: BUCK[BC FGHE]
- * slave address 0x40: BUCK[C G]
- */
-static int rtq2208_regulator_check(int slave_addr, int *num,
- int *regulator_idx_table, unsigned int *buck_masks)
+static int rtq2208_regulator_check(struct device *dev, int *num, int *regulator_idx_table,
+ unsigned int *buck_masks, unsigned int *ldo1_fixed_uV,
+ unsigned int *ldo2_fixed_uV)
{
- static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
- /* BUCK[BCA FGE], LDO[12] */
- {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
- /* BUCK[BC FGHE], LDO[12]*/
- {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
- /* BUCK[C G], LDO[12] */
- {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
- };
- int i, idx = ffs(slave_addr >> 4) - 1;
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ bool rtq2208_used_table[RTQ2208_LDO_MAX] = {0};
+ u8 entry_key[] = { 0x69, 0x01 };
+ unsigned int buck_phase, ldo_cfg0, ldo_cfg1;
+ int i, ret;
u8 mask;
+ ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key, ARRAY_SIZE(entry_key));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enter hidden page\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_BUCKPH, &buck_phase);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read buck phase configuration\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG0, &ldo_cfg0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg0\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG1, &ldo_cfg1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg1\n");
+
+ ret = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to exit hidden page\n");
+
+ dev_info(dev, "BUCK Phase 0x%x\n", buck_phase);
+ /*
+ * Use buck phase configuration to assign used table mask
+ * GROUP1 GROUP2
+ * 0 -> 2P + 2P BC FG
+ * 1 -> 2P + 1P + 1P BCA FGE
+ * 2 -> 1P + 1P + 1P + 1P BCDA FGHE
+ * 3 -> 3P + 1P BC FG
+ * others -> 4P C G
+ */
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_D] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_A] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_B] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_C] = true;
+ break;
+ }
+
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_F] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_E] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_H] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_G] = true;
+ break;
+ }
+
+ *ldo1_fixed_uV = FIELD_GET(RTQ2208_MASK_LDO1_FIXED, ldo_cfg1) ? 1200000 : 0;
+
+ if (!FIELD_GET(RTQ2208_MASK_LDO2_OPT0, ldo_cfg0) &&
+ !FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 0;
+ else if (FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 900000;
+ else
+ *ldo2_fixed_uV = 1200000;
+
+ /* By default, LDO1 & LDO2 are always used */
+ rtq2208_used_table[RTQ2208_LDO1] = rtq2208_used_table[RTQ2208_LDO2] = true;
+
for (i = 0; i < RTQ2208_LDO_MAX; i++) {
- if (!rtq2208_used_table[idx][i])
+ if (!rtq2208_used_table[i])
continue;
regulator_idx_table[(*num)++] = i;
@@ -559,7 +586,7 @@ static int rtq2208_regulator_check(int slave_addr, int *num,
static const struct regmap_config rtq2208_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0xEF,
+ .max_register = 0xFF,
};
static int rtq2208_probe(struct i2c_client *i2c)
@@ -573,6 +600,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+ unsigned int ldo1_fixed_uV, ldo2_fixed_uV;
rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
if (!rdev_map)
@@ -583,7 +611,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
/* get needed regulator */
- ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ ret = rtq2208_regulator_check(dev, &n_regulator, regulator_idx_table, buck_masks,
+ &ldo1_fixed_uV, &ldo2_fixed_uV);
if (ret)
return dev_err_probe(dev, ret, "Failed to check used regulators\n");
@@ -593,7 +622,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
cfg.dev = dev;
/* init regulator desc */
- ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev,
+ ldo1_fixed_uV, ldo2_fixed_uV);
if (ret)
return ret;
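
To make the hidden-register decode above concrete, here is a worked example as a sketch; the 0x12 readout is an assumed sample value, not taken from hardware:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

static void rtq2208_phase_decode_example(void)
{
	unsigned int buck_phase = 0x12;	/* assumed sample readout */

	/*
	 * GROUP1 = FIELD_GET(GENMASK(6, 4), 0x12) = 1, i.e. "2P + 1P + 1P":
	 * the fallthrough chain marks bucks A, B and C as used.
	 * GROUP2 = FIELD_GET(GENMASK(2, 0), 0x12) = 2, i.e. "1P x 4":
	 * bucks E, F, G and H are marked as used.
	 */
	pr_info("group1=%u group2=%u\n",
		(unsigned int)FIELD_GET(GENMASK(6, 4), buck_phase),
		(unsigned int)FIELD_GET(GENMASK(2, 0), buck_phase));
}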
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index 97f5ce138548..c0f5f0a186a3 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -44,10 +44,35 @@ static const unsigned int tps6287x_voltage_range_sel[] = {
0x0, 0x1, 0x2, 0x3
};
+static const unsigned int tps6287x_voltage_range_prefix[] = {
+ 0x000, 0x100, 0x200, 0x300
+};
+
static const unsigned int tps6287x_ramp_table[] = {
10000, 5000, 1250, 500
};
+struct tps6287x_reg_data {
+ int range;
+};
+
+static int tps6287x_best_range(struct regulator_config *config, const struct regulator_desc *desc)
+{
+ const struct linear_range *r;
+ int i;
+
+ if (!config->init_data || !config->init_data->constraints.apply_uV)
+ return -1;
+
+ for (i = 0; i < desc->n_linear_ranges; i++) {
+ r = &desc->linear_ranges[i];
+ if (r->min <= config->init_data->constraints.min_uV &&
+ config->init_data->constraints.max_uV <= linear_range_get_max_value(r))
+ return i;
+ }
+ return -1;
+}
+
static int tps6287x_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
unsigned int val;
@@ -91,6 +116,28 @@ static unsigned int tps6287x_of_map_mode(unsigned int mode)
}
}
+static int tps6287x_map_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct tps6287x_reg_data *data = (struct tps6287x_reg_data *)rdev->reg_data;
+ struct linear_range selected_range;
+ int selector, voltage;
+
+ if (!data || data->range == -1)
+ return regulator_map_voltage_pickable_linear_range(rdev, min_uV, max_uV);
+
+ selected_range = rdev->desc->linear_ranges[data->range];
+ selector = DIV_ROUND_UP(min_uV - selected_range.min, selected_range.step);
+ if (selector < selected_range.min_sel || selector > selected_range.max_sel)
+ return -EINVAL;
+
+ selector |= tps6287x_voltage_range_prefix[data->range];
+ voltage = rdev->desc->ops->list_voltage(rdev, selector);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return selector;
+}
+
static const struct regulator_ops tps6287x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -100,6 +147,7 @@ static const struct regulator_ops tps6287x_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
.set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
.list_voltage = regulator_list_voltage_pickable_linear_range,
+ .map_voltage = tps6287x_map_voltage,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
@@ -130,8 +178,14 @@ static int tps6287x_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_config config = {};
+ struct tps6287x_reg_data *reg_data;
struct regulator_dev *rdev;
+ reg_data = devm_kzalloc(dev, sizeof(*reg_data), GFP_KERNEL);
+ if (!reg_data)
+ return -ENOMEM;
+
config.regmap = devm_regmap_init_i2c(i2c, &tps6287x_regmap_config);
if (IS_ERR(config.regmap)) {
dev_err(dev, "Failed to init i2c\n");
@@ -143,12 +197,15 @@ static int tps6287x_i2c_probe(struct i2c_client *i2c)
config.init_data = of_get_regulator_init_data(dev, dev->of_node,
&tps6287x_reg);
+ reg_data->range = tps6287x_best_range(&config, &tps6287x_reg);
+
rdev = devm_regulator_register(dev, &tps6287x_reg, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "Failed to register regulator\n");
return PTR_ERR(rdev);
}
+ rdev->reg_data = (void *)reg_data;
dev_dbg(dev, "Probed regulator\n");
return 0;
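
To illustrate the selector arithmetic in tps6287x_map_voltage(), a small sketch with assumed range values; the real linear ranges live in tps6287x_reg, which is outside this hunk:

#include <linux/math.h>

static unsigned int tps6287x_example_selector(void)
{
	const unsigned int range_min_uV = 400000;	/* assumed */
	const unsigned int range_step_uV = 1250;	/* assumed */
	const unsigned int range_prefix = 0x100;	/* tps6287x_voltage_range_prefix[1] */
	unsigned int min_uV = 550000;

	/* DIV_ROUND_UP(150000, 1250) = 120 = 0x78, so the result is 0x178. */
	return DIV_ROUND_UP(min_uV - range_min_uV, range_step_uV) | range_prefix;
}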
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index b4065356392f..aa65077f9d41 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -287,21 +287,6 @@ static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int tps65219_get_rdev_by_name(const char *regulator_name,
- struct regulator_dev *rdevtbl[7],
- struct regulator_dev **dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- if (strcmp(regulator_name, regulators[i].name) == 0) {
- *dev = rdevtbl[i];
- return 0;
- }
- }
- return -EINVAL;
-}
-
static int tps65219_regulator_probe(struct platform_device *pdev)
{
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
@@ -312,23 +297,18 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
int irq;
struct tps65219_regulator_irq_data *irq_data;
struct tps65219_regulator_irq_type *irq_type;
- struct regulator_dev *rdevtbl[7];
config.dev = tps->dev;
config.driver_data = tps;
config.regmap = tps->regmap;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- dev_dbg(tps->dev, "%s regul i= %d START", __func__, i);
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "failed to register %s regulator\n",
- regulators[i].name);
- return PTR_ERR(rdev);
- }
- rdevtbl[i] = rdev;
- dev_dbg(tps->dev, "%s regul i= %d COMPLETED", __func__, i);
+ if (IS_ERR(rdev))
+ return dev_err_probe(tps->dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ regulators[i].name);
}
irq_data = devm_kmalloc(tps->dev,
@@ -348,14 +328,6 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_data[i].dev = tps->dev;
irq_data[i].type = irq_type;
- tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, &rdev);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "Failed to get rdev for %s\n",
- irq_type->regulator_name);
- return -EINVAL;
- }
- irq_data[i].rdev = rdev;
-
error = devm_request_threaded_irq(tps->dev, irq, NULL,
tps65219_regulator_irq_handler,
IRQF_ONESHOT,
@@ -379,7 +351,7 @@ MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
static struct platform_driver tps65219_regulator_driver = {
.driver = {
- .name = "tps65219-pmic",
+ .name = "tps65219-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tps65219_regulator_probe,
@@ -390,5 +362,4 @@ module_platform_driver(tps65219_regulator_driver);
MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
-MODULE_ALIAS("platform:tps65219-pmic");
MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 6e54093d1732..7b41b4547fa8 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -335,25 +335,16 @@ static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- int ret;
if (!of_property_read_bool(np, "ti,syscon-dev")) {
dev_err(dev, "ti,syscon-dev property is absent\n");
return -EINVAL;
}
- ksproc->dev_ctrl =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
- if (IS_ERR(ksproc->dev_ctrl)) {
- ret = PTR_ERR(ksproc->dev_ctrl);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &ksproc->boot_offset)) {
- dev_err(dev, "couldn't read the boot register offset\n");
- return -EINVAL;
- }
+ ksproc->dev_ctrl = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &ksproc->boot_offset);
+ if (IS_ERR(ksproc->dev_ctrl))
+ return PTR_ERR(ksproc->dev_ctrl);
return 0;
}
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 0f4a7065d0bd..8206a1766481 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -1326,6 +1326,11 @@ static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_clus
return ret;
}
+static const struct of_device_id scp_core_match[] = {
+ { .compatible = "mediatek,scp-core" },
+ {}
+};
+
static int scp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1357,13 +1362,15 @@ static int scp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
mutex_init(&scp_cluster->cluster_lock);
- ret = devm_of_platform_populate(dev);
+ ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
ret = scp_cluster_init(pdev, scp_cluster);
- if (ret)
+ if (ret) {
+ of_platform_depopulate(dev);
return ret;
+ }
return 0;
}
@@ -1379,6 +1386,7 @@ static void scp_remove(struct platform_device *pdev)
rproc_del(scp->rproc);
scp_free(scp);
}
+ of_platform_depopulate(&pdev->dev);
mutex_destroy(&scp_cluster->cluster_lock);
}
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 9ae2e831456d..5f463937cbbf 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -37,6 +37,10 @@
#include <linux/platform_data/dmtimer-omap.h>
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
+
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
@@ -1133,7 +1137,6 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc = rproc->priv;
const struct omap_rproc_dev_data *data;
- int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
@@ -1149,10 +1152,8 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
oproc->boot_data->syscon =
syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
- if (IS_ERR(oproc->boot_data->syscon)) {
- ret = PTR_ERR(oproc->boot_data->syscon);
- return ret;
- }
+ if (IS_ERR(oproc->boot_data->syscon))
+ return PTR_ERR(oproc->boot_data->syscon);
if (of_property_read_u32_index(np, "ti,bootreg", 1,
&oproc->boot_data->boot_reg)) {
@@ -1323,6 +1324,19 @@ static int omap_rproc_probe(struct platform_device *pdev)
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ /*
+ * Throw away the ARM DMA mapping that we'll never use, so it doesn't
+ * interfere with the core rproc->domain and we get the right DMA ops.
+ */
+ if (pdev->dev.archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
+
+ arm_iommu_detach_device(&pdev->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
ret = omap_rproc_of_get_internal_memories(pdev, rproc);
if (ret)
return ret;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index eb66f78ec8b7..c2cf0d277729 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2486,6 +2486,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->dev.driver_data = rproc;
idr_init(&rproc->notifyids);
+ /* Assign a unique device index and name */
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+ if (rproc->index < 0) {
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+ goto put_device;
+ }
+
rproc->name = kstrdup_const(name, GFP_KERNEL);
if (!rproc->name)
goto put_device;
@@ -2496,13 +2503,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
if (rproc_alloc_ops(rproc, ops))
goto put_device;
- /* Assign a unique device index and name */
- rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
- if (rproc->index < 0) {
- dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
- goto put_device;
- }
-
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 5df99bae7131..e6566a9839dc 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -290,26 +290,23 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
if (ddata->config->sw_reset) {
ddata->sw_reset = devm_reset_control_get_exclusive(dev,
"sw_reset");
- if (IS_ERR(ddata->sw_reset)) {
- dev_err(dev, "Failed to get S/W Reset\n");
- return PTR_ERR(ddata->sw_reset);
- }
+ if (IS_ERR(ddata->sw_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->sw_reset),
+ "Failed to get S/W Reset\n");
}
if (ddata->config->pwr_reset) {
ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
"pwr_reset");
- if (IS_ERR(ddata->pwr_reset)) {
- dev_err(dev, "Failed to get Power Reset\n");
- return PTR_ERR(ddata->pwr_reset);
- }
+ if (IS_ERR(ddata->pwr_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->pwr_reset),
+ "Failed to get Power Reset\n");
}
ddata->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddata->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddata->clk);
- }
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(dev, PTR_ERR(ddata->clk),
+ "Failed to get clock\n");
err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
if (err) {
@@ -317,18 +314,11 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
return err;
}
- ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(ddata->boot_base)) {
- dev_err(dev, "Boot base not found\n");
- return PTR_ERR(ddata->boot_base);
- }
-
- err = of_property_read_u32_index(np, "st,syscfg", 1,
- &ddata->boot_offset);
- if (err) {
- dev_err(dev, "Boot offset not found\n");
- return -EINVAL;
- }
+ ddata->boot_base = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg",
+ 1, &ddata->boot_offset);
+ if (IS_ERR(ddata->boot_base))
+ return dev_err_probe(dev, PTR_ERR(ddata->boot_base),
+ "Boot base not found\n");
err = clk_prepare(ddata->clk);
if (err)
@@ -395,32 +385,32 @@ static int st_rproc_probe(struct platform_device *pdev)
*/
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_clk;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 6560b7954027..dbc513c5569c 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -955,6 +955,13 @@ out:
return ret;
}
+static void k3_r5_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
struct device *dev = kproc->dev;
@@ -985,27 +992,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
+ if (ret)
+ return ret;
+
num_rmems--;
- kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem) {
- ret = -ENOMEM;
- goto release_rmem;
- }
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem_np)
+ return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
of_node_put(rmem_np);
- if (!rmem) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem)
+ return -EINVAL;
kproc->rmem[i].bus_addr = rmem->base;
/*
@@ -1020,12 +1025,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
*/
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
- ret = -ENOMEM;
- goto unmap_rmem;
+ return -ENOMEM;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
@@ -1036,25 +1040,6 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
kproc->num_rmems = num_rmems;
return 0;
-
-unmap_rmem:
- for (i--; i >= 0; i--)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-release_rmem:
- of_reserved_mem_device_release(dev);
- return ret;
-}
-
-static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
-{
- int i;
-
- for (i = 0; i < kproc->num_rmems; i++)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-
- of_reserved_mem_device_release(kproc->dev);
}
/*
@@ -1281,10 +1266,10 @@ init_rmem:
goto out;
}
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
- dev_err(dev, "rproc_add failed, ret = %d\n", ret);
- goto err_add;
+ dev_err_probe(dev, ret, "rproc_add failed\n");
+ goto out;
}
/* create only one rproc in lockstep, single-cpu or
@@ -1312,7 +1297,7 @@ init_rmem:
dev_err(dev,
"Timed out waiting for %s core to power up!\n",
rproc->name);
- goto err_powerup;
+ goto out;
}
}
@@ -1328,10 +1313,6 @@ err_split:
}
}
-err_powerup:
- rproc_del(rproc);
-err_add:
- k3_r5_reserved_mem_exit(kproc);
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
@@ -1374,10 +1355,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
mbox_free_channel(kproc->mbox);
-
- rproc_del(rproc);
-
- k3_r5_reserved_mem_exit(kproc);
}
}
@@ -1510,6 +1487,13 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return 0;
}
+static void k3_r5_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1603,6 +1587,10 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1619,13 +1607,7 @@ err:
*/
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
- struct k3_r5_core *core = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- int ret;
-
- ret = ti_sci_proc_release(core->tsp);
- if (ret)
- dev_err(dev, "failed to release proc, ret = %d\n", ret);
platform_set_drvdata(pdev, NULL);
devres_release_group(dev, k3_r5_core_of_init);
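
The devm conversions above follow one reusable pattern: register a release action right after acquiring the resource, then drop all manual unwind labels. A minimal sketch with illustrative names, assuming only helpers already used in this file:

#include <linux/device.h>
#include <linux/of_reserved_mem.h>

static void example_mem_release(void *data)
{
	of_reserved_mem_device_release(data);
}

static int example_mem_init(struct device *dev)
{
	int ret;

	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
	if (ret)
		return ret;

	/* Runs automatically on probe failure or driver detach. */
	return devm_add_action_or_reset(dev, example_mem_release, dev);
}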
diff --git a/drivers/reset/amlogic/reset-meson-aux.c b/drivers/reset/amlogic/reset-meson-aux.c
index 61ce515d92a2..33c06013439e 100644
--- a/drivers/reset/amlogic/reset-meson-aux.c
+++ b/drivers/reset/amlogic/reset-meson-aux.c
@@ -11,20 +11,20 @@
#include <linux/auxiliary_bus.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#include <linux/slab.h>
#include "reset-meson.h"
-#include <soc/amlogic/reset-meson-aux.h>
-static DEFINE_IDA(meson_rst_aux_ida);
-
-struct meson_reset_adev {
- struct auxiliary_device adev;
- struct regmap *map;
+static const struct meson_reset_param meson_a1_audio_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 32,
+ .level_offset = 0x28,
};
-#define to_meson_reset_adev(_adev) \
- container_of((_adev), struct meson_reset_adev, adev)
+static const struct meson_reset_param meson_a1_audio_vad_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 6,
+ .level_offset = 0x8,
+};
static const struct meson_reset_param meson_g12a_audio_param = {
.reset_ops = &meson_reset_toggle_ops,
@@ -40,6 +40,12 @@ static const struct meson_reset_param meson_sm1_audio_param = {
static const struct auxiliary_device_id meson_reset_aux_ids[] = {
{
+ .name = "a1-audio-clkc.rst-a1",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_param,
+ }, {
+ .name = "a1-audio-clkc.rst-a1-vad",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_vad_param,
+ }, {
.name = "axg-audio-clkc.rst-g12a",
.driver_data = (kernel_ulong_t)&meson_g12a_audio_param,
}, {
@@ -54,10 +60,13 @@ static int meson_reset_aux_probe(struct auxiliary_device *adev,
{
const struct meson_reset_param *param =
(const struct meson_reset_param *)(id->driver_data);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
+ struct regmap *map;
+
+ map = dev_get_regmap(adev->dev.parent, NULL);
+ if (!map)
+ return -EINVAL;
- return meson_reset_controller_register(&adev->dev, raux->map, param);
+ return meson_reset_controller_register(&adev->dev, map, param);
}
static struct auxiliary_driver meson_reset_aux_driver = {
@@ -66,70 +75,6 @@ static struct auxiliary_driver meson_reset_aux_driver = {
};
module_auxiliary_driver(meson_reset_aux_driver);
-static void meson_rst_aux_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
-
- ida_free(&meson_rst_aux_ida, adev->id);
- kfree(raux);
-}
-
-static void meson_rst_aux_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-int devm_meson_rst_aux_register(struct device *dev,
- struct regmap *map,
- const char *adev_name)
-{
- struct meson_reset_adev *raux;
- struct auxiliary_device *adev;
- int ret;
-
- raux = kzalloc(sizeof(*raux), GFP_KERNEL);
- if (!raux)
- return -ENOMEM;
-
- ret = ida_alloc(&meson_rst_aux_ida, GFP_KERNEL);
- if (ret < 0)
- goto raux_free;
-
- raux->map = map;
-
- adev = &raux->adev;
- adev->id = ret;
- adev->name = adev_name;
- adev->dev.parent = dev;
- adev->dev.release = meson_rst_aux_release;
- device_set_of_node_from_dev(&adev->dev, dev);
-
- ret = auxiliary_device_init(adev);
- if (ret)
- goto ida_free;
-
- ret = __auxiliary_device_add(adev, dev->driver->name);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(dev, meson_rst_aux_unregister_adev,
- adev);
-
-ida_free:
- ida_free(&meson_rst_aux_ida, adev->id);
-raux_free:
- kfree(raux);
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_meson_rst_aux_register);
-
MODULE_DESCRIPTION("Amlogic Meson Reset Auxiliary driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");
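
With the IDA/kzalloc plumbing deleted, the auxiliary reset driver now fetches its regmap straight from the parent at probe time instead of carrying it in a private wrapper around the auxiliary device. A minimal sketch of that lookup, assuming the parent registered its regmap via one of the devm_regmap_init*() helpers:

    #include <linux/auxiliary_bus.h>
    #include <linux/regmap.h>

    static int my_aux_probe(struct auxiliary_device *adev,
    			const struct auxiliary_device_id *id)
    {
    	struct regmap *map;

    	/* NULL name: take the parent's (single) registered regmap */
    	map = dev_get_regmap(adev->dev.parent, NULL);
    	if (!map)
    		return -EINVAL;

    	/* ... hand 'map' to the reset controller registration ... */
    	return 0;
    }
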
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index aa5464be7053..6d3e75b33260 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -8,6 +8,7 @@
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -72,14 +73,22 @@ static struct regmap *mchp_lan966x_syscon_to_regmap(struct device *dev,
struct device_node *syscon_np)
{
struct regmap_config regmap_config = mchp_lan966x_syscon_regmap_config;
- resource_size_t size;
+ struct resource res;
void __iomem *base;
+ int err;
+
+ err = of_address_to_resource(syscon_np, 0, &res);
+ if (err)
+ return ERR_PTR(err);
- base = devm_of_iomap(dev, syscon_np, 0, &size);
- if (IS_ERR(base))
- return ERR_CAST(base);
+ /*
+  * It is not possible to use devm_of_iomap because this resource is
+  * shared with other drivers.
+  */
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base)
+ return ERR_PTR(-ENOMEM);
- regmap_config.max_register = size - 4;
+ regmap_config.max_register = resource_size(&res) - 4;
return devm_regmap_init_mmio(dev, base, &regmap_config);
}
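
The sparx5 change swaps devm_of_iomap() for of_address_to_resource() plus devm_ioremap() because the former requests the memory region exclusively, which fails once another driver has claimed the shared syscon range. A condensed sketch of the non-exclusive mapping, with the helper name as an illustration:

    #include <linux/of_address.h>
    #include <linux/io.h>

    static int map_shared_region(struct device *dev, struct device_node *np,
    			     void __iomem **base, resource_size_t *size)
    {
    	struct resource res;
    	int err;

    	err = of_address_to_resource(np, 0, &res);
    	if (err)
    		return err;

    	/* devm_ioremap() does not request the region, so sharing is fine */
    	*base = devm_ioremap(dev, res.start, resource_size(&res));
    	if (!*base)
    		return -ENOMEM;

    	*size = resource_size(&res);
    	return 0;
    }
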
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 12d0535a874b..8a7f167e405e 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
vdev->dev.parent = dev;
priv->vdev = vdev;
+ device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);
if (error)
goto err_device_put;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 712c06c02696..207b64c0a2fe 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -377,9 +377,9 @@ EXPORT_SYMBOL(rpmsg_get_mtu);
* this is used to make sure we're not creating rpmsg devices for channels
* that already exist.
*/
-static int rpmsg_device_match(struct device *dev, void *data)
+static int rpmsg_device_match(struct device *dev, const void *data)
{
- struct rpmsg_channel_info *chinfo = data;
+ const struct rpmsg_channel_info *chinfo = data;
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
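
The rpmsg hunk tracks the driver-core constification of match callbacks: the traversal helpers now pass the key as const void *, so the callback must not cast the const away. A sketch of a matching caller/callback pair with a hypothetical key type (device_find_child() stands in for whichever traversal helper the caller uses):

    #include <linux/device.h>

    struct my_key { u32 src; };		/* hypothetical lookup key */

    static int my_match(struct device *dev, const void *data)
    {
    	const struct my_key *key = data;	/* const preserved */
    	const struct my_key *cur = dev_get_drvdata(dev);

    	return cur && cur->src == key->src;
    }

    static struct device *my_lookup(struct device *parent, struct my_key *key)
    {
    	return device_find_child(parent, key, my_match);
    }
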
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a60bcc791a48..0bbbf778ecfa 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1316,7 +1316,7 @@ config RTC_DRV_SC27XX
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
If you say Y here you will get support for the RTC found on
spear
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e31fa0ad127e..b88cd4fb295b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -240,8 +240,7 @@ static struct rtc_device *rtc_allocate_device(void)
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
/* Init pie timer */
- hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rtc->pie_timer.function = rtc_pie_update_irq;
+ hrtimer_setup(&rtc->pie_timer, rtc_pie_update_irq, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_enabled = 0;
set_bit(RTC_FEATURE_ALARM, rtc->features);
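
hrtimer_setup() folds the old two-step hrtimer_init() plus .function assignment into a single call, so the core can guarantee the callback is set before the timer can ever fire. The shape of the conversion, with a hypothetical handler:

    #include <linux/hrtimer.h>

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
    	/* ... per-expiry work ... */
    	return HRTIMER_NORESTART;
    }

    static void my_timer_init(struct hrtimer *timer)
    {
    	/*
    	 * old style:
    	 *   hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    	 *   timer->function = my_timer_fn;
    	 */
    	hrtimer_setup(timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    }
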
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 5c39cf252392..a3e52a5a708f 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -308,7 +308,7 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
/* remember whether this power up is caused by PMIC RTC or not */
info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
out_rtc:
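
The long run of RTC hunks that follows is one mechanical cleanup: device_init_wakeup() takes a bool, so the bare 0/1 literals become false/true. For reference:

    #include <linux/pm_wakeup.h>

    /* device_init_wakeup() returns 0 or a negative errno */
    static int my_enable_wakeup(struct device *dev)
    {
    	return device_init_wakeup(dev, true);	/* was: ..., 1 */
    }
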
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 814230d61842..964cd048fcdb 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -326,7 +326,7 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
#endif /* VRTC_CALIBRATION */
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c
index 2278b4c98a71..09d78c2cc691 100644
--- a/drivers/rtc/rtc-amlogic-a4.c
+++ b/drivers/rtc/rtc-amlogic-a4.c
@@ -361,7 +361,7 @@ static int aml_rtc_probe(struct platform_device *pdev)
"failed to get_enable rtc sys clk\n");
aml_rtc_init(rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_allocate_device(dev);
@@ -391,7 +391,7 @@ static int aml_rtc_probe(struct platform_device *pdev)
return 0;
err_clk:
clk_disable_unprepare(rtc->sys_clk);
- device_init_wakeup(dev, 0);
+ device_init_wakeup(dev, false);
return ret;
}
@@ -426,7 +426,7 @@ static void aml_rtc_remove(struct platform_device *pdev)
struct aml_rtc_data *rtc = dev_get_drvdata(&pdev->dev);
clk_disable_unprepare(rtc->sys_clk);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
static const struct aml_rtc_config a5_rtc_config = {
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 569c1054d6b0..713fa0d077cd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -527,7 +527,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
if (rtc->irq != -1)
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, rtc->rtc_dev->features);
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 0f21af27f4cf..9682d6457b7f 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -187,7 +187,7 @@ static int as3722_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc",
&as3722_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 9b3898b8de7c..f6b0102a843a 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -528,7 +528,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
* being wake-capable; if it didn't, do that here.
*/
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
if (at91_rtc_config->has_correction)
rtc->ops = &sama5d4_rtc_ops;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 15b21da2788f..38991cca5930 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -353,7 +353,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
/* platform setup code should have handled this; sigh */
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, rtc);
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index bf2a9a1fdea7..8634eea799ab 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -359,7 +359,7 @@ static void cdns_rtc_remove(struct platform_device *pdev)
struct cdns_rtc *crtc = platform_get_drvdata(pdev);
cdns_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(crtc->pclk);
clk_disable_unprepare(crtc->ref_clk);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 78f2ce12c75a..8172869bd3d7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -151,11 +151,6 @@ static inline int hpet_set_periodic_freq(unsigned long freq)
return 0;
}
-static inline int hpet_rtc_dropped_irq(void)
-{
- return 0;
-}
-
static inline int hpet_rtc_timer_init(void)
{
return 0;
@@ -864,7 +859,7 @@ static void acpi_cmos_wake_setup(struct device *dev)
dev_info(dev, "RTC can wake from S4\n");
/* RTC always wakes from S1/S2/S3, and often S4/STD */
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
}
static void cmos_check_acpi_rtc_status(struct device *dev,
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index afc8fcba8f88..568a89e79c11 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -295,7 +295,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
}
disable_irq(rtc->update_irq);
- err = device_init_wakeup(dev, 1);
+ err = device_init_wakeup(dev, true);
if (err) {
dev_err(dev, "wakeup initialization failed (%d)\n", err);
/* ignore error and continue without wakeup support */
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 60a48c3ba3ca..865c2e82c7a5 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -337,7 +337,7 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, true);
if (ret) {
dev_err(&pdev->dev, "failed to initialize wakeup\n");
return ret;
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 844168fcae1e..05adec6b77bf 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -288,7 +288,7 @@ static int da9055_rtc_probe(struct platform_device *pdev)
if (ret & DA9055_RTC_ALM_EN)
rtc->alarm_enable = 1;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&da9055_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index dd37b055693c..19c09c418746 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -508,7 +508,7 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
return ret;
if (ds3232->irq > 0)
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
ds3232_hwmon_register(dev, name);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 7b82e4a14b7a..f71a6bb77b2a 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -830,7 +830,7 @@ static int isl1208_setup_irq(struct i2c_client *client, int irq)
isl1208_driver.driver.name,
client);
if (!rc) {
- device_init_wakeup(&client->dev, 1);
+ device_init_wakeup(&client->dev, true);
enable_irq_wake(irq);
} else {
dev_err(&client->dev,
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index bafa7d1b9b88..44bba356268c 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -367,7 +367,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
ret = dev_pm_set_wake_irq(dev, irq);
if (ret)
diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
index 8d713e563d7c..97e5625c064c 100644
--- a/drivers/rtc/rtc-loongson.c
+++ b/drivers/rtc/rtc-loongson.c
@@ -114,6 +114,13 @@ static irqreturn_t loongson_rtc_isr(int irq, void *id)
struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
+
+ /*
+ * The TOY_MATCH0_REG should be cleared to 0 here,
+ * otherwise the interrupt cannot be cleared.
+ */
+ regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+
return IRQ_HANDLED;
}
@@ -131,11 +138,7 @@ static u32 loongson_rtc_handler(void *id)
writel(RTC_STS, priv->pm_base + PM1_STS_REG);
spin_unlock(&priv->lock);
- /*
- * The TOY_MATCH0_REG should be cleared 0 here,
- * otherwise the interrupt cannot be cleared.
- */
- return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+ return ACPI_INTERRUPT_HANDLED;
}
static int loongson_rtc_set_enabled(struct device *dev)
@@ -329,7 +332,7 @@ static int loongson_rtc_probe(struct platform_device *pdev)
alarm_irq);
priv->pm_base = regs - priv->config->pm_offset;
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
if (has_acpi_companion(dev))
acpi_install_fixed_event_handler(ACPI_EVENT_RTC,
@@ -360,7 +363,7 @@ static void loongson_rtc_remove(struct platform_device *pdev)
acpi_remove_fixed_event_handler(ACPI_EVENT_RTC,
loongson_rtc_handler);
- device_init_wakeup(dev, 0);
+ device_init_wakeup(dev, false);
loongson_rtc_alarm_irq_enable(dev, 0);
}
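
The loongson change moves the TOY_MATCH0_REG clear from the ACPI fixed-event handler into the ISR, so the interrupt is acked on every delivery path, and the ACPI handler now returns the expected ACPI_INTERRUPT_HANDLED instead of leaking a regmap return code. A condensed sketch of an alarm ISR that acks by clearing the match register (types simplified, register offset hypothetical):

    #include <linux/interrupt.h>
    #include <linux/regmap.h>
    #include <linux/rtc.h>

    #define MY_TOY_MATCH0_REG 0x2c	/* illustrative offset only */

    struct my_rtc_priv {
    	struct regmap *regmap;
    	struct rtc_device *rtcdev;
    };

    static irqreturn_t my_rtc_isr(int irq, void *id)
    {
    	struct my_rtc_priv *priv = id;

    	rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);

    	/* the line stays asserted until the match register is cleared */
    	regmap_write(priv->regmap, MY_TOY_MATCH0_REG, 0);

    	return IRQ_HANDLED;
    }
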
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c
index c0b8fbce1082..0793d70507f7 100644
--- a/drivers/rtc/rtc-lp8788.c
+++ b/drivers/rtc/rtc-lp8788.c
@@ -293,7 +293,7 @@ static int lp8788_rtc_probe(struct platform_device *pdev)
rtc->alarm = lp->pdata ? lp->pdata->alarm_sel : DEFAULT_ALARM_SEL;
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
rtc->rdev = devm_rtc_device_register(dev, "lp8788_rtc",
&lp8788_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 76ad7031a13d..74280bffe1b0 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -257,7 +257,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Can't request interrupt.\n");
rtc->irq = -1;
} else {
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
}
}
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index a8f4b645c09d..7bb044d2ac25 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -770,7 +770,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
goto err_rtc;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
&max77686_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 64bb8ac6ef62..6ce8afbeac68 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -270,7 +270,7 @@ static int max8925_rtc_probe(struct platform_device *pdev)
/* XXX - isn't this redundant? */
platform_set_drvdata(pdev, info);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8925-rtc",
&max8925_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 20e50d9fdf88..e7618d715bd8 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -473,7 +473,7 @@ static int max8997_rtc_probe(struct platform_device *pdev)
max8997_rtc_enable_wtsr(info, true);
max8997_rtc_enable_smpl(info, true);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8997-rtc",
&max8997_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 648fa362ec44..5849729f7d01 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -74,7 +74,7 @@ static int meson_vrtc_probe(struct platform_device *pdev)
if (IS_ERR(vrtc->io_alarm))
return PTR_ERR(vrtc->io_alarm);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, vrtc);
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 600328131603..b90f8337a7e6 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -303,7 +303,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
return PTR_ERR(rtc->regs);
}
- device_init_wakeup(&op->dev, 1);
+ device_init_wakeup(&op->dev, true);
platform_set_drvdata(op, rtc);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 152699219a2b..6979d225a78e 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -286,7 +286,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc_dev->ops = &mtk_rtc_ops;
rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 51029c536244..c27ad626d09f 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -264,7 +264,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
if (pdata->irq >= 0)
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, pdata->rtc->features);
@@ -287,7 +287,7 @@ static void __exit mv_rtc_remove(struct platform_device *pdev)
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq >= 0)
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index dbb935dbbd8a..608db97d450c 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -377,7 +377,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
}
if (pdata->irq >= 0) {
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 13c041bb79f1..570f27af4732 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -302,7 +302,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return pdata->irq;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c123778e2d9b..0f90065e352c 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -920,7 +920,7 @@ static void omap_rtc_remove(struct platform_device *pdev)
omap_rtc_power_off_rtc = NULL;
}
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
if (!IS_ERR(rtc->clk))
clk_disable_unprepare(rtc->clk);
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 7256a88b490c..aecada6bcf8b 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -287,7 +287,7 @@ static int palmas_rtc_probe(struct platform_device *pdev)
palmas_rtc->irq = platform_get_irq(pdev, 0);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&palmas_rtc_ops, THIS_MODULE);
if (IS_ERR(palmas_rtc->rtc)) {
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9c04c4e1a49c..31c7dca8f469 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -48,6 +49,7 @@
#define PCF2127_BIT_CTRL3_BLF BIT(2)
#define PCF2127_BIT_CTRL3_BF BIT(3)
#define PCF2127_BIT_CTRL3_BTSE BIT(4)
+#define PCF2127_CTRL3_PM GENMASK(7, 5)
/* Time and date registers */
#define PCF2127_REG_TIME_BASE 0x03
#define PCF2127_BIT_SC_OSF BIT(7)
@@ -331,6 +333,84 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
+static int pcf2127_param_get(struct device *dev, struct rtc_param *param)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ switch (param->param) {
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(PCF2127_CTRL3_PM, value);
+
+ if (value < 0x3)
+ param->uvalue = RTC_BSM_LEVEL;
+ else if (value < 0x6)
+ param->uvalue = RTC_BSM_DIRECT;
+ else
+ param->uvalue = RTC_BSM_DISABLED;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pcf2127_param_set(struct device *dev, struct rtc_param *param)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ u8 mode = 0;
+ u32 value;
+ int ret;
+
+ switch (param->param) {
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(PCF2127_CTRL3_PM, value);
+
+ if (value > 5)
+ value -= 5;
+ else if (value > 2)
+ value -= 3;
+
+ switch (param->uvalue) {
+ case RTC_BSM_LEVEL:
+ break;
+ case RTC_BSM_DIRECT:
+ mode = 3;
+ break;
+ case RTC_BSM_DISABLED:
+ if (value == 0)
+ value = 1;
+ mode = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
+ PCF2127_CTRL3_PM,
+ FIELD_PREP(PCF2127_CTRL3_PM, mode + value));
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int pcf2127_rtc_ioctl(struct device *dev,
unsigned int cmd, unsigned long arg)
{
@@ -741,6 +821,8 @@ static const struct rtc_class_ops pcf2127_rtc_ops = {
.read_alarm = pcf2127_rtc_read_alarm,
.set_alarm = pcf2127_rtc_set_alarm,
.alarm_irq_enable = pcf2127_rtc_alarm_irq_enable,
+ .param_get = pcf2127_param_get,
+ .param_set = pcf2127_param_set,
};
/* sysfs interface */
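
The PM field added above packs both the backup switch mode and its sub-option into CTRL3 bits 7..5: values 0-2 select level switching, 3-5 direct switching, and 6-7 disable it, which is why param_set first strips the class base to preserve the sub-option and then adds the new base back. A compact sketch of the encode/decode implied by the hunk:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define CTRL3_PM	GENMASK(7, 5)

    /* decode: 0-2 => level, 3-5 => direct, 6-7 => disabled */
    static int bsm_class(unsigned int ctrl3)
    {
    	unsigned int pm = FIELD_GET(CTRL3_PM, ctrl3);

    	return pm < 3 ? 0 : pm < 6 ? 1 : 2;
    }

    /* encode: keep the sub-option (0-2), swap in the class base (0, 3 or 5) */
    static unsigned int bsm_encode(unsigned int sub, unsigned int base)
    {
    	return FIELD_PREP(CTRL3_PM, base + sub);
    }
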
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index fdbc07f14036..905986c61655 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -322,7 +322,16 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
static int pcf85063_nvmem_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
- return regmap_read(priv, PCF85063_REG_RAM, val);
+ unsigned int tmp;
+ int ret;
+
+ ret = regmap_read(priv, PCF85063_REG_RAM, &tmp);
+ if (ret < 0)
+ return ret;
+
+ *(u8 *)val = tmp;
+
+ return 0;
}
static int pcf85063_nvmem_write(void *priv, unsigned int offset,
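
The pcf85063 fix addresses a type mismatch: regmap_read() stores a full unsigned int, but the nvmem core hands in a byte-sized buffer, so reading directly into val would overrun it. The repaired pattern, generically (register name illustrative):

    #include <linux/regmap.h>

    #define MY_RAM_REG 0x03	/* illustrative register offset */

    static int my_nvmem_read(void *priv, unsigned int offset,
    			 void *val, size_t bytes)
    {
    	struct regmap *map = priv;
    	unsigned int tmp;
    	int ret;

    	/* bounce through unsigned int: regmap_read() writes 4 bytes */
    	ret = regmap_read(map, MY_RAM_REG, &tmp);
    	if (ret < 0)
    		return ret;

    	*(u8 *)val = tmp;
    	return 0;
    }
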
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index bed3c27e665f..2812da2c50c5 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -330,7 +330,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
pic32_rtc_enable(pdata, 1);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
pdata->rtc->ops = &pic32_rtcops;
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 2f32187ecc8d..b2518aea4218 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -503,7 +503,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_dd);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_dd->rtc))
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 34d8545c8e15..62ee6b8f9bcd 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -360,7 +360,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
return 0;
}
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index eecb49bab56a..8ba9cda74acf 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -245,7 +245,7 @@ static int rc5t583_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ricoh_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&rc5t583_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index 711f62eecd79..74d169102074 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -414,7 +414,7 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
} else {
/* enable wake */
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
enable_irq_wake(rtc->irq);
}
} else {
diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c
index d127933bfc8a..a056291d3887 100644
--- a/drivers/rtc/rtc-renesas-rtca3.c
+++ b/drivers/rtc/rtc-renesas-rtca3.c
@@ -768,7 +768,7 @@ static int rtca3_probe(struct platform_device *pdev)
if (ret)
return ret;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
priv->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(priv->rtc_dev))
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 2d9bcb3ce1e3..59b8e9a30fe6 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -418,7 +418,7 @@ static int rk808_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rk808_rtc->rtc))
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index c0ac3bdb2f42..58c957eb753d 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -456,7 +456,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
readw(info->base + S3C2410_RTCCON));
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc)) {
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index dad294a0ce2a..36acca5b2639 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -729,7 +729,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->irq, ret);
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
}
return devm_rtc_register_device(info->rtc_dev);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 13799b1abca1..1ad93648d69c 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -292,7 +292,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return sa1100_rtc_init(pdev, info);
}
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index ce7a2ddbbc16..2b83561d4d28 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -613,14 +613,14 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc->ops = &sprd_rtc_ops;
rtc->rtc->range_min = 0;
rtc->rtc->range_max = 5662310399LL;
ret = devm_rtc_register_device(rtc->rtc);
if (ret) {
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
return ret;
}
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index a5df521876ba..9ea40f40188f 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -611,7 +611,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (ret)
goto err_unmap;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
err_unmap:
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 26eed927f8b3..959acff8faff 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -395,7 +395,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
goto err_disable_clock;
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
@@ -411,7 +411,7 @@ static void spear_rtc_remove(struct platform_device *pdev)
spear_rtc_disable_interrupt(config);
clk_disable_unprepare(config->clk);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 9f1a019ec8af..a0564d443569 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1074,26 +1074,18 @@ static int stm32_rtc_probe(struct platform_device *pdev)
regs = &rtc->data->regs;
if (rtc->data->need_dbp) {
- rtc->dbp = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "st,syscfg");
+ unsigned int args[2];
+
+ rtc->dbp = syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "st,syscfg",
+ 2, args);
if (IS_ERR(rtc->dbp)) {
dev_err(&pdev->dev, "no st,syscfg\n");
return PTR_ERR(rtc->dbp);
}
- ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
- 1, &rtc->dbp_reg);
- if (ret) {
- dev_err(&pdev->dev, "can't read DBP register offset\n");
- return ret;
- }
-
- ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
- 2, &rtc->dbp_mask);
- if (ret) {
- dev_err(&pdev->dev, "can't read DBP register mask\n");
- return ret;
- }
+ rtc->dbp_reg = args[0];
+ rtc->dbp_mask = args[1];
}
if (!rtc->data->has_pclk) {
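
syscon_regmap_lookup_by_phandle_args() collapses the phandle lookup and the two of_property_read_u32_index() calls into a single step: it returns the regmap and copies the phandle's trailing argument cells into the caller's array. Sketch:

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    static int my_get_syscon(struct device_node *np,
    			 struct regmap **map, u32 *reg, u32 *mask)
    {
    	unsigned int args[2];

    	/* property of the form: st,syscfg = <&syscon reg mask>; */
    	*map = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg", 2, args);
    	if (IS_ERR(*map))
    		return PTR_ERR(*map);

    	*reg = args[0];
    	*mask = args[1];
    	return 0;
    }
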
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index e681c1745866..e5e6013d080e 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -826,7 +826,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
clk_prepare_enable(chip->losc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index 9b1ce0e8ba27..519a06e728d6 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -269,7 +269,7 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
if (ret)
goto free_reset_assert;
- device_init_wakeup(&plat_dev->dev, 1);
+ device_init_wakeup(&plat_dev->dev, true);
dev_set_drvdata(&plat_dev->dev, sp_rtc);
sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev);
@@ -307,7 +307,7 @@ static void sp_rtc_remove(struct platform_device *plat_dev)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
- device_init_wakeup(&plat_dev->dev, 0);
+ device_init_wakeup(&plat_dev->dev, false);
reset_control_assert(sp_rtc->rstc);
clk_disable_unprepare(sp_rtc->rtcclk);
}
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 79a3102c8354..46788db89953 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -319,7 +319,7 @@ static int tegra_rtc_probe(struct platform_device *pdev)
writel(0xffffffff, info->base + TEGRA_RTC_REG_INTR_STATUS);
writel(0, info->base + TEGRA_RTC_REG_INTR_MASK);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = devm_request_irq(&pdev->dev, info->irq, tegra_rtc_irq_handler,
IRQF_TRIGGER_HIGH, dev_name(&pdev->dev),
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 7e0d8fb26465..a68b8c884102 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -132,7 +132,7 @@ static int test_probe(struct platform_device *plat_dev)
break;
default:
rtd->rtc->ops = &test_rtc_ops;
- device_init_wakeup(&plat_dev->dev, 1);
+ device_init_wakeup(&plat_dev->dev, true);
}
timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0);
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index e796729fc817..54c8429b16bf 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -241,7 +241,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2ea1bbfbbc2a..284aa2f0392b 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -418,7 +418,7 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->irq = irq;
if (irq != -1) {
if (device_property_present(tps65910->dev, "wakeup-source"))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
device_set_wakeup_capable(&pdev->dev, 1);
} else {
diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
index e69667634137..7c6246e3f029 100644
--- a/drivers/rtc/rtc-tps6594.c
+++ b/drivers/rtc/rtc-tps6594.c
@@ -37,7 +37,7 @@
#define MAX_OFFSET (277774)
// Number of ticks per hour
-#define TICKS_PER_HOUR (32768 * 3600)
+#define TICKS_PER_HOUR (32768 * 3600LL)
// Multiplier for ppb conversions
#define PPB_MULT NANO
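
The LL suffix matters even though 32768 * 3600 itself fits in a 32-bit int: any further multiplication of TICKS_PER_HOUR (for instance by an offset of up to MAX_OFFSET, defined just above) is then evaluated in int and overflows on 32-bit targets. With the suffix, the whole expression is promoted to 64-bit arithmetic. A standalone illustration:

    #include <stdio.h>

    #define MAX_OFFSET	277774
    #define TICKS_BAD	(32768 * 3600)		/* int arithmetic */
    #define TICKS_GOOD	(32768 * 3600LL)	/* long long arithmetic */

    int main(void)
    {
    	/* the first multiply overflows in int math (undefined behaviour) */
    	long long bad  = MAX_OFFSET * TICKS_BAD;
    	/* ~3.3e13: needs 64 bits, computed correctly with the LL suffix */
    	long long good = MAX_OFFSET * TICKS_GOOD;

    	printf("bad=%lld good=%lld\n", bad, good);
    	return 0;
    }
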
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 794429182b34..e6106e67e1f4 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -572,7 +572,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, twl_rtc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 640833e21057..218316be942a 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -420,7 +420,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
if (ret & WM831X_RTC_ALM_ENA)
wm831x_rtc->alarm_enabled = 1;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
wm831x_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(wm831x_rtc->rtc))
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 6797eb4d2e49..3bd60d067a5e 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -420,7 +420,7 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
}
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
&wm8350_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 0813ea1a03c2..6660b664e8dd 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -174,7 +174,7 @@ static int xgene_rtc_probe(struct platform_device *pdev)
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- ret = device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, true);
if (ret) {
clk_disable_unprepare(pdata->clk);
return ret;
@@ -197,7 +197,7 @@ static void xgene_rtc_remove(struct platform_device *pdev)
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
xgene_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(pdata->clk);
}
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index af1abb69d1e3..f39102b66eac 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -318,8 +318,8 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- /* Getting the rtc_clk info */
- xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+	/* Getting the "rtc" clock info */

+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc");
if (IS_ERR(xrtcdev->rtc_clk)) {
if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
dev_warn(&pdev->dev, "Device clock not found.\n");
@@ -337,7 +337,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xlnx_init_rtc(xrtcdev);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return devm_rtc_register_device(xrtcdev->rtc);
}
@@ -345,7 +345,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
static void xlnx_rtc_remove(struct platform_device *pdev)
{
xlnx_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 6da47a65af61..28e92fad0ca1 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -56,7 +56,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
- block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 3fcfe029db1b..91bbe9d2e5ac 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -461,7 +461,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
- bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fbffd451031f..45bd001206a2 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -245,7 +245,6 @@ static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
-static int sclp_init(void);
static void
__sclp_queue_read_req(void)
@@ -1251,8 +1250,7 @@ static struct platform_driver sclp_pdrv = {
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
-static int
-sclp_init(void)
+int sclp_init(void)
{
unsigned long flags;
int rc = 0;
@@ -1305,13 +1303,7 @@ fail_unlock:
static __init int sclp_initcall(void)
{
- int rc;
-
- rc = platform_driver_register(&sclp_pdrv);
- if (rc)
- return rc;
-
- return sclp_init();
+ return platform_driver_register(&sclp_pdrv);
}
arch_initcall(sclp_initcall);
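
sclp_init() loses its static qualifier and its file-local forward declaration so that, presumably elsewhere in this series, early s390 setup code can call it directly before initcalls run; the initcall keeps only the platform-driver registration. Schematically, with the external call site as an assumption:

    /* sclp.h now carries the prototype instead of a file-local forward decl */
    int sclp_init(void);

    /* hypothetical early caller, outside this hunk */
    static void __init my_early_setup(void)
    {
    	if (sclp_init())
    		pr_warn("SCLP not operational\n");
    }
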
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6c91e422927f..07a6e8a7f05a 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -85,13 +85,6 @@ typedef unsigned int sclp_cmdw_t;
typedef u64 sccb_mask_t;
-struct sccb_header {
- u16 length;
- u8 function_code;
- u8 control_mask[3];
- u16 response_code;
-} __attribute__((packed));
-
struct init_sccb {
struct sccb_header header;
u16 _reserved;
@@ -196,7 +189,9 @@ struct read_info_sccb {
u8 byte_134; /* 134 */
u8 cpudirq; /* 135 */
u16 cbl; /* 136-137 */
- u8 _pad_138[EXT_SCCB_READ_SCP - 138];
+ u8 byte_138; /* 138 */
+ u8 byte_139; /* 139 */
+ u8 _pad_140[EXT_SCCB_READ_SCP - 140];
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -238,13 +233,6 @@ struct gds_vector {
u16 gds_id;
} __attribute__((packed));
-struct evbuf_header {
- u16 length;
- u8 type;
- u8 flags;
- u16 _reserved;
-} __attribute__((packed));
-
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index f56ea9b60e08..ae5d28987177 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -128,7 +128,7 @@ out:
}
static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
int rc;
@@ -142,7 +142,7 @@ static const struct bin_attribute ofb_bin_attr = {
.name = "event_data",
.mode = S_IWUSR,
},
- .write = sysfs_ofb_data_write,
+ .write_new = sysfs_ofb_data_write,
};
#endif
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 29156455970e..d9d6edaf8de8 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void)
if (sccb->fac91 & 0x40)
get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_GUEST;
sclp.has_diag204_bif = !!(sccb->fac98 & 0x80);
+ sclp.has_diag310 = !!(sccb->fac91 & 0x80);
if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
@@ -64,6 +65,8 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000);
}
+ if (sccb->cpuoff > 139)
+ sclp.has_diag324 = !!(sccb->byte_139 & 0x80);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index c3466a8c56bb..56400886f7fc 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -24,30 +24,11 @@
#define SCLP_ATYPE_PCI 2
-#define SCLP_ERRNOTIFY_AQ_RESET 0
-#define SCLP_ERRNOTIFY_AQ_REPAIR 1
-#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
-#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
-
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
-struct err_notify_evbuf {
- struct evbuf_header header;
- u8 action;
- u8 atype;
- u32 fh;
- u32 fid;
- u8 data[];
-} __packed;
-
-struct err_notify_sccb {
- struct sccb_header header;
- struct err_notify_evbuf evbuf;
-} __packed;
-
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
index c2dc9aadb7d2..8524c14affed 100644
--- a/drivers/s390/char/sclp_sd.c
+++ b/drivers/s390/char/sclp_sd.c
@@ -476,7 +476,7 @@ static struct kobj_type sclp_sd_file_ktype = {
* on EOF.
*/
static ssize_t data_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buffer,
+ const struct bin_attribute *attr, char *buffer,
loff_t off, size_t size)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
@@ -539,7 +539,7 @@ static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
sysfs_bin_attr_init(&sd_file->data_attr);
sd_file->data_attr.attr.name = "data";
sd_file->data_attr.attr.mode = 0444;
- sd_file->data_attr.read = data_read;
+ sd_file->data_attr.read_new = data_read;
rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
if (rc) {
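
The read_new/write_new churn across these s390 files follows the sysfs transition to const struct bin_attribute: callbacks take a const attribute pointer and are wired up through the *_new members (and .bin_attrs_new in attribute groups) until the old non-const fields are retired. Shape of a converted attribute, declared by hand rather than via the BIN_ATTR_* macros:

    #include <linux/sysfs.h>

    static ssize_t my_blob_read(struct file *filp, struct kobject *kobj,
    			    const struct bin_attribute *attr, char *buf,
    			    loff_t off, size_t count)
    {
    	/* attr->private could carry per-attribute state if needed */
    	return 0;
    }

    static const struct bin_attribute my_blob_attr = {
    	.attr = { .name = "my_blob", .mode = 0444 },
    	.size = 64,
    	.read_new = my_blob_read,
    };

    static const struct bin_attribute *const my_bin_attrs[] = {
    	&my_blob_attr,
    	NULL,
    };

    static const struct attribute_group my_group = {
    	.bin_attrs_new = my_bin_attrs,
    };
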
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 3dd50ac9c5b0..b2d93a6e36c4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -123,7 +123,7 @@ static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
*/
static struct vmlogrdr_priv_t sys_ser[] = {
- { .system_service = "*LOGREC ",
+ { .system_service = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' },
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
@@ -132,7 +132,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
- { .system_service = "*ACCOUNT",
+ { .system_service = { '*', 'A', 'C', 'C', 'O', 'U', 'N', 'T' },
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
@@ -141,7 +141,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
- { .system_service = "*SYMPTOM",
+ { .system_service = { '*', 'S', 'Y', 'M', 'P', 'T', 'O', 'M' },
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
@@ -356,7 +356,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
- logptr->system_service, connect_rc);
+ logptr->internal_name, connect_rc);
goto out_path;
}
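
The vmlogrdr initializer change matters because system_service is a fixed 8-byte IUCV name, not a C string: "*LOGREC " as a string literal is nine bytes including the terminating NUL, while the brace form fills exactly eight bytes with no terminator. That is also why the error message now prints internal_name, since system_service can no longer safely be passed to %s. A userspace illustration of the same idiom:

    #include <stdio.h>

    struct svc {
    	char name[8];	/* fixed-width field, no NUL terminator */
    };

    int main(void)
    {
    	/* brace form fills exactly 8 bytes; a string literal would need 9 */
    	struct svc s = { .name = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' } };

    	/* bounded format, since "%s" would read past the array */
    	printf("%.8s\n", s.name);
    	return 0;
    }
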
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index cba2d048a96b..4f01b1929240 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -128,7 +128,7 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
* Channel measurement related functions
*/
static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
@@ -142,11 +142,11 @@ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
sizeof(chp->cmg_chars));
}
-static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
+static const BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
static ssize_t measurement_chars_full_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
@@ -196,22 +196,22 @@ static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count,
}
static ssize_t measurement_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return chp_measurement_copy_block(buf, off, count, kobj, false);
}
-static BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry));
+static const BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry));
static ssize_t ext_measurement_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return chp_measurement_copy_block(buf, off, count, kobj, true);
}
-static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
+static const BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
-static struct bin_attribute *measurement_attrs[] = {
+static const struct bin_attribute *measurement_attrs[] = {
&bin_attr_measurement_chars,
&bin_attr_measurement_chars_full,
&bin_attr_measurement,
@@ -435,7 +435,7 @@ static ssize_t speed_bps_show(struct device *dev,
static DEVICE_ATTR_RO(speed_bps);
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
@@ -448,10 +448,10 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RO(util_string,
- sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+static const BIN_ATTR_RO(util_string,
+ sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
-static struct bin_attribute *chp_bin_attrs[] = {
+static const struct bin_attribute *const chp_bin_attrs[] = {
&bin_attr_util_string,
NULL,
};
@@ -468,9 +468,9 @@ static struct attribute *chp_attrs[] = {
&dev_attr_speed_bps.attr,
NULL,
};
-static struct attribute_group chp_attr_group = {
+static const struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
- .bin_attrs = chp_bin_attrs,
+ .bin_attrs_new = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
&chp_attr_group,
@@ -695,7 +695,8 @@ static int info_update(void)
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
- chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
+ if (!rc)
+ chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index acd6790dba4d..61c07b4a0fe8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -445,7 +445,7 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->dma_area->senseid.ciw + ciw_cnt;
+ return &cdev->private->dma_area->senseid.ciw[ciw_cnt];
return NULL;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 641f0dbb65a9..4bd4c00c9c0c 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -210,11 +210,10 @@ struct qdio_q {
qdio_handler_t (*handler);
struct qdio_irq *irq_ptr;
+
+ /* memory page (PAGE_SIZE) used to place slib and sl on */
+ void *sl_page;
struct sl *sl;
- /*
- * A page is allocated under this pointer and used for slib and sl.
- * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
- */
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -266,7 +265,7 @@ struct qdio_irq {
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
- css_general_characteristics.aif_osa)
+ css_general_characteristics.aif_qdio)
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99c0fd23022d..ea09aadaae4e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -83,7 +83,7 @@ static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
for (i = 0; i < count; i++) {
q = queues[i];
- free_page((unsigned long) q->slib);
+ free_page((unsigned long)q->sl_page);
kmem_cache_free(qdio_q_cache, q);
}
}
@@ -109,12 +109,16 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
- q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
- if (!q->slib) {
+ q->sl_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!q->sl_page) {
kmem_cache_free(qdio_q_cache, q);
__qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
+ q->slib = q->sl_page;
+ /* As per architecture: SLIB is 2K bytes long, and SL 1K. */
+ q->sl = (struct sl *)(q->slib + 1);
+
irq_ptr_qs[i] = q;
}
return 0;
@@ -142,11 +146,15 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- struct slib *slib = q->slib;
+ struct slib *const slib = q->slib;
+ void *const sl_page = q->sl_page;
+ struct sl *const sl = q->sl;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
- memset(slib, 0, PAGE_SIZE);
+ memset(sl_page, 0, PAGE_SIZE);
+ q->sl_page = sl_page;
+ q->sl = sl;
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
@@ -161,7 +169,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
int j;
DBF_HEX(&q, sizeof(void *));
- q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
@@ -423,7 +430,7 @@ int __init qdio_setup_init(void)
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
- (css_general_characteristics.aif_osa) ? 1 : 0);
+ (css_general_characteristics.aif_qdio) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 26e1ea1940ec..62feb2c639d5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -2326,8 +2326,7 @@ static inline int __init ap_async_init(void)
*/
if (MACHINE_IS_VM)
poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
queue_work(system_long_wq, &ap_scan_bus_work);
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index a4eb45803f5e..57edc97bafd2 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -184,7 +184,7 @@ static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
static ssize_t protkey_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static ssize_t protkey_aes_128_read(struct file *filp,
static ssize_t protkey_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -204,7 +204,7 @@ static ssize_t protkey_aes_192_read(struct file *filp,
static ssize_t protkey_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -214,7 +214,7 @@ static ssize_t protkey_aes_256_read(struct file *filp,
static ssize_t protkey_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -224,7 +224,7 @@ static ssize_t protkey_aes_128_xts_read(struct file *filp,
static ssize_t protkey_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -234,7 +234,7 @@ static ssize_t protkey_aes_256_xts_read(struct file *filp,
static ssize_t protkey_aes_xts_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -244,7 +244,7 @@ static ssize_t protkey_aes_xts_128_read(struct file *filp,
static ssize_t protkey_aes_xts_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -254,7 +254,7 @@ static ssize_t protkey_aes_xts_256_read(struct file *filp,
static ssize_t protkey_hmac_512_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -264,7 +264,7 @@ static ssize_t protkey_hmac_512_read(struct file *filp,
static ssize_t protkey_hmac_1024_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -272,17 +272,17 @@ static ssize_t protkey_hmac_1024_read(struct file *filp,
buf, off, count);
}
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
-static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
-
-static struct bin_attribute *protkey_attrs[] = {
+static const BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static const BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static const struct bin_attribute *const protkey_attrs[] = {
&bin_attr_protkey_aes_128,
&bin_attr_protkey_aes_192,
&bin_attr_protkey_aes_256,
@@ -295,9 +295,9 @@ static struct bin_attribute *protkey_attrs[] = {
NULL
};
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
+static const struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs_new = protkey_attrs,
};
/*
@@ -341,7 +341,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
static ssize_t ccadata_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -351,7 +351,7 @@ static ssize_t ccadata_aes_128_read(struct file *filp,
static ssize_t ccadata_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -361,7 +361,7 @@ static ssize_t ccadata_aes_192_read(struct file *filp,
static ssize_t ccadata_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -371,7 +371,7 @@ static ssize_t ccadata_aes_256_read(struct file *filp,
static ssize_t ccadata_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -381,7 +381,7 @@ static ssize_t ccadata_aes_128_xts_read(struct file *filp,
static ssize_t ccadata_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -389,13 +389,13 @@ static ssize_t ccadata_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-static struct bin_attribute *ccadata_attrs[] = {
+static const struct bin_attribute *const ccadata_attrs[] = {
&bin_attr_ccadata_aes_128,
&bin_attr_ccadata_aes_192,
&bin_attr_ccadata_aes_256,
@@ -404,9 +404,9 @@ static struct bin_attribute *ccadata_attrs[] = {
NULL
};
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
+static const struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs_new = ccadata_attrs,
};
#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
@@ -455,7 +455,7 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ccacipher_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -465,7 +465,7 @@ static ssize_t ccacipher_aes_128_read(struct file *filp,
static ssize_t ccacipher_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -475,7 +475,7 @@ static ssize_t ccacipher_aes_192_read(struct file *filp,
static ssize_t ccacipher_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -485,7 +485,7 @@ static ssize_t ccacipher_aes_256_read(struct file *filp,
static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -495,7 +495,7 @@ static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -503,13 +503,13 @@ static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
-static struct bin_attribute *ccacipher_attrs[] = {
+static const struct bin_attribute *const ccacipher_attrs[] = {
&bin_attr_ccacipher_aes_128,
&bin_attr_ccacipher_aes_192,
&bin_attr_ccacipher_aes_256,
@@ -518,9 +518,9 @@ static struct bin_attribute *ccacipher_attrs[] = {
NULL
};
-static struct attribute_group ccacipher_attr_group = {
- .name = "ccacipher",
- .bin_attrs = ccacipher_attrs,
+static const struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs_new = ccacipher_attrs,
};
/*
@@ -570,7 +570,7 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ep11_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -580,7 +580,7 @@ static ssize_t ep11_aes_128_read(struct file *filp,
static ssize_t ep11_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -590,7 +590,7 @@ static ssize_t ep11_aes_192_read(struct file *filp,
static ssize_t ep11_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -600,7 +600,7 @@ static ssize_t ep11_aes_256_read(struct file *filp,
static ssize_t ep11_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -610,7 +610,7 @@ static ssize_t ep11_aes_128_xts_read(struct file *filp,
static ssize_t ep11_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -618,13 +618,13 @@ static ssize_t ep11_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static struct bin_attribute *ep11_attrs[] = {
+static const struct bin_attribute *const ep11_attrs[] = {
&bin_attr_ep11_aes_128,
&bin_attr_ep11_aes_192,
&bin_attr_ep11_aes_256,
@@ -633,9 +633,9 @@ static struct bin_attribute *ep11_attrs[] = {
NULL
};
-static struct attribute_group ep11_attr_group = {
+static const struct attribute_group ep11_attr_group = {
.name = "ep11",
- .bin_attrs = ep11_attrs,
+ .bin_attrs_new = ep11_attrs,
};
const struct attribute_group *pkey_attr_groups[] = {
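The pkey hunks above follow the sysfs migration to const binary attributes: read handlers take a const struct bin_attribute, get wired up through the transitional .read_new member, and the attribute array lands in the group's .bin_attrs_new. A minimal sketch of the destination shape (all demo_* names are hypothetical, not part of the patch):

#include <linux/string.h>
#include <linux/sysfs.h>

static char demo_payload[16];

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	/* copy at most count bytes of the payload starting at off */
	return memory_read_from_buffer(buf, count, &off,
				       attr->private, attr->size);
}

static const struct bin_attribute bin_attr_demo = {
	.attr		= { .name = "demo", .mode = 0444 },
	.size		= sizeof(demo_payload),
	.private	= demo_payload,
	.read_new	= demo_read,	/* const-aware callback member */
};

static const struct bin_attribute *const demo_attrs[] = {
	&bin_attr_demo,
	NULL
};

static const struct attribute_group demo_attr_group = {
	.name		= "demo",
	.bin_attrs_new	= demo_attrs,	/* const-aware array member */
};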
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index e36e3ea165d3..2f34761e6413 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -588,6 +588,15 @@ out:
return ret;
}
+static void ism_dev_release(struct device *dev)
+{
+ struct ism_dev *ism;
+
+ ism = container_of(dev, struct ism_dev, dev);
+
+ kfree(ism);
+}
+
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ism_dev *ism;
@@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
+ ism->dev.release = ism_dev_release;
device_initialize(&ism->dev);
dev_set_name(&ism->dev, dev_name(&pdev->dev));
ret = device_add(&ism->dev);
@@ -637,7 +647,7 @@ err:
device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
return ret;
}
@@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
}
static struct pci_driver ism_driver = {
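The ism change is the standard fix for an embedded struct device: once device_initialize() has run, the refcount owns the memory, so error and remove paths must drop it with put_device() and let the release callback do the kfree(). A minimal sketch of that pattern, assuming a hypothetical demo_dev wrapper:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	struct device dev;	/* embedded, refcounted */
	int state;
};

static void demo_dev_release(struct device *dev)
{
	/* the final put_device() lands here; only safe place to free */
	kfree(container_of(dev, struct demo_dev, dev));
}

static struct demo_dev *demo_dev_create(struct device *parent)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->dev.parent = parent;
	d->dev.release = demo_dev_release;
	device_initialize(&d->dev);	/* refcount is live from here on */
	if (dev_set_name(&d->dev, "demo0") || device_add(&d->dev)) {
		put_device(&d->dev);	/* not kfree(): release() frees */
		return NULL;
	}
	return d;
}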
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a3adaec5504e..20328d695ef9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -7050,14 +7050,16 @@ int qeth_open(struct net_device *dev)
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
- local_bh_disable();
qeth_for_each_output_queue(card, queue, i) {
netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
napi_enable(&queue->napi);
- napi_schedule(&queue->napi);
}
-
napi_enable(&card->napi);
+
+ local_bh_disable();
+ qeth_for_each_output_queue(card, queue, i) {
+ napi_schedule(&queue->napi);
+ }
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
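The qeth reorder reflects two rules: napi_schedule() has no effect on a NAPI instance that has not been enabled yet, and scheduling from process context should happen with bottom halves disabled so the pending NET_RX softirq runs as soon as local_bh_enable() is called. A condensed sketch of the resulting open sequence (demo_* names and the txq layout are hypothetical):

#include <linux/netdevice.h>

struct demo_txq {
	struct napi_struct napi;
};

struct demo_priv {
	int n_txq;
	struct demo_txq txq[4];
	struct napi_struct rx_napi;
};

static int demo_tx_poll(struct napi_struct *napi, int budget);

static int demo_open(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);
	int i;

	/* enable every instance first ... */
	for (i = 0; i < priv->n_txq; i++) {
		netif_napi_add_tx(dev, &priv->txq[i].napi, demo_tx_poll);
		napi_enable(&priv->txq[i].napi);
	}
	napi_enable(&priv->rx_napi);

	/* ... then kick them with BHs off so the softirq fires
	 * right at local_bh_enable()
	 */
	local_bh_disable();
	for (i = 0; i < priv->n_txq; i++)
		napi_schedule(&priv->txq[i].napi);
	napi_schedule(&priv->rx_napi);
	local_bh_enable();
	return 0;
}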
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d6516ab00437..1d50f463afe7 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -537,6 +537,11 @@ static void zfcp_fc_adisc_handler(void *data)
/* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	/*
+	 * The port ref comes from get_device() in zfcp_fc_test_link(); the
+	 * zfcp_fc_link_test_work() work item passes that ref via
+	 * zfcp_fc_adisc() to here, provided zfcp_fc_adisc() was able to
+	 * send the ADISC.
+	 */
put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
@@ -603,7 +608,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
retval = zfcp_fc_adisc(port);
if (retval == 0)
- return;
+ return; /* port ref passed to zfcp_fc_adisc(), no put here */
/* send of ADISC was not possible */
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
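The comments added in these zfcp hunks document a reference hand-off: the side that takes get_device() is not the side that drops it, so every exit path has to account for who currently owns the ref. A reduced sketch of the convention (demo_* names are hypothetical; demo_send_adisc() stands in for the submit path):

#include <linux/device.h>
#include <linux/workqueue.h>

struct demo_port {
	struct device dev;
	struct work_struct work;
};

static int demo_send_adisc(struct demo_port *port);

/* entered holding the ref taken with get_device(&port->dev) */
static void demo_link_test_work(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, work);

	if (demo_send_adisc(port) == 0)
		return;	/* ref handed on to demo_adisc_handler() */

	put_device(&port->dev);	/* send failed: still ours, drop it */
}

static void demo_adisc_handler(void *data)
{
	struct demo_port *port = data;

	/* ... evaluate the response ... */
	put_device(&port->dev);	/* drop the ref taken at submit time */
}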
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 22e82000334a..99d6b3f8692b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1218,7 +1218,7 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
/**
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
* @wka_port: pointer to zfcp WKA port to send CT/GS to
- * @ct: pointer to struct zfcp_send_ct with data for request
+ * @ct: pointer to struct zfcp_fsf_ct_els with data for CT request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
* @timeout: timeout that hardware should use, and a later software timeout
*/
@@ -1316,7 +1316,7 @@ skip_fsfstatus:
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
* @adapter: pointer to zfcp adapter
* @d_id: N_Port_ID to send ELS to
- * @els: pointer to struct zfcp_send_els with data for the command
+ * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
* @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b2a8cd792266..b31f860af47b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -37,11 +37,11 @@ static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
-static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+static void zfcp_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- /* if previous slave_alloc returned early, there is nothing to do */
+ /* if previous sdev_init returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;
@@ -49,7 +49,8 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
put_device(&zfcp_sdev->port->dev);
}
-static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
+static int zfcp_scsi_sdev_configure(struct scsi_device *sdp,
+ struct queue_limits *lim)
{
if (sdp->tagged_supported)
scsi_change_queue_depth(sdp, default_depth);
@@ -110,7 +111,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
return ret;
}
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
+static int zfcp_scsi_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
@@ -427,9 +428,9 @@ static const struct scsi_host_template zfcp_scsi_host_template = {
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
+ .sdev_init = zfcp_scsi_sdev_init,
+ .sdev_configure = zfcp_scsi_sdev_configure,
+ .sdev_destroy = zfcp_scsi_sdev_destroy,
.change_queue_depth = scsi_change_queue_depth,
.host_reset = zfcp_scsi_sysfs_host_reset,
.proc_name = "zfcp",
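Every ->slave_* rename in this series lands on the same template shape, and ->sdev_configure() additionally receives the request-queue limits so a driver can adjust them before the block layer applies them. A sketch of the new-style hooks (demo_* names hypothetical; the limit tweak is only an example, and template fields unrelated to the hooks are omitted):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int demo_sdev_init(struct scsi_device *sdev)
{
	/* allocate per-LUN driver state before the device is probed */
	return 0;
}

static int demo_sdev_configure(struct scsi_device *sdev,
			       struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;	/* example limit adjustment */
	scsi_change_queue_depth(sdev, 32);
	return 0;
}

static void demo_sdev_destroy(struct scsi_device *sdev)
{
	/* free whatever demo_sdev_init() allocated */
}

static const struct scsi_host_template demo_template = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.sdev_init	= demo_sdev_init,
	.sdev_configure	= demo_sdev_configure,
	.sdev_destroy	= demo_sdev_destroy,
};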
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 304b81bb5f90..41e36af35488 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -284,7 +284,7 @@ static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
goto unlock_host_lock;
}
- /* port is about to be removed, so no more unit_add or slave_alloc */
+ /* port is about to be removed, so no more unit_add or sdev_init */
zfcp_sysfs_port_set_removing(port);
in_use = false;
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 60f2a04f0869..4ef2a635d34f 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -170,7 +170,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
write_unlock_irq(&port->unit_list_lock);
/*
* lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
- * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+ * due to zfcp_unit_scsi_scan() => zfcp_scsi_sdev_init()
*/
mutex_unlock(&zfcp_sysfs_port_units_mutex);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 6fb61c88ea11..883d4a12a172 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1968,13 +1968,14 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
} /* End twa_string_lookup() */
/* This function gets called when a disk is coming on-line */
-static int twa_slave_configure(struct scsi_device *sdev)
+static int twa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twa_slave_configure() */
+} /* End twa_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1984,7 +1985,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twa_slave_configure,
+ .sdev_configure = twa_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2260,7 +2261,7 @@ out_disable_device:
} /* End twa_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twa_pci_tbl[] = {
+static const struct pci_device_id twa_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index caa6713a62a4..e057ab9c7b90 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -96,7 +96,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
/* This function returns AENs through sysfs */
static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -116,18 +116,18 @@ static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_aen_read() */
/* aen_read sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_aen_read_attr = {
+static const struct bin_attribute twl_sysfs_aen_read_attr = {
.attr = {
.name = "3ware_aen_read",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_aen_read
+ .read_new = twl_sysfs_aen_read
};
/* This function returns driver compatibility info through sysfs */
static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -147,13 +147,13 @@ static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_compat_info() */
/* compat_info sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_compat_info_attr = {
+static const struct bin_attribute twl_sysfs_compat_info_attr = {
.attr = {
.name = "3ware_compat_info",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_compat_info
+ .read_new = twl_sysfs_compat_info
};
/* Show some statistics about the card */
@@ -1523,13 +1523,14 @@ static void twl_shutdown(struct pci_dev *pdev)
} /* End twl_shutdown() */
/* This function configures unit settings when a unit is coming on-line */
-static int twl_slave_configure(struct scsi_device *sdev)
+static int twl_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twl_slave_configure() */
+} /* End twl_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1539,7 +1540,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twl_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twl_slave_configure,
+ .sdev_configure = twl_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -1821,7 +1822,7 @@ out_disable_device:
} /* End twl_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twl_pci_tbl[] = {
+static const struct pci_device_id twl_pci_tbl[] = {
{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
{ }
};
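The pci_device_id constifications that recur from here on are mechanical: the PCI core only reads the table, so marking it const lets it live in rodata. The general shape, for reference (the IDs below are hypothetical):

#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);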
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2c0fb6da0e60..89bd56f78ef9 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -172,7 +172,7 @@
Initialize queues correctly when loading with no valid units.
1.02.00.034 - Fix tw_decode_bits() to handle multiple errors.
Add support for user configurable cmd_per_lun.
- Add support for sht->slave_configure().
+ Add support for sht->sdev_configure().
1.02.00.035 - Improve tw_allocate_memory() memory allocation.
Fix tw_chrdev_ioctl() to sleep correctly.
1.02.00.036 - Increase character ioctl timeout to 60 seconds.
@@ -2221,13 +2221,13 @@ static void tw_shutdown(struct pci_dev *pdev)
} /* End tw_shutdown() */
/* This function gets called when a disk is coming online */
-static int tw_slave_configure(struct scsi_device *sdev)
+static int tw_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End tw_slave_configure() */
+} /* End tw_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -2237,7 +2237,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = tw_slave_configure,
+ .sdev_configure = tw_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2393,7 +2393,7 @@ static void tw_remove(struct pci_dev *pdev)
} /* End tw_remove() */
/* PCI Devices supported by this driver */
-static struct pci_device_id tw_pci_tbl[] = {
+static const struct pci_device_id tw_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 85439e976143..71b7ac027f48 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -158,9 +158,10 @@ STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
-STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
-STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
-STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_init(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_configure(struct scsi_device *SDpnt,
+ struct queue_limits *lim);
+STATIC void NCR_700_sdev_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
STATIC const struct attribute_group *NCR_700_dev_groups[];
@@ -330,9 +331,9 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
- tpnt->slave_configure = NCR_700_slave_configure;
- tpnt->slave_destroy = NCR_700_slave_destroy;
- tpnt->slave_alloc = NCR_700_slave_alloc;
+ tpnt->sdev_configure = NCR_700_sdev_configure;
+ tpnt->sdev_destroy = NCR_700_sdev_destroy;
+ tpnt->sdev_init = NCR_700_sdev_init;
tpnt->change_queue_depth = NCR_700_change_queue_depth;
if(tpnt->name == NULL)
@@ -2017,7 +2018,7 @@ NCR_700_set_offset(struct scsi_target *STp, int offset)
}
STATIC int
-NCR_700_slave_alloc(struct scsi_device *SDp)
+NCR_700_sdev_init(struct scsi_device *SDp)
{
SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
GFP_KERNEL);
@@ -2029,7 +2030,7 @@ NCR_700_slave_alloc(struct scsi_device *SDp)
}
STATIC int
-NCR_700_slave_configure(struct scsi_device *SDp)
+NCR_700_sdev_configure(struct scsi_device *SDp, struct queue_limits *lim)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
@@ -2052,7 +2053,7 @@ NCR_700_slave_configure(struct scsi_device *SDp)
}
STATIC void
-NCR_700_slave_destroy(struct scsi_device *SDp)
+NCR_700_sdev_destroy(struct scsi_device *SDp)
{
kfree(SDp->hostdata);
SDp->hostdata = NULL;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 2135a2b3e2d0..1f100270cd38 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2153,14 +2153,15 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
}
/*
- blogic_slaveconfig will actually set the queue depth on individual
+ blogic_sdev_configure will actually set the queue depth on individual
scsi devices as they are permanently added to the device chain. We
shamelessly rip off the SelectQueueDepths code to make this work mostly
like it used to. Since we don't get called once at the end of the scan
but instead get called for each device, we have to do things a bit
differently.
*/
-static int blogic_slaveconfig(struct scsi_device *dev)
+static int blogic_sdev_configure(struct scsi_device *dev,
+ struct queue_limits *lim)
{
struct blogic_adapter *adapter =
(struct blogic_adapter *) dev->host->hostdata;
@@ -3672,7 +3673,7 @@ static const struct scsi_host_template blogic_template = {
.name = "BusLogic",
.info = blogic_drvr_info,
.queuecommand = blogic_qcmd,
- .slave_configure = blogic_slaveconfig,
+ .sdev_configure = blogic_sdev_configure,
.bios_param = blogic_diskparam,
.eh_host_reset_handler = blogic_hostreset,
#if 0
@@ -3715,7 +3716,7 @@ static void __exit blogic_exit(void)
__setup("BusLogic=", blogic_setup);
#ifdef MODULE
-/*static struct pci_device_id blogic_pci_tbl[] = {
+/*static const struct pci_device_id blogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 7d1ec10f2430..61bf26d4fc10 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1274,7 +1274,8 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
static const char *blogic_drvr_info(struct Scsi_Host *);
static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
-static int blogic_slaveconfig(struct scsi_device *);
+static int blogic_sdev_configure(struct scsi_device *,
+ struct queue_limits *lim);
static void blogic_qcompleted_ccb(struct blogic_ccb *);
static irqreturn_t blogic_inthandler(int, void *);
static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b95147fb18b0..a8979f9e30ff 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1206,7 +1206,7 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_host_put(shost);
}
-static struct pci_device_id inia100_pci_tbl[] = {
+static const struct pci_device_id inia100_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68f4dbcfff49..91170a67cc91 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -377,15 +377,17 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
}
/**
- * aac_slave_configure - compute queue depths
+ * aac_sdev_configure - compute queue depths
* @sdev: SCSI device we are considering
+ * @lim: Request queue limits
*
* Selects queue depths for each target device based on the host adapter's
* total capacity and the queue depth supported by the target device.
* A queue depth of one automatically disables tagged queueing.
*/
-static int aac_slave_configure(struct scsi_device *sdev)
+static int aac_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
int chn, tid;
@@ -1487,7 +1489,7 @@ static const struct scsi_host_template aac_driver_template = {
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
.shost_groups = aac_host_groups,
- .slave_configure = aac_slave_configure,
+ .sdev_configure = aac_sdev_configure,
.change_queue_depth = aac_change_queue_depth,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index fd4fcb37863d..3a2c336307c0 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -4496,7 +4496,7 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5013,7 +5013,7 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5508,7 +5508,7 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -7219,7 +7219,7 @@ static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
}
static void
-advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
+advansys_narrow_sdev_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
{
ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;
@@ -7345,7 +7345,7 @@ static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
}
static void
-advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
+advansys_wide_sdev_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
{
AdvPortAddr iop_base = adv_dvc->iop_base;
unsigned short tidmask = 1 << sdev->id;
@@ -7391,16 +7391,17 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
* Set the number of commands to queue per device for the
* specified host adapter.
*/
-static int advansys_slave_configure(struct scsi_device *sdev)
+static int advansys_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct asc_board *boardp = shost_priv(sdev->host);
if (ASC_NARROW_BOARD(boardp))
- advansys_narrow_slave_configure(sdev,
- &boardp->dvc_var.asc_dvc_var);
+ advansys_narrow_sdev_configure(sdev,
+ &boardp->dvc_var.asc_dvc_var);
else
- advansys_wide_slave_configure(sdev,
- &boardp->dvc_var.adv_dvc_var);
+ advansys_wide_sdev_configure(sdev,
+ &boardp->dvc_var.adv_dvc_var);
return 0;
}
@@ -10612,7 +10613,7 @@ static const struct scsi_host_template advansys_template = {
.queuecommand = advansys_queuecommand,
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
- .slave_configure = advansys_slave_configure,
+ .sdev_configure = advansys_sdev_configure,
.cmd_size = sizeof(struct advansys_cmd),
};
@@ -11408,7 +11409,7 @@ static struct eisa_driver advansys_eisa_driver = {
};
/* PCI Devices supported by this driver */
-static struct pci_device_id advansys_pci_tbl[] = {
+static const struct pci_device_id advansys_pci_tbl[] = {
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4202059815a0..17dfc3c72110 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -672,7 +672,7 @@ ahd_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahd_linux_slave_alloc(struct scsi_device *sdev)
+ahd_linux_sdev_init(struct scsi_device *sdev)
{
struct ahd_softc *ahd =
*((struct ahd_softc **)sdev->host->hostdata);
@@ -701,7 +701,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahd_linux_slave_configure(struct scsi_device *sdev)
+ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -906,8 +906,8 @@ struct scsi_host_template aic79xx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahd_linux_slave_alloc,
- .slave_configure = ahd_linux_slave_configure,
+ .sdev_init = ahd_linux_sdev_init,
+ .sdev_configure = ahd_linux_sdev_configure,
.target_alloc = ahd_linux_target_alloc,
.target_destroy = ahd_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b0c4f2345321..cebf8c5d0caf 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -632,7 +632,7 @@ ahc_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahc_linux_slave_alloc(struct scsi_device *sdev)
+ahc_linux_sdev_init(struct scsi_device *sdev)
{
struct ahc_softc *ahc =
*((struct ahc_softc **)sdev->host->hostdata);
@@ -664,7 +664,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahc_linux_slave_configure(struct scsi_device *sdev)
+ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -791,8 +791,8 @@ struct scsi_host_template aic7xxx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahc_linux_slave_alloc,
- .slave_configure = ahc_linux_slave_configure,
+ .sdev_init = ahc_linux_sdev_init,
+ .sdev_configure = ahc_linux_sdev_configure,
.target_alloc = ahc_linux_target_alloc,
.target_destroy = ahc_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 65182ad9cdf8..b1c9ce477cbd 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -102,6 +102,7 @@ static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
static int is_location_address(symbol_t *symbol);
+int yylex();
void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 8c0479865f04..5c7350eb5b5c 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -61,6 +61,7 @@
static symbol_t *macro_symbol;
static void add_macro_arg(const char *argtext, int position);
+int mmlex();
void mmerror(const char *string);
%}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index c78d4f68eea5..fc7e6c58148d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -64,6 +64,9 @@ static char *string_buf_ptr;
static int parren_count;
static int quote_count;
static char buf[255];
+void mm_switch_to_buffer(YY_BUFFER_STATE);
+void mmparse();
+void mm_delete_buffer(YY_BUFFER_STATE);
%}
PATH ([/]*[-A-Za-z0-9_.])+
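The aicasm additions declare the entry points that flex and bison generate into sibling sources; without prototypes, calling them is an implicit function declaration, which recent compilers reject as a hard error. A sketch of a grammar prologue carrying such declarations (mirroring the hunks above):

%{
/* generated in the sibling lexer/parser sources, so the
 * prologue must declare them before use
 */
int yylex();
void yyerror(const char *string);
%}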
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index fbb29dbb1e50..003e61831e33 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -513,7 +513,7 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_host_put(esp->host);
}
-static struct pci_device_id am53c974_pci_tbl[] = {
+static const struct pci_device_id am53c974_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ }
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index baeb5e795690..8e3d4799ce93 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -60,7 +60,7 @@
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -107,7 +107,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -155,7 +155,7 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static const struct bin_attribute arcmsr_sysfs_message_read_attr = {
.mode = S_IRUSR ,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .read = arcmsr_sysfs_iop_message_read,
+ .read_new = arcmsr_sysfs_iop_message_read,
};
static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
@@ -203,7 +203,7 @@ static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
.mode = S_IWUSR,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .write = arcmsr_sysfs_iop_message_write,
+ .write_new = arcmsr_sysfs_iop_message_write,
};
static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
@@ -212,7 +212,7 @@ static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.mode = S_IWUSR,
},
.size = 1,
- .write = arcmsr_sysfs_iop_message_clear,
+ .write_new = arcmsr_sysfs_iop_message_clear,
};
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 35860c61468b..221a520e8a9b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -143,7 +143,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
-static int arcmsr_slave_config(struct scsi_device *sdev);
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -160,7 +161,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
- .slave_configure = arcmsr_slave_config,
+ .sdev_configure = arcmsr_sdev_configure,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
@@ -171,7 +172,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.no_write_same = 1,
};
-static struct pci_device_id arcmsr_device_id_table[] = {
+static const struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
@@ -1044,7 +1045,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
- pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
add_timer(&pacb->refresh_timer);
}
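secs_to_jiffies(), used in the arcmsr hunk above, is the readability helper for whole-second intervals and avoids the msecs_to_jiffies(n * 1000) idiom. Side by side, in a hypothetical demo function:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_arm_refresh(struct timer_list *t)
{
	/* equivalent, but the second form states the unit directly */
	t->expires = jiffies + msecs_to_jiffies(60 * 1000);
	t->expires = jiffies + secs_to_jiffies(60);
	add_timer(t);
}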
@@ -3344,7 +3345,8 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static int arcmsr_slave_config(struct scsi_device *sdev)
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
unsigned int dev_timeout;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 928151ec927a..401242912855 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1743,7 +1743,7 @@ static const struct scsi_host_template atp870u_template = {
.max_sectors = ATP870U_MAX_SECTORS,
};
-static struct pci_device_id atp870u_id_table[] = {
+static const struct pci_device_id atp870u_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 66fb701401de..a719a18f0fbc 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -25,7 +25,7 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
-static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static int bfad_im_sdev_init(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
@@ -404,10 +404,10 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
}
/*
- * Scsi_Host template entry slave_destroy.
+ * Scsi_Host template entry sdev_destroy.
*/
static void
-bfad_im_slave_destroy(struct scsi_device *sdev)
+bfad_im_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
return;
@@ -783,7 +783,7 @@ bfad_thread_workq(struct bfad_s *bfad)
* Return non-zero if fails.
*/
static int
-bfad_im_slave_configure(struct scsi_device *sdev)
+bfad_im_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
return 0;
@@ -800,9 +800,9 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -823,9 +823,9 @@ struct scsi_host_template bfad_im_vport_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -915,7 +915,7 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
- * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Function is invoked from the SCSI Host Template sdev_init() entry point.
* Has the logic to query the LUN Mask database to check if this LUN needs to
* be made visible to the SCSI mid-layer or not.
*
@@ -946,10 +946,10 @@ bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
}
/*
- * Scsi_Host template entry slave_alloc
+ * Scsi_Host template entry sdev_init
*/
static int
-bfad_im_slave_alloc(struct scsi_device *sdev)
+bfad_im_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct bfad_itnim_data_s *itnim_data;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f49783b89d04..5ac20c93637c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2610,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2fc_percpu, cpu);
- thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
- (void *)p, cpu_to_node(cpu),
- "bnx2fc_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
+ (void *)p, cpu, "bnx2fc_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@@ -2652,7 +2649,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
return 0;
}
-static int bnx2fc_slave_configure(struct scsi_device *sdev)
+static int bnx2fc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (!bnx2fc_queue_depth)
return 0;
@@ -2951,7 +2949,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -2959,7 +2957,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.dma_boundary = 0x7fff,
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
- .slave_configure = bnx2fc_slave_configure,
+ .sdev_configure = bnx2fc_sdev_configure,
.shost_groups = bnx2fc_host_groups,
.cmd_size = sizeof(struct bnx2fc_priv),
};
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 872ad37e2a6e..cecc3a026762 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -415,14 +415,11 @@ static int bnx2i_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2i_percpu, cpu);
- thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "bnx2i_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p,
+ cpu, "bnx2i_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
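Both the bnx2fc and bnx2i hunks swap the two-step create-then-bind for kthread_create_on_cpu(), which allocates on the right NUMA node and binds in one call; its name format receives the CPU number implicitly, which is why the trailing cpu argument disappears from the call. A sketch (demo_* names hypothetical):

#include <linux/kthread.h>

static int demo_io_thread(void *data)
{
	/* per-CPU worker loop would live here */
	return 0;
}

static int demo_cpu_online(unsigned int cpu)
{
	struct task_struct *thread;

	/* creates on cpu's node, binds to cpu, names it demo/<cpu> */
	thread = kthread_create_on_cpu(demo_io_thread, NULL, cpu,
				       "demo/%u");
	if (IS_ERR(thread))
		return PTR_ERR(thread);
	wake_up_process(thread);
	return 0;
}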
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 8329f0cab4e7..34bde6650fae 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -800,7 +800,7 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
rn = req->rnode;
/*
* FW says remote device is lost, but rnode
- * doesnt reflect it.
+ * doesn't reflect it.
*/
if (csio_scsi_itnexus_loss_error(req->wr_status) &&
csio_is_rnode_ready(rn)) {
@@ -2224,7 +2224,7 @@ fail:
}
static int
-csio_slave_alloc(struct scsi_device *sdev)
+csio_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2237,14 +2237,14 @@ csio_slave_alloc(struct scsi_device *sdev)
}
static int
-csio_slave_configure(struct scsi_device *sdev)
+csio_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, csio_lun_qdepth);
return 0;
}
static void
-csio_slave_destroy(struct scsi_device *sdev)
+csio_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -2276,9 +2276,9 @@ struct scsi_host_template csio_fcoe_shost_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
@@ -2295,9 +2295,9 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index 5533bdcb0458..c424d36e89a6 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,10 +4,12 @@
#
config CXLFLASH
- tristate "Support for IBM CAPI Flash"
+ tristate "Support for IBM CAPI Flash (DEPRECATED)"
depends on PCI && SCSI && (CXL || OCXL) && EEH
select IRQ_POLL
- default m
help
+ The cxlflash driver is deprecated and will be removed in a future
+ kernel release.
+
Allows CAPI Accelerated IO to Flash
If unsure, say N.
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 60d62b93d624..ae626e389c8b 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3177,7 +3177,7 @@ static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
/*
* PCI device binding table
*/
-static struct pci_device_id cxlflash_pci_table[] = {
+static const struct pci_device_id cxlflash_pci_table[] = {
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
@@ -3651,6 +3651,8 @@ static int cxlflash_probe(struct pci_dev *pdev,
int rc = 0;
int k;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
+
dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
__func__, pdev->irq);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index b375509d1470..97631f48e19d 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
*
* This routine is the release handler for the fops registered with
* the CXL services on an initial attach for a context. It is called
- * when a close (explicity by the user or as part of a process tear
+ * when a close (explicitly by the user or as part of a process tear
* down) is performed on the adapter file descriptor returned to the
* user. The user should be aware that explicitly performing a close
* considered catastrophic and subsequent usage of the superpipe API
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index d108a86e196e..e71de2419758 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3715,13 +3715,13 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
/**
- * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
+ * dc395x_sdev_init - Called by the scsi mid layer to tell us about a new
* scsi device that we need to deal with. We allocate a new device and then
* insert that device into the adapters device list.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static int dc395x_slave_alloc(struct scsi_device *scsi_device)
+static int dc395x_sdev_init(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb;
@@ -3736,12 +3736,12 @@ static int dc395x_slave_alloc(struct scsi_device *scsi_device)
/**
- * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
+ * dc395x_sdev_destroy - Called by the scsi mid layer to tell us about a
* device that is going away.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static void dc395x_slave_destroy(struct scsi_device *scsi_device)
+static void dc395x_sdev_destroy(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
@@ -4547,8 +4547,8 @@ static const struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .slave_alloc = dc395x_slave_alloc,
- .slave_destroy = dc395x_slave_destroy,
+ .sdev_init = dc395x_sdev_init,
+ .sdev_destroy = dc395x_sdev_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
.this_id = 7,
.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
@@ -4668,7 +4668,7 @@ static void dc395x_remove_one(struct pci_dev *dev)
}
-static struct pci_device_id dc395x_pci_table[] = {
+static const struct pci_device_id dc395x_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_TEKRAM,
.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index dfb091d34363..d6d091b2f3c7 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -127,7 +127,7 @@ static void dmx3191d_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id dmx3191d_pci_tbl[] = {
+static const struct pci_device_id dmx3191d_pci_tbl[] = {
{PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ }
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 55d2301bfd7d..8469c156ab33 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -470,7 +470,7 @@ out:
return rc;
}
-static struct pci_device_id efct_pci_table[] = {
+static const struct pci_device_id efct_pci_table[] = {
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
{} /* terminate list */
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 1e2d7c63a8e3..c48275d53aef 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -1411,11 +1411,11 @@ static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
}
/* sysfs handlers */
-extern struct bin_attribute bin_attr_fw;
-extern struct bin_attribute bin_attr_fs;
-extern struct bin_attribute bin_attr_vda;
-extern struct bin_attribute bin_attr_hw;
-extern struct bin_attribute bin_attr_live_nvram;
-extern struct bin_attribute bin_attr_default_nvram;
+extern const struct bin_attribute bin_attr_fw;
+extern const struct bin_attribute bin_attr_fs;
+extern const struct bin_attribute bin_attr_vda;
+extern const struct bin_attribute bin_attr_hw;
+extern const struct bin_attribute bin_attr_live_nvram;
+extern const struct bin_attribute bin_attr_default_nvram;
#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f700a16cd885..44871746944a 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -66,7 +66,7 @@ static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
}
static ssize_t read_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -75,7 +75,7 @@ static ssize_t read_fw(struct file *file, struct kobject *kobj,
}
static ssize_t write_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -84,7 +84,7 @@ static ssize_t write_fw(struct file *file, struct kobject *kobj,
}
static ssize_t read_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -93,7 +93,7 @@ static ssize_t read_fs(struct file *file, struct kobject *kobj,
}
static ssize_t write_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -109,7 +109,7 @@ static ssize_t write_fs(struct file *file, struct kobject *kobj,
}
static ssize_t read_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -118,7 +118,7 @@ static ssize_t read_vda(struct file *file, struct kobject *kobj,
}
static ssize_t write_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -127,7 +127,7 @@ static ssize_t write_vda(struct file *file, struct kobject *kobj,
}
static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -138,7 +138,7 @@ static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -158,7 +158,7 @@ static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -169,7 +169,7 @@ static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -187,7 +187,7 @@ static ssize_t read_hw(struct file *file, struct kobject *kobj,
}
static ssize_t write_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -211,12 +211,12 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
}
#define ESAS2R_RW_BIN_ATTR(_name) \
- struct bin_attribute bin_attr_ ## _name = { \
+ const struct bin_attribute bin_attr_ ## _name = { \
.attr = \
{ .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
.size = 0, \
- .read = read_ ## _name, \
- .write = write_ ## _name }
+ .read_new = read_ ## _name, \
+ .write_new = write_ ## _name }
ESAS2R_RW_BIN_ATTR(fw);
ESAS2R_RW_BIN_ATTR(fs);
@@ -224,10 +224,10 @@ ESAS2R_RW_BIN_ATTR(vda);
ESAS2R_RW_BIN_ATTR(hw);
ESAS2R_RW_BIN_ATTR(live_nvram);
-struct bin_attribute bin_attr_default_nvram = {
+const struct bin_attribute bin_attr_default_nvram = {
.attr = { .name = "default_nvram", .mode = S_IRUGO },
.size = 0,
- .read = read_default_nvram,
+ .read_new = read_default_nvram,
.write = NULL
};
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 0175d2282b45..802718ffad84 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2261,7 +2261,7 @@ static void esp_init_swstate(struct esp *esp)
INIT_LIST_HEAD(&esp->active_cmds);
INIT_LIST_HEAD(&esp->esp_cmd_pool);
- /* Start with a clear state, domain validation (via ->slave_configure,
+ /* Start with a clear state, domain validation (via ->sdev_configure,
* spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
* commands.
*/
@@ -2441,7 +2441,7 @@ static void esp_target_destroy(struct scsi_target *starget)
tp->starget = NULL;
}
-static int esp_slave_alloc(struct scsi_device *dev)
+static int esp_sdev_init(struct scsi_device *dev)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2463,7 +2463,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
return 0;
}
-static int esp_slave_configure(struct scsi_device *dev)
+static int esp_sdev_configure(struct scsi_device *dev, struct queue_limits *lim)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2479,7 +2479,7 @@ static int esp_slave_configure(struct scsi_device *dev)
return 0;
}
-static void esp_slave_destroy(struct scsi_device *dev)
+static void esp_sdev_destroy(struct scsi_device *dev)
{
struct esp_lun_data *lp = dev->hostdata;
@@ -2667,9 +2667,9 @@ const struct scsi_host_template scsi_esp_template = {
.queuecommand = esp_queuecommand,
.target_alloc = esp_target_alloc,
.target_destroy = esp_target_destroy,
- .slave_alloc = esp_slave_alloc,
- .slave_configure = esp_slave_configure,
- .slave_destroy = esp_slave_destroy,
+ .sdev_init = esp_sdev_init,
+ .sdev_configure = esp_sdev_configure,
+ .sdev_destroy = esp_sdev_destroy,
.eh_abort_handler = esp_eh_abort_handler,
.eh_bus_reset_handler = esp_eh_bus_reset_handler,
.eh_host_reset_handler = esp_eh_host_reset_handler,
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 00cd7c0ccc76..7bb0b69bff24 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -80,7 +80,7 @@
/* ESP config register 4 read-write */
#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */
-#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
+#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */
#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */
#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 39aec710660c..038e38578676 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -269,7 +269,7 @@ static const struct scsi_host_template fcoe_shost_template = {
.eh_abort_handler = fc_eh_abort,
.eh_device_reset_handler = fc_eh_device_reset,
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c
index 3e05ce7b89e5..c15b2ce76e9f 100644
--- a/drivers/scsi/fdomain_pci.c
+++ b/drivers/scsi/fdomain_pci.c
@@ -47,7 +47,7 @@ static void fdomain_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id fdomain_pci_table[] = {
+static const struct pci_device_id fdomain_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
{}
};
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 6214a6b2e96d..c025e875009e 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -2,11 +2,13 @@
obj-$(CONFIG_FCOE_FNIC) += fnic.o
fnic-y := \
+ fip.o \
fnic_attrs.o \
fnic_isr.o \
fnic_main.o \
fnic_res.o \
fnic_fcs.o \
+ fdls_disc.o \
fnic_scsi.o \
fnic_trace.o \
fnic_debugfs.o \
@@ -15,4 +17,5 @@ fnic-y := \
vnic_intr.o \
vnic_rq.o \
vnic_wq_copy.o \
- vnic_wq.o
+ vnic_wq.o \
+ fnic_pci_subsys_devid.o
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
new file mode 100644
index 000000000000..11211c469583
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -0,0 +1,4997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/workqueue.h>
+#include "fnic.h"
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_transport_fc.h>
+#include <linux/utsname.h>
+
+#define FC_FC4_TYPE_SCSI 0x08
+#define PORT_SPEED_BIT_8 8
+#define PORT_SPEED_BIT_9 9
+#define PORT_SPEED_BIT_14 14
+#define PORT_SPEED_BIT_15 15
+
+/* FNIC FDMI Register HBA Macros */
+#define FNIC_FDMI_NUM_PORTS 1
+#define FNIC_FDMI_NUM_HBA_ATTRS 9
+#define FNIC_FDMI_TYPE_NODE_NAME 0x1
+#define FNIC_FDMI_TYPE_MANUFACTURER 0x2
+#define FNIC_FDMI_MANUFACTURER "Cisco Systems"
+#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0x3
+#define FNIC_FDMI_TYPE_MODEL 0x4
+#define FNIC_FDMI_TYPE_MODEL_DES 0x5
+#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card"
+#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0x6
+#define FNIC_FDMI_TYPE_DRIVER_VERSION 0x7
+#define FNIC_FDMI_TYPE_ROM_VERSION 0x8
+#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0x9
+#define FNIC_FDMI_NN_LEN 8
+#define FNIC_FDMI_MANU_LEN 20
+#define FNIC_FDMI_SERIAL_LEN 16
+#define FNIC_FDMI_MODEL_LEN 12
+#define FNIC_FDMI_MODEL_DES_LEN 56
+#define FNIC_FDMI_HW_VER_LEN 16
+#define FNIC_FDMI_DR_VER_LEN 28
+#define FNIC_FDMI_ROM_VER_LEN 8
+#define FNIC_FDMI_FW_VER_LEN 16
+
+/* FNIC FDMI Register PA Macros */
+#define FNIC_FDMI_TYPE_FC4_TYPES 0x1
+#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0x2
+#define FNIC_FDMI_TYPE_CURRENT_SPEED 0x3
+#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0x4
+#define FNIC_FDMI_TYPE_OS_NAME 0x5
+#define FNIC_FDMI_TYPE_HOST_NAME 0x6
+#define FNIC_FDMI_NUM_PORT_ATTRS 6
+#define FNIC_FDMI_FC4_LEN 32
+#define FNIC_FDMI_SUPP_SPEED_LEN 4
+#define FNIC_FDMI_CUR_SPEED_LEN 4
+#define FNIC_FDMI_MFS_LEN 4
+#define FNIC_FDMI_MFS 0x800
+#define FNIC_FDMI_OS_NAME_LEN 16
+#define FNIC_FDMI_HN_LEN 24
+
+#define FDLS_FDMI_PLOGI_PENDING 0x1
+#define FDLS_FDMI_REG_HBA_PENDING 0x2
+#define FDLS_FDMI_RPA_PENDING 0x4
+#define FDLS_FDMI_ABORT_PENDING 0x8
+#define FDLS_FDMI_MAX_RETRY 3
+
+#define RETRIES_EXHAUSTED(iport) \
+ ((iport)->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY)
+
+#define FNIC_TPORT_MAX_NEXUS_RESTART (8)
+
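+/* Delay (in milliseconds) before retrying after a failed attempt to
+ * schedule a deferred oxid free; see fdls_schedule_oxid_free_retry_work().
+ */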
+#define SCHEDULE_OXID_FREE_RETRY_TIME (300)
+
+/* Private Functions */
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport);
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport);
+static void fdls_send_rpn_id(struct fnic_iport_s *iport);
+static void fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ void *rx_frame);
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport);
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport);
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid,
+ uint64_t wwpn);
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport);
+static void fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout);
+static void fdls_tport_timer_callback(struct timer_list *t);
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport);
+static void fdls_start_fabric_timer(struct fnic_iport_s *iport,
+ int timeout);
+static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport);
+
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *frame = NULL;
+
+ frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame");
+ return NULL;
+ }
+
+ memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ);
+ return frame;
+}
+
+/**
+ * fdls_alloc_oxid - Allocate an oxid from the bitmap-based oxid pool
+ * @iport: Handle to iport instance
+ * @oxid_frame_type: Type of frame to allocate
+ * @active_oxid: out: receives the allocated oxid now in use
+ *
+ * Called with fnic lock held
+ */
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ int idx;
+ uint16_t oxid;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ /*
+ * Allocate next available oxid from bitmap
+ */
+ idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx);
+ if (idx == FNIC_OXID_POOL_SZ) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Alloc oxid: all oxid slots are busy iport state:%d\n",
+ iport->state);
+ return FNIC_UNASSIGNED_OXID;
+ }
+
+ WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap));
+ oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */
+
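+ /*
+ * Encode the pool index together with the frame type so that the
+ * type of the original request can be recovered from the oxid of a
+ * response (see the fdls_is_oxid_*_req() helpers below).
+ */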
+ oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type);
+ *active_oxid = oxid;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "alloc oxid: 0x%x, iport state: %d\n",
+ oxid, iport->state);
+ return oxid;
+}
+
+/**
+ * fdls_free_oxid_idx - Free the oxid using the idx
+ * @iport: Handle to iport instance
+ * @oxid_idx: The index to free
+ *
+ * Free the oxid immediately and make it available for new requests
+ * Called with fnic lock held
+ */
+static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "free oxid idx: 0x%x\n", oxid_idx);
+
+ WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap));
+}
+
+/**
+ * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work
+ * @work: Handle to work_struct
+ *
+ * Scheduled when an oxid is to be freed later
+ * After freeing expired oxid(s), the handler schedules
+ * another callback with the remaining time
+ * of next unexpired entry in the reclaim list.
+ */
+void fdls_reclaim_oxid_handler(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, oxid_reclaim_work.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry, *next;
+ unsigned long delay_j, cur_jiffies;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reclaim oxid callback\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ /*
+ * Although the work was scheduled for one entry, walk the list and
+ * free any expired entries that may have been scheduled at around
+ * the same time as the first one.
+ */
+ list_for_each_entry_safe(reclaim_entry, next,
+ &(oxid_pool->oxid_reclaim_list), links) {
+
+ /* The list is always maintained in the order of expiry time */
+ cur_jiffies = jiffies;
+ if (time_before(cur_jiffies, reclaim_entry->expires))
+ break;
+
+ list_del(&reclaim_entry->links);
+ fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx);
+ kfree(reclaim_entry);
+ }
+
+ /* schedule to free up the next entry */
+ if (!list_empty(&oxid_pool->oxid_reclaim_list)) {
+ reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list,
+ struct reclaim_entry_s, links);
+
+ delay_j = reclaim_entry->expires - cur_jiffies;
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Scheduling next callback at:%ld jiffies\n", delay_j);
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+}
+
+/**
+ * fdls_free_oxid - Helper function to free the oxid
+ * @iport: Handle to iport instance
+ * @oxid: oxid to free
+ * @active_oxid: the oxid which is in use
+ *
+ * Called with fnic lock held
+ */
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid)
+{
+ fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid));
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+}
+
+/**
+ * fdls_schedule_oxid_free - Schedule oxid to be freed later
+ * @iport: Handle to iport instance
+ * @active_oxid: the oxid which is in use
+ *
+ * Called in the rare scenario where both a command (fdls or target
+ * discovery) and the subsequent ABTS time out without an intervening
+ * link change.
+ *
+ * Called with fnic lock held
+ */
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ int oxid_idx = FNIC_OXID_IDX(*active_oxid);
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid: 0x%x\n", *active_oxid);
+
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+
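+ /*
+ * Quarantine the oxid for OXID_RECLAIM_TOV before returning it to
+ * the pool, so a late response to the aborted exchange cannot be
+ * matched to a new request that reuses the same oxid.
+ */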
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_ATOMIC);
+
+ if (!reclaim_entry) {
+ FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for reclaim struct for oxid idx: %d\n",
+ oxid_idx);
+
+ /* Retry the scheduling */
+ WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free));
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0);
+ return;
+ }
+
+ reclaim_entry->oxid_idx = oxid_idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+}
+
+/**
+ * fdls_schedule_oxid_free_retry_work - Work handler to schedule the
+ * oxid to be freed later
+ *
+ * @work: Handle to the work struct
+ */
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, schedule_oxid_free_retry.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ int idx;
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid idx: %d\n", idx);
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ if (!reclaim_entry) {
+ FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n",
+ idx);
+
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
+ msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
+
+ if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) {
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ } else {
+ /* unlikely scenario, free the allocated memory and continue */
+ kfree(reclaim_entry);
+ }
+}
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+}
+
+static bool fdls_is_oxid_fabric_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_fdmi_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_tgt_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void fdls_reset_oxid_pool(struct fnic_iport_s *iport)
+{
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ oxid_pool->next_idx = 0;
+}
+
+void fnic_del_fabric_timer_sync(struct fnic *fnic)
+{
+ fnic->iport.fabric.del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ del_timer_sync(&fnic->iport.fabric.retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->iport.fabric.del_timer_inprogress = 0;
+}
+
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport)
+{
+ tport->del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ del_timer_sync(&tport->retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ tport->del_timer_inprogress = 0;
+}
+
+static void
+fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ iport->fabric.timer_pending = 0;
+ }
+
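+ /* An ABTS issued for a timed-out request does not consume a retry */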
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ iport->fabric.retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov));
+ iport->fabric.timer_pending = 1;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric timer is %d ", timeout);
+}
+
+static void
+fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED))
+ tport->retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&tport->retry_timer, round_jiffies(fabric_tov));
+ tport->timer_pending = 1;
+}
+
+void fdls_init_plogi_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_flogi *pplogi;
+ uint8_t s_id[3];
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
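+ /* The default d_id 0xFFFFFC is the directory (name) server; tport
+ * and FDMI PLOGIs overwrite it after calling this initializer.
+ */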
+ *pplogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_PLOGI,
+ .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_features = cpu_to_be16(FC_SP_FT_CIRO),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ),
+ .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS),
+ .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO),
+ .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)},
+ .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ),
+ .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800),
+ .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF),
+ .fl_cssp[2].cp_open_seq = 1}
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size);
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pplogi->fchdr, s_id);
+}
+
+static void fdls_init_els_acc_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint8_t s_id[3];
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_acc = (struct fc_std_els_acc_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP,
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .acc.la_cmd = ELS_LS_ACC,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id);
+ FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_els_rjt_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_rjt_rsp *pels_rjt;
+
+ pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_rjt = (struct fc_std_els_rjt_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .rej.er_cmd = ELS_LS_RJT,
+ };
+
+ FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_logo_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_logo *plogo;
+ uint8_t s_id[3];
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *plogo = (struct fc_std_logo) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}},
+ .els.fl_cmd = ELS_LOGO,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(plogo->fchdr, s_id);
+ memcpy(plogo->els.fl_n_port_id, s_id, 3);
+
+ FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn,
+ iport->wwpn);
+}
+
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_frame_header *pfabric_abts;
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pfabric_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_s_id = {0x00, 0x00, 0x00},
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort an exchange */
+ };
+}
+
+static void
+fdls_send_rscn_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *rscn_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RSCN response");
+ return;
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(rscn_fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RSCN response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_send_logo_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *req_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *plogo_resp;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send LOGO response");
+ return;
+ }
+
+ plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(req_fchdr);
+ FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send LOGO response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+void
+fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *ptport_abts;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send tport ABTS");
+ return;
+ }
+
+ ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *ptport_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort an exchange */
+ };
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_S_ID(*ptport_abts, s_id);
+ FNIC_STD_SET_D_ID(*ptport_abts, d_id);
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tport abts: tport->state: %d ",
+ iport->fcid, tport->state);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fabric_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *pfabric_abts;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric ABTS");
+ return;
+ }
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(s_id, iport->fcid);
+
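+ /*
+ * Address the ABTS to the well-known fabric address the pending
+ * request was sent to: 0xFFFFFE (F_Port controller) for FLOGI/LOGO,
+ * 0xFFFFFC (directory server) for name server requests, and
+ * 0xFFFFFD (fabric controller) for SCR.
+ */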
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_LOGO:
+ case FDLS_STATE_FABRIC_FLOGI:
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_FABRIC_PLOGI:
+ case FDLS_STATE_RPN_ID:
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ case FDLS_STATE_GPN_FT:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_SCR:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_FCTRL);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ default:
+ /* No fabric request outstanding in this state; free the frame
+ * instead of leaking it.
+ */
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+
+ oxid = iport->active_oxid_fabric_req;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, oxid);
+
+ iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *pfabric_abts;
+ unsigned long fdmi_tov;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI ABTS");
+ return;
+ }
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+
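+ /* If the FDMI PLOGI is still outstanding, abort only that exchange;
+ * otherwise abort whichever of RHBA and RPA is pending.
+ */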
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ oxid = iport->active_oxid_fdmi_plogi;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
+ oxid = iport->active_oxid_fdmi_rhba;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
+ oxid = iport->active_oxid_fdmi_rpa;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ }
+
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING;
+}
+
+static void fdls_send_fabric_flogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pflogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pflogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els.fl_cmd = ELS_FLOGI,
+ .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)},
+ .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size);
+ FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov);
+ FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_sent);
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fabric_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send PLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ u64 fdmi_tov;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI PLOGI");
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI,
+ &iport->active_oxid_fdmi_plogi);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
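+ /* FDMI requests are serviced by the management server (0xFFFFFA) */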
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING;
+}
+
+static void fdls_send_rpn_id(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rpn_id *prpn_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rpn_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RPN_ID");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpn_id = (struct fc_std_rpn_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid);
+
+ FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid);
+ FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RPN_ID",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_scr(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_scr *pscr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_scr);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send SCR");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
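+ /* SCR goes to the fabric controller (0xFFFFFD); ELS_SCRF_FULL
+ * registers for all RSCN event types.
+ */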
+ *pscr = (struct fc_std_scr) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .scr = {.scr_cmd = ELS_SCR,
+ .scr_reg_func = ELS_SCRF_FULL}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pscr->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send SCR",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pscr->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_scr_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state)
+{
+ uint8_t *frame;
+ struct fc_std_gpn_ft *pgpn_ft;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_gpn_ft);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send GPN FT");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pgpn_ft = (struct fc_std_gpn_ft) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)},
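+ /* Restrict the query to ports that registered FC-4 type FCP */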
+ .gpn_ft.fn_fc4_type = FC_FC4_TYPE_SCSI
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send GPN FT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+ fdls_set_state((&iport->fabric), fdls_state);
+}
+
+static void
+fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_adisc *padisc;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT ADISC");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
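+ /* ADISC revalidates an existing login (typically after an RSCN) by
+ * exchanging our N_Port ID, WWPN and WWNN with the target.
+ */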
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ memcpy(padisc->els.adisc_port_id, s_id, 3);
+ FNIC_STD_SET_S_ID(padisc->fchdr, s_id);
+ FNIC_STD_SET_D_ID(padisc->fchdr, d_id);
+
+ FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ);
+ FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT ADISC",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(padisc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn,
+ iport->wwnn);
+
+ padisc->els.adisc_cmd = ELS_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send ADISC to tgt fcid: 0x%x",
+ iport->fcid, tport->fcid);
+
+ atomic64_inc(&iport->iport_stats.tport_adisc_sent);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
+
+bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ struct fnic_tport_event_s *tport_del_evt;
+ struct fnic *fnic = iport->fnic;
+
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING)
+ || (tport->state == FDLS_TGT_STATE_OFFLINE)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: tport state is offlining/offline\n",
+ tport->fcid);
+ return false;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ /*
+ * By setting this flag, the tport will not be seen in a look-up
+ * in an RSCN. Even if we move to multithreaded model, this tport
+ * will be destroyed and a new RSCN will have to create a new one
+ */
+ tport->flags |= FNIC_FDLS_TPORT_TERMINATING;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_rport_exch_reset(iport->fnic, tport->fcid);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) {
+ tport_del_evt =
+ kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport fcid: 0x%0x\n",
+ tport->fcid);
+ return false;
+ }
+ tport_del_evt->event = TGT_EV_RPORT_DEL;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%x not reg with scsi_transport. Freeing locally",
+ tport->fcid);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+ return true;
+}
+
+static void
+fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PLOGI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_plogi_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
+static uint16_t
+fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport,
+ struct fc_std_flogi *plogi_rsp)
+{
+ uint16_t b2b_rdf_size =
+ be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els));
+ uint16_t spc3_rdf_size =
+ be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x",
+ b2b_rdf_size, spc3_rdf_size);
+
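+ /* The usable receive data field size is the smaller of the common
+ * service parameter value and the class 3 service parameter value
+ * from the PLOGI accept.
+ */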
+ return min(b2b_rdf_size, spc3_rdf_size);
+}
+
+static void fdls_send_register_fc4_types(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rft_id *prft_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rft_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFT");
+ return;
+ }
+
+ prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prft_id = (struct fc_std_rft_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prft_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid,
+ oxid);
+
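+ /* The FC-4 type map is a bitmap carried in 32-bit words: word 0
+ * holds FCP (type 0x08) and word 1 holds CT (type 0x20).
+ */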
+ prft_id->rft_id.fr_fts.ff_type_map[0] =
+ cpu_to_be32(1 << FC_TYPE_FCP);
+
+ prft_id->rft_id.fr_fts.ff_type_map[1] =
+ cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW));
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_register_fc4_features(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rff_id *prff_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rff_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFF");
+ return;
+ }
+
+ prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prff_id = (struct fc_std_rff_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)},
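+ /* FC-4 feature bit 0x2 advertises the initiator function */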
+ .rff_id.fr_feat = 0x2,
+ .rff_id.fr_type = FC_TYPE_FCP
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prff_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFF",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void
+fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_prli *pprli;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_prli);
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PRLI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pprli = (struct fc_std_els_prli) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els_prli = {.prli_cmd = ELS_PRLI,
+ .prli_spp_len = 16,
+ .prli_len = cpu_to_be16(0x14)},
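+ /*
+ * spp_type 0x08 is FCP. spp_flags 0x20 requests establishing an
+ * image pair. spp_params 0xA2 advertises the initiator function
+ * with confirmed completion and read XFER_RDY disabled
+ * (FCP_SPPF_CONF_COMPL | FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS).
+ */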
+ .sp = {.spp_type = 0x08, .spp_flags = 0x0020,
+ .spp_params = cpu_to_be32(0xA2)}
+ };
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT PRLI to 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+
+ FNIC_STD_SET_OX_ID(pprli->fchdr, oxid);
+ FNIC_STD_SET_S_ID(pprli->fchdr, s_id);
+ FNIC_STD_SET_D_ID(pprli->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_prli_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
+/**
+ * fdls_send_fabric_logo - Send a fabric LOGO to the FCF
+ * @iport: Handle to fnic iport
+ *
+ * This function does not change or check the fabric state.
+ * It is the caller's responsibility to set the appropriate iport fabric
+ * state when this is called. Normally that is FDLS_STATE_FABRIC_LOGO.
+ * This is currently expected to be called with the fnic lock held.
+ */
+void fdls_send_fabric_logo(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+/**
+ * fdls_tgt_logout - Send a LOGO to the remote port
+ * @iport: Handle to fnic iport
+ * @tport: Handle to remote port
+ *
+ * This function does not change or check the fabric/tport state.
+ * It is the caller's responsibility to set the appropriate tport/fabric
+ * state when this is called. Normally that is fdls_tgt_state_plogo.
+ * This can also be used to send a LOGO to the nameserver process,
+ * not just to target processes.
+ */
+void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send tgt LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ atomic64_inc(&iport->iport_stats.tport_logo_sent);
+}
+
+static void fdls_tgt_discovery_start(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Starting FDLS target discovery", iport->fcid);
+
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
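+ /* Stop the scan if the link bounced or the iport left the
+ * READY state since discovery began.
+ */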
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ break;
+ }
+ /* if we marked the tport as deleted due to GPN_FT
+ * We should not send ADISC anymore
+ */
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING) ||
+ (tport->state == FDLS_TGT_STATE_OFFLINE))
+ continue;
+
+ /* For tports which have received RSCN */
+ if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) {
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC);
+ tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC;
+ fdls_send_tgt_adisc(iport, tport);
+ continue;
+ }
+ if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) {
+ /* Not a new port, skip */
+ continue;
+ }
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ fdls_send_tgt_plogi(iport, tport);
+ }
+ fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY);
+}
+
+/*
+ * Restart the IT nexus if we received an out-of-sequence PLOGI/PRLI
+ * response from the target.
+ * The memory for the new tport structure is allocated inside
+ * fdls_create_tport and added to the iport's tport list. It is freed
+ * later during tport_offline/linkdown or module unload, so the
+ * new_tport pointer can safely go out of scope when this function
+ * returns.
+ */
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = tport->iport;
+ struct fnic_tport_s *new_tport = NULL;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int nexus_restart_count;
+ struct fnic *fnic = iport->fnic;
+ bool retval = true;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x state: %d restart_count: %d",
+ tport->fcid, tport->state, tport->nexus_restart_count);
+
+ fcid = tport->fcid;
+ wwpn = tport->wwpn;
+ nexus_restart_count = tport->nexus_restart_count;
+
+ retval = fdls_delete_tport(iport, tport);
+ if (retval != true) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Error deleting tport: 0x%x", fcid);
+ return;
+ }
+
+ if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded nexus restart retries tport: 0x%x",
+ fcid);
+ return;
+ }
+
+ /*
+ * Allocate memory for the new tport and add it to
+ * iport's tport list.
+ * This memory will be freed during tport_offline/linkdown
+ * or module unload. The pointer new_tport is safe to go
+ * out of scope when this function returns, since the memory
+ * it is pointing to is guaranteed to be freed later
+ * as mentioned above.
+ */
+ new_tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!new_tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error creating new tport: 0x%x", fcid);
+ return;
+ }
+
+ new_tport->nexus_restart_count = nexus_restart_count + 1;
+ fdls_send_tgt_plogi(iport, new_tport);
+ fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI);
+}
+
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->fcid == fcid)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid, uint64_t wwpn)
+{
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn);
+
+ tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Memory allocation failure while creating tport: 0x%x\n",
+ fcid);
+ return NULL;
+ }
+
+ tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ;
+ tport->r_a_tov = FC_DEF_R_A_TOV;
+ tport->e_d_tov = FC_DEF_E_D_TOV;
+ tport->fcid = fcid;
+ tport->wwpn = wwpn;
+ tport->iport = iport;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Need to setup tport timer callback");
+
+ timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added tport 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT);
+ list_add_tail(&tport->links, &iport->tport_list);
+ atomic_set(&tport->in_flight, 0);
+ return tport;
+}
+
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->wwpn == wwpn)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static void
+fnic_fdmi_attr_set(void *attr_start, u16 type, u16 len,
+ void *data, u32 *off)
+{
+ u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *)
+ ((u8 *)attr_start + *off);
+
+ put_unaligned_be16(type, &fdmi_attr->type);
+ put_unaligned_be16(size, &fdmi_attr->len);
+ memcpy(fdmi_attr->value, data, len);
+ *off += size;
+}
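+
+/*
+ * Illustrative sketch of the FDMI attribute layout built by
+ * fnic_fdmi_attr_set() (informational only, derived from the code
+ * above): each attribute is a TLV of total size len + 4 (the value
+ * length plus the entry header); the on-wire length field stores this
+ * total size, and *off is a running byte offset from attr_start:
+ *
+ *   attr_start + 0:     [type(2)][size(2)][value(len0)]  size0 = len0 + 4
+ *   attr_start + size0: [type(2)][size(2)][value(len1)]  size1 = len1 + 4
+ *
+ * After N calls, *off is the total attribute byte count, which the
+ * callers below add to sizeof(struct fc_std_fdmi_rhba/rpa) to size
+ * the frame.
+ */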
+
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rhba *prhba;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ int err;
+ struct fnic *fnic = iport->fnic;
+ struct vnic_devcmd_fw_info *fw_info = NULL;
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RHBA");
+ return;
+ }
+
+ prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prhba = (struct fc_std_fdmi_rhba) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RHBA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prhba->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA,
+ &iport->active_oxid_fdmi_rhba);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RHBA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prhba->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport);
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS,
+ &prhba->rhba.hba_attrs.numattrs);
+
+ fdmi_attr = prhba->rhba.hba_attrs.attr;
+ attr_off_bytes = 0;
+
+ put_unaligned_be64(iport->wwnn, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME,
+ FNIC_FDMI_NN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "NN set, off=%d", attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER,
+ FNIC_FDMI_MANU_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFG set <%s>, off=%d", data, attr_off_bytes);
+
+ err = vnic_dev_fw_info(fnic->vdev, &fw_info);
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_serial_number,
+ FNIC_FDMI_SERIAL_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER,
+ FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SERIAL set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
+ fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
+ strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
+ data[FNIC_FDMI_MODEL_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
+ data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES,
+ FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION,
+ FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "HW_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION,
+ FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "DRV_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION,
+ FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ROM_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION,
+ FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW_VER set <%s>, off=%d", data, attr_off_bytes);
+ }
+
+ len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING;
+}
+
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rpa *prpa;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ struct fnic *fnic = iport->fnic;
+ u32 port_speed_bm;
+ u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 tmp_data[16], data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RPA");
+ return;
+ }
+
+ prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpa = (struct fc_std_fdmi_rpa) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RPA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpa->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA,
+ &iport->active_oxid_fdmi_rpa);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RPA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prpa->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS,
+ &prpa->rpa.hba_attrs.numattrs);
+
+ /* MDS does not support GIGE speed.
+ * Bit-shift the standard definitions from scsi_transport_fc.h
+ * to match the FC spec.
+ */
+ switch (port_speed) {
+ case DCEM_PORTSPEED_10G:
+ case DCEM_PORTSPEED_20G:
+ /* There is no bit for 20G */
+ port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14;
+ break;
+ case DCEM_PORTSPEED_25G:
+ port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9;
+ break;
+ case DCEM_PORTSPEED_100G:
+ port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8;
+ break;
+ default:
+ port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15;
+ break;
+ }
+ attr_off_bytes = 0;
+
+ fdmi_attr = prpa->rpa.hba_attrs.attr;
+
+ /* FC-4 TYPEs bitmap: set only the SCSI-FCP bit (TYPE 8) below */
+ memset(data, 0, FNIC_FDMI_FC4_LEN);
+ data[2] = 1;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES,
+ FNIC_FDMI_FC4_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS,
+ FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED,
+ FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(FNIC_FDMI_MFS, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE,
+ FNIC_FDMI_MFS_LEN, data, &attr_off_bytes);
+
+ snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
+ fnic->host->host_no);
+ strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
+ data[FNIC_FDMI_OS_NAME_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
+ FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "OS name set <%s>, off=%d", data, attr_off_bytes);
+
+ snprintf(fc_host_system_hostname(fnic->host), FC_SYMBOLIC_NAME_SIZE,
+ "%s", utsname()->nodename);
+ strscpy_pad(data, fc_host_system_hostname(fnic->host),
+ FNIC_FDMI_HN_LEN);
+ data[FNIC_FDMI_HN_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
+ FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Host name set <%s>, off=%d", data, attr_off_bytes);
+
+ len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING;
+}
+
+void fdls_fabric_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d",
+ iport->fabric.timer_pending, iport->fabric.state,
+ iport->fabric.retry_counter, iport->max_flogi_retries);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (!iport->fabric.timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->fabric.del_timer_inprogress) {
+ iport->fabric.del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric_del_timer inprogress(%d). Skip timer cb",
+ iport->fabric.del_timer_inprogress);
+ return;
+ }
+
+ iport->fabric.timer_pending = 0;
+
+ /* The fabric state indicates which frames have timed out, and we retry */
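+ /* A sketch of the common three-stage pattern implemented by each
+ * case below (informational only, no behavior implied beyond the
+ * code itself):
+ * 1. LS_RJT busy (FNIC_FDLS_RETRY_FRAME set) with retries left:
+ * resend the same request.
+ * 2. No ABTS issued yet: abort the timed-out exchange.
+ * 3. The ABTS itself timed out: schedule the OXID free after
+ * 2 * r_a_tov, then resend or fall back to fabric PLOGI.
+ */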
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_FLOGI:
+ /* FLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_flogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_flogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* FLOGI has timed out after 2 * e_d_tov; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_flogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_flogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ }
+ break;
+ case FDLS_STATE_FABRIC_PLOGI:
+ /* PLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_plogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* PLOGI has timed out after 2 * e_d_tov; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_plogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_plogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ }
+ break;
+ case FDLS_STATE_RPN_ID:
+ /* RPN_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_rpn_id(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RPN_ID has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_SCR:
+ /* SCR received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_scr(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* SCR has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport);
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ /* RFT_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_types(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* RFT_ID has timed out; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ /* RFF_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_features(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RFF_ID has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_RSCN_GPN_FT:
+ case FDLS_STATE_SEND_GPNFT:
+ case FDLS_STATE_GPN_FT:
+ /* GPN_FT received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* GPN_FT has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timeout for fabric GPN_FT. Check name server: %p",
+ iport);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fdls_fdmi_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ if (!iport->fabric.fdmi_pending) {
+ /* Timer expired after FDMI responses were received. */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* If no abort is pending, send an abort */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ fdls_send_fdmi_abts(iport);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* An ABTS is pending for an active FDMI request, which means
+ * the FDMI ABTS itself has timed out.
+ * Schedule to free the OXID after 2 * r_a_tov and proceed.
+ */
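+ /* Recovery sketch (matching the code below): free only the OXIDs
+ * of the requests still marked pending (FDMI PLOGI, or RHBA/RPA),
+ * clear fdmi_pending, and restart from FDMI PLOGI while retries
+ * remain.
+ */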
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba);
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ iport->fabric.fdmi_pending = 0;
+ /* If max retries are not exhausted, start over from FDMI PLOGI */
+ if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
+ iport->fabric.fdmi_retry++;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "retry fdmi timer %d", iport->fabric.fdmi_retry);
+ fdls_send_fdmi_plogi(iport);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_tport_event_s *tport_del_evt;
+
+ tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport event fcid: 0x%x",
+ tport->fcid);
+ return;
+ }
+ tport_del_evt->event = TGT_EV_TPORT_DELETE;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
+static void fdls_tport_timer_callback(struct timer_list *t)
+{
+ struct fnic_tport_s *tport = from_timer(tport, t, retry_timer);
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!tport->timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (tport->del_timer_inprogress) {
+ tport->del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport_del_timer inprogress. Skip timer cb tport fcid: 0x%x\n",
+ tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d",
+ tport->fcid, tport->timer_pending, tport->state,
+ tport->retry_counter);
+
+ tport->timer_pending = 0;
+ oxid = tport->active_oxid;
+
+ /* We retry PLOGI/PRLI/ADISC frames depending on the tport state */
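+ /* The cases below follow the same three-stage pattern as the
+ * fabric timer callback: resend on a busy LS_RJT, send an ABTS on
+ * the first timeout, and on an ABTS timeout schedule the OXID free
+ * and either retry or delete the tport.
+ */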
+ switch (tport->state) {
+ case FDLS_TGT_STATE_PLOGI:
+ /* PLOGI frame received an LS_RJT with busy; we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_plogi(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PLOGI frame has timed out; send ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else if (tport->retry_counter < iport->max_plogi_retries) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ } else {
+ /* exceeded plogi retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ case FDLS_TGT_STATE_PRLI:
+ /* PRLI received an LS_RJT with busy; hence we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_prli(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PRLI has timed out; send ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else {
+ /* ABTS has timed out for prli, we go back to PLOGI */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ }
+ break;
+ case FDLS_TGT_STATE_ADISC:
+ /* ADISC timed out; send an ABTS */
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ fdls_send_tport_abts(iport, tport);
+ } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ } else {
+ /* exceeded retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state);
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_flogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI);
+ iport->fabric.flags = 0;
+}
+
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_plogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI);
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) {
+ /* we can do FDMI at the same time */
+ iport->fabric.fdmi_retry = 0;
+ timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback,
+ 0);
+ fdls_send_fdmi_plogi(iport);
+ iport->flags |= FNIC_FDMI_ACTIVE;
+ }
+}
+
+static void
+fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint16_t oxid;
+ struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Tgt ADISC response tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->state != FDLS_TGT_STATE_ADISC)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping this ADISC response");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport state: %d tport state: %d Is abort issued on PRLI? %d",
+ iport->state, tport->state,
+ (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED));
+ return;
+ }
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame from target: 0x%x",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (adisc_rsp->els.adisc_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts);
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n",
+ tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+ tport->retry_counter = 0;
+ frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn);
+ frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn);
+ if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC accepted from target: 0x%x. Target logged in",
+ tgt_fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error mismatch frame: ADISC");
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry ADISC again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ }
+ break;
+ }
+}
+
+static void
+fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ uint16_t max_payload_size;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS processing target PLOGI response: tgt_fcid: 0x%x",
+ tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport state: %d tport state: %d",
+ iport->state, tport->state);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response from target: 0x%x. Dropping frame",
+ tgt_fcid);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI accepted by target: 0x%x", tgt_fcid);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+ /* Retry plogi again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ return;
+
+ default:
+ atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI not accepted from target fcid: 0x%x",
+ tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PLOGI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+
+ tport->timer_pending = 0;
+ tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els));
+ tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els));
+
+ /* Learn the Service Params */
+
+ /* Max frame size - choose the lowest */
+ max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp);
+ tport->max_payload_size =
+ min(max_payload_size, iport->max_payload_size);
+
+ if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: tport max frame size below spec bounds: %d",
+ tport->max_payload_size);
+ tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MAX frame size: %u iport max_payload_size: %d tport mfs: %d",
+ max_payload_size, iport->max_payload_size,
+ tport->max_payload_size);
+
+ tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp);
+
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI);
+ fdls_send_tgt_prli(iport, tport);
+}
+
+static void
+fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_tport_event_s *tport_add_evt;
+ struct fnic *fnic = iport->fnic;
+ bool mismatched_tgt = false;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process tgt PRLI response: 0x%x", tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ /* Handle or just drop? */
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x",
+ iport->state, tport->state, tport->fcid);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PRLI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI rsp recvd in wrong state. Drop frame. Restarting nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping PRLI response from target: 0x%x ",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (prli_rsp->els_prli.prli_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI accepted from target: 0x%x", tgt_fcid);
+
+ if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "mismatched target zoned with FC SCSI initiator: 0x%x",
+ tgt_fcid);
+ mismatched_tgt = true;
+ }
+ if (mismatched_tgt) {
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry PRLI again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ default:
+ atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI not accepted from target: 0x%x", tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PRLI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+
+ /* Learn Service Params */
+ tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params);
+ tport->retry_counter = 0;
+
+ if (tport->fcp_csp & FCP_SPPF_RETRY)
+ tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY;
+
+ /* Check if the device supports the target mode function */
+ if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remote port(0x%x): no target support. Deleting it\n",
+ tgt_fcid);
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+
+ /* Inform the driver about new target added */
+ tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_add_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport event memory allocation failure: 0x%0x\n",
+ tport->fcid);
+ return;
+ }
+ tport_add_evt->event = TGT_EV_RPORT_ADD;
+ tport_add_evt->arg1 = (void *) tport;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x add tport event fcid: 0x%x\n",
+ tport->fcid, iport->fcid);
+ list_add_tail(&tport_add_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
+static void
+fdls_process_rff_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_set_state((&iport->fabric), FDLS_STATE_SCR);
+ fdls_send_scr(iport);
+ break;
+ case FC_FS_RJT:
+ reason_code = rff_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_rft_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_features(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rft_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d",
+ iport->fcid, reason_code,
+ rft_rsp->fc_std_ct_hdr.ct_explan);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_rpn_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_types(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID returned REJ BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID ELS_LS_RJT. Halting discovery %p", iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_scr_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process SCR response: 0x%04x",
+ (uint32_t) scr_rsp->scr.scr_cmd);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_SCR) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (scr_rsp->scr.scr_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+
+ default:
+ atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects);
+ break;
+ }
+}
+
+static void
+fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fc_gpn_ft_rsp_iu *gpn_ft_tgt;
+ struct fnic_tport_s *tport, *next;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int rem_len = len;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process GPN_FT tgt list", iport->fcid);
+
+ gpn_ft_tgt =
+ (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr +
+ sizeof(struct fc_frame_header)
+ + sizeof(struct fc_ct_hdr));
+ len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr);
+
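+ /* Sketch of the payload walked below, assuming the standard
+ * 16-byte GPN_FT accept entries of struct fc_gpn_ft_rsp_iu:
+ * [ctrl|fcid(3)|rsvd(4)|wwpn(8)], repeated; the entry with
+ * FC_NS_FID_LAST set in ctrl terminates the list.
+ */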
+ while (rem_len > 0) {
+
+ fcid = ntoh24(gpn_ft_tgt->fcid);
+ wwpn = be64_to_cpu(gpn_ft_tgt->wwpn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl);
+
+ if (fcid == iport->fcid) {
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ continue;
+ }
+
+ tport = fnic_find_tport_by_wwpn(iport, wwpn);
+ if (!tport) {
+ /*
+ * New port registered with the switch or first time query
+ */
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+ /*
+ * Check if this is an existing tport with the same wwpn
+ * but whose fcid has changed; if so, remove it and
+ * create a new one.
+ */
+ if (tport->fcid != fcid) {
+ fdls_delete_tport(iport, tport);
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+
+ /*
+ * If this GPN_FT rsp is after an RSCN, mark the tports which
+ * match the new GPN_FT list; any tport not found in the
+ * GPN_FT list will be deleted later.
+ */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT)
+ tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ }
+ if (rem_len <= 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d",
+ len, rem_len);
+ }
+
+ /* Remove those ports which were not listed in GPN_FT */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+
+ if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove port: 0x%x not found in GPN_FT list",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ } else {
+ tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+ }
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ }
+ }
+}
+
+static void
+fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ int count = 0;
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process GPN_FT response: iport state: %d len: %d",
+ iport->state, len);
+
+ /*
+ * GPNFT response:
+ * FDLS_STATE_GPN_FT: GPNFT sent after the SCR state
+ * during fabric discovery (FNIC_IPORT_STATE_FABRIC_DISC)
+ * FDLS_STATE_RSCN_GPN_FT: GPNFT sent in response to an RSCN
+ * FDLS_STATE_SEND_GPNFT: GPNFT sent after deleting a target,
+ * e.g. after receiving a target LOGO
+ * FDLS_STATE_TGT_DISCOVERY: target discovery from a previous GPNFT
+ * response is still in progress and a new GPNFT response has come.
+ */
+ if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC)
+ && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT))
+ || ((iport->state == FNIC_IPORT_STATE_READY)
+ && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT)
+ || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT)
+ || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.",
+ fdls_get_state(fdls), iport->state);
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ iport->state = FNIC_IPORT_STATE_READY;
+ rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr));
+
+ switch (rsp) {
+
+ case FC_FS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP accept", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_process_gpn_ft_tgt_list(iport, fchdr, len);
+
+ /*
+ * The iport state can change only if a link down event happened.
+ * We don't need to undo fdls_process_gpn_ft_tgt_list;
+ * that will be taken care of in the next link up event.
+ */
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Halting target discovery: fab st: %d iport st: %d ",
+ fdls_get_state(fdls), iport->state);
+ break;
+ }
+ fdls_tgt_discovery_start(iport);
+ break;
+
+ case FC_FS_RJT:
+ reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code);
+
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine",
+ iport->fcid);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP reject", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ /*
+ * If GPN_FT ls_rjt then we should delete
+ * all existing tports
+ */
+ count = 0;
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Remove port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ count++;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Removed (0x%x) ports", count);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * fdls_process_fabric_logo_rsp - Handle a FLOGO response from the FCF
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame
+ */
+static void
+fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogo_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response. Fabric not in LOGO state. Dropping! %p",
+ iport);
+ return;
+ }
+
+ iport->fabric.state = FDLS_STATE_FLOGO_DONE;
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGO response not accepted or rejected: 0x%x",
+ flogo_rsp->els.fl_cmd);
+ }
+}
+
+static void
+fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, void *rx_frame)
+{
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr;
+ uint8_t *fcid;
+ uint16_t rdf_size;
+ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing FLOGI response", iport->fcid);
+
+ if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fcid = FNIC_STD_GET_D_ID(fchdr);
+ iport->fcid = ntoh24(fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI response accepted", iport->fcid);
+
+ /* Learn the Service Params */
+ rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els));
+ if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE)
+ && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN))
+ iport->max_payload_size = min(rdf_size,
+ iport->max_payload_size);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "max_payload_size from fabric: %u set: %d", rdf_size,
+ iport->max_payload_size);
+
+ iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els));
+ iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els));
+
+ if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC)
+ iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "From fabric: R_A_TOV: %d E_D_TOV: %d",
+ iport->r_a_tov, iport->e_d_tov);
+
+ fc_host_fabric_name(iport->fnic->host) =
+ get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els));
+ fc_host_port_id(iport->fnic->host) = iport->fcid;
+
+ fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI registration failed", iport->fcid);
+ break;
+ }
+
+ memcpy(&fcmac[3], fcid, 3);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
+ vnic_dev_add_addr(iport->fnic->vdev, fcmac);
+
+ if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) {
+ fnic_fdls_start_plogi(iport);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received. Starting PLOGI");
+ } else {
+			/*
+			 * From FDLS_STATE_FABRIC_FLOGI, the fabric can only go to
+			 * FDLS_STATE_LINKDOWN, hence we don't have to worry about
+			 * undoing fnic_fdls_register_portid and vnic_dev_add_addr.
+			 */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects);
+ if (fabric->retry_counter < iport->max_flogi_retries) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry Flogi again from the timer routine. */
+ fabric->flags |= FNIC_FDLS_RETRY_FRAME;
+
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fabric->timer_pending = 0;
+ fabric->retry_counter = 0;
+ }
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response not accepted: 0x%x",
+ flogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects);
+ break;
+ }
+}
+
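+/**
+ * fdls_process_fabric_plogi_rsp - Process a PLOGI response from the fabric
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */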
+static void
+fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+	if (fdls_get_state(&iport->fabric) != FDLS_STATE_FABRIC_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric PLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(&iport->fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x fabric PLOGI response: Accepted\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID);
+ fdls_send_rpn_id(iport);
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery",
+ iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ return;
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response not accepted: 0x%x",
+ plogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects);
+ break;
+ }
+}
+
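+/**
+ * fdls_process_fdmi_plogi_rsp - Process a PLOGI response from the management server
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */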
+static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ u64 fdmi_tov;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fdmi_plogi != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi);
+ return;
+ }
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+
+ if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Sending fdmi registration for port 0x%x\n",
+ iport->fcid);
+
+ fdls_fdmi_register_hba(iport);
+ fdls_fdmi_register_pa(iport);
+ fdmi_tov = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&iport->fabric.fdmi_timer,
+ round_jiffies(fdmi_tov));
+ break;
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x",
+ els_rjt->rej.er_reason);
+
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.fdmi_retry < 7)) {
+ iport->fabric.fdmi_retry++;
+ fdls_send_fdmi_plogi(iport);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
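+
+/**
+ * fdls_process_fdmi_reg_ack - Process an ack for FDMI RHBA/RPA registration
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ * @rsp_type: Type of the FDMI registration response
+ */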
+static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ int rsp_type)
+{
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ if (!iport->fabric.fdmi_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received FDMI ack while not waiting: 0x%x\n",
+ FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if ((iport->active_oxid_fdmi_rhba != oxid) &&
+ (iport->active_oxid_fdmi_rpa != oxid)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n",
+ oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa);
+ return;
+ }
+ if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ } else {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Received FDMI registration ack\n",
+ iport->fcid);
+
+ if (!iport->fabric.fdmi_pending) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling FDMI timer\n",
+ iport->fcid);
+ }
+}
+
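+/**
+ * fdls_process_fdmi_abts_rsp - Process an ABTS response for an FDMI exchange
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */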
+static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr));
+
+	if (s_id != FC_FID_MGMT_SERV) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ switch (FNIC_FRAME_TYPE(oxid)) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ break;
+ }
+
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
+ fdls_send_fdmi_plogi(iport);
+}
+
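+/**
+ * fdls_process_fabric_abts_rsp - Process an ABTS response for a fabric exchange
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */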
+static void
+fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint32_t fabric_state = iport->fabric.state;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+ uint16_t oxid;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr;
+
+ if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI)
+ || (s_id == FC_FID_FCTRL))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ return;
+ }
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x",
+ fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id));
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x",
+ fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr),
+ ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan);
+ }
+
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+	/* Currently the error handling/retry logic is the same for ABTS BA_ACC & BA_RJT */
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (iport->fabric.retry_counter < iport->max_flogi_retries)
+ fdls_send_fabric_flogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY)
+ fdls_send_fabric_logo(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (iport->fabric.retry_counter < iport->max_plogi_retries)
+ fdls_send_fabric_plogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_rpn_id(iport);
+ else
+ /* go back to fabric Plogi */
+ fnic_fdls_start_plogi(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_scr(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_types(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_features(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT)
+ fdls_send_gpn_ft(iport, fabric_state);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN FT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ break;
+ default:
+ /*
+ * We should not be here since we already validated rx oxid with
+ * our active_oxid_fabric_req
+ */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid OXID/active oxid 0x%x\n", oxid);
+ WARN_ON(true);
+ return;
+ }
+}
+
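+/**
+ * fdls_process_abts_req - Respond to an incoming ABTS request with a BA_ACC
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */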
+static void
+fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_abts_ba_acc *pba_acc;
+ uint32_t nport_id;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_abts_ba_acc);
+
+ nport_id = ntoh24(fchdr->fh_s_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abort from SID 0x%8x", nport_id);
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+ if (tport) {
+ if (tport->active_oxid == oxid) {
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ }
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate frame to send response for ABTS req",
+ iport->fcid);
+ return;
+ }
+
+ pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pba_acc = (struct fc_std_abts_ba_acc) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC,
+ .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}},
+ .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)}
+ };
+
+ FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id);
+ FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr));
+
+ pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr));
+ pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr));
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send BA ACC with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
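+/**
+ * fdls_process_unsupported_els_req - Reject an unsupported ELS request
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */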
+static void
+fdls_process_unsupported_els_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pls_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS with illegal frame bits 0x%x\n",
+ d_id);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS request in iport state: %d",
+ iport->state);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to unsupported ELS request");
+ return;
+ }
+
+ pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process unsupported ELS request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support this ELS request, send a reject */
+	pls_rsp->rej.er_reason = 0x0B;	/* command not supported */
+ pls_rsp->rej.er_explan = 0x0;
+ pls_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
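+/**
+ * fdls_process_rls_req - Send an RLS accept with the link error status block
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */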
+static void
+fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_rls_acc *prls_acc_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rls_acc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process RLS request %d", iport->fnic->fnic_num);
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RLS req in iport state: %d. Dropping the frame.",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RLS accept");
+ return;
+ }
+ prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID);
+
+ FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS);
+
+ prls_acc_rsp->els.rls_cmd = ELS_LS_ACC;
+ prls_acc_rsp->els.rls_lesb.lesb_link_fail =
+ cpu_to_be32(iport->fnic->link_down_cnt);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
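+/**
+ * fdls_process_els_req - Send LS_ACC for an ECHO, RRQ or other ELS request
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ * @len: Length of the received frame
+ */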
+static void
+fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr,
+ uint32_t len)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint16_t oxid;
+ uint8_t *fc_payload;
+ uint8_t type;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping ELS frame type: 0x%x in iport state: %d",
+ type, iport->state);
+ return;
+ }
+ switch (type) {
+ case ELS_ECHO:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for ECHO request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ case ELS_RRQ:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for RRQ request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for 0x%x ELS frame\n", type);
+ break;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ELS response for 0x%x",
+ type);
+ return;
+ }
+
+ if (type == ELS_ECHO) {
+		/* Brocade sends a longer payload; copy the whole frame back */
+ memcpy(frame, fchdr, len);
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ if (type == ELS_ECHO)
+ frame_size += len;
+ else
+ frame_size += sizeof(struct fc_std_els_acc_rsp);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
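+/**
+ * fdls_process_tgt_abts_rsp - Process an ABTS response for a target exchange
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */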
+static void
+fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic_tport_s *tport;
+ uint32_t tport_state;
+ struct fc_std_abts_ba_acc *ba_acc;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr;
+
+ tport = fnic_find_tport_by_fcid(iport, s_id);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp with invalid SID: 0x%x", s_id);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n", tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp in iport state(%d). Dropping.",
+ iport->state);
+ return;
+ }
+ tport->timer_pending = 0;
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+ tport_state = tport->state;
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+	/* Check which exchange this abort rsp is for: ADISC, PLOGI or PRLI */
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC",
+ be16_to_cpu(ba_acc->acc.ba_ox_id),
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ",
+ tport->fcid, tport_state);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation:0x%x ",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < iport->max_plogi_retries)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Received tgt PRLI abts response BA_ACC",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_prli(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport); /* go back to plogi */
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS response for unknown frame %p", iport);
+ break;
+ }
+}
+
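+/**
+ * fdls_process_plogi_req - Reject an incoming PLOGI request
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */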
+static void
+fdls_process_plogi_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pplogi_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI with illegal frame bits. Dropping frame from 0x%x",
+ d_id);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI request in iport state: %d Dropping frame",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to PLOGI request");
+ return;
+ }
+
+ pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process PLOGI request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support PLOGI request, send a reject */
+	pplogi_rsp->rej.er_reason = 0x0B;	/* command not supported */
+ pplogi_rsp->rej.er_explan = 0x0;
+ pplogi_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
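+/**
+ * fdls_process_logo_req - Process a LOGO request from a target port
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */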
+static void
+fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_logo *logo = (struct fc_std_logo *)fchdr;
+ uint32_t nport_id;
+ uint64_t nport_name;
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ nport_id = ntoh24(logo->els.fl_n_port_id);
+ nport_name = be64_to_cpu(logo->els.fl_n_port_wwn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process LOGO request from fcid: 0x%x", nport_id);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping LOGO req from 0x%x in iport state: %d",
+ nport_id, iport->state);
+ return;
+ }
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ if (!tport) {
+ /* We are not logged in with the nport, log and drop... */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO from an nport not logged in: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->fcid != nport_id) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO with invalid target port fcid: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+	/* Got a LOGO in response to an ADISC to a target which has logged out */
+ if (tport->state == FDLS_TGT_STATE_ADISC) {
+ tport->retry_counter = 0;
+ oxid = tport->active_oxid;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ return;
+ }
+ } else {
+ fdls_delete_tport(iport, tport);
+ }
+ if (iport->state == FNIC_IPORT_STATE_READY) {
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) &&
+ (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ }
+}
+
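+/**
+ * fdls_process_rscn - Process an RSCN and rediscover the affected tports
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */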
+static void
+fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_rscn *rscn;
+ struct fc_els_rscn_page *rscn_port = NULL;
+ int num_ports;
+ struct fnic_tport_s *tport, *next;
+ uint32_t nport_id;
+ uint8_t fcid[3];
+ int newports = 0;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ int rscn_type = NOT_PC_RSCN;
+ uint32_t sid = ntoh24(fchdr->fh_s_id);
+ unsigned long reset_fnic_list_lock_flags = 0;
+ uint16_t rscn_payload_len;
+
+ atomic64_inc(&iport->iport_stats.num_rscns);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN %p", iport);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS RSCN received in state(%d). Dropping",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ rscn = (struct fc_std_rscn *)fchdr;
+ rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen);
+
+ /* frame validation */
+ if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8)
+ || (rscn_payload_len > 1024)
+ || (rscn->els.rscn_page_len != 4)) {
+ num_ports = 0;
+ if ((rscn_payload_len == 0xFFFF)
+ && (sid == FC_FID_FCTRL)) {
+ rscn_type = PC_RSCN;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x",
+ sid, rscn_payload_len);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN payload_len: 0x%x page_len: 0x%x",
+ rscn_payload_len, rscn->els.rscn_page_len);
+ /* if this happens then we need to send ADISC to all the tports. */
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ } /* end else */
+ } else {
+ num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len;
+ rscn_port = (struct fc_els_rscn_page *)(rscn + 1);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN received for num_ports: %d payload_len: %d page_len: %d ",
+ num_ports, rscn_payload_len, rscn->els.rscn_page_len);
+
+ /*
+	 * An RSCN has at least one Port_ID page, but may not have any port_id
+	 * in it. If no port_id is specified in the Port_ID page, we send
+	 * ADISC to all the tports.
+ */
+
+ while (num_ports) {
+ memcpy(fcid, rscn_port->rscn_fid, 3);
+
+ nport_id = ntoh24(fcid);
+ rscn_port++;
+ num_ports--;
+ /* if this happens then we need to send ADISC to all the tports. */
+ if (nport_id == 0) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ break;
+ }
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN port id list: 0x%x", nport_id);
+
+ if (!tport) {
+ newports++;
+ continue;
+ }
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ }
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON &&
+ rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) {
+
+ if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCRSCN handling already in progress. Skip host reset: %d",
+ iport->fnic->fnic_num);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing PCRSCN. Queuing fnic for host reset: %d",
+ iport->fnic->fnic_num);
+ fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+ list_add_tail(&fnic->links, &reset_fnic_list);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ queue_work(reset_fnic_work_queue, &reset_fnic_work);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN sending GPN_FT: newports: %d", newports);
+ fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT);
+ fdls_send_rscn_resp(iport, fchdr);
+ }
+}
+
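+/**
+ * fnic_fdls_disc_start - Start fabric discovery with FLOGI or PLOGI
+ * @iport: Handle to fnic iport
+ */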
+void fnic_fdls_disc_start(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+
+ fc_host_fabric_name(iport->fnic->host) = 0;
+ fc_host_post_event(iport->fnic->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
+
+ if (!iport->usefip) {
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+ fnic_fdls_start_flogi(iport);
+ } else
+ fnic_fdls_start_plogi(iport);
+}
+
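+/**
+ * fdls_process_adisc_req - Accept or reject an incoming ADISC request
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame header
+ */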
+static void
+fdls_process_adisc_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_els_adisc *padisc_acc;
+ struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint8_t *rjt_frame;
+ uint8_t *acc_frame;
+ struct fc_std_els_rjt_rsp *prjts_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+ uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process ADISC request %d", iport->fnic->fnic_num);
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport for fcid: 0x%x not found. Dropping ADISC req.",
+ tgt_fcid);
+ return;
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping ADISC req from fcid: 0x%x in iport state: %d",
+ tgt_fcid, iport->state);
+ return;
+ }
+
+ frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn);
+ frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn);
+
+ if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) {
+ /* send reject */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx",
+ tgt_fcid, frame_wwpn, frame_wwnn);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "local tport wwpn: 0x%llx wwnn: 0x%llx. Sending RJT",
+ tport->wwpn, tport->wwnn);
+
+ rjt_frame = fdls_alloc_frame(iport);
+ if (rjt_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate rjt_frame to send response to ADISC request");
+ return;
+ }
+
+ prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(rjt_frame, iport);
+
+ prjts_rsp->rej.er_reason = 0x03; /* logical error */
+ prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */
+ prjts_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size);
+ return;
+ }
+
+ acc_frame = fdls_alloc_frame(iport);
+ if (acc_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ADISC accept");
+ return;
+ }
+
+ padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id);
+
+ FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ padisc_acc->els.adisc_cmd = ELS_LS_ACC;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn,
+ iport->wwnn);
+ memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3);
+
+ fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size);
+}
+
+/*
+ * Performs validation of all FCoE frames and returns the frame type
+ */
+int
+fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t type;
+ uint8_t *fc_payload;
+ uint16_t oxid;
+ uint32_t s_id;
+ uint32_t d_id;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ int oxid_frame_type;
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ /* some common validation */
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
+ }
+ }
+
+ /* BLS ABTS response */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
+ || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) {
+ if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS invalid frame. Dropping frame");
+ return -1;
+
+ }
+ if (fdls_is_oxid_fabric_req(oxid)) {
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+ return FNIC_FABRIC_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_fdmi_req(oxid)) {
+ return FNIC_FDMI_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_tgt_req(oxid)) {
+ return FNIC_TPORT_BLS_ABTS_RSP;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+
+ /* BLS ABTS Req */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS)
+ && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Receiving Abort Request from s_id: 0x%x", s_id);
+ return FNIC_BLS_ABTS_REQ;
+ }
+
+ /* unsolicited requests frames */
+ if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) {
+ switch (type) {
+ case ELS_LOGO:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received LOGO invalid frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_ELS_LOGO_REQ;
+ case ELS_RSCN:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN invalid FCTL. Dropping frame");
+ return -1;
+ }
+ if (s_id != FC_FID_FCTRL)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.",
+ fchdr->fh_f_ctl[0], fchdr->fh_type, s_id);
+ return FNIC_ELS_RSCN_REQ;
+ case ELS_PLOGI:
+ return FNIC_ELS_PLOGI_REQ;
+ case ELS_ECHO:
+ return FNIC_ELS_ECHO_REQ;
+ case ELS_ADISC:
+ return FNIC_ELS_ADISC;
+ case ELS_RLS:
+ return FNIC_ELS_RLS;
+ case ELS_RRQ:
+ return FNIC_ELS_RRQ;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unsupported frame (type:0x%02x) from fcid: 0x%x",
+ type, s_id);
+ return FNIC_ELS_UNSUPPORTED_REQ;
+ }
+ }
+
+ /* solicited response from fabric or target */
+ oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid);
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FLOGI)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_FLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_DIR_SERV)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_PLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FCTRL)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_SCR_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RPN_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFF_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_GPN_FT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ return FNIC_FABRIC_LOGO_RSP;
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ return FNIC_FDMI_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ return FNIC_FDMI_REG_HBA_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ return FNIC_FDMI_RPA_RSP;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ return FNIC_TPORT_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ return FNIC_TPORT_PRLI_RSP;
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ return FNIC_TPORT_ADISC_RSP;
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping Unknown frame in tport solicited exchange range type: 0x%x.",
+ fchdr->fh_type);
+ return -1;
+ }
+ return FNIC_TPORT_LOGO_RSP;
+ default:
+ /* Drop the Rx frame and log/stats it */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Solicited response: unknown OXID: 0x%x", oxid);
+ return -1;
+ }
+
+ return -1;
+}
+
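+/**
+ * fnic_fdls_recv_frame - Validate a received FC frame and dispatch it
+ * @iport: Handle to fnic iport
+ * @rx_frame: Received frame
+ * @len: Length of the received frame
+ * @fchdr_offset: Offset of the FC header within the frame
+ */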
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset)
+{
+ struct fc_frame_header *fchdr;
+ uint32_t s_id = 0;
+ uint32_t d_id = 0;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset);
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming");
+
+ frame_type =
+ fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+	/* If we are in FLOGO, drop everything else */
+ if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO &&
+ frame_type != FNIC_FABRIC_LOGO_RSP)
+ return;
+
+ switch (frame_type) {
+ case FNIC_FABRIC_FLOGI_RSP:
+ fdls_process_flogi_rsp(iport, fchdr, rx_frame);
+ break;
+ case FNIC_FABRIC_PLOGI_RSP:
+ fdls_process_fabric_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_PLOGI_RSP:
+ fdls_process_fdmi_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RPN_RSP:
+ fdls_process_rpn_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFT_RSP:
+ fdls_process_rft_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFF_RSP:
+ fdls_process_rff_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_SCR_RSP:
+ fdls_process_scr_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_GPN_FT_RSP:
+ fdls_process_gpn_ft_rsp(iport, fchdr, len);
+ break;
+ case FNIC_TPORT_PLOGI_RSP:
+ fdls_process_tgt_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_PRLI_RSP:
+ fdls_process_tgt_prli_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_ADISC_RSP:
+ fdls_process_tgt_adisc_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_BLS_ABTS_RSP:
+ fdls_process_tgt_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_LOGO_RSP:
+ /* Logo response from tgt which we have deleted */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Logo response from tgt: 0x%x",
+ ntoh24(fchdr->fh_s_id));
+ break;
+ case FNIC_FABRIC_LOGO_RSP:
+ fdls_process_fabric_logo_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_BLS_ABTS_RSP:
+ fdls_process_fabric_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_BLS_ABTS_RSP:
+ fdls_process_fdmi_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_BLS_ABTS_REQ:
+ fdls_process_abts_req(iport, fchdr);
+ break;
+ case FNIC_ELS_UNSUPPORTED_REQ:
+ fdls_process_unsupported_els_req(iport, fchdr);
+ break;
+ case FNIC_ELS_PLOGI_REQ:
+ fdls_process_plogi_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RSCN_REQ:
+ fdls_process_rscn(iport, fchdr);
+ break;
+ case FNIC_ELS_LOGO_REQ:
+ fdls_process_logo_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RRQ:
+ case FNIC_ELS_ECHO_REQ:
+ fdls_process_els_req(iport, fchdr, len);
+ break;
+ case FNIC_ELS_ADISC:
+ fdls_process_adisc_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RLS:
+ fdls_process_rls_req(iport, fchdr);
+ break;
+ case FNIC_FDMI_REG_HBA_RSP:
+ case FNIC_FDMI_RPA_RSP:
+ fdls_process_fdmi_reg_ack(iport, fchdr, frame_type);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "s_id: 0x%x d_did: 0x%x", s_id, d_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown FCoE frame of len: %d. Dropping frame", len);
+ break;
+ }
+}
+
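+/**
+ * fnic_fdls_disc_init - Reset the OXID pool and FDLS state before discovery
+ * @iport: Handle to fnic iport
+ */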
+void fnic_fdls_disc_init(struct fnic_iport_s *iport)
+{
+ fdls_reset_oxid_pool(iport);
+	fdls_set_state(&iport->fabric, FDLS_STATE_INIT);
+}
+
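+/**
+ * fnic_fdls_link_down - Process a link down event and remove all tports
+ * @iport: Handle to fnic iport
+ */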
+void fnic_fdls_link_down(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing link down", iport->fcid);
+
+	fdls_set_state(&iport->fabric, FDLS_STATE_LINKDOWN);
+ iport->fabric.flags = 0;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing rport: 0x%x", tport->fcid);
+ fdls_delete_tport(iport, tport);
+ }
+
+ if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS finish processing link down", iport->fcid);
+}
diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h
new file mode 100644
index 000000000000..012f43afd083
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_fc.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FDLS_FC_H_
+#define _FDLS_FC_H_
+
+/* This file contains the declarations for FC fabric services
+ * and target discovery
+ *
+ * Request and Response for
+ * 1. FLOGI
+ * 2. PLOGI to Fabric Controller
+ * 3. GPN_ID, GPN_FT
+ * 4. RSCN
+ * 5. PLOGI to Target
+ * 6. PRLI to Target
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_els.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_ns.h>
+#include <uapi/scsi/fc/fc_gs.h>
+#include <uapi/linux/if_ether.h>
+#include <scsi/fc/fc_ms.h>
+#include <linux/minmax.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#define FDLS_MIN_FRAMES (32)
+#define FDLS_MIN_FRAME_ELEM (4)
+#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002
+#define FNIC_FCP_SP_TARGET 0x00000010
+#define FNIC_FCP_SP_INITIATOR 0x00000020
+#define FNIC_FCP_SP_CONF_CMPL 0x00000080
+#define FNIC_FCP_SP_RETRY 0x00000100
+
+#define FNIC_FC_CONCUR_SEQS (0xFF)
+#define FNIC_FC_RO_INFO (0x1F)
+
+/* Little Endian */
+#define FNIC_UNASSIGNED_OXID (0xffff)
+#define FNIC_UNASSIGNED_RXID (0xffff)
+#define FNIC_ELS_REQ_FCTL (0x000029)
+#define FNIC_ELS_REP_FCTL (0x000099)
+
+#define FNIC_FCP_RSP_FCTL (0x000099)
+#define FNIC_REQ_ABTS_FCTL (0x000009)
+
+#define FNIC_FC_PH_VER_HI (0x20)
+#define FNIC_FC_PH_VER_LO (0x20)
+#define FNIC_FC_PH_VER (0x2020)
+#define FNIC_FC_B2B_CREDIT (0x0A)
+#define FNIC_FC_B2B_RDF_SZ (0x0800)
+
+#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data)
+#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov)
+#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov)
+#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features))
+#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn)
+#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn)
+
+#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \
+ (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size))
+#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \
+ (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov))
+#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \
+ (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov))
+
+#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3)
+#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3)
+#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid))
+#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid))
+
+#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl)
+#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type)
+#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \
+ put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl))
+
+#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr)
+#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr)
+#define FNIC_STD_SET_PORT_ID(__req, __portid) \
+ memcpy(__req.fr_fid.fp_fid, __portid, 3)
+#define FNIC_STD_SET_PORT_NAME(_req, _pName) \
+ (put_unaligned_be64(_pName, &_req.fr_wwn))
+
+#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id))
+#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id))
+#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id)
+#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id)
+#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type)
+#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl)
+#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl)
+
+#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd))
+
+#define FNIC_FCOE_MAX_FRAME_SZ (2048)
+#define FNIC_FCOE_MIN_FRAME_SZ (280)
+#define FNIC_FC_MAX_PAYLOAD_LEN (2048)
+#define FNIC_MIN_DATA_FIELD_SIZE (256)
+
+#define FNIC_FC_EDTOV_NSEC (0x400)
+#define FNIC_NSEC_TO_MSEC (0x1000000)
+#define FCP_PRLI_FUNC_TARGET (0x0010)
+
+#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21)
+#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98)
+#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99)
+#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29)
+#define FNIC_FC_R_CTL_FC4_SCTL (0x03)
+#define FNIC_FC_CS_CTL (0x00)
+
+#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ)
+#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA)
+#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT)
+#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT)
+#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL)
+#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS)
+#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS)
+#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT)
+#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL)
+
+#define FNIC_FC_C3_RDF (0xfff)
+#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \
+ (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \
+ (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF)))
+#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \
+ (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \
+ (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff)))
+
+/* FLOGI/PLOGI struct */
+struct fc_std_flogi {
+ struct fc_frame_header fchdr;
+ struct fc_els_flogi els;
+} __packed;
+
+struct fc_std_els_acc_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_acc acc;
+} __packed;
+
+struct fc_std_els_rjt_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_rjt rej;
+} __packed;
+
+struct fc_std_els_adisc {
+ struct fc_frame_header fchdr;
+ struct fc_els_adisc els;
+} __packed;
+
+struct fc_std_rls_acc {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls_resp els;
+} __packed;
+
+struct fc_std_abts_ba_acc {
+ struct fc_frame_header fchdr;
+ struct fc_ba_acc acc;
+} __packed;
+
+struct fc_std_abts_ba_rjt {
+ struct fc_frame_header fchdr;
+ struct fc_ba_rjt rjt;
+} __packed;
+
+struct fc_std_els_prli {
+ struct fc_frame_header fchdr;
+ struct fc_els_prli els_prli;
+ struct fc_els_spp sp;
+} __packed;
+
+struct fc_std_rpn_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rn_id rpn_id;
+} __packed;
+
+struct fc_std_fdmi_rhba {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rhba rhba;
+} __packed;
+
+struct fc_std_fdmi_rpa {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rpa rpa;
+} __packed;
+
+struct fc_std_rft_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rft_id rft_id;
+} __packed;
+
+struct fc_std_rff_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rff_id rff_id;
+} __packed;
+
+struct fc_std_gpn_ft {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_gid_ft gpn_ft;
+} __packed;
+
+/* Accept CT_IU for GPN_FT */
+struct fc_gpn_ft_rsp_iu {
+ uint8_t ctrl;
+ uint8_t fcid[3];
+ uint32_t rsvd;
+ __be64 wwpn;
+} __packed;
+
+struct fc_std_rls {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls els;
+} __packed;
+
+struct fc_std_scr {
+ struct fc_frame_header fchdr;
+ struct fc_els_scr scr;
+} __packed;
+
+struct fc_std_rscn {
+ struct fc_frame_header fchdr;
+ struct fc_els_rscn els;
+} __packed;
+
+struct fc_std_logo {
+ struct fc_frame_header fchdr;
+ struct fc_els_logo els;
+} __packed;
+
+#define FNIC_ETH_FCOE_HDRS_OFFSET \
+ (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr))
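+
+/*
+ * The FC frame header of a received FCoE frame follows the Ethernet
+ * and FCoE encapsulation headers, e.g. (sketch):
+ *
+ *	struct fc_frame_header *fchdr = (struct fc_frame_header *)
+ *		((uint8_t *)frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ */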
+
+#endif /* _FDLS_FC_H */
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
new file mode 100644
index 000000000000..7bb85949033f
--- /dev/null
+++ b/drivers/scsi/fnic/fip.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#include "fnic.h"
+#include "fip.h"
+#include <linux/etherdevice.h>
+
+#define FIP_FNIC_RESET_WAIT_COUNT 15
+
+/**
+ * fnic_fcoe_reset_vlans - Free the list of discovered VLANs
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan, *next;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (!list_empty(&fnic->vlan_list)) {
+ list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) {
+ list_del(&vlan->list);
+ kfree(vlan);
+ }
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reset vlan complete\n");
+}
+
+/**
+ * fnic_fcoe_send_vlan_req - Send a FIP VLAN request to the all-FCFs multicast MAC
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u64 vlan_tov;
+ struct fip_vlan_req *pvlan_req;
+ uint16_t frame_size = sizeof(struct fip_vlan_req);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send VLAN req");
+ return;
+ }
+
+ fnic_fcoe_reset_vlans(fnic);
+
+ fnic->set_vlan(fnic, 0);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "set vlan done\n");
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "got MAC %pM\n", iport->hwmac);
+
+ pvlan_req = (struct fip_vlan_req *) frame;
+ *pvlan_req = (struct fip_vlan_req) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_VLAN),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC,
+ .fip_dlen = 2}}
+ };
+
+ memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Send VLAN req\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov));
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fip timer set\n");
+}
+
+/**
+ * fnic_fcoe_process_vlan_resp - Process the VLAN response from one FCF and
+ * populate the VLAN list.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received FIP frame
+ *
+ * Will wait for responses from multiple FCFs until timeout.
+ */
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u16 vid;
+ int num_vlan = 0;
+ int cur_desc, desc_len;
+ struct fcoe_vlan *vlan;
+ struct fip_vlan_desc *vlan_desc;
+ unsigned long flags;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p got vlan resp\n", fnic);
+
+ desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "desc_len %d\n", desc_len);
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+
+ cur_desc = 0;
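+ /*
+ * fip_dl_len and fip_dlen count 32-bit words, so each descriptor
+ * starts at a 4-byte multiple from the first one; hence the
+ * cur_desc * 4 byte offset below.
+ */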
+ while (desc_len > 0) {
+ vlan_desc = (struct fip_vlan_desc *)
+ ((char *)vlan_notif->vlans_desc + cur_desc * 4);
+
+ if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) {
+ if (vlan_desc->fd_desc.fip_dlen != 1) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor length(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dlen);
+
+ }
+ num_vlan++;
+ vid = be16_to_cpu(vlan_desc->fd_vlan);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "process_vlan_resp: FIP VLAN %d\n", vid);
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+
+ if (!vlan) {
+ /* retry from timer */
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Mem Alloc failure\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ goto out;
+ }
+ vlan->vid = vid & 0x0fff;
+ vlan->state = FIP_VLAN_AVAIL;
+ list_add_tail(&vlan->list, &fnic->vlan_list);
+ break;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor type(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dtype);
+ /*
+ * Note: a type 2 descriptor (FIP MAC Address
+ * Descriptor) may legitimately be received here;
+ * skip past it.
+ */
+ cur_desc += vlan_desc->fd_desc.fip_dlen;
+ desc_len -= vlan_desc->fd_desc.fip_dlen;
+ }
+
+ /* any VLAN descriptors present? */
+ if (num_vlan == 0) {
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p No VLAN descriptors in FIP VLAN response\n",
+ fnic);
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+out:
+ return;
+}
+
+/**
+ * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u64 fcs_tov;
+ struct fip_discovery *pdisc_sol;
+ uint16_t frame_size = sizeof(struct fip_discovery);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FCF discovery");
+ return;
+ }
+
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+
+ pdisc_sol = (struct fip_discovery *) frame;
+ *pdisc_sol = (struct fip_discovery) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC),
+ .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}},
+ .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1},
+ .fd_size = cpu_to_be16(FCOE_MAX_SIZE)}
+ };
+
+ memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0xFF;
+
+ FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Start FCF discovery\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED;
+
+ fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov));
+}
+
+/**
+ * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * FCF advertisements can be:
+ * solicited - Sent in response to a discover-FCF FIP request.
+ * Store the information of the FCF with the highest priority.
+ * Wait until timeout in case of multiple FCFs.
+ *
+ * unsolicited - Sent periodically by the FCF as a keep alive.
+ * If FLOGI is in progress or complete and the advertisement is
+ * received from our selected FCF, refresh the keep alive timer.
+ */
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph;
+ u64 fcs_ka_tov;
+ u64 tov;
+ int fka_has_changed;
+
+ switch (iport->fip.state) {
+ case FDLS_FIP_FCF_DISCOVERY_STARTED:
+ if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p Solicited adv\n", fnic);
+
+ if ((disc_adv->prio_desc.fd_pri <
+ iport->selected_fcf.fcf_priority)
+ && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FCF Available\n", fnic);
+ memcpy(iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority =
+ disc_adv->prio_desc.fd_pri;
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "adv time %d",
+ iport->selected_fcf.fka_adv_period);
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
+ }
+ }
+ break;
+ case FDLS_FIP_FLOGI_STARTED:
+ case FDLS_FIP_FLOGI_COMPLETE:
+ if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) {
+ /* same fcf */
+ if (memcmp(iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) {
+ if (iport->selected_fcf.fka_adv_period !=
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) {
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO,
+ fnic->host,
+ fnic->fnic_num,
+ "change fka to %d",
+ iport->selected_fcf.fka_adv_period);
+ }
+
+ fka_has_changed =
+ (iport->selected_fcf.ka_disabled == 1) &&
+ !(disc_adv->fka_adv_desc.fd_flags & 1);
+
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
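+
+ /*
+ * Arm the FCF keep-alive watchdog for three advertisement
+ * periods; the FCF is considered dead if no advertisement
+ * arrives within that window.
+ */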
+ if (!(iport->selected_fcf.ka_disabled ||
+ iport->selected_fcf.fka_adv_period == 0)) {
+ fcs_ka_tov = jiffies + 3 *
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcs_ka_tov));
+ } else {
+ if (timer_pending(&fnic->fcs_ka_timer))
+ del_timer_sync(&fnic->fcs_ka_timer);
+ }
+
+ if (fka_has_changed) {
+ if (iport->selected_fcf.fka_adv_period != 0) {
+ tov = jiffies + msecs_to_jiffies(
+ iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov = jiffies +
+ msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ } /* end switch */
+}
+
+/**
+ * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_flogi(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi *pflogi_req;
+ u64 flogi_tov;
+ uint16_t oxid;
+ uint16_t frame_size = sizeof(struct fip_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FIP FLOGI");
+ return;
+ }
+
+ pflogi_req = (struct fip_flogi *) frame;
+ *pflogi_req = (struct fip_flogi) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_LS),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .flogi_desc = {
+ .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen = 36},
+ .flogi = {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_FLOGI,
+ .fl_csp = {
+ .sp_hi_ver =
+ FNIC_FC_PH_VER_HI,
+ .sp_lo_ver =
+ FNIC_FC_PH_VER_LO,
+ .sp_bb_cred =
+ cpu_to_be16
+ (FNIC_FC_B2B_CREDIT),
+ .sp_bb_data =
+ cpu_to_be16
+ (FNIC_FC_B2B_RDF_SZ)},
+ .fl_cssp[2].cp_class =
+ cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ },
+ }
+ },
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
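+
+ /*
+ * The FLOGI descriptor (fip_dlen = 36 words) carries a complete
+ * FC FLOGI frame: 4 bytes of descriptor header plus a 24-byte FC
+ * header plus a 116-byte FLOGI payload = 144 bytes. FIP_FLOGI_LEN
+ * (38) adds the trailing 2-word MAC descriptor.
+ */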
+
+ memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ if (iport->usefip)
+ memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate OXID to send FIP FLOGI");
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid);
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn,
+ iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP start FLOGI\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ iport->fip.flogi_retry++;
+
+ iport->fip.state = FDLS_FIP_FLOGI_STARTED;
+ flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov));
+}
+
+/**
+ * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * If successful save assigned fc_id and MAC, program firmware
+ * and start fdls discovery, else restart vlan discovery.
+ */
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph;
+ int desc_len;
+ uint32_t s_id;
+ int frame_type;
+ uint16_t oxid;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p FIP FLOGI rsp\n", fnic);
+ desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len);
+ if (desc_len != FIP_FLOGI_LEN) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid Descriptor List len (%x). Dropping frame\n",
+ desc_len);
+ return;
+ }
+
+ if (!(flogi_rsp->rsp_desc.fd_desc.fip_dtype == FIP_DT_FLOGI &&
+ flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36) ||
+ !(flogi_rsp->mac_desc.fd_desc.fip_dtype == FIP_DT_MAC &&
+ flogi_rsp->mac_desc.fd_desc.fip_dlen == 2)) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame invalid type and len mix\n");
+ return;
+ }
+
+ frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+ s_id = ntoh24(fchdr->fh_s_id);
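+
+ /*
+ * Expected FLOGI LS_ACC header: f_ctl[0] 0x98 = exchange responder
+ * + last sequence + end sequence, r_ctl 0x23 = ELS reply, type
+ * 0x01 = extended link service, s_id 0xFFFFFE (FC_FID_FLOGI).
+ */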
+ if ((fchdr->fh_f_ctl[0] != 0x98)
+ || (fchdr->fh_r_ctl != 0x23)
+ || (s_id != FC_FID_FLOGI)
+ || (frame_type != FNIC_FABRIC_FLOGI_RSP)
+ || (fchdr->fh_type != 0x01)) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n",
+ s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl,
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
+ if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p rsp for pending FLOGI\n", fnic);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+ del_timer_sync(&fnic->retry_fip_timer);
+
+ if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN)
+ && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FLOGI success\n", fnic);
+ memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN);
+ iport->fcid =
+ ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id);
+
+ iport->r_a_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov);
+ iport->e_d_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov);
+ memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+ vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, NULL)
+ != 0) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p flogi registration failed\n",
+ fnic);
+ return;
+ }
+
+ iport->fip.state = FDLS_FIP_FLOGI_COMPLETE;
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "iport->state:%d\n",
+ iport->state);
+ fnic_fdls_disc_start(iport);
+ if (!(iport->selected_fcf.ka_disabled ||
+ iport->selected_fcf.fka_adv_period == 0)) {
+ u64 tov;
+
+ tov = jiffies +
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+ }
+ } else {
+ /*
+ * On FLOGI reject, clear all FCFs and restart
+ * discovery from scratch.
+ */
+ atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects);
+ /* start FCoE VLAN discovery */
+ fnic_fcoe_send_vlan_req(fnic);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+ }
+ }
+}
+
+/**
+ * fnic_common_fip_cleanup - Clean up FCF info and timers in case of
+ * link down/CVL
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_common_fip_cleanup(struct fnic *fnic)
+{
+
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ if (!iport->usefip)
+ return;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fip cleanup\n", fnic);
+
+ iport->fip.state = FDLS_FIP_INIT;
+
+ del_timer_sync(&fnic->retry_fip_timer);
+ del_timer_sync(&fnic->fcs_ka_timer);
+ del_timer_sync(&fnic->enode_ka_timer);
+ del_timer_sync(&fnic->vn_ka_timer);
+
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+
+ memset(iport->fpma, 0, ETH_ALEN);
+ iport->fcid = 0;
+ iport->r_a_tov = 0;
+ iport->e_d_tov = 0;
+ memset(fnic->iport.fcfmac, 0, ETH_ALEN);
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0;
+ iport->selected_fcf.fka_adv_period = 0;
+ iport->selected_fcf.ka_disabled = 0;
+
+ fnic_fcoe_reset_vlans(fnic);
+}
+
+/**
+ * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * Verify that the CVL was received from our current FCF for our assigned
+ * MAC, then clean up and restart VLAN discovery.
+ */
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph;
+ int i;
+ int found = false;
+ int max_count = 0;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p clear virtual link handler\n", fnic);
+
+ if (!(cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == FIP_DT_MAC &&
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2) ||
+ !(cvl_msg->name_desc.fd_desc.fip_dtype == FIP_DT_NAME &&
+ cvl_msg->name_desc.fd_desc.fip_dlen == 3)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid mix: ft %x fl %x ndt %x ndl %x",
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dtype,
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dlen,
+ cvl_msg->name_desc.fd_desc.fip_dtype,
+ cvl_msg->name_desc.fd_desc.fip_dlen);
+ }
+
+ if (memcmp(iport->selected_fcf.fcf_mac,
+ cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN) == 0) {
+ for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) {
+ if (!(cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == FIP_DT_VN_ID &&
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid type and len mix type: %d len: %d\n",
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype,
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen);
+ }
+ if (memcmp
+ (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac,
+ ETH_ALEN) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return;
+ fnic_common_fip_cleanup(fnic);
+
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ max_count++;
+ if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rthr waited too long. Skipping handle link event %p\n",
+ fnic);
+ return;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait %p",
+ fnic);
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
+ fnic_fdls_link_down(iport);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic_fcoe_send_vlan_req(fnic);
+ }
+}
+
+/**
+ * fdls_fip_recv_frame - Demultiplexer for FIP frames
+ * @fnic: Handle to fnic driver instance
+ * @frame: Received ethernet frame
+ */
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame)
+{
+ struct ethhdr *eth = (struct ethhdr *)frame;
+ struct fip_header *fiph;
+ u16 op;
+ u8 sub;
+ int len = 2048;
+
+ if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) {
+ fiph = (struct fip_header *)(eth + 1);
+ op = be16_to_cpu(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming");
+
+ if (op == FIP_OP_DISC && sub == FIP_SC_REP)
+ fnic_fcoe_fip_discovery_resp(fnic, fiph);
+ else if (op == FIP_OP_VLAN && sub == FIP_SC_REP)
+ fnic_fcoe_process_vlan_resp(fnic, fiph);
+ else if (op == FIP_OP_CTRL && sub == FIP_SC_REP)
+ fnic_fcoe_process_cvl(fnic, fiph);
+ else if (op == FIP_OP_LS && sub == FIP_SC_REP)
+ fnic_fcoe_process_flogi_resp(fnic, fiph);
+
+ /* Return true if the frame was a FIP frame */
+ return true;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Not a FIP Frame");
+ return false;
+}
+
+void fnic_work_on_fip_timer(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP timeout\n");
+
+ if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) {
+ fnic_vlan_discovery_timeout(fnic);
+ } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FCF Discovery timeout\n");
+ if (!is_zero_ether_addr(iport->selected_fcf.fcf_mac)) {
+
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ fnic_scsi_fcpio_reset(iport->fnic);
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+
+ fnic_fcoe_start_flogi(fnic);
+ if (!(iport->selected_fcf.ka_disabled ||
+ iport->selected_fcf.fka_adv_period == 0)) {
+ u64 fcf_tov;
+
+ fcf_tov = jiffies + 3 *
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcf_tov));
+ }
+ } else {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FCF Discovery timeout: no FCF found, retrying VLAN discovery\n");
+ fnic_vlan_discovery_timeout(fnic);
+ }
+ } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI timeout\n");
+ if (iport->fip.flogi_retry < fnic->config.flogi_retries)
+ fnic_fcoe_start_flogi(fnic);
+ else
+ fnic_vlan_discovery_timeout(fnic);
+ }
+}
+
+/**
+ * fnic_handle_fip_timer - Timeout handler for the FIP discovery phase.
+ * @t: Handle to the timer list
+ *
+ * Based on the current state, start next phase or restart discovery.
+ */
+void fnic_handle_fip_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, retry_fip_timer);
+
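+ /*
+ * Timer callbacks run in atomic context; defer the actual handling
+ * to a workqueue so it can allocate frames and synchronously
+ * delete timers.
+ */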
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
+
+/**
+ * fnic_handle_enode_ka_timer - Send FIP ENode keep alive to the selected FCF.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_enode_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, enode_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_enode_ka *penode_ka;
+ u64 enode_ka_tov;
+ uint16_t frame_size = sizeof(struct fip_enode_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if (iport->selected_fcf.ka_disabled ||
+ iport->selected_fcf.fka_adv_period == 0)
+ return;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send enode ka");
+ return;
+ }
+
+ penode_ka = (struct fip_enode_ka *) frame;
+ *penode_ka = (struct fip_enode_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
+
+ memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle enode KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ enode_ka_tov = jiffies
+ + msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov));
+}
+
+/**
+ * fnic_handle_vn_ka_timer - Send FIP VN_Port keep alive to the selected FCF.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_vn_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, vn_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_vn_port_ka *pvn_port_ka;
+ u64 vn_ka_tov;
+ uint8_t fcid[3];
+ uint16_t frame_size = sizeof(struct fip_vn_port_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if (iport->selected_fcf.ka_disabled ||
+ iport->selected_fcf.fka_adv_period == 0)
+ return;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send vn ka");
+ return;
+ }
+
+ pvn_port_ka = (struct fip_vn_port_ka *) frame;
+ *pvn_port_ka = (struct fip_vn_port_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}}
+ };
+
+ memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN);
+ memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN);
+ hton24(fcid, iport->fcid);
+ memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3);
+ FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle vnport KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov));
+}
+
+/**
+ * fnic_vlan_discovery_timeout - Handle vlan discovery timeout
+ * @fnic: Handle to fnic driver instance
+ *
+ * End of the VLAN discovery or FCF discovery time window.
+ * Select the next available VLAN (restarting VLAN discovery if the
+ * list is exhausted) and start FCF discovery on it.
+ */
+void fnic_vlan_discovery_timeout(struct fnic *fnic)
+{
+ struct fcoe_vlan *vlan;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (!iport->usefip)
+ return;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (list_empty(&fnic->vlan_list)) {
+ /* no vlans available, try again */
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+
+ vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list);
+
+ if (vlan->state == FIP_VLAN_SENT) {
+ if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+ /*
+ * no response on this vlan, remove from the list.
+ * Try the next vlan
+ */
+ list_del(&vlan->list);
+ kfree(vlan);
+ vlan = NULL;
+ if (list_empty(&fnic->vlan_list)) {
+ /* we exhausted all vlans, restart vlan disc */
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+ /* check the next vlan */
+ vlan = list_first_entry(&fnic->vlan_list,
+ struct fcoe_vlan, list);
+
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+
+ }
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
+
+ } else {
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ }
+ vlan->sol_count++;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_start_fcf_discovery(fnic);
+}
+
+/**
+ * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer.
+ * @work: the work item to be serviced
+ *
+ * Finish handling fcs_ka_timer in process context.
+ * Clean up, bring the link down, and restart all FIP discovery.
+ */
+void fnic_work_on_fcs_ka_timer(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs ka timeout\n", fnic);
+
+ fnic_common_fip_cleanup(fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_fdls_link_down(iport);
+ iport->state = FNIC_IPORT_STATE_FIP;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ fnic_fcoe_send_vlan_req(fnic);
+}
+
+/**
+ * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer.
+ * @t: Handle to the timer list
+ *
+ * No keep alives received from FCF. Clean up, bring the link down
+ * and restart all the FIP discovery.
+ */
+void fnic_handle_fcs_ka_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer);
+
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h
new file mode 100644
index 000000000000..79fee7628870
--- /dev/null
+++ b/drivers/scsi/fnic/fip.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#ifndef _FIP_H_
+#define _FIP_H_
+
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fip.h>
+
+/* Drop the cast from the standard definition */
+#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
+#define FCOE_MAX_SIZE 0x082E
+
+#define FCOE_CTLR_FIPVLAN_TOV (3*1000)
+#define FCOE_CTLR_FCS_TOV (3*1000)
+#define FCOE_CTLR_MAX_SOL (5*1000)
+
+#define FIP_DISC_SOL_LEN (6)
+#define FIP_VLAN_REQ_LEN (2)
+#define FIP_ENODE_KA_LEN (2)
+#define FIP_VN_KA_LEN (7)
+#define FIP_FLOGI_LEN (38)
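+
+/*
+ * The lengths above are FIP descriptor-list lengths in 32-bit words,
+ * as carried in the FIP header fip_dl_len field; e.g. FIP_VN_KA_LEN
+ * = 2 (MAC descriptor) + 5 (VN_Port descriptor) words.
+ */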
+
+enum fdls_vlan_state {
+ FIP_VLAN_AVAIL,
+ FIP_VLAN_SENT
+};
+
+enum fdls_fip_state {
+ FDLS_FIP_INIT,
+ FDLS_FIP_VLAN_DISCOVERY_STARTED,
+ FDLS_FIP_FCF_DISCOVERY_STARTED,
+ FDLS_FIP_FLOGI_STARTED,
+ FDLS_FIP_FLOGI_COMPLETE,
+};
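+
+/*
+ * Normal FIP bring-up walks these states in order:
+ *   INIT -> VLAN_DISCOVERY_STARTED -> FCF_DISCOVERY_STARTED ->
+ *   FLOGI_STARTED -> FLOGI_COMPLETE
+ * A clear virtual link or keep-alive timeout drops back to VLAN
+ * discovery.
+ */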
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+ struct list_head list;
+ uint16_t vid; /* vlan ID */
+ uint16_t sol_count; /* no. of sols sent */
+ uint16_t state; /* state */
+};
+
+struct fip_vlan_req {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_vlan_notif {
+ struct fip_header fip;
+ struct fip_vlan_desc vlans_desc[];
+} __packed;
+
+struct fip_vn_port_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_vn_desc vn_port_desc;
+} __packed;
+
+struct fip_enode_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_cvl {
+ struct fip_header fip;
+ struct fip_mac_desc fcf_mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_vn_desc vn_ports_desc[];
+} __packed;
+
+struct fip_flogi_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi_rsp_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_flogi_desc flogi_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_flogi_rsp {
+ struct fip_header fip;
+ struct fip_flogi_rsp_desc rsp_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_discovery {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_size_desc fcoe_desc;
+} __packed;
+
+struct fip_disc_adv {
+ struct fip_header fip;
+ struct fip_pri_desc prio_desc;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_fab_desc fabric_desc;
+ struct fip_fka_desc fka_adv_desc;
+} __packed;
+
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_work_on_fip_timer(struct work_struct *work);
+void fnic_work_on_fcs_ka_timer(struct work_struct *work);
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic);
+void fnic_fcoe_start_flogi(struct fnic *fnic);
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph);
+void fnic_vlan_discovery_timeout(struct fnic *fnic);
+
+extern struct workqueue_struct *fnic_fip_queue;
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx)
+{
+ struct fip_header *fiph = (struct fip_header *)(eth + 1);
+ u16 op = be16_to_cpu(fiph->fip_op);
+ u8 sub = fiph->fip_subcode;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)",
+ pfx, op, sub, len);
+
+ fnic_debug_dump(fnic, (uint8_t *)eth, len);
+}
+
+#else /* FNIC_DEBUG */
+
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx) {}
+#endif /* FNIC_DEBUG */
+
+#endif /* _FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce73f08ee889..6c5f6046b1f5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -10,8 +10,10 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <scsi/libfc.h>
-#include <scsi/libfcoe.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
@@ -24,13 +26,15 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_scsi.h"
+#include "fnic_fdls.h"
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.7.0.0"
+#define DRV_VERSION "1.8.0.0"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
+#define FABRIC_LOGO_MAX_RETRY 3
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
@@ -38,6 +42,7 @@
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
+#define LUN0_DELAY_TIME 9
/*
* Tag bits used for special requests.
@@ -75,6 +80,77 @@
#define FNIC_DEV_RST_TERM_DONE BIT(20)
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */
+#define FNIC_FCOE_MAX_CMD_LEN 16
+/* Retry supported by rport (returned by PRLI service parameters) */
+#define FNIC_FC_RP_FLAGS_RETRY 0x1
+
+/* Cisco vendor id */
+#define PCI_VENDOR_ID_CISCO 0x1137
+#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */
+
+/* sereno pcie switch */
+#define PCI_DEVICE_ID_CISCO_SERENO 0x004e
+#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */
+#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */
+#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */
+
+/* Sereno */
+#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */
+#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */
+#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */
+#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */
+#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */
+#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */
+
+/* Cruz */
+#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */
+/* Cruz MountTian SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b
+#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */
+/* Cruz MountTian2 SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157
+#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */
+
+/* Bodega */
+/* VIC 1457 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218
+#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */
+/* VIC 1487 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a
+#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */
+/* VIC 1440 Mezz mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215
+#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */
+#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */
+#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */
+#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */
+#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */
+
+/* Beverly */
+#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */
+#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */
+#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */
+#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */
+#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */
+#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */
+#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */
+#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */
+#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */
+
+struct fnic_pcie_device {
+ u32 device;
+ u8 *desc;
+ u32 subsystem_device;
+ u8 *subsys_desc;
+};
+
/*
* fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
@@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define fnic_clear_state_flags(fnicp, st_flags) \
__fnic_set_state_flags(fnicp, st_flags, 1)
+enum reset_states {
+ NOT_IN_PROGRESS = 0,
+ IN_PROGRESS,
+ RESET_ERROR
+};
+
+enum rscn_type {
+ NOT_PC_RSCN = 0,
+ PC_RSCN
+};
+
+enum pc_rscn_handling_status {
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0,
+ PC_RSCN_HANDLING_IN_PROGRESS
+};
+
+enum pc_rscn_handling_feature {
+ PC_RSCN_HANDLING_FEATURE_OFF = 0,
+ PC_RSCN_HANDLING_FEATURE_ON
+};
+
+extern unsigned int fnic_fdmi_support;
extern unsigned int fnic_log_level;
extern unsigned int io_completions;
+extern struct workqueue_struct *fnic_event_queue;
+
+extern unsigned int pc_rscn_handling_feature_flag;
+extern spinlock_t reset_fnic_list_lock;
+extern struct list_head reset_fnic_list;
+extern struct workqueue_struct *reset_fnic_work_queue;
+extern struct work_struct reset_fnic_work;
+
#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
@@ -155,6 +261,12 @@ do { \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
+#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
+
#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, \
@@ -213,12 +325,26 @@ enum fnic_state {
struct mempool;
+enum fnic_role_e {
+ FNIC_ROLE_FCP_INITIATOR = 0,
+};
+
enum fnic_evt {
FNIC_EVT_START_VLAN_DISC = 1,
FNIC_EVT_START_FCF_DISC = 2,
FNIC_EVT_MAX,
};
+struct fnic_frame_list {
+ /*
+ * Link to frame lists
+ */
+ struct list_head links;
+ void *fp;
+ int frame_len;
+ int rx_ethhdr_stripped;
+};
+
struct fnic_event {
struct list_head list;
struct fnic *fnic;
@@ -235,8 +361,9 @@ struct fnic_cpy_wq {
/* Per-instance private data structure */
struct fnic {
int fnic_num;
- struct fc_lport *lport;
- struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
+ enum fnic_role_e role;
+ struct fnic_iport_s iport;
+ struct Scsi_Host *host;
struct vnic_dev_bar bar0;
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
@@ -255,6 +382,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct completion reset_completion_wait;
struct mutex sgreset_mutex;
spinlock_t sgreset_lock; /* lock for sgreset */
struct scsi_cmnd *sgreset_sc;
@@ -268,25 +396,27 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
- u32 link_events:1; /* set when we get any link event*/
-
- struct completion *remove_wait; /* device remove thread blocks */
+ struct completion *fw_reset_done;
+ u32 reset_in_progress;
atomic_t in_flight; /* io counter */
bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
+ unsigned long lock_flags;
u16 vlan_id; /* VLAN tag including priority */
u8 data_src_addr[ETH_ALEN];
u64 fcp_input_bytes; /* internal statistic */
u64 fcp_output_bytes; /* internal statistic */
u32 link_down_cnt;
+ u32 soft_reset_count;
int link_status;
struct list_head list;
+ struct list_head links;
struct pci_dev *pdev;
struct vnic_fc_config config;
struct vnic_dev *vdev;
@@ -306,19 +436,29 @@ struct fnic {
struct work_struct link_work;
struct work_struct frame_work;
struct work_struct flush_work;
- struct sk_buff_head frame_queue;
- struct sk_buff_head tx_queue;
+ struct list_head frame_queue;
+ struct list_head tx_queue;
+ mempool_t *frame_pool;
+ mempool_t *frame_elem_pool;
+ struct work_struct tport_work;
+ struct list_head tport_event_list;
+
+ char subsys_desc[14];
+ int subsys_desc_len;
+ int pc_rscn_handling_status;
/*** FIP related data members -- start ***/
void (*set_vlan)(struct fnic *, u16 vlan);
struct work_struct fip_frame_work;
- struct sk_buff_head fip_frame_queue;
+ struct work_struct fip_timer_work;
+ struct list_head fip_frame_queue;
struct timer_list fip_timer;
- struct list_head vlans;
spinlock_t vlans_lock;
-
- struct work_struct event_work;
- struct list_head evlist;
+ struct timer_list retry_fip_timer;
+ struct timer_list fcs_ka_timer;
+ struct timer_list enode_ka_timer;
+ struct timer_list vn_ka_timer;
+ struct list_head vlan_list;
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
@@ -341,11 +481,6 @@ struct fnic {
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
};
-static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
-{
- return container_of(fip, struct fnic, ctlr);
-}
-
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern const struct attribute_group *fnic_host_groups[];
@@ -356,29 +491,29 @@ int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
-int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
+void fnic_tport_event_handler(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
void fnic_handle_event(struct work_struct *work);
+void fdls_reclaim_oxid_handler(struct work_struct *work);
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
void fnic_flush_tx(struct work_struct *work);
-void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
-void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-void fnic_update_mac(struct fc_lport *, u8 *new);
void fnic_update_mac_locked(struct fnic *, u8 *new);
int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
-int fnic_host_reset(struct scsi_cmnd *);
-int fnic_reset(struct Scsi_Host *);
-void fnic_scsi_cleanup(struct fc_lport *);
-void fnic_scsi_abort_io(struct fc_lport *);
-void fnic_empty_scsi_cleanup(struct fc_lport *);
-void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
+int fnic_host_reset(struct Scsi_Host *shost);
+void fnic_reset(struct Scsi_Host *shost);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
+void fnic_get_host_port_state(struct Scsi_Host *shost);
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
-
+int fnic_stats_debugfs_init(struct fnic *fnic);
+void fnic_stats_debugfs_remove(struct fnic *fnic);
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_reset_work_handler(struct work_struct *work);
void fnic_handle_fip_event(struct fnic *fnic);
void fnic_fcoe_reset_vlans(struct fnic *fnic);
-void fnic_fcoe_evlist_free(struct fnic *fnic);
-extern void fnic_handle_fip_timer(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct timer_list *t);
static inline int
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
@@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
+void fnic_free_txq(struct list_head *head);
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc);
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
+void fnic_delete_fcp_tports(struct fnic *fnic);
+void fnic_flush_tport_event_list(struct fnic *fnic);
+int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
+unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
+ struct scsi_device *device);
+unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
+ struct scsi_device *device);
+void fnic_scsi_unload(struct fnic *fnic);
+void fnic_scsi_unload_cleanup(struct fnic *fnic);
+int fnic_get_debug_info(struct stats_debug_info *info,
+ struct fnic *fnic);
+
+struct fnic_scsi_iter_data {
+ struct fnic *fnic;
+ void *data1;
+ void *data2;
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2);
+};
+
+static inline bool
+fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
+{
+ struct fnic_scsi_iter_data *iter = iter_data;
+
+ return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
+}
+
+static inline void
+fnic_scsi_io_iter(struct fnic *fnic,
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2),
+ void *data1, void *data2)
+{
+ struct fnic_scsi_iter_data iter_data = {
+ .fn = fn,
+ .fnic = fnic,
+ .data1 = data1,
+ .data2 = data2,
+ };
+ scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
+}
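+
+/*
+ * Illustrative usage (hypothetical callback, not part of the driver):
+ *
+ *	static bool fnic_count_cb(struct fnic *fnic, struct scsi_cmnd *sc,
+ *				  void *data1, void *data2)
+ *	{
+ *		(*(unsigned int *)data1)++;
+ *		return true;
+ *	}
+ *
+ *	unsigned int count = 0;
+ *	fnic_scsi_io_iter(fnic, fnic_count_cb, &count, NULL);
+ *
+ * scsi_host_busy_iter() walks every busy command on fnic->host and
+ * invokes the supplied predicate via fnic_io_iter_handler().
+ */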
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i = i+8) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
+ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
+ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
+ }
+}
+
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ int len, char *pfx)
+{
+ uint32_t s_id, d_id;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
+ pfx, s_id, d_id, fchdr->fh_type,
+ FNIC_STD_GET_OX_ID(fchdr), len);
+
+ fnic_debug_dump(fnic, (uint8_t *)fchdr, len);
+
+}
+#else /* FNIC_DEBUG */
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ uint32_t len, char *pfx) {}
+#endif /* FNIC_DEBUG */
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index 0c5e57c7e322..705718f0809b 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -11,8 +11,8 @@
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
@@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev,
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
- return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n",
+ ((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
+ (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
+ "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 2619a2d4f5f1..5767862ae42f 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -7,6 +7,9 @@
#include <linux/vmalloc.h>
#include "fnic.h"
+extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer,
+ struct fnic *fnic);
+
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
@@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode,
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+ debug->buffer_len += fnic_get_debug_info(debug, fnic);
file->private_data = debug;
@@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
-void fnic_stats_debugfs_init(struct fnic *fnic)
+int fnic_stats_debugfs_init(struct fnic *fnic)
{
char name[16];
- snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+ snprintf(name, sizeof(name), "host%d", fnic->host->host_no);
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
-
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
-
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
+ return 0;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index a08293b2ad9f..1e8cd64f9a5c 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -14,701 +14,379 @@
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
-#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
-#include <scsi/libfc.h>
+#include <linux/etherdevice.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
-#include "fnic_fip.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
+#include "fip.h"
+
+#define MAX_RESET_WAIT_COUNT 64
-static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
-static void fnic_set_eth_mode(struct fnic *);
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
+static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;
-void fnic_handle_link(struct work_struct *work)
+/*
+ * Internal functions.
+ *
+ * Set the source MAC address to be used in outgoing frames.
+ */
+static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
+ uint8_t *src_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, link_work);
- unsigned long flags;
- int old_link_status;
- u32 old_link_down_cnt;
- u64 old_port_speed, new_port_speed;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- fnic->link_events = 1; /* less work to just set everytime*/
-
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- old_link_down_cnt = fnic->link_down_cnt;
- old_link_status = fnic->link_status;
- old_port_speed = atomic64_read(
- &fnic->fnic_stats.misc_stats.current_port_speed);
-
- fnic->link_status = vnic_dev_link_status(fnic->vdev);
- fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
-
- new_port_speed = vnic_dev_port_speed(fnic->vdev);
- atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
- new_port_speed);
- if (old_port_speed != new_port_speed)
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Current vnic speed set to: %llu\n",
- new_port_speed);
-
- switch (vnic_dev_port_speed(fnic->vdev)) {
- case DCEM_PORTSPEED_10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
- break;
- case DCEM_PORTSPEED_20G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
- break;
- case DCEM_PORTSPEED_25G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
- break;
- case DCEM_PORTSPEED_40G:
- case DCEM_PORTSPEED_4x10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
- break;
- case DCEM_PORTSPEED_100G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
- break;
- default:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
- break;
- }
-
- if (old_link_status == fnic->link_status) {
- if (!fnic->link_status) {
- /* DOWN -> DOWN */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN->DOWN",
- strlen("Link Status: DOWN->DOWN"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->down\n");
- } else {
- if (old_link_down_cnt != fnic->link_down_cnt) {
- /* UP -> DOWN -> UP */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status:UP_DOWN_UP",
- strlen("Link_Status:UP_DOWN_UP")
- );
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "link down\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status: UP_DOWN_UP_VLAN",
- strlen(
- "Link Status: UP_DOWN_UP_VLAN")
- );
- fnic_fcoe_send_vlan_req(fnic);
- return;
- }
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down->up: Link up\n");
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_UP",
- strlen("Link Status: UP_UP"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->up\n");
- }
- }
- } else if (fnic->link_status) {
- /* DOWN -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
- strlen("Link Status: DOWN_UP_VLAN"));
- fnic_fcoe_send_vlan_req(fnic);
-
- return;
- }
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->up: Link up\n");
- fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> DOWN */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down: Link down\n");
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_DOWN",
- strlen("Link Status: UP_DOWN"));
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "deleting fip-timer during link-down\n");
- del_timer_sync(&fnic->fip_timer);
- }
- fcoe_ctlr_link_down(&fnic->ctlr);
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ src_mac[0], src_mac[1], src_mac[2], src_mac[3],
+ src_mac[4], src_mac[5]);
+ memcpy(fnic->iport.fpma, src_mac, 6);
}
/*
- * This function passes incoming fabric frames to libFC
+ * This function initializes the dst_mac address to be
+ * used in outgoing frames
*/
-void fnic_handle_frame(struct work_struct *work)
+static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
+ uint8_t *dst_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, frame_work);
- struct fc_lport *lp = fnic->lport;
- unsigned long flags;
- struct sk_buff *skb;
- struct fc_frame *fp;
-
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
+ dst_mac[4], dst_mac[5]);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
- return;
- }
- fp = (struct fc_frame *)skb;
-
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- fc_exch_recv(lp, fp);
- }
+ memcpy(fnic->iport.fcfmac, dst_mac, 6);
}
-void fnic_fcoe_evlist_free(struct fnic *fnic)
+void fnic_get_host_port_state(struct Scsi_Host *shost)
{
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+ struct fnic_iport_s *iport = &fnic->iport;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- list_del(&fevt->list);
- kfree(fevt);
- }
+ if (!fnic->link_status)
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else if (iport->state == FNIC_IPORT_STATE_READY)
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_event(struct work_struct *work)
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
- struct fnic *fnic = container_of(work, struct fnic, event_work);
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
- unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- if (fnic->stop_rx_link_events) {
- list_del(&fevt->list);
- kfree(fevt);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- list_del(&fevt->list);
- switch (fevt->event) {
- case FNIC_EVT_START_VLAN_DISC:
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (linkup) {
+ if (iport->usefip) {
+ iport->state = FNIC_IPORT_STATE_FIP;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
fnic_fcoe_send_vlan_req(fnic);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- break;
- case FNIC_EVT_START_FCF_DISC:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Start FCF Discovery\n");
- fnic_fcoe_start_fcf_disc(fnic);
- break;
- default:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unknown event 0x%x\n", fevt->event);
- break;
+ } else {
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport->state: %d", iport->state);
+ fnic_fdls_disc_start(iport);
}
- kfree(fevt);
+ } else {
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+ fnic_common_fip_cleanup(fnic);
+ fnic_fdls_link_down(iport);
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-/**
- * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
- * @fip: The FCoE controller that received the frame
- * @skb: The received FIP frame
- *
- * Returns non-zero if the frame is rejected with unsupported cmd with
- * insufficient resource els explanation.
+
+/*
+ * The FPMA can be taken from the ethhdr (dst_mac), from the FLOGI
+ * response, or derived from the FC_MAP and FCID combination. These
+ * should all be the same; revisit this if they can ever differ.
*/
-static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
- struct sk_buff *skb)
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid)
{
- struct fc_lport *lport = fip->lp;
- struct fip_header *fiph;
- struct fc_frame_header *fh = NULL;
- struct fip_desc *desc;
- struct fip_encaps *els;
- u16 op;
- u8 els_op;
- u8 sub;
-
- size_t rlen;
- size_t dlen = 0;
-
- if (skb_linearize(skb))
- return 0;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
+ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
- if (skb->len < sizeof(*fiph))
- return 0;
+ memcpy(&fcmac[3], fcid, 3);
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ ethhdr->h_dest[0], ethhdr->h_dest[1],
+ ethhdr->h_dest[2], ethhdr->h_dest[3],
+ ethhdr->h_dest[4], ethhdr->h_dest[5]);
- if (op != FIP_OP_LS)
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
- if (sub != FIP_SC_REP)
- return 0;
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- if (rlen + sizeof(*fiph) > skb->len)
- return 0;
-
- desc = (struct fip_desc *)(fiph + 1);
- dlen = desc->fip_dlen * FIP_BPW;
+ fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
+ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
+}
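+
+/*
+ * Worked example (illustrative only): for an assigned FCID of 0x010203,
+ * the FPMA learned above is the FC_MAP prefix 0E:FC:00 followed by the
+ * FCID bytes, i.e. 0E:FC:00:01:02:03, while the FCF MAC is taken from
+ * the source address of the received frame.
+ */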
- if (desc->fip_dtype == FIP_DT_FLOGI) {
+void fnic_fdls_init(struct fnic *fnic, int usefip)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
- if (dlen < sizeof(*els) + sizeof(*fh) + 1)
- return 0;
+ /* Initialize iPort structure */
+ iport->state = FNIC_IPORT_STATE_INIT;
+ iport->fnic = fnic;
+ iport->usefip = usefip;
- els = (struct fip_encaps *)desc;
- fh = (struct fc_frame_header *)(els + 1);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
+ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);
- if (!fh)
- return 0;
+ INIT_LIST_HEAD(&iport->tport_list);
+ INIT_LIST_HEAD(&iport->tport_list_pending_del);
- /*
- * ELS command code, reason and explanation should be = Reject,
- * unsupported command and insufficient resource
- */
- els_op = *(u8 *)(fh + 1);
- if (els_op == ELS_LS_RJT) {
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Rejected by Switch\n");
- return 1;
- }
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Accepted by Switch\n");
- }
- return 0;
+ fnic_fdls_disc_init(iport);
}
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+void fnic_handle_link(struct work_struct *work)
{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- struct sk_buff *skb;
- char *eth_fr;
- struct fip_vlan *vlan;
- u64 vlan_tov;
+ struct fnic *fnic = container_of(work, struct fnic, link_work);
+ int old_link_status;
+ u32 old_link_down_cnt;
+ int max_count = 0;
- fnic_fcoe_reset_vlans(fnic);
- fnic->set_vlan(fnic, 0);
+ if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Interrupt mode is not MSI\n");
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Sending VLAN request...\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- skb = dev_alloc_skb(sizeof(struct fip_vlan));
- if (!skb)
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Stop link rx events\n");
return;
-
- eth_fr = (char *)skb->data;
- vlan = (struct fip_vlan *)eth_fr;
-
- memset(vlan, 0, sizeof(*vlan));
- memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
- memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
- vlan->eth.h_proto = htons(ETH_P_FIP);
-
- vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
- vlan->fip.fip_op = htons(FIP_OP_VLAN);
- vlan->fip.fip_subcode = FIP_SC_VL_REQ;
- vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
-
- vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
- vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
- memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
-
- vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
- vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
- put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
- atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
-
- skb_put(skb, sizeof(*vlan));
- skb->protocol = htons(ETH_P_FIP);
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- fip->send(fip, skb);
-
- /* set a timer so that we can retry if there no response */
- vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
- mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
-}
-
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
-{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fip_header *fiph;
- struct fip_desc *desc;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u16 vid;
- size_t rlen;
- size_t dlen;
- struct fcoe_vlan *vlan;
- u64 sol_time;
- unsigned long flags;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response...\n");
-
- fiph = (struct fip_header *) skb->data;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
- ntohs(fiph->fip_op), fiph->fip_subcode);
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- fnic_fcoe_reset_vlans(fnic);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- desc = (struct fip_desc *)(fiph + 1);
- while (rlen > 0) {
- dlen = desc->fip_dlen * FIP_BPW;
- switch (desc->fip_dtype) {
- case FIP_DT_VLAN:
- vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
- shost_printk(KERN_INFO, fnic->lport->host,
- "process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan) {
- /* retry from timer */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- goto out;
- }
- vlan->vid = vid & 0x0fff;
- vlan->state = FIP_VLAN_AVAIL;
- list_add_tail(&vlan->list, &fnic->vlans);
- break;
- }
- desc = (struct fip_desc *)((char *)desc + dlen);
- rlen -= dlen;
}
- /* any VLAN descriptors present ? */
- if (list_empty(&fnic->vlans)) {
- /* retry from timer */
- atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "No VLAN descriptors in FIP VLAN response\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- goto out;
+ /* Do not process if the fnic is already in a transitional state */
+ if ((fnic->state != FNIC_IN_ETH_MODE)
+ && (fnic->state != FNIC_IN_FC_MODE)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic in transitional state: %d. link up: %d ignored",
+ fnic->state, vnic_dev_link_status(fnic->vdev));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Current link status: %d iport state: %d\n",
+ fnic->link_status, fnic->iport.state);
+ return;
}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count++;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(fip);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-out:
- return;
-}
-
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
-{
- unsigned long flags;
- struct fcoe_vlan *vlan;
- u64 sol_time;
-
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count = 1;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(&fnic->ctlr);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-}
-
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
-{
- unsigned long flags;
- struct fcoe_vlan *fvlan;
+ old_link_down_cnt = fnic->link_down_cnt;
+ old_link_status = fnic->link_status;
+ fnic->link_status = vnic_dev_link_status(fnic->vdev);
+ fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait\n");
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "waiting for reset completion\n");
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "woken up from reset completion wait\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ max_count++;
+ if (max_count >= MAX_RESET_WAIT_COUNT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rstth waited for too long. Skipping handle link event\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
}
-
- fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- if (fvlan->state == FIP_VLAN_USED) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset in progress\n");
+ fnic->reset_in_progress = IN_PROGRESS;
+
+ if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
+ (fnic->link_status != old_link_status)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old link status: %d link status: %d\n",
+ old_link_status, (int) fnic->link_status);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old down count %d down count: %d\n",
+ old_link_down_cnt, (int) fnic->link_down_cnt);
}
- if (fvlan->state == FIP_VLAN_SENT) {
- fvlan->state = FIP_VLAN_USED;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ if (old_link_status == fnic->link_status) {
+ if (!fnic->link_status) {
+ /* DOWN -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->down\n");
+ } else {
+ if (old_link_down_cnt != fnic->link_down_cnt) {
+ /* UP -> DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->up\n");
+ }
+ }
+ } else if (fnic->link_status) {
+ /* DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
-}
-static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
-{
- struct fnic_event *fevt;
- unsigned long flags;
-
- fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
- if (!fevt)
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
- fevt->fnic = fnic;
- fevt->event = ev;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- list_add_tail(&fevt->list, &fnic->evlist);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- schedule_work(&fnic->event_work);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset completion\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+void fnic_handle_frame(struct work_struct *work)
{
- struct fip_header *fiph;
- int ret = 1;
- u16 op;
- u8 sub;
+ struct fnic *fnic = container_of(work, struct fnic, frame_work);
+ struct fnic_frame_list *cur_frame, *next;
+ int fchdr_offset = 0;
- if (!skb || !(skb->data))
- return -1;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
+ if (fnic->stop_rx_link_events) {
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
+ return;
+ }
- if (skb_linearize(skb))
- goto drop;
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Cannot process frame in transitional state\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ list_del(&cur_frame->links);
- if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
- goto drop;
+ /* Frames from FCP_RQ will have ethhdrs stripped off */
+ fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET;
- if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
- goto drop;
+ fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
+ cur_frame->frame_len, fchdr_offset);
- if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
- if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
- goto drop;
- /* pass it on to fcoe */
- ret = 1;
- } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
- /* set the vlan as used */
- fnic_fcoe_process_vlan_resp(fnic, skb);
- ret = 0;
- } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
- /* received CVL request, restart vlan disc */
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- /* pass it on to fcoe */
- ret = 1;
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
-drop:
- return ret;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
void fnic_handle_fip_frame(struct work_struct *work)
{
+ struct fnic_frame_list *cur_frame, *next;
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned long flags;
- struct sk_buff *skb;
- struct ethhdr *eh;
- while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing FIP frame\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
+ links) {
if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
return;
}
+
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->fip_frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic->state != FNIC_IN_ETH_MODE) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_FIP)) {
- skb_pull(skb, sizeof(*eh));
- if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
- dev_kfree_skb(skb);
- continue;
- }
- /*
- * If there's FLOGI rejects - clear all
- * fcf's & restart from scratch
- */
- if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
- atomic64_inc(
- &fnic_stats->vlan_stats.flogi_rejects);
- shost_printk(KERN_INFO, fnic->lport->host,
- "Trigger a Link down - VLAN Disc\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- /* start FCoE VLAN discovery */
- fnic_fcoe_send_vlan_req(fnic);
- dev_kfree_skb(skb);
- continue;
- }
- fcoe_ctlr_recv(&fnic->ctlr, skb);
- continue;
+
+ list_del(&cur_frame->links);
+
+ if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
- * @skb: Ethernet Frame.
+ * @fp: Ethernet Frame.
*/
-static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
{
- struct fc_frame *fp;
struct ethhdr *eh;
- struct fcoe_hdr *fcoe_hdr;
- struct fcoe_crc_eof *ft;
+ struct fnic_frame_list *fip_fr_elem;
+ unsigned long flags;
- /*
- * Undo VLAN encapsulation if present.
- */
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_8021Q)) {
- memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = skb_pull(skb, VLAN_HLEN);
- skb_reset_mac_header(skb);
- }
- if (eh->h_proto == htons(ETH_P_FIP)) {
- if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
- printk(KERN_ERR "Dropped FIP frame, as firmware "
- "uses non-FIP mode, Enable FIP "
- "using UCSM\n");
- goto drop;
- }
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- skb_queue_tail(&fnic->fip_frame_queue, skb);
+ eh = (struct ethhdr *) fp;
+ if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
+ fip_fr_elem = kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC);
+ if (!fip_fr_elem)
+ return 0;
+ fip_fr_elem->fp = fp;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
- return 1; /* let caller know packet was used */
- }
- if (eh->h_proto != htons(ETH_P_FCOE))
- goto drop;
- skb_set_network_header(skb, sizeof(*eh));
- skb_pull(skb, sizeof(*eh));
-
- fcoe_hdr = (struct fcoe_hdr *)skb->data;
- if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
- goto drop;
-
- fp = (struct fc_frame *)skb;
- fc_frame_init(fp);
- fr_sof(fp) = fcoe_hdr->fcoe_sof;
- skb_pull(skb, sizeof(struct fcoe_hdr));
- skb_reset_transport_header(skb);
-
- ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
- fr_eof(fp) = ft->fcoe_eof;
- skb_trim(skb, skb->len - sizeof(*ft));
- return 0;
-drop:
- dev_kfree_skb_irq(skb);
- return -1;
+ return 1; /* let caller know packet was used */
+ }
+
+ return 0;
}
/**
@@ -720,206 +398,147 @@ drop:
*/
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
- u8 *ctl = fnic->ctlr.ctl_src_addr;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u8 *ctl = iport->hwmac;
u8 *data = fnic->data_src_addr;
if (is_zero_ether_addr(new))
new = ctl;
if (ether_addr_equal(data, new))
return;
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "update_mac %pM\n", new);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Update MAC: %u\n", *new);
+
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
+
memcpy(data, new, ETH_ALEN);
if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
-/**
- * fnic_update_mac() - set data MAC address and filters.
- * @lport: local port.
- * @new: newly-assigned FCoE MAC address.
- */
-void fnic_update_mac(struct fc_lport *lport, u8 *new)
-{
- struct fnic *fnic = lport_priv(lport);
-
- spin_lock_irq(&fnic->fnic_lock);
- fnic_update_mac_locked(fnic, new);
- spin_unlock_irq(&fnic->fnic_lock);
-}
-
-/**
- * fnic_set_port_id() - set the port_ID after successful FLOGI.
- * @lport: local port.
- * @port_id: assigned FC_ID.
- * @fp: received frame containing the FLOGI accept or NULL.
- *
- * This is called from libfc when a new FC_ID has been assigned.
- * This causes us to reset the firmware to FC_MODE and setup the new MAC
- * address and FC_ID.
- *
- * It is also called with FC_ID 0 when we're logged off.
- *
- * If the FC_ID is due to point-to-point, fp may be NULL.
- */
-void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
-{
- struct fnic *fnic = lport_priv(lport);
- u8 *mac;
- int ret;
-
- FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
- "set port_id 0x%x fp 0x%p\n",
- port_id, fp);
-
- /*
- * If we're clearing the FC_ID, change to use the ctl_src_addr.
- * Set ethernet mode to send FLOGI.
- */
- if (!port_id) {
- fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
- fnic_set_eth_mode(fnic);
- return;
- }
-
- if (fp) {
- mac = fr_cb(fp)->granted_mac;
- if (is_zero_ether_addr(mac)) {
- /* non-FIP - FLOGI already accepted - ignore return */
- fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
- }
- fnic_update_mac(lport, mac);
- }
-
- /* Change state to reflect transition to FC mode */
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
- fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
- else {
- FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "Unexpected fnic state: %s processing FLOGI response",
- fnic_state_to_str(fnic->state));
- spin_unlock_irq(&fnic->fnic_lock);
- return;
- }
- spin_unlock_irq(&fnic->fnic_lock);
-
- /*
- * Send FLOGI registration to firmware to set up FC mode.
- * The new address will be set up when registration completes.
- */
- ret = fnic_flogi_reg_handler(fnic, port_id);
-
- if (ret < 0) {
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
- fnic->state = FNIC_IN_ETH_MODE;
- spin_unlock_irq(&fnic->fnic_lock);
- }
-}
-
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
- struct fc_frame *fp;
+ uint8_t *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned int ethhdr_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe = 0, fcoe_sof, fcoe_eof;
- u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+ u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
u8 fcs_ok = 1, packet_error = 0;
- u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+ u16 q_number, completed_index, vlan;
u32 rss_hash;
+ u16 checksum;
+ u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 fcoe = 0, fcoe_sof, fcoe_eof;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
+ u16 enet_bytes_written = 0;
+ u32 bytes_written = 0;
unsigned long flags;
+ struct fnic_frame_list *frame_elem = NULL;
+ struct ethhdr *eh;
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- skb = buf->os_buf;
- fp = (struct fc_frame *)skb;
+ DMA_FROM_DEVICE);
+ fp = (uint8_t *) buf->os_buf;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
- cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
- &tmpl, &fcp_bytes_written, &sof, &eof,
- &ingress_port, &packet_error,
- &fcoe_enc_error, &fcs_ok, &vlan_stripped,
- &vlan);
- skb_trim(skb, fcp_bytes_written);
- fr_sof(fp) = sof;
- fr_eof(fp) = eof;
-
+ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index, &eop, &sop,
+ &fcoe_fnic_crc_ok, &exchange_id, &tmpl,
+ &fcp_bytes_written, &sof, &eof, &ingress_port,
+ &packet_error, &fcoe_enc_error, &fcs_ok,
+ &vlan_stripped, &vlan);
+ ethhdr_stripped = 1;
+ bytes_written = fcp_bytes_written;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop,
- &rss_type, &csum_not_calc, &rss_hash,
- &bytes_written, &packet_error,
- &vlan_stripped, &vlan, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok,
- &fcoe_enc_error, &fcoe_eof,
- &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4,
- &ipv4_fragment, &fcs_ok);
- skb_trim(skb, bytes_written);
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &enet_bytes_written,
+ &packet_error, &vlan_stripped, &vlan,
+ &checksum, &fcoe_sof, &fcoe_fnic_crc_ok,
+ &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok,
+ &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
+ &ipv4_fragment, &fcs_ok);
+
+ ethhdr_stripped = 0;
+ bytes_written = enet_bytes_written;
+
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcs error. dropping packet.\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs error. Dropping packet.\n", fnic);
goto drop;
}
- if (fnic_import_rq_eth_pkt(fnic, skb))
- return;
+ eh = (struct ethhdr *) fp;
+ if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {
+ if (fnic_import_rq_eth_pkt(fnic, fp))
+ return;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping h_proto 0x%x",
+ be16_to_cpu(eh->h_proto));
+ goto drop;
+ }
} else {
- /* wrong CQ type*/
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic rq_cmpl wrong cq type x%x\n", type);
+ /* wrong CQ type */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
- if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic rq_cmpl fcoe x%x fcsok x%x"
- " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
- " x%x\n",
- fcoe, fcs_ok, packet_error,
- fcoe_fc_crc_ok, fcoe_enc_error);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
+ fcoe, fcs_ok, packet_error,
+ fcoe_fnic_crc_ok, fcoe_enc_error);
goto drop;
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic->stop_rx_link_events: %d\n",
+ fnic->stop_rx_link_events);
goto drop;
}
- fr_dev(fp) = fnic->lport;
+
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
- (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ goto drop;
}
+ frame_elem->fp = fp;
+ frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
+ frame_elem->frame_len = bytes_written;
- skb_queue_tail(&fnic->frame_queue, skb);
- queue_work(fnic_event_queue, &fnic->frame_work);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&frame_elem->links, &fnic->frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ queue_work(fnic_event_queue, &fnic->frame_work);
return;
+
drop:
- dev_kfree_skb_irq(skb);
+ kfree(fp);
}
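+
+/*
+ * Receive-path lifecycle sketch (illustrative only, not part of this
+ * patch): each completed RQ buffer is wrapped in a fnic_frame_list
+ * element from frame_elem_pool, queued on fnic->frame_queue, and later
+ * unwrapped and freed by fnic_handle_frame(). A minimal producer,
+ * using the hypothetical helper name fnic_queue_rx_frame:
+ */
+#if 0	/* example only */
+static int fnic_queue_rx_frame(struct fnic *fnic, void *fp, u32 len,
+ u8 ethhdr_stripped)
+{
+ struct fnic_frame_list *elem;
+ unsigned long flags;
+
+ elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC | __GFP_ZERO);
+ if (!elem)
+ return -ENOMEM;
+
+ elem->fp = fp;
+ elem->frame_len = len;
+ elem->rx_ethhdr_stripped = ethhdr_stripped;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&elem->links, &fnic->frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ queue_work(fnic_event_queue, &fnic->frame_work);
+ return 0;
+}
+#endif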
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
@@ -945,10 +564,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
- if (cur_work_done) {
+ if (cur_work_done && fnic->stop_rx_link_events != 1) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"fnic_alloc_rq_frame can't alloc"
" frame\n");
}
@@ -966,218 +585,179 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
+ void *buf;
u16 len;
dma_addr_t pa;
- int r;
+ int ret;
- len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
- skb = dev_alloc_skb(len);
- if (!skb) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unable to allocate RQ sk_buff\n");
+ len = FNIC_FRAME_HT_ROOM;
+ buf = kmalloc(len, GFP_ATOMIC);
+ if (!buf) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unable to allocate RQ buffer of size: %d\n", len);
return -ENOMEM;
}
- skb_reset_mac_header(skb);
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- skb_put(skb, len);
- pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+
+ pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- r = -ENOMEM;
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
- goto free_skb;
+ ret = -ENOMEM;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCI mapping failed with error %d\n", ret);
+ goto free_buf;
}
- fnic_queue_rq_desc(rq, skb, pa, len);
+ fnic_queue_rq_desc(rq, buf, pa, len);
return 0;
-
-free_skb:
- kfree_skb(skb);
- return r;
+free_buf:
+ kfree(buf);
+ return ret;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
+ void *rq_buf = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(rq_buf);
buf->os_buf = NULL;
}
-/**
- * fnic_eth_send() - Send Ethernet frame.
- * @fip: fcoe_ctlr instance.
- * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
- */
-void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
-{
- struct fnic *fnic = fnic_from_ctlr(fip);
- struct vnic_wq *wq = &fnic->wq[0];
- dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- unsigned long flags;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr = (struct ethhdr *)skb_mac_header(skb);
- vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
- memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- } else {
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- }
-
- pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- printk(KERN_ERR "DMA mapping failed\n");
- goto free_skb;
- }
-
- spin_lock_irqsave(&fnic->wq_lock[0], flags);
- if (!vnic_wq_desc_avail(wq))
- goto irq_restore;
-
- fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1);
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- return;
-
-irq_restore:
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
-free_skb:
- kfree_skb(skb);
-}
-
/*
* Send FC frame.
*/
-static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
struct vnic_wq *wq = &fnic->wq[0];
- struct sk_buff *skb;
dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- struct fcoe_hdr *fcoe_hdr;
- struct fc_frame_header *fh;
- u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
- fh = fc_frame_header_get(fp);
- skb = fp_skb(fp);
+ pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
- fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
- return 0;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
- vlan_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr = (struct ethhdr *)vlan_hdr;
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
- } else {
- eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
- eth_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr->h_proto = htons(ETH_P_FCOE);
- fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
- }
-
- if (fnic->ctlr.map_dest)
- fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
- else
- memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
- memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
-
- tot_len = skb->len;
- BUG_ON(tot_len % 4);
-
- memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
- fcoe_hdr->fcoe_sof = fr_sof(fp);
- if (FC_FCOE_VER)
- FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
-
- pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- ret = -ENOMEM;
- printk(KERN_ERR "DMA map failed with error %d\n", ret);
- goto free_skb_on_err;
- }
-
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
- (char *)eth_hdr, tot_len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ if ((fnic_fc_trace_set_data(fnic->fnic_num,
+ FNIC_FC_SEND | 0x80, (char *) frame,
+ frame_len)) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic ctlr frame trace error");
}
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "vnic work queue descriptor is not available");
ret = -1;
- goto irq_restore;
+ goto fnic_send_frame_end;
}
- fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1, 1, 1);
+ /* hw inserts cos value */
+ fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
+ 0, fnic->vlan_id, 1, 1, 1);
-irq_restore:
+fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-
-free_skb_on_err:
- if (ret)
- dev_kfree_skb_any(fp_skb(fp));
-
return ret;
}
-/*
- * fnic_send
- * Routine to send a raw frame
+/**
+ * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE
+ * info. This interface is used only in the non-fast path (login, fabric
+ * registrations, etc.).
+ *
+ * @fnic: fnic instance
+ * @frame: frame structure with FC payload filled in
+ * @frame_size: length of the frame to be sent
+ * @srcmac: source mac address
+ * @dstmac: destination mac address
+ *
+ * Called with the fnic lock held.
*/
-int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+static int
+fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
+ uint8_t *srcmac, uint8_t *dstmac)
{
- struct fnic *fnic = lport_priv(lp);
- unsigned long flags;
+ struct ethhdr *pethhdr;
+ struct fcoe_hdr *pfcoe_hdr;
+ struct fnic_frame_list *frame_elem;
+ int len = frame_size;
+ int ret;
+ struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
+ FNIC_ETH_FCOE_HDRS_OFFSET);
- if (fnic->in_remove) {
- dev_kfree_skb(fp_skb(fp));
- return -1;
- }
+ pethhdr = (struct ethhdr *) frame;
+ pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
+ memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
+ memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);
+
+ pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
+ pfcoe_hdr->fcoe_sof = FC_SOF_I3;
/*
* Queue frame if in a transitional state.
* This occurs while registering the Port_ID / MAC address after FLOGI.
*/
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic->state != FNIC_IN_FC_MODE)
+ && (fnic->state != FNIC_IN_ETH_MODE)) {
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ return -ENOMEM;
+ }
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
+ ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ frame_elem->fp = frame;
+ frame_elem->frame_len = len;
+ list_add_tail(&frame_elem->links, &fnic->tx_queue);
return 0;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return fnic_send_frame(fnic, fp);
+ fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");
+
+ ret = fnic_send_frame(fnic, frame, len);
+ return ret;
+}
+
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *dstmac, *srcmac;
+
+ /* If module unload is in-progress, don't send */
+ if (fnic->in_remove)
+ return;
+
+ if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
+ srcmac = iport->fpma;
+ dstmac = iport->fcfmac;
+ } else {
+ srcmac = iport->hwmac;
+ dstmac = FCOE_ALL_FCF_MAC;
+ }
+
+ fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
+}
+
+int
+fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+
+ if (fnic->in_remove)
+ return -1;
+
+ fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
+ return fnic_send_frame(fnic, frame, frame_size);
}
/**
@@ -1193,64 +773,87 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
void fnic_flush_tx(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, flush_work);
- struct sk_buff *skb;
struct fc_frame *fp;
+ struct fnic_frame_list *cur_frame, *next;
- while ((skb = skb_dequeue(&fnic->tx_queue))) {
- fp = (struct fc_frame *)skb;
- fnic_send_frame(fnic, fp);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flush queued frames");
+
+ list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
+ fp = cur_frame->fp;
+ list_del(&cur_frame->links);
+ fnic_send_frame(fnic, fp, cur_frame->frame_len);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
}
-/**
- * fnic_set_eth_mode() - put fnic into ethernet mode.
- * @fnic: fnic device
- *
- * Called without fnic lock held.
- */
-static void fnic_set_eth_mode(struct fnic *fnic)
+int
+fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp)
{
- unsigned long flags;
- enum fnic_state old_state;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr;
int ret;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-again:
- old_state = fnic->state;
- switch (old_state) {
- case FNIC_IN_FC_MODE:
- case FNIC_IN_ETH_TRANS_FC_MODE:
- default:
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id,
+ fp, fnic->state);
- ret = fnic_fw_reset_handler(fnic);
+ if (fp) {
+ ethhdr = (struct ethhdr *) fp;
+ vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
+ }
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
- goto again;
- if (ret)
- fnic->state = old_state;
- break;
-
- case FNIC_IN_FC_TRANS_ETH_MODE:
- case FNIC_IN_ETH_MODE:
- break;
+ /* Change state to reflect transition to FC mode */
+ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
+ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unexpected fnic state while processing FLOGI response\n");
+ return -1;
+ }
+
+ /*
+ * Send FLOGI registration to firmware to set up FC mode.
+ * The new address will be set up when registration completes.
+ */
+ ret = fnic_flogi_reg_handler(fnic, port_id);
+ if (ret < 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration error ret: %d fnic state: %d\n",
+ ret, fnic->state);
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+ fnic->state = FNIC_IN_ETH_MODE;
+
+ return -1;
+ }
+ iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration success\n");
+ return 0;
+}
+
+void fnic_free_txq(struct list_head *head)
+{
+ struct fnic_frame_list *cur_frame, *next;
+
+ list_for_each_entry_safe(cur_frame, next, head, links) {
+ list_del(&cur_frame->links);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
- struct sk_buff *skb = buf->os_buf;
- struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(fp_skb(fp));
+ mempool_free(buf->os_buf, fnic->frame_pool);
buf->os_buf = NULL;
}
@@ -1288,119 +891,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(buf->os_buf);
buf->os_buf = NULL;
}
-void fnic_fcoe_reset_vlans(struct fnic *fnic)
+void
+fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
+ unsigned long flags)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fc_rport *rport;
+ struct fc_rport_identifiers ids;
+ struct rport_dd_data_s *rdd_data;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding rport fcid: 0x%x", tport->fcid);
+
+ ids.node_name = tport->wwnn;
+ ids.port_name = tport->wwpn;
+ ids.port_id = tport->fcid;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ rport = fc_remote_port_add(fnic->host, 0, &ids);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!rport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to add rport for tport: 0x%x", tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added rport fcid: 0x%x", tport->fcid);
+
+ /* Mimic these assignments in queuecommand to avoid timing issues */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+}
+
+void
+fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags)
+{
+ struct fnic *fnic = iport->fnic;
+ struct rport_dd_data_s *rdd_data;
+
+ struct fc_rport *rport;
+
+ if (!tport)
+ return;
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
+ rport = tport->rport;
+
+ if (rport) {
+ /* tport resource release will be done
+ * after fnic_terminate_rport_io()
+ */
+ tport->flags |= FNIC_FDLS_TPORT_DELETED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ /* Interface to scsi_fc_transport */
+ fc_remote_port_delete(rport);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
+ tport->fcid);
+
+ /*
+ * the dd_data is allocated by fc transport
+ * of size dd_fcrport_size
+ */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = NULL;
+ rdd_data->iport = NULL;
+ list_del(&tport->links);
+ kfree(tport);
+ } else {
+ fnic_del_tport_timer_sync(fnic, tport);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+}
+
+void fnic_delete_fcp_tports(struct fnic *fnic)
{
+ struct fnic_tport_s *tport, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fcoe_vlan *next;
- /*
- * indicate a link down to fcoe so that all fcf's are free'd
- * might not be required since we did this before sending vlan
- * discovery request
- */
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (!list_empty(&fnic->vlans)) {
- list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
- list_del(&vlan->list);
- kfree(vlan);
- }
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing fcp rport fcid: 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ fnic_del_tport_timer_sync(fnic, tport);
+ fnic_fdls_remove_tport(&fnic->iport, tport, flags);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_fip_timer(struct fnic *fnic)
+/**
+ * fnic_tport_event_handler() - Handler for remote port events
+ * in the tport_event_queue.
+ *
+ * @work: Embedded work struct of the fnic whose tport events are dequeued
+ */
+void fnic_tport_event_handler(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, tport_work);
+ struct fnic_tport_event_s *cur_evt, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u64 sol_time;
+ struct fnic_tport_s *tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ tport = cur_evt->arg1;
+ switch (cur_evt->event) {
+ case TGT_EV_RPORT_ADD:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Add rport event");
+ if (tport->state == FDLS_TGT_STATE_READY) {
+ fnic_fdls_add_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Target not ready. Add rport event dropped: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_RPORT_DEL:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove rport event");
+ if (tport->state == FDLS_TGT_STATE_OFFLINING) {
+ fnic_fdls_remove_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "remove rport event dropped tport fcid: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_TPORT_DELETE:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Delete tport event");
+ fdls_delete_tport(tport->iport, tport);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown tport event");
+ break;
+ }
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
- if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
- return;
+void fnic_flush_tport_event_list(struct fnic *fnic)
+{
+ struct fnic_tport_event_s *cur_evt, *next;
+ unsigned long flags;
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* no vlans available, try again */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan %d state %d sol_count %d\n",
- vlan->vid, vlan->state, vlan->sol_count);
- switch (vlan->state) {
- case FIP_VLAN_USED:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FIP VLAN is selected for FC transaction\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- break;
- case FIP_VLAN_FAILED:
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* if all vlans are in failed state, restart vlan disc */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- break;
- case FIP_VLAN_SENT:
- if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
- /*
- * no response on this vlan, remove from the list.
- * Try the next vlan
- */
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Dequeue this VLAN ID %d from list\n",
- vlan->vid);
- list_del(&vlan->list);
- kfree(vlan);
- vlan = NULL;
- if (list_empty(&fnic->vlans)) {
- /* we exhausted all vlans, restart vlan disc */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan list empty, "
- "trigger vlan disc\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
- }
- /* check the next vlan */
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
- list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- }
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
- vlan->sol_count++;
- sol_time = jiffies + msecs_to_jiffies
- (FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
- break;
+void fnic_reset_work_handler(struct work_struct *work)
+{
+ struct fnic *cur_fnic, *next_fnic;
+ unsigned long reset_fnic_list_lock_flags;
+ int host_reset_ret_code;
+
+ /*
+ * This work runs on a single thread, per fnic module rather than
+ * per fnic instance. All fnics that need to be reset have been
+ * serialized via the reset fnic list.
+ */
+ spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
+ list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
+ list_del(&cur_fnic->links);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
+ cur_fnic->fnic_num);
+ host_reset_ret_code = fnic_host_reset(cur_fnic->host);
+ dev_err(&cur_fnic->pdev->dev,
+ "fnic: <%d>: returned from host reset with status: %d\n",
+ cur_fnic->fnic_num, host_reset_ret_code);
+
+ spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+ cur_fnic->pc_rscn_handling_status =
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS;
+ spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
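The reset work handler above uses a common drain idiom: detach one node, drop the spinlock for the slow host reset, then retake the lock before the iterator advances. Below is a minimal sketch of that idiom outside the driver, with illustrative names (item, process_item, pending) that are not part of fnic; it assumes, as the driver's comment states, that only this single-threaded worker ever removes entries.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head links;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

/* Slow path that must run without the lock held (may sleep). */
static void process_item(struct item *it)
{
	kfree(it);
}

static void drain_pending(void)
{
	struct item *cur, *next;
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_for_each_entry_safe(cur, next, &pending, links) {
		list_del(&cur->links);
		spin_unlock_irqrestore(&pending_lock, flags);

		process_item(cur);	/* runs unlocked */

		/*
		 * Retake the lock before advancing. Safe only because this
		 * single-threaded worker is the sole remover; 'next' was
		 * cached before the lock was dropped.
		 */
		spin_lock_irqsave(&pending_lock, flags);
	}
	spin_unlock_irqrestore(&pending_lock, flags);
}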
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
new file mode 100644
index 000000000000..8e610b65ad57
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FNIC_FDLS_H_
+#define _FNIC_FDLS_H_
+
+#include "fnic_stats.h"
+#include "fdls_fc.h"
+
+/* FDLS - Fabric discovery and login services
+ * -> VLAN discovery
+ *    -> retried every "retry delay" seconds until it succeeds.
+ * <- List of VLANs
+ *
+ * -> Solicitation
+ * <- Solicitation response (Advertisement)
+ *
+ * -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV)
+ * <- FLOGI response
+ *
+ * -> FCF keep alive
+ * <- FCF keep alive
+ *
+ * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
+ *
+ * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- SCR response
+ * -> Retry SCR - Number of retries 2
+ *
+ * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
+ *    -> Retry on BUSY until it succeeds
+ *    -> 2 retries on timeout
+ *
+ * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ *    -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2)
+ *    -> Ignore if both retries fail.
+ *
+ * Session establishment with targets
+ * For each PWWN
+ * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI. Num retries using vnic.cfg
+ *
+ * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PRLI response
+ * -> Retry PRLI. Num retries using vnic.cfg
+ *
+ */
+
+#define FDLS_RETRY_COUNT 2
+
+/*
+ * OXID encoding:
+ * bits 0-8: oxid idx - allocated from pool
+ * bits 9-13: oxid frame code from fnic_oxid_frame_type_e
+ * bits 14-15: all zeros
+ */
+#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */
+#define FNIC_OXID_ENCODE(idx, frame_type) ((frame_type) | (idx))
+#define FNIC_FRAME_MASK 0xFE00
+#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK)
+#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1))
+
+#define OXID_RECLAIM_TOV(iport) (2 * (iport)->r_a_tov) /* in milliseconds */
+
+#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1
+#define FNIC_FDLS_FPMA_LEARNT 0x2
+
+/* tport flags */
+#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1
+#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2
+#define FNIC_FDLS_TPORT_SEND_ADISC 0x4
+#define FNIC_FDLS_RETRY_FRAME 0x8
+#define FNIC_FDLS_TPORT_BUSY 0x10
+#define FNIC_FDLS_TPORT_TERMINATING 0x20
+#define FNIC_FDLS_TPORT_DELETED 0x40
+#define FNIC_FDLS_SCSI_REGISTERED 0x200
+
+/* Retry supported by rport (returned by PRLI service parameters) */
+#define FDLS_FC_RP_FLAGS_RETRY 0x1
+
+#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state)
+#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state)
+
+#define FNIC_FDMI_ACTIVE 0x8
+#define FNIC_FIRST_LINK_UP 0x2
+
+#define fdls_set_tport_state(_tport, _state) ((_tport)->state = (_state))
+#define fdls_get_tport_state(_tport) ((_tport)->state)
+
+#define FNIC_PORTSPEED_10GBIT 1
+#define FNIC_FRAME_HT_ROOM (2148)
+#define FNIC_FCOE_FRAME_MAXSZ (2112)
+
+
+#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000
+#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200
+#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400
+#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600
+#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800
+#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00
+#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00
+#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00
+#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000
+#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200
+#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400
+#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600
+#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800
+#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00
+#define FNIC_FRAME_TYPE_TGT_LOGO 0x2C00
+
+struct fnic_fip_fcf_s {
+ uint16_t vlan_id;
+ uint8_t fcf_mac[6];
+ uint8_t fcf_priority;
+ uint32_t fka_adv_period;
+ uint8_t ka_disabled;
+};
+
+enum fnic_fdls_state_e {
+ FDLS_STATE_INIT = 0,
+ FDLS_STATE_LINKDOWN,
+ FDLS_STATE_FABRIC_LOGO,
+ FDLS_STATE_FLOGO_DONE,
+ FDLS_STATE_FABRIC_FLOGI,
+ FDLS_STATE_FABRIC_PLOGI,
+ FDLS_STATE_RPN_ID,
+ FDLS_STATE_REGISTER_FC4_TYPES,
+ FDLS_STATE_REGISTER_FC4_FEATURES,
+ FDLS_STATE_SCR,
+ FDLS_STATE_GPN_FT,
+ FDLS_STATE_TGT_DISCOVERY,
+ FDLS_STATE_RSCN_GPN_FT,
+ FDLS_STATE_SEND_GPNFT
+};
+
+struct fnic_fdls_fabric_s {
+ enum fnic_fdls_state_e state;
+ uint32_t flags;
+ struct list_head tport_list; /* List of discovered tports */
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int del_fdmi_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ int fdmi_retry;
+ struct timer_list fdmi_timer;
+ int fdmi_pending;
+};
+
+struct fnic_fdls_fip_s {
+ uint32_t state;
+ uint32_t flogi_retry;
+};
+
+/* Message to tport_event_handler */
+enum fnic_tgt_msg_id {
+ TGT_EV_NONE = 0,
+ TGT_EV_RPORT_ADD,
+ TGT_EV_RPORT_DEL,
+ TGT_EV_TPORT_DELETE,
+ TGT_EV_REMOVE
+};
+
+struct fnic_tport_event_s {
+ struct list_head links;
+ enum fnic_tgt_msg_id event;
+ void *arg1;
+};
+
+enum fdls_tgt_state_e {
+ FDLS_TGT_STATE_INIT = 0,
+ FDLS_TGT_STATE_PLOGI,
+ FDLS_TGT_STATE_PRLI,
+ FDLS_TGT_STATE_READY,
+ FDLS_TGT_STATE_LOGO_RECEIVED,
+ FDLS_TGT_STATE_ADISC,
+ FDL_TGT_STATE_PLOGO,
+ FDLS_TGT_STATE_OFFLINING,
+ FDLS_TGT_STATE_OFFLINE
+};
+
+struct fnic_tport_s {
+ struct list_head links; /* To link the tports */
+ enum fdls_tgt_state_e state;
+ uint32_t flags;
+ uint32_t fcid;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t active_oxid;
+ uint16_t tgt_flags;
+ atomic_t in_flight; /* io counter */
+ uint16_t max_payload_size;
+ uint16_t r_a_tov;
+ uint16_t e_d_tov;
+ uint16_t lun0_delay;
+ int max_concur_seqs;
+ uint32_t fcp_csp;
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ unsigned int num_pending_cmds;
+ int nexus_restart_count;
+ int exch_reset_in_progress;
+ void *iport;
+ struct work_struct tport_del_work;
+ struct completion *tport_del_done;
+ struct fc_rport *rport;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+/* OXID pool related structures */
+struct reclaim_entry_s {
+ struct list_head links;
+ /* oxid that needs to be freed after 2*r_a_tov */
+ uint16_t oxid_idx;
+	/* expiry time in jiffies; used to determine the remaining wait */
+ unsigned long expires;
+ unsigned long *bitmap;
+};
+
+/* used for allocating oxids for fabric and fdmi requests */
+struct fnic_oxid_pool_s {
+ DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
+ int sz; /* size of the pool or block */
+ int next_idx; /* used for cycling through the oxid pool */
+
+	/* oxids whose delayed free still needs to be rescheduled */
+ DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
+ struct delayed_work schedule_oxid_free_retry;
+
+ /* List of oxids that need to be freed and reclaimed.
+ * This list is shared by all the oxid pools
+ */
+ struct list_head oxid_reclaim_list;
+ /* Work associated with reclaim list */
+ struct delayed_work oxid_reclaim_work;
+};
+
+/* iport */
+enum fnic_iport_state_e {
+ FNIC_IPORT_STATE_INIT = 0,
+ FNIC_IPORT_STATE_LINK_WAIT,
+ FNIC_IPORT_STATE_FIP,
+ FNIC_IPORT_STATE_FABRIC_DISC,
+ FNIC_IPORT_STATE_READY
+};
+
+struct fnic_iport_s {
+ enum fnic_iport_state_e state;
+ struct fnic *fnic;
+ uint64_t boot_time;
+ uint32_t flags;
+ int usefip;
+ uint8_t hwmac[6]; /* HW MAC Addr */
+ uint8_t fpma[6]; /* Fabric Provided MA */
+ uint8_t fcfmac[6]; /* MAC addr of Fabric */
+ uint16_t vlan_id;
+ uint32_t fcid;
+
+ /* oxid pool */
+ struct fnic_oxid_pool_s oxid_pool;
+
+ /*
+	 * Fabric requests are serialized; only one is outstanding at a time.
+	 * Track the oxid so an abort can be sent if needed.
+ */
+ uint16_t active_oxid_fabric_req;
+ /* fdmi only */
+ uint16_t active_oxid_fdmi_plogi;
+ uint16_t active_oxid_fdmi_rhba;
+ uint16_t active_oxid_fdmi_rpa;
+
+ struct fnic_fip_fcf_s selected_fcf;
+ struct fnic_fdls_fip_s fip;
+ struct fnic_fdls_fabric_s fabric;
+ struct list_head tport_list;
+ struct list_head tport_list_pending_del;
+ /* list of tports for which we are yet to send PLOGO */
+ struct list_head inprocess_tport_list;
+ struct list_head deleted_tport_list;
+ struct work_struct tport_event_work;
+ uint32_t e_d_tov; /* msec */
+ uint32_t r_a_tov; /* msec */
+ uint32_t link_supported_speeds;
+ uint32_t max_flogi_retries;
+ uint32_t max_plogi_retries;
+ uint32_t plogi_timeout;
+ uint32_t service_params;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t max_payload_size;
+ spinlock_t deleted_tport_lst_lock;
+ struct completion *flogi_reg_done;
+ struct fnic_iport_stats iport_stats;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+struct rport_dd_data_s {
+ struct fnic_tport_s *tport;
+ struct fnic_iport_s *iport;
+};
+
+enum fnic_recv_frame_type_e {
+ FNIC_FABRIC_FLOGI_RSP = 1,
+ FNIC_FABRIC_PLOGI_RSP,
+ FNIC_FABRIC_RPN_RSP,
+ FNIC_FABRIC_RFT_RSP,
+ FNIC_FABRIC_RFF_RSP,
+ FNIC_FABRIC_SCR_RSP,
+ FNIC_FABRIC_GPN_FT_RSP,
+ FNIC_FABRIC_BLS_ABTS_RSP,
+ FNIC_FDMI_PLOGI_RSP,
+ FNIC_FDMI_REG_HBA_RSP,
+ FNIC_FDMI_RPA_RSP,
+ FNIC_FDMI_BLS_ABTS_RSP,
+ FNIC_FABRIC_LOGO_RSP,
+
+ /* responses to target requests */
+ FNIC_TPORT_PLOGI_RSP,
+ FNIC_TPORT_PRLI_RSP,
+ FNIC_TPORT_ADISC_RSP,
+ FNIC_TPORT_BLS_ABTS_RSP,
+ FNIC_TPORT_LOGO_RSP,
+
+ /* unsolicited requests */
+ FNIC_BLS_ABTS_REQ,
+ FNIC_ELS_PLOGI_REQ,
+ FNIC_ELS_RSCN_REQ,
+ FNIC_ELS_LOGO_REQ,
+ FNIC_ELS_ECHO_REQ,
+ FNIC_ELS_ADISC,
+ FNIC_ELS_RLS,
+ FNIC_ELS_RRQ,
+ FNIC_ELS_UNSUPPORTED_REQ,
+};
+
+enum fnic_port_speeds {
+ DCEM_PORTSPEED_NONE = 0,
+ DCEM_PORTSPEED_1G = 1000,
+ DCEM_PORTSPEED_2G = 2000,
+ DCEM_PORTSPEED_4G = 4000,
+ DCEM_PORTSPEED_8G = 8000,
+ DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_16G = 16000,
+ DCEM_PORTSPEED_20G = 20000,
+ DCEM_PORTSPEED_25G = 25000,
+ DCEM_PORTSPEED_32G = 32000,
+ DCEM_PORTSPEED_40G = 40000,
+ DCEM_PORTSPEED_4x10G = 41000,
+ DCEM_PORTSPEED_50G = 50000,
+ DCEM_PORTSPEED_64G = 64000,
+ DCEM_PORTSPEED_100G = 100000,
+ DCEM_PORTSPEED_128G = 128000,
+};
+
+/* Function Declarations */
+/* fdls_disc.c */
+void fnic_fdls_disc_init(struct fnic_iport_s *iport);
+void fnic_fdls_disc_start(struct fnic_iport_s *iport);
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset);
+void fnic_fdls_link_down(struct fnic_iport_s *iport);
+int fdls_init_frame_pool(struct fnic_iport_s *iport);
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid);
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid);
+void fdls_tgt_logout(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fnic_del_fabric_timer_sync(struct fnic *fnic);
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport);
+void fdls_send_fabric_logo(struct fnic_iport_s *iport);
+int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr);
+void fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+bool fdls_delete_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fdls_fdmi_timer_callback(struct timer_list *t);
+
+/* fnic_fcs.c */
+void fnic_fdls_init(struct fnic *fnic, int usefip);
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size);
+int fnic_send_fip_frame(struct fnic_iport_s *iport,
+ void *frame, int frame_size);
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid);
+void fnic_fdls_add_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags);
+void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport,
+ unsigned long flags);
+
+/* fip.c */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_common_fip_cleanup(struct fnic *fnic);
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
+void fnic_handle_fcs_ka_timer(struct timer_list *t);
+void fnic_handle_enode_ka_timer(struct timer_list *t);
+void fnic_handle_vn_ka_timer(struct timer_list *t);
+void fnic_handle_fip_timer(struct timer_list *t);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+
+/* fnic_scsi.c */
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
+void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
+int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp);
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid);
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn);
+
+#endif /* _FNIC_FDLS_H_ */
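The OXID layout defined in this header packs a 9-bit pool index (FNIC_OXID_POOL_SZ = 512) together with a frame-type code living in the 0xFE00 mask. A standalone userspace sketch that restates the relevant macros (so it builds outside the kernel tree) and checks the round trip; the index value 0x01A is arbitrary:

#include <stdint.h>
#include <assert.h>

#define FNIC_OXID_POOL_SZ		512
#define FNIC_OXID_ENCODE(idx, frame_type)	((frame_type) | (idx))
#define FNIC_FRAME_MASK			0xFE00
#define FNIC_FRAME_TYPE(oxid)		((oxid) & FNIC_FRAME_MASK)
#define FNIC_OXID_IDX(oxid)		((oxid) & (FNIC_OXID_POOL_SZ - 1))
#define FNIC_FRAME_TYPE_TGT_PLOGI	0x2600

int main(void)
{
	/* Encode pool slot 0x01A as a target PLOGI exchange. */
	uint16_t oxid = FNIC_OXID_ENCODE(0x01A, FNIC_FRAME_TYPE_TGT_PLOGI);

	assert(oxid == 0x261A);				/* 0x2600 | 0x01A */
	assert(FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_TGT_PLOGI);
	assert(FNIC_OXID_IDX(oxid) == 0x01A);		/* low 9 bits recover the slot */
	return 0;
}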
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
deleted file mode 100644
index 79f53029737b..000000000000
--- a/drivers/scsi/fnic/fnic_fip.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- */
-
-#ifndef _FNIC_FIP_H_
-#define _FNIC_FIP_H_
-
-
-#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
-#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
-#define FCOE_CTLR_MAX_SOL 8
-
-#define FINC_MAX_FLOGI_REJECTS 8
-
-struct vlan {
- __be16 vid;
- __be16 type;
-};
-
-/*
- * VLAN entry.
- */
-struct fcoe_vlan {
- struct list_head list;
- u16 vid; /* vlan ID */
- u16 sol_count; /* no. of sols sent */
- u16 state; /* state */
-};
-
-enum fip_vlan_state {
- FIP_VLAN_AVAIL = 0, /* don't do anything */
- FIP_VLAN_SENT = 1, /* sent */
- FIP_VLAN_USED = 2, /* succeed */
- FIP_VLAN_FAILED = 3, /* failed to response */
-};
-
-struct fip_vlan {
- struct ethhdr eth;
- struct fip_header fip;
- struct {
- struct fip_mac_desc mac;
- struct fip_wwn_desc wwnn;
- } desc;
-};
-
-#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 5895ead20e14..0d974e040ab7 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -7,6 +7,7 @@
#define _FNIC_IO_H_
#include <scsi/fc/fc_fcp.h>
+#include "fnic_fdls.h"
#define FNIC_DFLT_SG_DESC_CNT 32
#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
@@ -41,6 +42,8 @@ enum fnic_ioreq_state {
};
struct fnic_io_req {
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct host_sg_desc *sgl_list; /* sgl list */
void *sgl_list_alloc; /* sgl list address used for free */
dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
@@ -55,15 +58,4 @@ struct fnic_io_req {
unsigned int tag;
struct scsi_cmnd *sc; /* midlayer's cmd pointer */
};
-
-enum fnic_port_speeds {
- DCEM_PORTSPEED_NONE = 0,
- DCEM_PORTSPEED_1G = 1000,
- DCEM_PORTSPEED_10G = 10000,
- DCEM_PORTSPEED_20G = 20000,
- DCEM_PORTSPEED_25G = 25000,
- DCEM_PORTSPEED_40G = 40000,
- DCEM_PORTSPEED_4x10G = 41000,
- DCEM_PORTSPEED_100G = 100000,
-};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index ff85441c6cea..7ed50b11afa6 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -7,7 +7,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <scsi/libfc.h>
+#include <scsi/scsi_transport_fc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
@@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"request_irq failed with error: %d\n",
err);
fnic_free_intr(fnic);
@@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
n, m, o);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
fnic->rq_count, fnic->raw_wq_count,
fnic->wq_copy_count, fnic->cq_count);
@@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"allocated %d MSI-X vectors\n",
vec_count);
if (vec_count > 0) {
if (vec_count < vecs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"interrupts number mismatch: vec_count: %d vecs: %d\n",
vec_count, vecs);
if (vec_count < min_irqs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"no interrupts for copy wq\n");
return 1;
}
@@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->wq_copy_count = vec_count - n - m - 1;
fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
if (fnic->cq_count != vec_count - 1) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"CQ count: %d does not match MSI-X vector count: %d\n",
fnic->cq_count, vec_count);
fnic->cq_count = vec_count - 1;
@@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->intr_count = vec_count;
fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
fnic->rq_count,
fnic->raw_wq_count, fnic->copy_wq_base);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"wq_copy_count: %d wq_count: %d cq_count: %d\n",
fnic->wq_copy_count,
fnic->wq_count, fnic->cq_count);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"intr_count: %d err_intr_offset: %u",
fnic->intr_count,
fnic->err_intr_offset);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic using MSI-X\n");
return 0;
}
@@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
@@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->cq_count = 3;
fnic->intr_count = 3;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
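The MSI-X sizing in fnic_set_intr_mode_msix() follows the budget stated in its comment: n RQs, m raw WQs and o copy WQs need n+m+o CQs and n+m+o+1 vectors, with the extra vector reserved for errors and the notification area; when the PCI core grants fewer, the copy-WQ count absorbs the shortfall. A plain-C sketch of that arithmetic, with made-up counts rather than the driver's configured values:

#include <stdio.h>

int main(void)
{
	int n = 8, m = 1, o = 8;	/* RQs, raw WQs, copy WQs */
	int wanted = n + m + o + 1;	/* +1 for errors/notification */
	int granted = 12;		/* pretend the PCI core gave us less */

	if (granted < wanted)
		o = granted - n - m - 1;	/* trim copy WQs to fit */

	printf("copy WQs: %d, CQs: %d, vectors: %d\n",
	       o, n + m + o, n + m + o + 1);	/* 2, 11, 12 */
	return 0;
}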
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index adec0df24bc4..0b20ac8c3f46 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -16,21 +16,20 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
-#include "fnic_fip.h"
#include "fnic.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@@ -39,12 +38,18 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
+static struct kmem_cache *fdls_frame_cache;
+static struct kmem_cache *fdls_frame_elem_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
static DEFINE_IDA(fnic_ida);
+struct work_struct reset_fnic_work;
+LIST_HEAD(reset_fnic_list);
+DEFINE_SPINLOCK(reset_fnic_list_lock);
+
/* Supported devices by fnic module */
-static struct pci_device_id fnic_id_table[] = {
+static const struct pci_device_id fnic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
{ 0, }
};
@@ -60,6 +65,14 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_fdmi_support = 1;
+module_param(fnic_fdmi_support, int, 0644);
+MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support");
+
+static unsigned int fnic_tgt_id_binding = 1;
+module_param(fnic_tgt_id_binding, uint, 0644);
+MODULE_PARM_DESC(fnic_tgt_id_binding,
+ "Target ID binding (0 for none. 1 for binding by WWPN (default))");
unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
@@ -79,15 +92,15 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
-static struct libfc_function_template fnic_transport_template = {
- .frame_send = fnic_send,
- .lport_set_port_id = fnic_set_port_id,
- .fcp_abort_io = fnic_empty_scsi_cleanup,
- .fcp_cleanup = fnic_empty_scsi_cleanup,
- .exch_mgr_reset = fnic_exch_mgr_reset
-};
+unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON;
+module_param(pc_rscn_handling_feature_flag, uint, 0644);
+MODULE_PARM_DESC(pc_rscn_handling_feature_flag,
+ "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))");
-static int fnic_slave_alloc(struct scsi_device *sdev)
+struct workqueue_struct *reset_fnic_work_queue;
+struct workqueue_struct *fnic_fip_queue;
+
+static int fnic_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -105,8 +118,8 @@ static const struct scsi_host_template fnic_host_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
- .eh_host_reset_handler = fnic_host_reset,
- .slave_alloc = fnic_slave_alloc,
+ .eh_host_reset_handler = fnic_eh_host_reset_handler,
+ .sdev_init = fnic_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -146,7 +159,7 @@ static struct fc_function_template fnic_fc_functions = {
.get_host_speed = fnic_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
- .get_host_port_state = fc_get_host_port_state,
+ .get_host_port_state = fnic_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.show_rport_maxframe_size = 1,
@@ -157,54 +170,88 @@ static struct fc_function_template fnic_fc_functions = {
.show_starget_port_id = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
- .issue_fc_host_lip = fnic_reset,
+ .issue_fc_host_lip = fnic_issue_fc_host_lip,
.get_fc_host_stats = fnic_get_stats,
.reset_fc_host_stats = fnic_reset_host_stats,
- .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .dd_fcrport_size = sizeof(struct rport_dd_data_s),
.terminate_rport_io = fnic_terminate_rport_io,
- .bsg_request = fc_lport_bsg_request,
+ .bsg_request = NULL,
};
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "port_speed: %d Mbps", port_speed);
+ atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed);
/* Add in other values as they get defined in fw */
switch (port_speed) {
+ case DCEM_PORTSPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case DCEM_PORTSPEED_2G:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case DCEM_PORTSPEED_4G:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case DCEM_PORTSPEED_8G:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_16G:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
case DCEM_PORTSPEED_20G:
fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
+ case DCEM_PORTSPEED_32G:
+ fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+ break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
+ case DCEM_PORTSPEED_50G:
+ fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
+ break;
+ case DCEM_PORTSPEED_64G:
+ fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
+ break;
case DCEM_PORTSPEED_100G:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
+ case DCEM_PORTSPEED_128G:
+ fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
+ break;
default:
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown FC speed: %d Mbps", port_speed);
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
+/* Placeholder function */
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
- struct fc_host_statistics *stats = &lp->host_stats;
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
+ struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats;
struct vnic_stats *vs;
unsigned long flags;
- if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
+	if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
return stats;
fnic->stats_time = jiffies;
@@ -213,24 +260,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic: Get vnic stats failed"
- " 0x%x", ret);
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic: Get vnic stats failed: 0x%x", ret);
return stats;
}
vs = fnic->stats;
stats->tx_frames = vs->tx.tx_unicast_frames_ok;
- stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
+ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
stats->rx_frames = vs->rx.rx_unicast_frames_ok;
- stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
+ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
stats->seconds_since_last_reset =
- (jiffies - fnic->stats_reset_time) / HZ;
+ (jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
-
return stats;
}
@@ -311,8 +356,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host,
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct fc_host_statistics *stats;
unsigned long flags;
@@ -325,7 +369,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic: Reset vnic stats failed"
" 0x%x", ret);
return;
@@ -344,25 +388,19 @@ void fnic_log_q_error(struct fnic *fnic)
for (i = 0; i < fnic->raw_wq_count; i++) {
error_status = ioread32(&fnic->wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "WQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->rq_count; i++) {
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "RQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "CWQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status);
}
}
@@ -396,8 +434,7 @@ static int fnic_notify_set(struct fnic *fnic)
err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
break;
default:
- shost_printk(KERN_ERR, fnic->lport->host,
- "Interrupt mode should be set up"
+ dev_err(&fnic->pdev->dev, "Interrupt mode should be set up"
" before devcmd notify set %d\n",
vnic_dev_get_intr_mode(fnic->vdev));
err = -1;
@@ -416,13 +453,6 @@ static void fnic_notify_timer(struct timer_list *t)
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(struct timer_list *t)
-{
- struct fnic *fnic = from_timer(fnic, t, fip_timer);
-
- fnic_handle_fip_timer(fnic);
-}
-
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -522,6 +552,8 @@ static int fnic_cleanup(struct fnic *fnic)
vnic_intr_clean(&fnic->intr[i]);
mempool_destroy(fnic->io_req_pool);
+ mempool_destroy(fnic->frame_pool);
+ mempool_destroy(fnic->frame_elem_pool);
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
mempool_destroy(fnic->io_sgl_pool[i]);
@@ -534,25 +566,36 @@ static void fnic_iounmap(struct fnic *fnic)
iounmap(fnic->bar0.vaddr);
}
-/**
- * fnic_get_mac() - get assigned data MAC address for FIP code.
- * @lport: local port.
- */
-static u8 *fnic_get_mac(struct fc_lport *lport)
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- struct fnic *fnic = lport_priv(lport);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
- return fnic->data_src_addr;
+static void fnic_scsi_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->host;
+
+ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+ host->host_no);
+
+ host->transportt = fnic_fc_transport;
}
-static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+static void fnic_free_ioreq_tables_mq(struct fnic *fnic)
{
- vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ int hwq;
+
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
static int fnic_scsi_drv_init(struct fnic *fnic)
{
- struct Scsi_Host *host = fnic->lport->host;
+ struct Scsi_Host *host = fnic->host;
+ int err;
+ struct pci_dev *pdev = fnic->pdev;
+ struct fnic_iport_s *iport = &fnic->iport;
+ int hwq;
/* Configure maximum outstanding IO reqs*/
if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
@@ -563,103 +606,160 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
fnic->fnic_max_tag_id = host->can_queue;
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;
- shost_printk(KERN_INFO, host,
- "fnic: can_queue: %d max_lun: %llu",
+ dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu",
host->can_queue, host->max_lun);
- shost_printk(KERN_INFO, host,
- "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
host->max_id, host->max_cmd_len, host->nr_hw_queues);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
+ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
+ fnic->sw_copy_wq[hwq].io_req_table =
+ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
+ sizeof(struct fnic_io_req *), GFP_KERNEL);
+
+ if (!fnic->sw_copy_wq[hwq].io_req_table) {
+ fnic_free_ioreq_tables_mq(fnic);
+ return -ENOMEM;
+ }
+ }
+
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
+
+ fnic_scsi_init(fnic);
+
+ err = scsi_add_host(fnic->host, &pdev->dev);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n");
+ return err;
+ }
+ fc_host_maxframe_size(fnic->host) = iport->max_payload_size;
+ fc_host_dev_loss_tmo(fnic->host) =
+ fnic->config.port_down_timeout / 1000;
+ sprintf(fc_host_symbolic_name(fnic->host),
+ DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(fnic->host) = iport->wwnn;
+ fc_host_port_name(fnic->host) = iport->wwpn;
+ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(fnic->host), 0,
+ sizeof(fc_host_supported_fc4s(fnic->host)));
+ fc_host_supported_fc4s(fnic->host)[2] = 1;
+ fc_host_supported_fc4s(fnic->host)[7] = 1;
+ fc_host_supported_speeds(fnic->host) = 0;
+ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT;
+
+ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data);
+ if (fnic->host->shost_data != NULL) {
+ if (fnic_tgt_id_binding == 0) {
+ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE;
+ } else {
+ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN;
+ }
+ }
+
+ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+ if (!fnic->io_req_pool) {
+ scsi_remove_host(fnic->host);
+ return -ENOMEM;
+ }
+
return 0;
}
void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
{
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct pci_dev *l_pdev = fnic->pdev;
int intr_mode = fnic->config.intr_mode;
struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"intr_mode is not msix\n");
return;
}
- FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"qmap->nr_queues: %d\n", qmap->nr_queues);
if (l_pdev == NULL) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"l_pdev is null\n");
return;
}
- blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+ blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct Scsi_Host *host;
- struct fc_lport *lp;
+ struct Scsi_Host *host = NULL;
struct fnic *fnic;
mempool_t *pool;
+ struct fnic_iport_s *iport;
int err = 0;
int fnic_id = 0;
int i;
unsigned long flags;
- int hwq;
+ char *desc, *subsys_desc;
+ int len;
/*
- * Allocate SCSI Host and set up association between host,
- * local port, and fnic
+ * Allocate fnic
*/
- lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
- if (!lp) {
- printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
+ fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL);
+ if (!fnic) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_fnic_alloc;
}
- host = lp->host;
- fnic = lport_priv(lp);
+ iport = &fnic->iport;
fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
if (fnic_id < 0) {
- pr_err("Unable to alloc fnic ID\n");
+ dev_err(&pdev->dev, "Unable to alloc fnic ID\n");
err = fnic_id;
goto err_out_ida_alloc;
}
- fnic->lport = lp;
- fnic->ctlr.lp = lp;
- fnic->link_events = 0;
- fnic->pdev = pdev;
-
- snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
- host->host_no);
- host->transportt = fnic_fc_transport;
+ fnic->pdev = pdev;
fnic->fnic_num = fnic_id;
- fnic_stats_debugfs_init(fnic);
+
+ /* Find model name from PCIe subsys ID */
+ if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) {
+ dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc);
+
+ /* Update FDMI model */
+ fnic->subsys_desc_len = strlen(subsys_desc);
+ len = ARRAY_SIZE(fnic->subsys_desc);
+ if (fnic->subsys_desc_len > len)
+ fnic->subsys_desc_len = len;
+ memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len);
+ dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc);
+ } else {
+ fnic->subsys_desc_len = 0;
+ dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown",
+ pdev->subsystem_device);
+ }
err = pci_enable_device(pdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI device, aborting.\n");
- goto err_out_free_hba;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n");
+ goto err_out_pci_enable_device;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI resources, aborting\n");
- goto err_out_disable_device;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n");
+ goto err_out_pci_request_regions;
}
pci_set_master(pdev);
@@ -672,19 +772,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "No usable DMA configuration "
+ dev_err(&fnic->pdev->dev, "No usable DMA configuration "
"aborting\n");
- goto err_out_release_regions;
+ goto err_out_set_dma_mask;
}
}
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "BAR0 not memory-map'able, aborting.\n");
+ dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_map_bar;
}
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
@@ -692,61 +790,79 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->bar0.len = pci_resource_len(pdev, 0);
if (!fnic->bar0.vaddr) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot memory-map BAR0 res hdr, "
+ dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, "
"aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_fnic_map_bar;
}
fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
if (!fnic->vdev) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC registration failed, "
+ dev_err(&fnic->pdev->dev, "vNIC registration failed, "
"aborting.\n");
err = -ENODEV;
- goto err_out_iounmap;
+ goto err_out_dev_register;
}
err = vnic_dev_cmd_init(fnic->vdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_cmd_init() returns %d, aborting\n",
+ dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n",
err);
- goto err_out_vnic_unregister;
+ goto err_out_dev_cmd_init;
}
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev open failed, aborting.\n");
- goto err_out_dev_cmd_deinit;
+ dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n");
+ goto err_out_dev_open;
}
err = vnic_dev_init(fnic->vdev, 0);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_init;
}
- err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+ err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC get MAC addr failed \n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n");
+ goto err_out_dev_mac_addr;
}
/* set data_src for point-to-point mode and to keep it non-zero */
- memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
+ memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN);
/* Get vNIC configuration */
err = fnic_get_vnic_config(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Get vNIC configuration failed, "
+ dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_get_config;
+ }
+
+ switch (fnic->config.flags & 0xff0) {
+ case VFCF_FC_INITIATOR:
+ {
+		host = scsi_host_alloc(&fnic_host_template,
+				       sizeof(struct fnic *));
+ if (!host) {
+ dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n");
+ err = -ENOMEM;
+ goto err_out_scsi_host_alloc;
+ }
+ *((struct fnic **) shost_priv(host)) = fnic;
+
+ fnic->host = host;
+ fnic->role = FNIC_ROLE_FCP_INITIATOR;
+ dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n",
+ fnic->fnic_num);
+ }
+ break;
+ default:
+ dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num);
+ err = -EINVAL;
+ goto err_out_fnic_role;
}
/* Setup PCI resources */
@@ -756,29 +872,18 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fnic_set_intr_mode(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to set intr mode, "
+ dev_err(&fnic->pdev->dev, "Failed to set intr mode, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_set_intr_mode;
}
err = fnic_alloc_vnic_resources(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc vNIC resources, "
+ dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, "
"aborting.\n");
- goto err_out_clear_intr;
+ goto err_out_fnic_alloc_vnic_res;
}
-
- fnic_scsi_drv_init(fnic);
-
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
- fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
- fnic->sw_copy_wq[hwq].io_req_table =
- kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
- sizeof(struct fnic_io_req *), GFP_KERNEL);
- }
- shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
/* initialize all fnic locks */
@@ -794,50 +899,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->fw_ack_index[i] = -1;
}
- err = -ENOMEM;
- fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
- if (!fnic->io_req_pool)
- goto err_out_free_resources;
-
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
- if (!pool)
- goto err_out_free_ioreq_pool;
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_free_resources;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
- if (!pool)
+ if (!pool) {
+ err = -ENOMEM;
goto err_out_free_dflt_pool;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_pool;
+ }
+ fnic->frame_pool = pool;
+
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM,
+ fdls_frame_elem_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_elem_pool;
+ }
+ fnic->frame_elem_pool = pool;
+
/* setup vlan config, hw inserts vlan header */
fnic->vlan_hw_insert = 1;
fnic->vlan_id = 0;
- /* Initialize the FIP fcoe_ctrl struct */
- fnic->ctlr.send = fnic_eth_send;
- fnic->ctlr.update_mac = fnic_update_mac;
- fnic->ctlr.get_src_addr = fnic_get_mac;
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware supports FIP\n");
+ dev_info(&fnic->pdev->dev, "firmware supports FIP\n");
/* enable directed and multicast */
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
- vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
- fnic->set_vlan = fnic_set_vlan;
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
+ vnic_dev_add_addr(fnic->vdev, iport->hwmac);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
- INIT_WORK(&fnic->event_work, fnic_handle_event);
- skb_queue_head_init(&fnic->fip_frame_queue);
- INIT_LIST_HEAD(&fnic->evlist);
- INIT_LIST_HEAD(&fnic->vlans);
+ INIT_LIST_HEAD(&fnic->fip_frame_queue);
+ INIT_LIST_HEAD(&fnic->vlan_list);
+ timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0);
+ timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0);
+ timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0);
+ timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0);
+ fnic->set_vlan = fnic_set_vlan;
} else {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware uses non-FIP mode\n");
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
- fnic->ctlr.state = FIP_ST_NON_FIP;
+ dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n");
}
fnic->state = FNIC_IN_FC_MODE;
@@ -850,9 +961,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification buffer area */
err = fnic_notify_set(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc notify buffer, aborting.\n");
- goto err_out_free_max_pool;
+ dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_fnic_notify_set;
}
/* Setup notify timer when using MSI interrupts */
@@ -863,13 +973,62 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_alloc_rq_frame can't alloc "
+ dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc "
"frame\n");
- goto err_out_rq_buf;
+ goto err_out_alloc_rq_buf;
}
}
+ init_completion(&fnic->reset_completion_wait);
+
+ /* Start local port initialization */
+ iport->max_flogi_retries = fnic->config.flogi_retries;
+ iport->max_plogi_retries = fnic->config.plogi_retries;
+ iport->plogi_timeout = fnic->config.plogi_timeout;
+ iport->service_params =
+ (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS |
+ FNIC_FCP_SP_CONF_CMPL);
+ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+ iport->service_params |= FNIC_FCP_SP_RETRY;
+
+ iport->boot_time = jiffies;
+ iport->e_d_tov = fnic->config.ed_tov;
+ iport->r_a_tov = fnic->config.ra_tov;
+ iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT;
+ iport->wwpn = fnic->config.port_wwn;
+ iport->wwnn = fnic->config.node_wwn;
+
+ iport->max_payload_size = fnic->config.maxdatafieldsize;
+
+ if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) ||
+ (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) ||
+ ((iport->max_payload_size % 4) != 0)) {
+ iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ }
+
+ iport->flags |= FNIC_FIRST_LINK_UP;
+
+ timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback,
+ 0);
+
+ fnic->stats_reset_time = jiffies;
+
+ INIT_WORK(&fnic->link_work, fnic_handle_link);
+ INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ INIT_WORK(&fnic->tport_work, fnic_tport_event_handler);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
+
+ INIT_LIST_HEAD(&fnic->frame_queue);
+ INIT_LIST_HEAD(&fnic->tx_queue);
+ INIT_LIST_HEAD(&fnic->tport_event_list);
+
+ INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry,
+ fdls_schedule_oxid_free_retry_work);
+
+ /* Initialize the oxid reclaim list and work struct */
+ INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list);
+ INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler);
+
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
@@ -880,180 +1039,131 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);
- err = fnic_request_intr(fnic);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to request irq.\n");
- goto err_out_request_intr;
- }
+ vnic_dev_enable(fnic->vdev);
- /*
- * Initialization done with PCI system, hardware, firmware.
- * Add host to SCSI
- */
- err = scsi_add_host(lp->host, &pdev->dev);
+ err = fnic_request_intr(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic: scsi_add_host failed...exiting\n");
- goto err_out_scsi_add_host;
+ dev_err(&fnic->pdev->dev, "Unable to request irq.\n");
+ goto err_out_fnic_request_intr;
}
+ fnic_notify_timer_start(fnic);
- /* Start local port initiatialization */
-
- lp->link_up = 0;
-
- lp->max_retry_count = fnic->config.flogi_retries;
- lp->max_rport_retry_count = fnic->config.plogi_retries;
- lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
- FCP_SPPF_CONF_COMPL);
- if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
- lp->service_params |= FCP_SPPF_RETRY;
-
- lp->boot_time = jiffies;
- lp->e_d_tov = fnic->config.ed_tov;
- lp->r_a_tov = fnic->config.ra_tov;
- lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
- fc_set_wwnn(lp, fnic->config.node_wwn);
- fc_set_wwpn(lp, fnic->config.port_wwn);
-
- fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
-
- if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
- FCPIO_HOST_EXCH_RANGE_END, NULL)) {
- err = -ENOMEM;
- goto err_out_fc_exch_mgr_alloc;
- }
-
- fc_lport_init_stats(lp);
- fnic->stats_reset_time = jiffies;
+ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE));
- fc_lport_config(lp);
+ err = fnic_scsi_drv_init(fnic);
+ if (err)
+ goto err_out_scsi_drv_init;
- if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
- sizeof(struct fc_frame_header))) {
- err = -EINVAL;
- goto err_out_free_exch_mgr;
+ err = fnic_stats_debugfs_init(fnic);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n");
+ goto err_out_free_stats_debugfs;
}
- fc_host_maxframe_size(lp->host) = lp->mfs;
- fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
- sprintf(fc_host_symbolic_name(lp->host),
- DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_unmask(&fnic->intr[i]);
spin_lock_irqsave(&fnic_list_lock, flags);
list_add_tail(&fnic->list, &fnic_list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- INIT_WORK(&fnic->link_work, fnic_handle_link);
- INIT_WORK(&fnic->frame_work, fnic_handle_frame);
- INIT_WORK(&fnic->flush_work, fnic_flush_tx);
- skb_queue_head_init(&fnic->frame_queue);
- skb_queue_head_init(&fnic->tx_queue);
-
- fc_fabric_login(lp);
-
- vnic_dev_enable(fnic->vdev);
-
- for (i = 0; i < fnic->intr_count; i++)
- vnic_intr_unmask(&fnic->intr[i]);
-
- fnic_notify_timer_start(fnic);
-
return 0;
-err_out_free_exch_mgr:
- fc_exch_mgr_free(lp);
-err_out_fc_exch_mgr_alloc:
- fc_remove_host(lp->host);
- scsi_remove_host(lp->host);
-err_out_scsi_add_host:
+err_out_free_stats_debugfs:
+ fnic_stats_debugfs_remove(fnic);
+ fnic_free_ioreq_tables_mq(fnic);
+ scsi_remove_host(fnic->host);
+err_out_scsi_drv_init:
fnic_free_intr(fnic);
-err_out_request_intr:
- for (i = 0; i < fnic->rq_count; i++)
+err_out_fnic_request_intr:
+err_out_alloc_rq_buf:
+ for (i = 0; i < fnic->rq_count; i++) {
+ if (ioread32(&fnic->rq[i].ctrl->enable))
+ vnic_rq_disable(&fnic->rq[i]);
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
-err_out_rq_buf:
+ }
vnic_dev_notify_unset(fnic->vdev);
-err_out_free_max_pool:
+err_out_fnic_notify_set:
+ mempool_destroy(fnic->frame_elem_pool);
+err_out_fdls_frame_elem_pool:
+ mempool_destroy(fnic->frame_pool);
+err_out_fdls_frame_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
-err_out_free_ioreq_pool:
- mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
fnic_free_vnic_resources(fnic);
-err_out_clear_intr:
+err_out_fnic_alloc_vnic_res:
fnic_clear_intr_mode(fnic);
-err_out_dev_close:
+err_out_fnic_set_intr_mode:
+ scsi_host_put(fnic->host);
+err_out_fnic_role:
+err_out_scsi_host_alloc:
+err_out_fnic_get_config:
+err_out_dev_mac_addr:
+err_out_dev_init:
vnic_dev_close(fnic->vdev);
-err_out_dev_cmd_deinit:
-err_out_vnic_unregister:
+err_out_dev_open:
+err_out_dev_cmd_init:
vnic_dev_unregister(fnic->vdev);
-err_out_iounmap:
+err_out_dev_register:
fnic_iounmap(fnic);
-err_out_release_regions:
+err_out_fnic_map_bar:
+err_out_map_bar:
+err_out_set_dma_mask:
pci_release_regions(pdev);
-err_out_disable_device:
+err_out_pci_request_regions:
pci_disable_device(pdev);
-err_out_free_hba:
- fnic_stats_debugfs_remove(fnic);
+err_out_pci_enable_device:
ida_free(&fnic_ida, fnic->fnic_num);
err_out_ida_alloc:
- scsi_host_put(lp->host);
-err_out:
+ kfree(fnic);
+err_out_fnic_alloc:
return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
- struct fc_lport *lp = fnic->lport;
unsigned long flags;
- int hwq;
/*
- * Mark state so that the workqueue thread stops forwarding
- * received frames and link events to the local port. ISR and
- * other threads that can queue work items will also stop
- * creating work items on the fnic workqueue
+	 * Sometimes probe() fails yet does not exit with an error code, and
+	 * remove() then gets called with 'drvdata' not set. Add a defensive
+	 * check to avoid a crash.
*/
+ if (!fnic)
+ return;
+
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->stop_rx_link_events = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- del_timer_sync(&fnic->notify_timer);
-
/*
* Flush the fnic event queue. After this call, there should
* be no event queued for this fnic device in the workqueue
*/
flush_workqueue(fnic_event_queue);
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+
+ fnic_scsi_unload(fnic);
+
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ del_timer_sync(&fnic->notify_timer);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- del_timer_sync(&fnic->fip_timer);
- skb_queue_purge(&fnic->fip_frame_queue);
+ del_timer_sync(&fnic->retry_fip_timer);
+ del_timer_sync(&fnic->fcs_ka_timer);
+ del_timer_sync(&fnic->enode_ka_timer);
+ del_timer_sync(&fnic->vn_ka_timer);
+
+ fnic_free_txq(&fnic->fip_frame_queue);
fnic_fcoe_reset_vlans(fnic);
- fnic_fcoe_evlist_free(fnic);
}
- /*
- * Log off the fabric. This stops all remote ports, dns port,
- * logs off the fabric. This flushes all rport, disc, lport work
- * before returning
- */
- fc_fabric_logoff(fnic->lport);
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->in_remove = 1;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0))
+ del_timer_sync(&fnic->iport.fabric.fdmi_timer);
- fcoe_ctlr_destroy(&fnic->ctlr);
- fc_lport_destroy(lp);
fnic_stats_debugfs_remove(fnic);
/*
@@ -1063,18 +1173,13 @@ static void fnic_remove(struct pci_dev *pdev)
*/
fnic_cleanup(fnic);
- BUG_ON(!skb_queue_empty(&fnic->frame_queue));
- BUG_ON(!skb_queue_empty(&fnic->tx_queue));
-
spin_lock_irqsave(&fnic_list_lock, flags);
list_del(&fnic->list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- fc_remove_host(fnic->lport->host);
- scsi_remove_host(fnic->lport->host);
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
- fc_exch_mgr_free(fnic->lport);
+ fnic_free_txq(&fnic->frame_queue);
+ fnic_free_txq(&fnic->tx_queue);
+
vnic_dev_notify_unset(fnic->vdev);
fnic_free_intr(fnic);
fnic_free_vnic_resources(fnic);
@@ -1084,8 +1189,11 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
ida_free(&fnic_ida, fnic->fnic_num);
- scsi_host_put(lp->host);
+ fnic_scsi_unload_cleanup(fnic);
+ scsi_host_put(fnic->host);
+ kfree(fnic);
}
static struct pci_driver fnic_driver = {
@@ -1161,6 +1269,24 @@ static int __init fnic_init_module(void)
goto err_create_fnic_ioreq_slab;
}
+ fdls_frame_cache = kmem_cache_create("fdls_frames",
+ FNIC_FCOE_FRAME_MAXSZ,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_cache) {
+ pr_err("fnic fdls frame cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache;
+ }
+
+ fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem",
+ sizeof(struct fnic_frame_list),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_elem_cache) {
+ pr_err("fnic fdls frame elem cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache_elem;
+ }
+
fnic_event_queue =
alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
if (!fnic_event_queue) {
@@ -1177,6 +1303,19 @@ static int __init fnic_init_module(void)
goto err_create_fip_workq;
}
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) {
+ reset_fnic_work_queue =
+ create_singlethread_workqueue("reset_fnic_work_queue");
+ if (!reset_fnic_work_queue) {
+ pr_err("reset fnic work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_reset_fnic_workq;
+ }
+ spin_lock_init(&reset_fnic_list_lock);
+ INIT_LIST_HEAD(&reset_fnic_list);
+ INIT_WORK(&reset_fnic_work, fnic_reset_work_handler);
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -1197,8 +1336,15 @@ err_pci_register:
err_fc_transport:
destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+err_create_reset_fnic_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
+ kmem_cache_destroy(fdls_frame_elem_cache);
+err_create_fdls_frame_cache_elem:
+ kmem_cache_destroy(fdls_frame_cache);
+err_create_fdls_frame_cache:
kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
@@ -1215,11 +1361,18 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
- if (fnic_fip_queue)
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+
+ if (fnic_fip_queue) {
+ flush_workqueue(fnic_fip_queue);
destroy_workqueue(fnic_fip_queue);
+ }
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
+	kmem_cache_destroy(fdls_frame_cache);
+	kmem_cache_destroy(fdls_frame_elem_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
fnic_fc_trace_free();
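
fnic_cleanup_module() above must stay the mirror image of fnic_init_module(): unconditional resources are torn down unconditionally, while reset_fnic_work_queue, created only when pc_rscn_handling_feature_flag is on, is destroyed under the same predicate in both the init error path and module exit. A minimal sketch of that symmetry, with hypothetical names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *feat_wq;
static bool feature_on = true;	/* stand-in for a module parameter */

static int __init feat_init(void)
{
	if (feature_on) {
		feat_wq = create_singlethread_workqueue("feat_wq");
		if (!feat_wq)
			return -ENOMEM;
	}
	return 0;
}

static void __exit feat_exit(void)
{
	/* Destroy under exactly the predicate used at creation time */
	if (feature_on)
		destroy_workqueue(feat_wq);
}

module_init(feat_init);
module_exit(feat_exit);
MODULE_DESCRIPTION("feature-gated workqueue sketch");
MODULE_LICENSE("GPL");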
diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
new file mode 100644
index 000000000000..36a2c1268422
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/if_ether.h>
+#include "fnic.h"
+
+static struct fnic_pcie_device fnic_pcie_device_table[] = {
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA,
+ "VIC 1280"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI,
+ "VIC 1240"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE,
+ "VIC 1285"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE,
+ "VIC 1227T"},
+
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA,
+ "VIC 1340"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW,
+ "VIC 1380"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE,
+ "VIC 1385"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT,
+ "VIC 1387"},
+
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY,
+ "VIC 1457"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE,
+ "VIC 1485"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA,
+ "VIC 1495"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT,
+ "VIC 1497"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE,
+ "VIC 1467"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON,
+ "VIC 1477"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"},
+
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN,
+ "VIC 15420"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW,
+ "VIC 15411"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU,
+ "VIC 15238"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA,
+ "VIC 15422"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH,
+ "VIC 15230"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA,
+ "VIC 15427"},
+
+ {0,}
+};
+
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc)
+{
+ unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC;
+ int max = ARRAY_SIZE(fnic_pcie_device_table);
+ struct fnic_pcie_device *t = fnic_pcie_device_table;
+ int index = 0;
+
+ if (pdev->device != device)
+ return 1;
+
+ while (t->device != 0) {
+		if (pdev->subsystem_device == t->subsystem_device)
+ break;
+ t++;
+ index++;
+ }
+
+ if (index >= max - 1) {
+ *desc = NULL;
+ *subsys_desc = NULL;
+ return 1;
+ }
+
+ *desc = fnic_pcie_device_table[index].desc;
+ *subsys_desc = fnic_pcie_device_table[index].subsys_desc;
+ return 0;
+}
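
fnic_get_desc_by_devid() above walks a {0,}-terminated table, matching on the PCI subsystem device ID and reaching the sentinel when the adapter model is unknown. An equivalent direct-comparison sketch of the same walk (struct and field names here are stand-ins, not the driver's):

/* Hypothetical mirror of one table entry; field types assumed. */
struct pcie_id_desc {
	unsigned short device;
	const char *desc;
	unsigned short subsystem_device;
	const char *subsys_desc;
};

static const struct pcie_id_desc *
pcie_id_lookup(const struct pcie_id_desc *tbl, unsigned short subdev)
{
	/* The {0,} entry terminates the table */
	for (; tbl->device != 0; tbl++)
		if (tbl->subsystem_device == subdev)
			return tbl;
	return NULL;	/* unknown model: caller reports generic strings */
}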
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 33dd27f6f24e..763475587b7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic)
offsetof(struct vnic_fc_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
- shost_printk(KERN_ERR, fnic->lport->host, \
- "Error getting %s, %d\n", #m, \
- err); \
+ dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \
return err; \
} \
} while (0);
@@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic)
GET_CONFIG(intr_mode);
GET_CONFIG(wq_copy_count);
+ if ((c->flags & (VFCF_FC_INITIATOR)) == 0) {
+		dev_info(&fnic->pdev->dev, "vNIC role not defined, defaulting to FC initiator\n");
+ c->flags |= VFCF_FC_INITIATOR;
+ }
+
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
@@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC MAC addr %pM "
- "wq/wq_copy/rq %d/%d/%d\n",
- fnic->ctlr.ctl_src_addr,
+	dev_info(&fnic->pdev->dev, "fNIC MAC addr %pM wq/wq_copy/rq %d/%d/%d\n",
+ fnic->data_src_addr,
c->wq_enet_desc_count, c->wq_copy_desc_count,
c->rq_desc_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC node wwn %llx port wwn %llx\n",
+ dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n",
c->node_wwn, c->port_wwn);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC ed_tov %d ra_tov %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n",
c->ed_tov, c->ra_tov);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC mtu %d intr timer %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n",
c->maxdatafieldsize, c->intr_timer);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flags 0x%x luns per tgt %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n",
c->flags, c->luns_per_tgt);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flogi_retries %d flogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n",
c->flogi_retries, c->flogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC plogi retries %d plogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n",
c->plogi_retries, c->plogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC io throttle count %d link dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n",
c->io_throttle_count, c->link_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC port dn io retries %d port dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC wq_copy_count: %d\n", c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC intr mode: %d\n", c->intr_mode);
+ dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode);
return 0;
}
@@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic)
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_count: %d\n", fnic->wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources rq_count: %d\n", fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources cq_count: %d\n", fnic->cq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources intr_count: %d\n", fnic->intr_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count);
}
void fnic_free_vnic_resources(struct fnic *fnic)
@@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
- shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+ dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n",
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
+ dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d",
fnic->wq_count, fnic->wq_copy_count,
fnic->raw_wq_count, fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
+ dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n",
fnic->cq_count, fnic->intr_count,
fnic->config.wq_copy_desc_count);
@@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to hook legacy pba resource\n");
+ dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
@@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
/* init the stats memory by making the first call here */
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_stats_dump failed - x%x\n", err);
+ dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err);
goto err_out_cleanup;
}
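
The GET_CONFIG() hunk above keeps the usual statement-like macro shape: a do { ... } while (0) body that reads one field and returns from the enclosing function on error, now reporting through dev_err() instead of shost_printk(). A minimal sketch of that shape, with a hypothetical read_field() standing in for the vnic devcmd read:

#include <linux/device.h>
#include <linux/stddef.h>
#include <linux/string.h>

/* Hypothetical stand-in for the real config read. */
static int read_field(struct device *dev, size_t off, size_t len, void *buf)
{
	memset(buf, 0, len);	/* pretend the read always succeeds */
	return 0;
}

/* Must be expanded inside a function that returns int. */
#define GET_FIELD(dev, cfg, m)						\
do {									\
	int _err = read_field(dev, offsetof(typeof(*(cfg)), m),	\
			      sizeof((cfg)->m), &(cfg)->m);		\
	if (_err) {							\
		dev_err(dev, "Error getting " #m ", %d\n", _err);	\
		return _err;						\
	}								\
} while (0)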
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2ba61dba4569..7133b254cbe4 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -23,11 +23,13 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
const char *fnic_state_str[] = {
[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
@@ -65,6 +67,18 @@ static const char *fcpio_status_str[] = {
[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
};
+enum terminate_io_return {
+ TERM_SUCCESS = 0,
+ TERM_NO_SC = 1,
+ TERM_IO_REQ_NOT_FOUND,
+ TERM_ANOTHER_PORT,
+ TERM_GSTATE,
+ TERM_IO_BLOCKED,
+ TERM_OUT_OF_WQ_DESC,
+ TERM_TIMED_OUT,
+ TERM_MISC,
+};
+
const char *fnic_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
@@ -90,8 +104,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
return fcpio_status_str[status];
}
-static void fnic_cleanup_io(struct fnic *fnic);
-
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
@@ -114,6 +126,65 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
+static bool
+fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ u32 *portid = data1;
+ unsigned int *count = data2;
+ struct fnic_io_req *io_req = fnic_priv(sc)->io_req;
+
+ if (!io_req || (*portid && (io_req->port_id != *portid)))
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
+ &portid, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "portid = 0x%x count = %u\n", portid, count);
+ return count;
+}
+
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic)
+{
+ return fnic_count_ioreqs(fnic, 0);
+}
+
+static bool
+fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ struct scsi_device *scsi_device = data1;
+ unsigned int *count = data2;
+
+ if (sc->device != scsi_device || !fnic_priv(sc)->io_req)
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int
+fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter,
+ scsi_device, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "lun = %p count = %u\n", scsi_device, count);
+ return count;
+}
+
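
fnic_scsi_io_iter() (introduced elsewhere in this series) fans a per-command callback out over the host's busy tags, and the two void * cookies let the same iterator back both the per-port and per-LUN counters above. A minimal sketch of the same counting idea on the stock scsi_host_busy_iter() API (names here are hypothetical):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct count_ctx {
	struct scsi_device *sdev;	/* filter; NULL counts everything */
	unsigned int count;
};

static bool count_iter(struct scsi_cmnd *sc, void *priv)
{
	struct count_ctx *ctx = priv;

	if (!ctx->sdev || sc->device == ctx->sdev)
		ctx->count++;
	return true;	/* true means keep iterating */
}

static unsigned int count_busy_cmds(struct Scsi_Host *shost,
				    struct scsi_device *sdev)
{
	struct count_ctx ctx = { .sdev = sdev, .count = 0 };

	scsi_host_busy_iter(shost, count_iter, &ctx);
	return ctx.count;
}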
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
{
@@ -179,12 +250,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
int ret = 0;
unsigned long flags;
+ unsigned int ioreq_count;
/* indicate fwreset to io path */
fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
-
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+ ioreq_count = fnic_count_all_ioreqs(fnic);
/* wait for io cmpl */
while (atomic_read(&fnic->in_flight))
@@ -198,6 +268,8 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ioreq_count: %u\n", ioreq_count);
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -211,11 +283,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Issued fw reset\n");
} else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Failed to issue fw reset\n");
}
@@ -231,10 +303,10 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
enum fcpio_flogi_reg_format_type format;
- struct fc_lport *lp = fnic->lport;
u8 gw_mac[ETH_ALEN];
int ret = 0;
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
@@ -246,28 +318,23 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
goto flogi_reg_ioreq_end;
}
- if (fnic->ctlr.map_dest) {
- eth_broadcast_addr(gw_mac);
- format = FCPIO_FLOGI_REG_DEF_DEST;
- } else {
- memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
- format = FCPIO_FLOGI_REG_GW_DEST;
- }
+ memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN);
+ format = FCPIO_FLOGI_REG_GW_DEST;
- if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
fc_id, gw_mac,
- fnic->data_src_addr,
- lp->r_a_tov, lp->e_d_tov);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
- fc_id, fnic->data_src_addr, gw_mac);
+ fnic->iport.fpma,
+ iport->r_a_tov, iport->e_d_tov);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"FLOGI FIP reg issued fcid: 0x%x src %pM dest %pM\n",
+ fc_id, fnic->iport.fpma, gw_mac);
} else {
fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
format, fc_id, gw_mac);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n",
- fc_id, fnic->ctlr.map_dest, gw_mac);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"FLOGI reg issued fcid 0x%x dest %pM\n",
+ fc_id, gw_mac);
}
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
@@ -295,13 +362,17 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned int i;
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,7 +413,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -356,7 +427,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
exch_flags = 0;
if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
- (rp->flags & FC_RP_FLAGS_RETRY))
+ (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
@@ -371,8 +442,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
sc->cmnd, sc->cmd_len,
scsi_bufflen(sc),
fc_lun.scsi_lun, io_req->port_id,
- rport->maxframe_size, rp->r_a_tov,
- rp->e_d_tov);
+ tport->max_payload_size,
+ tport->r_a_tov, tport->e_d_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -388,10 +459,10 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
struct request *const rq = scsi_cmd_to_rq(sc);
uint32_t mqtag = 0;
void (*done)(struct scsi_cmnd *) = scsi_done;
- struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host));
+ struct fnic_iport_s *iport = NULL;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret = 1;
@@ -400,32 +471,14 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
unsigned long flags = 0;
unsigned long ptr;
int io_lock_acquired = 0;
- struct fc_rport_libfc_priv *rp;
uint16_t hwq = 0;
-
- mqtag = blk_mq_unique_tag(rq);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ struct fnic_tport_s *tport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ uint16_t lun0_delay = 0;
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
done(sc);
@@ -434,50 +487,96 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
ret = fc_remote_port_chkready(rport);
if (ret) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"rport is not ready\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- rp = rport->dd_data;
- if (!rp || rp->rp_state == RPORT_ST_DELETE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x removed, returning DID_NO_CONNECT\n",
- rport->port_id);
+ mqtag = blk_mq_unique_tag(rq);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as iport state: %d\n",
+ iport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (rp->rp_state != RPORT_ST_READY) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
- rport->port_id, rp->rp_state);
+ /* fc_remote_port_add() may have added the tport to
+	 * the fc transport, but dd_data may not be set yet
+ */
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport || (rdd_data->iport != iport)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "dd_data not yet set in SCSI for rport portid: 0x%x\n",
+ rport->port_id);
+ tport = fnic_find_tport_by_fcid(iport, rport->port_id);
+ if (!tport) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
+ rport->port_id);
+ sc->result = DID_BUS_BUSY << 16;
+ done(sc);
+ return 0;
+ }
+
+ /* Re-assign same params as in fnic_fdls_add_tport */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes =
+ FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+		/* dd_data is allocated by the fc transport, of size dd_fcrport_size */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+ }
- sc->result = DID_IMM_RETRY << 16;
+ if ((tport->state != FDLS_TGT_STATE_READY)
+ && (tport->state != FDLS_TGT_STATE_ADISC)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as tport state: %d\n",
+ tport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "state not ready: %d/link not up: %d Returning HOST_BUSY\n",
- lp->state, lp->link_up);
return SCSI_MLQUEUE_HOST_BUSY;
}
- atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->state_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (!tport->lun0_delay) {
+ lun0_delay = 1;
+ tport->lun0_delay++;
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
@@ -499,6 +598,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
goto out;
}
+ io_req->tport = tport;
/* Determine the type of scatter/gather list we need */
io_req->sgl_cnt = sg_count;
io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
@@ -575,6 +675,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
@@ -602,6 +703,14 @@ out:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
+
+ if (lun0_delay) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "LUN0 delay\n");
+ mdelay(LUN0_DELAY_TIME);
+ }
+
return ret;
}
@@ -625,7 +734,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */
- fnic_cleanup_io(fnic);
+ fnic_cleanup_io(fnic, SCSI_NO_TAG);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
@@ -637,44 +746,37 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
/* Check status of reset completion */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"reset cmpl success\n");
/* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"reset failed with header status: %s\n",
fnic_fcpio_status_to_str(hdr_status));
- /*
- * Unable to change to eth mode, cannot send out flogi
- * Change state to fc mode, so that subsequent Flogi
- * requests from libFC will cause more attempts to
- * reset the firmware. Free the cached flogi
- */
fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Unexpected state while processing reset completion: %s\n",
fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
- /* Thread removing device blocks till firmware reset is complete */
- if (fnic->remove_wait)
- complete(fnic->remove_wait);
+ if (fnic->fw_reset_done)
+ complete(fnic->fw_reset_done);
/*
* If fnic is being removed, or fw reset failed
* free the flogi frame. Else, send it out
*/
- if (fnic->remove_wait || ret) {
+ if (ret) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- skb_queue_purge(&fnic->tx_queue);
+ fnic_free_txq(&fnic->tx_queue);
goto reset_cmpl_handler_end;
}
@@ -710,19 +812,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
/* Check flogi registration completion status */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "flog reg succeeded\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FLOGI reg succeeded\n");
fnic->state = FNIC_IN_FC_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic flogi reg :failed %s\n",
+ fnic->host, fnic->fnic_num,
+ "fnic flogi reg failed: %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_ETH_MODE;
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Unexpected fnic state %s while"
" processing flogi reg completion\n",
fnic_state_to_str(fnic->state));
@@ -795,7 +897,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
- fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
ox_id_tag[4], ox_id_tag[5]);
}
@@ -833,36 +935,36 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
hwq = blk_mq_unique_tag_to_hwq(mqtag);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s icmnd completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag >= fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Out of range tag\n",
fnic_fcpio_status_to_str(hdr_status));
return;
}
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, desc);
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
- fnic->lport->host->host_no, id,
+ fnic->host->host_no, id,
((u64)icmnd_cmpl->_resvd0[1] << 16 |
(u64)icmnd_cmpl->_resvd0[0]),
((u64)hdr_status << 16 |
@@ -885,7 +987,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc);
@@ -912,7 +1014,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if(FCPIO_ABORTED == hdr_status)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
@@ -943,6 +1045,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"xfer_len: %llu\n", xfer_len);
break;
case FCPIO_TIMEOUT: /* request was timed out */
@@ -1004,7 +1109,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
- shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
}
@@ -1024,13 +1129,13 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
- fnic->lport->host_stats.fcp_input_requests++;
+ fnic_stats->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
- fnic->lport->host_stats.fcp_output_requests++;
+ fnic_stats->host_stats.fcp_output_requests++;
fnic->fcp_output_bytes += xfer_len;
} else
- fnic->lport->host_stats.fcp_control_requests++;
+ fnic_stats->host_stats.fcp_control_requests++;
/* Call SCSI completion function to complete the IO */
scsi_done(sc);
@@ -1097,27 +1202,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s ITMF completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag > fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
} else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
@@ -1133,14 +1238,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req)
sc = io_req->sc;
} else {
- sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
}
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), tag);
return;
@@ -1152,7 +1257,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), tag, sc);
@@ -1163,7 +1268,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
/* Abort and terminate completion of device reset req */
/* REVISIT : Add asserts about various flags */
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1175,7 +1280,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
- shost_printk(KERN_DEBUG, fnic->lport->host,
+ shost_printk(KERN_DEBUG, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1190,7 +1295,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_ITMF_REJECTED:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK));
break;
@@ -1225,7 +1330,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
@@ -1238,11 +1343,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req->abts_done) {
complete(io_req->abts_done);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
hwq, mqtag, tag);
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1273,7 +1378,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1285,7 +1390,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1298,7 +1403,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1307,7 +1412,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1316,7 +1421,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
__func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1371,7 +1476,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
default:
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"firmware completion type %d\n",
desc->hdr.type);
break;
@@ -1414,8 +1519,8 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic *fnic = data;
struct fnic_io_req *io_req;
- unsigned long flags = 0;
unsigned long start_time = 0;
+ unsigned long flags;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
uint16_t hwq = 0;
int tag;
@@ -1432,14 +1537,14 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
hwq, mqtag, tag, fnic_priv(sc)->flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
- !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
@@ -1449,6 +1554,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
@@ -1458,19 +1564,19 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->io_req = NULL;
io_req->sc = NULL;
+ start_time = io_req->start_time;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
- start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
mqtag, tag, sc, (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
@@ -1479,23 +1585,60 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&fnic_stats->io_stats.io_completions);
FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, tag, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- fnic_flags_and_state(sc));
-
+ sc->device->host->host_no, tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64) sc->cmnd[0] << 32 |
+ (u64) sc->cmnd[2] << 24 |
+ (u64) sc->cmnd[3] << 16 |
+ (u64) sc->cmnd[4] << 8 | sc->cmnd[5]),
+			   (((u64) fnic_priv(sc)->flags << 32) |
+			    fnic_priv(sc)->state));
+
+ /* Complete the command to SCSI */
scsi_done(sc);
-
return true;
}
-static void fnic_cleanup_io(struct fnic *fnic)
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
- scsi_host_busy_iter(fnic->lport->host,
- fnic_cleanup_io_iter, fnic);
+ unsigned int io_count = 0;
+ unsigned long flags;
+ struct fnic_io_req *io_req = NULL;
+ struct scsi_cmnd *sc = NULL;
+
+ io_count = fnic_count_all_ioreqs(fnic);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ scsi_host_busy_iter(fnic->host,
+ fnic_cleanup_io_iter, fnic);
+
+	/* For sg3_utils device reset, the scsi_cmnd is retrieved from the io_req */
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id];
+ if (io_req) {
+ sc = io_req->sc;
+ if (sc) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+ while ((io_count = fnic_count_all_ioreqs(fnic))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+		msleep(100);
+ }
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1516,7 +1659,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
if (id >= fnic->fnic_max_tag_id)
return;
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
if (!sc)
return;
@@ -1545,7 +1688,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
wq_copy_cleanup_scsi_cmd:
sc->result = DID_NO_CONNECT << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
@@ -1567,10 +1710,13 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
+ struct fnic_tport_s *tport = io_req->tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 1;
} else
@@ -1585,7 +1731,8 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ atomic_dec(&tport->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
@@ -1619,20 +1766,24 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
struct fnic *fnic = iter_data->fnic;
int abt_tag = 0;
struct fnic_io_req *io_req;
- unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
uint16_t hwq = 0;
+ unsigned long flags;
abt_tag = blk_mq_unique_tag(rq);
hwq = blk_mq_unique_tag_to_hwq(abt_tag);
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+ if (!sc) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq);
+ return true;
+ }
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
-
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
@@ -1640,7 +1791,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
hwq, abt_tag, fnic_priv(sc)->flags);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1655,37 +1806,40 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
+
if (io_req->abts_done) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_rport_exch_reset: io_req->abts_done is set "
- "state is %s\n",
+ shost_printk(KERN_ERR, fnic->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "rport_exch_reset "
- "IO not yet issued %p tag 0x%x flags "
- "%x state %d\n",
- sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
+ shost_printk(KERN_ERR, fnic->host,
+			"rport_exch_reset IO not yet issued %p abt_tag 0x%x\n",
+ sc, abt_tag);
+ shost_printk(KERN_ERR, fnic->host,
+ "flags %x state %d\n", fnic_priv(sc)->flags,
+ fnic_priv(sc)->state);
}
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "dev reset sc 0x%p\n", sc);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
- BUG_ON(io_req->abts_done);
-
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc);
+ WARN_ON_ONCE(io_req->abts_done);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_rport_reset_exch: Issuing abts\n");
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- /* Now queue the abort command to firmware */
+ /* Queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
@@ -1698,7 +1852,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
* lun reset
*/
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
hwq, abt_tag, fnic_priv(sc)->flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
@@ -1714,11 +1868,14 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
+
return true;
}
-static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
+ unsigned int io_count = 0;
+ unsigned long flags;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct fnic_rport_abort_io_iter_data iter_data = {
.fnic = fnic,
@@ -1726,53 +1883,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
.term_cnt = 0,
};
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset called portid 0x%06x\n",
- port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic rport exchange reset for tport: 0x%06x\n",
+ port_id);
if (fnic->in_remove)
return;
- scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
+ io_count = fnic_count_ioreqs(fnic, port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n",
+ port_id, io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ /* Bump in_flight counter to hold off fnic_fw_reset_handler. */
+ atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter,
&iter_data);
+
if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
+ atomic_dec(&fnic->in_flight);
+
+ while ((io_count = fnic_count_ioreqs(fnic, port_id)))
+		msleep(1000);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+		  "rport: 0x%x remaining portid-io-count: %d\n",
+ port_id, io_count);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
- struct fc_rport_libfc_priv *rdata;
- struct fc_lport *lport;
- struct fnic *fnic;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_iport_s *iport = NULL;
+ struct fnic *fnic = NULL;
if (!rport) {
- printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ pr_err("rport is NULL\n");
return;
}
- rdata = rport->dd_data;
- if (!rdata) {
- printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
- return;
+ rdd_data = rport->dd_data;
+ if (rdd_data) {
+ tport = rdd_data->tport;
+ if (!tport) {
+			pr_err("term rport io called after tport is deleted. Returning 0x%08x\n",
+			       rport->port_id);
+		} else {
+			pr_err("term rport io called after tport is set 0x%08x\n",
+			       rport->port_id);
+			pr_err("tport may have been rediscovered\n");
+
+ iport = (struct fnic_iport_s *) tport->iport;
+ fnic = iport->fnic;
+ fnic_rport_exch_reset(fnic, rport->port_id);
+ }
}
- lport = rdata->local_port;
+}
- if (!lport) {
- printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
- return;
- }
- fnic = lport_priv(lport);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
- rport->port_name, rport->node_name, rport,
- rport->port_id);
+/*
+ * FCP-SCSI specific handling for module unload
+ */
+void fnic_scsi_unload(struct fnic *fnic)
+{
+ unsigned long flags;
- if (fnic->in_remove)
- return;
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events to the local port. ISR and
+ * other threads that can queue work items will also stop
+ * creating work items on the fnic workqueue
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
+ fnic_scsi_fcpio_reset(fnic);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->in_remove = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fnic_flush_tport_event_list(fnic);
+ fnic_delete_fcp_tports(fnic);
+}
- fnic_rport_exch_reset(fnic, rport->port_id);
+void fnic_scsi_unload_cleanup(struct fnic *fnic)
+{
+ int hwq = 0;
+
+ fc_remove_host(fnic->host);
+ scsi_remove_host(fnic->host);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
/*
@@ -1783,10 +2002,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct request *const rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
+ struct rport_dd_data_s *rdd_data;
unsigned long flags;
unsigned long start_time = 0;
int ret = SUCCESS;
@@ -1806,11 +2027,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
-
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
+
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
@@ -1821,7 +2042,44 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Abort cmd called after tport delete! rport fcid: 0x%x",
+ rport->port_id);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n",
+ sc->device->lun, hwq, mqtag,
+ sc->cmnd[0], fnic_priv(sc)->flags);
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x",
+ rport->port_id, sc->device->lun, hwq, mqtag);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Op: 0x%x flags: 0x%x\n",
+ sc->cmnd[0],
+ fnic_priv(sc)->flags);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
ret = FAILED;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_abort_cmd_end;
@@ -1843,6 +2101,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
+ ret = FAILED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_abort_cmd_end;
}
@@ -1870,7 +2129,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
else
atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
sc->cmnd[0], abt_issued_time);
/*
@@ -1893,7 +2152,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
else {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
}
@@ -1961,7 +2220,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Issuing host reset due to out of order IO\n");
ret = FAILED;
@@ -2009,7 +2268,7 @@ fnic_abort_cmd_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2027,6 +2286,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
unsigned long flags;
uint16_t hwq = 0;
uint32_t tag = 0;
+ struct fnic_tport_s *tport = io_req->tport;
tag = io_req->tag;
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -2037,8 +2297,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
FNIC_FLAGS_IO_BLOCKED))) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return FAILED;
- } else
+ } else {
atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
@@ -2047,7 +2309,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (!vnic_wq_copy_desc_avail(wq)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
@@ -2072,6 +2334,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
}
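fnic_queue_dr_io_req now pairs the adapter-wide in_flight count with a per-tport count, incrementing both on issue and decrementing both on every exit path. A compact model of the paired-counter idiom (demo_* names are illustrative):

#include <stdatomic.h>

struct demo_target  { atomic_int in_flight; };
struct demo_adapter { atomic_int in_flight; };

/* Called once per issued task-management request. */
static void demo_issue_begin(struct demo_adapter *a, struct demo_target *t)
{
	atomic_fetch_add(&a->in_flight, 1);
	atomic_fetch_add(&t->in_flight, 1);
}

/* Called on every exit path, success or failure, so a reset can
 * wait for either the adapter or a single target to drain. */
static void demo_issue_end(struct demo_adapter *a, struct demo_target *t)
{
	atomic_fetch_sub(&t->in_flight, 1);
	atomic_fetch_sub(&a->in_flight, 1);
}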
@@ -2114,7 +2377,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2124,14 +2387,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
(!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst not pending sc 0x%p\n", sc);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
old_ioreq_state = fnic_priv(sc)->state;
@@ -2147,7 +2410,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
BUG_ON(io_req->abts_done);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst sc 0x%p\n", sc);
}
@@ -2169,7 +2432,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
iter_data->ret = FAILED;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
hwq, abt_tag);
return false;
@@ -2248,7 +2511,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
iter_data.lr_sc = lr_sc;
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_pending_aborts_iter, &iter_data);
if (iter_data.ret == FAILED) {
ret = iter_data.ret;
@@ -2261,7 +2524,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
ret = 1;
clean_pending_aborts_end:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"exit status: %d\n", ret);
return ret;
}
@@ -2274,11 +2537,11 @@ clean_pending_aborts_end:
int fnic_device_reset(struct scsi_cmnd *sc)
{
struct request *rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
+ int count = 0;
int ret = FAILED;
unsigned long flags;
unsigned long start_time = 0;
@@ -2289,31 +2552,63 @@ int fnic_device_reset(struct scsi_cmnd *sc)
DECLARE_COMPLETION_ONSTACK(tm_done);
bool new_sc = 0;
uint16_t hwq = 0;
+ struct fnic_iport_s *iport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_tport_s *tport;
+ u32 old_soft_reset_count;
+ u32 old_link_down_cnt;
+ int exit_dr = 0;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
+ iport = &fnic->iport;
- fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
- reset_stats = &fnic->fnic_stats.reset_stats;
+ reset_stats = &fnic_stats->reset_stats;
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
rport->port_id, sc->device->lun, hwq, mqtag,
fnic_priv(sc)->flags);
- if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
+ rport->port_id, sc->device->lun);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_device_reset_end;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/* Check if remote port up */
if (fc_remote_port_chkready(rport)) {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
goto fnic_device_reset_end;
}
@@ -2352,6 +2647,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->port_id = rport->port_id;
io_req->tag = mqtag;
fnic_priv(sc)->io_req = io_req;
+ io_req->tport = tport;
io_req->sc = sc;
if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
@@ -2366,7 +2662,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -2383,6 +2679,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ old_link_down_cnt = iport->fnic->link_down_cnt;
+ old_soft_reset_count = fnic->soft_reset_count;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
/*
* Wait on the local completion for LUN reset. The io_req may be
* freed while we wait since we hold no lock.
@@ -2390,14 +2691,39 @@ int fnic_device_reset(struct scsi_cmnd *sc)
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ /*
+ * Wake up can be due to the following reasons:
+ * 1) The device reset completed from target.
+ * 2) Device reset timed out.
+ * 3) A link-down/host_reset may have happened in between.
+ * 4) The device reset was aborted and io_req->dr_done was called.
+ */
+
+ exit_dr = 0;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if ((old_link_down_cnt != fnic->link_down_cnt) ||
+ (fnic->reset_in_progress) ||
+ (fnic->soft_reset_count != old_soft_reset_count) ||
+ (iport->state != FNIC_IPORT_STATE_READY))
+ exit_dr = 1;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
goto fnic_device_reset_end;
}
+
+ if (exit_dr) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Host reset called for fnic. Exit device reset\n");
+ io_req->dr_done = NULL;
+ goto fnic_device_reset_clean;
+ }
io_req->dr_done = NULL;
status = fnic_priv(sc)->lr_status;
@@ -2408,53 +2734,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Device reset timed out\n");
fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
int_to_scsilun(sc->device->lun, &fc_lun);
- /*
- * Issue abort and terminate on device reset request.
- * If q'ing of terminate fails, retry it after a delay.
- */
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- break;
- }
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_queue_abort_io_req(fnic,
- mqtag | FNIC_TAG_DEV_RST,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req, hwq)) {
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
- } else {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
- fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
- io_req->abts_done = &tm_done;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
- mqtag, sc);
- break;
- }
- }
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
- break;
- } else {
- io_req = fnic_priv(sc)->io_req;
- io_req->abts_done = NULL;
- goto fnic_device_reset_clean;
- }
- }
+ goto fnic_device_reset_clean;
} else {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
@@ -2463,7 +2747,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status != FCPIO_SUCCESS) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
+ fnic->host, fnic->fnic_num,
"Device reset completed - failed\n");
io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
@@ -2479,9 +2763,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Device reset failed"
- " since could not abort all IOs\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Device reset failed: Cannot abort all IOs\n");
goto fnic_device_reset_clean;
}
@@ -2507,6 +2790,15 @@ fnic_device_reset_clean:
mempool_free(io_req, fnic->io_req_pool);
}
+ /*
+ * If a link event is seen while the LUN reset is in progress,
+ * we need to complete the LUN reset here
+ */
+ if (!new_sc) {
+ sc->result = DID_RESET << 16;
+ scsi_done(sc);
+ }
+
fnic_device_reset_end:
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
@@ -2520,7 +2812,18 @@ fnic_device_reset_end:
mutex_unlock(&fnic->sgreset_mutex);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
+ if (count >= 2) {
+ ret = FAILED;
+ break;
+ }
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Cannot clean up all IOs for the LUN\n");
+ msleep(1000);
+ count++;
+ }
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2531,67 +2834,78 @@ fnic_device_reset_end:
return ret;
}
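The device-reset path snapshots link_down_cnt and soft_reset_count before the unlocked wait and compares them afterwards to decide whether to abandon the reset. The generation-counter check can be sketched as follows (userspace model, demo_* names illustrative):

#include <pthread.h>
#include <stdbool.h>

struct demo_hba {
	pthread_mutex_t lock;
	unsigned int link_down_cnt;     /* bumped on every link drop */
	unsigned int soft_reset_count;  /* bumped on every host reset */
};

/* Re-check the generation counters captured before the wait. */
static bool demo_world_changed(struct demo_hba *h,
			       unsigned int old_link,
			       unsigned int old_reset)
{
	bool changed;

	pthread_mutex_lock(&h->lock);
	changed = h->link_down_cnt != old_link ||
		  h->soft_reset_count != old_reset;
	pthread_mutex_unlock(&h->lock);
	return changed;
}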
-/* Clean up all IOs, clean up libFC local port */
-int fnic_reset(struct Scsi_Host *shost)
+static void fnic_post_flogo_linkflap(struct fnic *fnic)
+{
+ unsigned long flags;
+
+ fnic_fdls_link_status_change(fnic, 0);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fdls_link_status_change(fnic, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/* Logout from all the targets and simulate link flap */
+void fnic_reset(struct Scsi_Host *shost)
{
- struct fc_lport *lp;
struct fnic *fnic;
- int ret = 0;
struct reset_stats *reset_stats;
- lp = shost_priv(shost);
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(shost));
reset_stats = &fnic->fnic_stats.reset_stats;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Issuing fnic reset\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fnic reset\n");
atomic64_inc(&reset_stats->fnic_resets);
+ fnic_post_flogo_linkflap(fnic);
- /*
- * Reset local port, this will clean up libFC exchanges,
- * reset remote port sessions, and if link is up, begin flogi
- */
- ret = fc_lport_reset(lp);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Returning from fnic reset");
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Returning from fnic reset with: %s\n",
- (ret == 0) ? "SUCCESS" : "FAILED");
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+}
- if (ret == 0)
- atomic64_inc(&reset_stats->fnic_reset_completions);
- else
- atomic64_inc(&reset_stats->fnic_reset_failures);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ int ret = 0;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FC host lip issued");
+ ret = fnic_host_reset(shost);
return ret;
}
-/*
- * SCSI Error handling calls driver's eh_host_reset if all prior
- * error handling levels return FAILED. If host reset completes
- * successfully, and if link is up, then Fabric login begins.
- *
- * Host Reset is the highest level of error recovery. If this fails, then
- * host is offlined by SCSI.
- *
- */
-int fnic_host_reset(struct scsi_cmnd *sc)
+int fnic_host_reset(struct Scsi_Host *shost)
{
- int ret;
+ int ret = SUCCESS;
unsigned long wait_host_tmo;
- struct Scsi_Host *shost = sc->device->host;
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (!fnic->internal_reset_inprogress) {
- fnic->internal_reset_inprogress = true;
+ if (fnic->reset_in_progress == NOT_IN_PROGRESS) {
+ fnic->reset_in_progress = IN_PROGRESS;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "host reset in progress skipping another host reset\n");
- return SUCCESS;
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(10000));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Firmware reset in progress. Skipping another host reset\n");
+ return SUCCESS;
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -2600,140 +2914,34 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up
*/
- ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
- if (ret == SUCCESS) {
+ fnic_reset(shost);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic->soft_reset_count++;
+
+ /* wait till the link is up */
+ if (fnic->link_status) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
while (time_before(jiffies, wait_host_tmo)) {
- if ((lp->state == LPORT_ST_READY) &&
- (lp->link_up)) {
+ if (iport->state != FNIC_IPORT_STATE_READY
+ && fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ ssleep(1);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ } else {
ret = SUCCESS;
break;
}
- ssleep(1);
}
}
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = false;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return ret;
-}
-
-/*
- * This fxn is called from libFC when host is removed
- */
-void fnic_scsi_abort_io(struct fc_lport *lp)
-{
- int err = 0;
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
- DECLARE_COMPLETION_ONSTACK(remove_wait);
-
- /* Issue firmware reset for fnic, wait for reset to complete */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
- fnic->link_events) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
-
- fnic->remove_wait = &remove_wait;
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- err = fnic_fw_reset_handler(fnic);
- if (err) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- fnic->remove_wait = NULL;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- /* Wait for firmware reset to complete */
- wait_for_completion_timeout(&remove_wait,
- msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->remove_wait = NULL;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_scsi_abort_io %s\n",
- (fnic->state == FNIC_IN_ETH_MODE) ?
- "SUCCESS" : "FAILED");
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-}
-
-/*
- * This fxn called from libFC to clean up driver IO state on link down
- */
-void fnic_scsi_cleanup(struct fc_lport *lp)
-{
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
-
- /* issue fw reset */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- if (fnic_fw_reset_handler(fnic)) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- }
-
-}
-
-void fnic_empty_scsi_cleanup(struct fc_lport *lp)
-{
-}
-
-void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
-{
- struct fnic *fnic = lport_priv(lp);
-
- /* Non-zero sid, nothing to do */
- if (sid)
- goto call_fc_exch_mgr_reset;
-
- if (did) {
- fnic_rport_exch_reset(fnic, did);
- goto call_fc_exch_mgr_reset;
- }
-
- /*
- * sid = 0, did = 0
- * link down or device being removed
- */
- if (!fnic->in_remove)
- fnic_scsi_cleanup(lp);
- else
- fnic_scsi_abort_io(lp);
-
- /* call libFC exch mgr reset to reset its exchanges */
-call_fc_exch_mgr_reset:
- fc_exch_mgr_reset(lp, sid, did);
-
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "host reset return status: %d\n", ret);
+ return ret;
}
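fnic_host_reset serializes resetters: a latecomer waits, bounded, on reset_completion_wait and then re-checks reset_in_progress before claiming the reset itself. A userspace model of that handshake (demo_* names are illustrative; the driver's wait is timeout-bounded):

#include <pthread.h>
#include <stdbool.h>

struct demo_hba {
	pthread_mutex_t lock;
	pthread_cond_t reset_done;
	bool reset_in_progress;
};

static void demo_host_reset(struct demo_hba *h)
{
	pthread_mutex_lock(&h->lock);
	while (h->reset_in_progress)
		pthread_cond_wait(&h->reset_done, &h->lock);
	h->reset_in_progress = true;
	pthread_mutex_unlock(&h->lock);

	/* ... perform the reset without holding the lock ... */

	pthread_mutex_lock(&h->lock);
	h->reset_in_progress = false;
	pthread_cond_broadcast(&h->reset_done);
	pthread_mutex_unlock(&h->lock);
}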
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
@@ -2771,7 +2979,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
hwq, tag,
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2804,8 +3012,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
}
/* walk again to check, if IOs are still pending in fw */
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_abts_pending_iter, &iter_data);
return iter_data.ret;
}
+
+/*
+ * SCSI Error handling calls driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If host reset completes
+ * successfully, and if link is up, then Fabric login begins.
+ *
+ * Host Reset is the highest level of error recovery. If this fails, then
+ * host is offlined by SCSI.
+ *
+ */
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
+{
+ int ret = 0;
+ struct Scsi_Host *shost = sc->device->host;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "SCSI error handling: fnic host reset");
+
+ ret = fnic_host_reset(shost);
+ return ret;
+}
+
+
+void fnic_scsi_fcpio_reset(struct fnic *fnic)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic_iport_s *iport = &fnic->iport;
+ DECLARE_COMPLETION_ONSTACK(fw_reset_done);
+ int time_remain;
+
+ /* issue fw reset */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is already in progress, bail out */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic is in unexpected state: %d for fw_reset\n",
+ fnic->state);
+ return;
+ }
+
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+
+ fnic_update_mac_locked(fnic, iport->hwmac);
+ fnic->fw_reset_done = &fw_reset_done;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fw reset\n");
+ if (fnic_fw_reset_handler(fnic)) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Waiting for fw completion\n");
+ time_remain = wait_for_completion_timeout(&fw_reset_done,
+ msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Woken up from fw reset completion wait\n");
+ if (time_remain == 0) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW reset completion timed out after %d ms\n",
+ FNIC_FW_RESET_TIMEOUT);
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
+ }
+ }
+ fnic->fw_reset_done = NULL;
+}
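fnic_scsi_fcpio_reset parks a pointer to an on-stack completion in fnic->fw_reset_done, waits with a timeout, and always clears the pointer before returning so the completion path can never signal a dead stack frame. A sketch of that handshake (userspace model, demo_* names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

struct demo_hba {
	struct demo_completion *fw_reset_done;  /* NULL: no waiter */
};

/* Completion side (the driver's interrupt path). */
static void demo_fw_reset_complete(struct demo_hba *h)
{
	struct demo_completion *c = h->fw_reset_done;

	if (!c)
		return;
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Requester side: publish, wait, then always unpublish. */
static void demo_fw_reset_wait(struct demo_hba *h)
{
	struct demo_completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	h->fw_reset_done = &c;
	/* ... issue the reset; the driver bounds this wait ... */
	pthread_mutex_lock(&c.lock);
	while (!c.done)
		pthread_cond_wait(&c.cond, &c.lock);
	pthread_mutex_unlock(&c.lock);

	h->fw_reset_done = NULL;  /* never leave a dangling pointer */
}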
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9d7f98c452dd..8ddd20401a59 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -3,6 +3,7 @@
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
#define FNIC_MQ_MAX_QUEUES 64
+#include <scsi/scsi_transport_fc.h>
struct stats_timestamps {
struct timespec64 last_reset_time;
@@ -63,6 +64,7 @@ struct reset_stats {
atomic64_t fw_resets;
atomic64_t fw_reset_completions;
atomic64_t fw_reset_failures;
+ atomic64_t fw_reset_timeouts;
atomic64_t fnic_resets;
atomic64_t fnic_reset_completions;
atomic64_t fnic_reset_failures;
@@ -102,10 +104,51 @@ struct misc_stats {
atomic64_t no_icmnd_itmf_cmpls;
atomic64_t check_condition;
atomic64_t queue_fulls;
- atomic64_t rport_not_ready;
+ atomic64_t tport_not_ready;
+ atomic64_t iport_not_ready;
atomic64_t frame_errors;
atomic64_t current_port_speed;
atomic64_t intx_dummy;
+ atomic64_t port_speed_in_mbps;
+};
+
+struct fnic_iport_stats {
+ atomic64_t num_linkdn;
+ atomic64_t num_linkup;
+ atomic64_t link_failure_count;
+ atomic64_t num_rscns;
+ atomic64_t rscn_redisc;
+ atomic64_t rscn_not_redisc;
+ atomic64_t frame_err;
+ atomic64_t num_rnid;
+ atomic64_t fabric_flogi_sent;
+ atomic64_t fabric_flogi_ls_accepts;
+ atomic64_t fabric_flogi_ls_rejects;
+ atomic64_t fabric_flogi_misc_rejects;
+ atomic64_t fabric_plogi_sent;
+ atomic64_t fabric_plogi_ls_accepts;
+ atomic64_t fabric_plogi_ls_rejects;
+ atomic64_t fabric_plogi_misc_rejects;
+ atomic64_t fabric_scr_sent;
+ atomic64_t fabric_scr_ls_accepts;
+ atomic64_t fabric_scr_ls_rejects;
+ atomic64_t fabric_scr_misc_rejects;
+ atomic64_t fabric_logo_sent;
+ atomic64_t tport_alive;
+ atomic64_t tport_plogi_sent;
+ atomic64_t tport_plogi_ls_accepts;
+ atomic64_t tport_plogi_ls_rejects;
+ atomic64_t tport_plogi_misc_rejects;
+ atomic64_t tport_prli_sent;
+ atomic64_t tport_prli_ls_accepts;
+ atomic64_t tport_prli_ls_rejects;
+ atomic64_t tport_prli_misc_rejects;
+ atomic64_t tport_adisc_sent;
+ atomic64_t tport_adisc_ls_accepts;
+ atomic64_t tport_adisc_ls_rejects;
+ atomic64_t tport_logo_sent;
+ atomic64_t unsupported_frames_ls_rejects;
+ atomic64_t unsupported_frames_dropped;
};
struct fnic_stats {
@@ -116,6 +159,7 @@ struct fnic_stats {
struct reset_stats reset_stats;
struct fw_stats fw_stats;
struct vlan_stats vlan_stats;
+ struct fc_host_statistics host_stats;
struct misc_stats misc_stats;
};
@@ -127,6 +171,5 @@ struct stats_debug_info {
};
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-void fnic_stats_debugfs_init(struct fnic *);
-void fnic_stats_debugfs_remove(struct fnic *);
+const char *fnic_role_to_str(unsigned int role);
#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index aaa4ea02fb7c..cdc6b12b1ec2 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -8,6 +8,7 @@
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
@@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+static const char * const fnic_role_str[] = {
+ [FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
+};
+
+const char *fnic_role_to_str(unsigned int role)
+{
+ if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
+ return "Unknown";
+
+ return fnic_role_str[role];
+}
/*
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
@@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Check Conditions encountered: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
- "Number of receive frame errors: %lld\n",
+ "Number of receive frame errors: %lld\n"
+ "Port speed (in Mbps): %lld\n",
(u64)stats->misc_stats.last_isr_time,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
@@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
(u64)atomic64_read(&stats->misc_stats.check_condition),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
- (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
- (u64)atomic64_read(&stats->misc_stats.frame_errors));
-
- len += scnprintf(debug->debug_buffer + len, buf_size - len,
- "Firmware reported port speed: %llu\n",
- (u64)atomic64_read(
- &stats->misc_stats.current_port_speed));
+ (u64)atomic64_read(&stats->misc_stats.tport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors),
+ (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
return len;
}
+int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ int buf_size = info->buf_size;
+ int len = info->buffer_len;
+ struct fnic_tport_s *tport, *next;
+ unsigned long flags;
+
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\t Debug Info\n"
+ "------------------------------------------\n");
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "fnic Name:%s number:%d Role:%s State:%s\n",
+ fnic->name, fnic->fnic_num,
+ fnic_role_to_str(fnic->role),
+ fnic_state_to_str(fnic->state));
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
+ iport->state, iport->flags, iport->vlan_id, iport->fcid);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "usefip:%d fip_state:%d fip_flogi_retry:%d\n",
+ iport->usefip, iport->fip.state, iport->fip.flogi_retry);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fpma %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->fpma[5], iport->fpma[4], iport->fpma[3],
+ iport->fpma[2], iport->fpma[1], iport->fpma[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
+ iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
+ iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
+ iport->fabric.state, iport->fabric.flags,
+ iport->fabric.retry_counter, iport->e_d_tov,
+ iport->r_a_tov);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
+ tport->fcid, tport->state, tport->flags,
+ atomic_read(&tport->in_flight),
+ tport->retry_counter);
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return len;
+}
+
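fnic_get_debug_info above appends entries by carrying len forward into each snprintf call. A sketch of the idiom; note the kernel's scnprintf() is the safer variant because it returns the bytes actually stored, so len cannot run past the buffer on truncation (demo_append is illustrative):

#include <stdio.h>

/* Append one entry, carrying the running length forward. With plain
 * snprintf() the returned length is the would-be length, so callers
 * must keep the (len < buf_size) guard or a truncated entry makes
 * the next offset computation wrap. */
static int demo_append(char *buf, int buf_size, int len, int value)
{
	if (len < buf_size)
		len += snprintf(buf + len, buf_size - len,
				"value: %d\n", value);
	return len;
}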
/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
@@ -485,8 +548,7 @@ int fnic_trace_buf_init(void)
}
fnic_trace_entries.page_offset =
- vmalloc(array_size(fnic_max_trace_entries,
- sizeof(unsigned long)));
+ vcalloc(fnic_max_trace_entries, sizeof(unsigned long));
if (!fnic_trace_entries.page_offset) {
printk(KERN_ERR PFX "Failed to allocate memory for"
" page_offset\n");
@@ -497,8 +559,6 @@ int fnic_trace_buf_init(void)
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
- memset((void *)fnic_trace_entries.page_offset, 0,
- (fnic_max_trace_entries * sizeof(unsigned long)));
fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
fnic_buf_head = fnic_trace_buf_p;
@@ -559,8 +619,7 @@ int fnic_fc_trace_init(void)
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
FC_TRC_SIZE_BYTES;
fnic_fc_ctlr_trace_buf_p =
- (unsigned long)vmalloc(array_size(PAGE_SIZE,
- fnic_fc_trace_max_pages));
+ (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
if (!fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Failed to allocate memory for "
"FC Control Trace Buf\n");
@@ -568,13 +627,9 @@ int fnic_fc_trace_init(void)
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
- fnic_fc_trace_max_pages * PAGE_SIZE);
-
/* Allocate memory for page offset */
fc_trace_entries.page_offset =
- vmalloc(array_size(fc_trace_max_entries,
- sizeof(unsigned long)));
+ vcalloc(fc_trace_max_entries, sizeof(unsigned long));
if (!fc_trace_entries.page_offset) {
pr_err("fnic:Failed to allocate memory for page_offset\n");
if (fnic_fc_ctlr_trace_buf_p) {
@@ -585,8 +640,6 @@ int fnic_fc_trace_init(void)
err = -ENOMEM;
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fc_trace_entries.page_offset, 0,
- (fc_trace_max_entries * sizeof(unsigned long)));
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
@@ -688,7 +741,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
*/
if (frame_type == FNIC_FC_RECV) {
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
- sizeof(struct fcoe_hdr);
+ sizeof(struct fcoe_hdr);
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
/* Copy the rest of data frame */
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
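The vcalloc() conversions above replace vmalloc(array_size(...)) plus an explicit memset() with a single call that zeroes the memory and overflow-checks the multiplication. The before/after shape, as a sketch (kernel context assumed; demo_alloc_offsets is illustrative):

#include <linux/vmalloc.h>

static unsigned long *demo_alloc_offsets(size_t nr_entries)
{
	/* Before:
	 *   p = vmalloc(array_size(nr_entries, sizeof(*p)));
	 *   if (p)
	 *           memset(p, 0, nr_entries * sizeof(*p));
	 * After: one overflow-checked, pre-zeroed allocation.
	 */
	return vcalloc(nr_entries, sizeof(unsigned long));
}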
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a44768bceb9a..2d438d722d0b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
#include <linux/acpi.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
@@ -644,9 +643,8 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern void hisi_sas_remove(struct platform_device *pdev);
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
-extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern int hisi_sas_sdev_init(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 53cb15f6714b..da4a2ed8ee86 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -805,13 +805,13 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
-int hisi_sas_slave_alloc(struct scsi_device *sdev)
+int hisi_sas_sdev_init(struct scsi_device *sdev)
{
struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_sas_device *sas_dev = ddev->lldd_dev;
int rc;
- rc = sas_slave_alloc(sdev);
+ rc = sas_sdev_init(sdev);
if (rc)
return rc;
@@ -821,7 +821,7 @@ int hisi_sas_slave_alloc(struct scsi_device *sdev)
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_init);
static int hisi_sas_dev_found(struct domain_device *device)
{
@@ -868,11 +868,10 @@ err_out:
return rc;
}
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_device_configure(sdev, lim);
+ int ret = sas_sdev_configure(sdev, lim);
if (ret)
return ret;
@@ -881,7 +880,7 @@ int hisi_sas_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_device_configure);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c3e571be2222..bb78e53c66e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1753,11 +1753,11 @@ static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v1_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1a62b5d15eca..71cd5b4450c2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3585,11 +3585,11 @@ static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v2_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v2_hw_groups,
.sdev_groups = sdev_groups_v2_hw,
.host_reset = hisi_sas_host_reset,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5db931663ae4..48b95d9a7927 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2908,12 +2908,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
}
static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
-static int device_configure_v3_hw(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sdev_configure_v3_hw(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
struct hisi_hba *hisi_hba = shost_priv(shost);
- int ret = hisi_sas_device_configure(sdev, lim);
+ int ret = hisi_sas_sdev_configure(sdev, lim);
struct device *dev = hisi_hba->dev;
if (ret)
@@ -3328,24 +3328,24 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_map_hw_queues(qmap, hisi_hba->dev,
+ BASE_VECTORS_V3_HW);
qoff += qmap->nr_queues;
}
}
static const struct scsi_host_template sht_v3_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = device_configure_v3_hw,
+ .sdev_configure = sdev_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0c49414c1f35..84d8de07b7ae 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -283,9 +283,10 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
-static int hpsa_slave_alloc(struct scsi_device *sdev);
-static int hpsa_slave_configure(struct scsi_device *sdev);
-static void hpsa_slave_destroy(struct scsi_device *sdev);
+static int hpsa_sdev_init(struct scsi_device *sdev);
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
+static void hpsa_sdev_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
@@ -978,9 +979,9 @@ static const struct scsi_host_template hpsa_driver_template = {
.this_id = -1,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_configure = hpsa_slave_configure,
- .slave_destroy = hpsa_slave_destroy,
+ .sdev_init = hpsa_sdev_init,
+ .sdev_configure = hpsa_sdev_configure,
+ .sdev_destroy = hpsa_sdev_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
@@ -2107,7 +2108,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
return NULL;
}
-static int hpsa_slave_alloc(struct scsi_device *sdev)
+static int hpsa_sdev_init(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd = NULL;
unsigned long flags;
@@ -2142,7 +2143,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
/* configure scsi device based on internal per-device structure */
#define CTLR_TIMEOUT (120 * HZ)
-static int hpsa_slave_configure(struct scsi_device *sdev)
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct hpsa_scsi_dev_t *sd;
int queue_depth;
@@ -2173,7 +2175,7 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void hpsa_slave_destroy(struct scsi_device *sdev)
+static void hpsa_sdev_destroy(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *hdev = NULL;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e889f268601b..21f1d9871a33 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1151,8 +1151,8 @@ static struct attribute *hptiop_host_attrs[] = {
ATTRIBUTE_GROUPS(hptiop_host);
-static int hptiop_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int hptiop_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (sdev->type == TYPE_TAPE)
lim->max_hw_sectors = 8192;
@@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = driver_name,
.shost_groups = hptiop_host_groups,
- .device_configure = hptiop_device_configure,
+ .sdev_configure = hptiop_sdev_configure,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
.cmd_size = sizeof(struct hpt_cmd_priv),
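This hunk shows the template-wide migration pattern in this series: the legacy slave_configure(sdev) hook becomes sdev_configure(sdev, lim), and limits are written into the passed-in queue_limits for the midlayer to apply rather than poked into the live queue. Minimal shape of the new hook (kernel context assumed; demo_sdev_configure is illustrative):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

/* New-style hook: write limits into *lim; the SCSI midlayer applies
 * them when it commits the queue limits, so the driver never touches
 * the live request queue here. */
static int demo_sdev_configure(struct scsi_device *sdev,
			       struct queue_limits *lim)
{
	if (sdev->type == TYPE_TAPE)
		lim->max_hw_sectors = 8192;
	return 0;
}

The hook is then wired up in the host template as .sdev_configure = demo_sdev_configure, exactly as the hptiop change above does.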
@@ -1634,7 +1634,7 @@ static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
.host_phy_flag = cpu_to_le64(1),
};
-static struct pci_device_id hptiop_id_table[] = {
+static const struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index e66c3ef74267..773ec2f31bc4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3393,7 +3393,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
/**
- * ibmvfc_slave_alloc - Setup the device's task set value
+ * ibmvfc_sdev_init - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
@@ -3402,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
-static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+static int ibmvfc_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -3441,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
}
/**
- * ibmvfc_slave_configure - Configure the device
+ * ibmvfc_sdev_configure - Configure the device
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
@@ -3450,7 +3451,8 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
* Returns:
* 0
**/
-static int ibmvfc_slave_configure(struct scsi_device *sdev)
+static int ibmvfc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
@@ -3639,7 +3641,7 @@ static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3662,13 +3664,13 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ibmvfc_trace_attr = {
+static const struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ibmvfc_read_trace,
+ .read_new = ibmvfc_read_trace,
};
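The sysfs changes here are part of the bin_attribute constification: the attribute object becomes const and the callback moves to .read_new, whose bin_attribute parameter is const-qualified. A minimal sketch (kernel context assumed; demo_* names are illustrative):

#include <linux/sysfs.h>

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	/* Copy up to count bytes at offset off into buf; return the
	 * number of bytes produced. */
	return 0;
}

static const struct bin_attribute demo_attr = {
	.attr = { .name = "trace", .mode = 0444 },
	.size = 0,
	.read_new = demo_read,
};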
#endif
@@ -3696,8 +3698,8 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
- .slave_alloc = ibmvfc_slave_alloc,
- .slave_configure = ibmvfc_slave_configure,
+ .sdev_init = ibmvfc_sdev_init,
+ .sdev_configure = ibmvfc_sdev_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 71f3e9563520..16a1aac11911 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1860,14 +1860,16 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
/**
- * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
+ * ibmvscsi_sdev_configure: Set the "allow_restart" flag for each disk.
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also as is required by the documentation for
* struct scsi_host_template.
*/
-static int ibmvscsi_slave_configure(struct scsi_device *sdev)
+static int ibmvscsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long lock_flags = 0;
@@ -2091,7 +2093,7 @@ static struct scsi_host_template driver_template = {
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
- .slave_configure = ibmvscsi_slave_configure,
+ .sdev_configure = ibmvscsi_sdev_configure,
.change_queue_depth = ibmvscsi_change_queue_depth,
.host_reset = ibmvscsi_host_reset,
.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
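hrtimer_setup() folds hrtimer_init() plus the open-coded callback assignment into one call, so the timer can never be armed with the function pointer unset. Sketch of the converted form (kernel context assumed; demo_* names are illustrative):

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_timer_init(struct hrtimer *t)
{
	/* One call replaces hrtimer_init() + "t->function = ...". */
	hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}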
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 625fd547ee60..8648bd965287 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2941,7 +2941,7 @@ static void initio_remove_one(struct pci_dev *pdev)
MODULE_LICENSE("GPL");
-static struct pci_device_id initio_pci_tbl[] = {
+static const struct pci_device_id initio_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 31cf2d31cceb..3bfafd43e42a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3366,7 +3366,7 @@ static void ipr_worker_thread(struct work_struct *work)
* number of bytes printed to buffer
**/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3383,13 +3383,13 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
return ret;
}
-static struct bin_attribute ipr_trace_attr = {
+static const struct bin_attribute ipr_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ipr_read_trace,
+ .read_new = ipr_read_trace,
};
#endif
@@ -4087,7 +4087,7 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
};
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4111,7 +4111,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
}
static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4134,14 +4134,14 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_ioa_async_err_log = {
+static const struct bin_attribute ipr_ioa_async_err_log = {
.attr = {
.name = "async_err_log",
.mode = S_IRUGO | S_IWUSR,
},
.size = 0,
- .read = ipr_read_async_err_log,
- .write = ipr_next_async_err_log
+ .read_new = ipr_read_async_err_log,
+ .write_new = ipr_next_async_err_log
};
static struct attribute *ipr_ioa_attrs[] = {
@@ -4172,7 +4172,7 @@ ATTRIBUTE_GROUPS(ipr_ioa);
* number of bytes printed to buffer
**/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4361,7 +4361,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* number of bytes printed to buffer
**/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4385,14 +4385,14 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_dump_attr = {
+static const struct bin_attribute ipr_dump_attr = {
.attr = {
.name = "dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = ipr_read_dump,
- .write = ipr_write_dump
+ .read_new = ipr_read_dump,
+ .write_new = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
@@ -4745,13 +4745,13 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
}
/**
- * ipr_slave_destroy - Unconfigure a SCSI device
+ * ipr_sdev_destroy - Unconfigure a SCSI device
* @sdev: scsi device struct
*
* Return value:
* nothing
**/
-static void ipr_slave_destroy(struct scsi_device *sdev)
+static void ipr_sdev_destroy(struct scsi_device *sdev)
{
struct ipr_resource_entry *res;
struct ipr_ioa_cfg *ioa_cfg;
@@ -4769,7 +4769,7 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
}
/**
- * ipr_device_configure - Configure a SCSI device
+ * ipr_sdev_configure - Configure a SCSI device
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -4778,8 +4778,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
* Return value:
* 0 on success
**/
-static int ipr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ipr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -4815,7 +4815,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
}
/**
- * ipr_slave_alloc - Prepare for commands to a device.
+ * ipr_sdev_init - Prepare for commands to a device.
* @sdev: scsi device struct
*
* This function saves a pointer to the resource entry
@@ -4826,7 +4826,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
* Return value:
* 0 on success / -ENXIO if device does not exist
**/
-static int ipr_slave_alloc(struct scsi_device *sdev)
+static int ipr_sdev_init(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -6398,9 +6398,9 @@ static const struct scsi_host_template driver_template = {
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
- .slave_alloc = ipr_slave_alloc,
- .device_configure = ipr_device_configure,
- .slave_destroy = ipr_slave_destroy,
+ .sdev_init = ipr_sdev_init,
+ .sdev_configure = ipr_sdev_configure,
+ .sdev_destroy = ipr_sdev_destroy,
.scan_finished = ipr_scan_finished,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
@@ -9844,7 +9844,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
}
}
-static struct pci_device_id ipr_pci_table[] = {
+static const struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 10cf5775a939..cce6c6b409ad 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -364,7 +364,7 @@ static struct scsi_host_template ips_driver_template = {
.proc_name = "ips",
.show_info = ips_show_info,
.write_info = ips_write_info,
- .slave_configure = ips_slave_configure,
+ .sdev_configure = ips_sdev_configure,
.bios_param = ips_biosparam,
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
@@ -1166,7 +1166,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/****************************************************************************/
/* */
-/* Routine Name: ips_slave_configure */
+/* Routine Name: ips_sdev_configure */
/* */
/* Routine Description: */
/* */
@@ -1174,7 +1174,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/* */
/****************************************************************************/
static int
-ips_slave_configure(struct scsi_device * SDptr)
+ips_sdev_configure(struct scsi_device *SDptr, struct queue_limits *lim)
{
ips_ha_t *ha;
int min;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 65edf000e447..8ac932ec4444 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -400,7 +400,8 @@
*/
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
- static int ips_slave_configure(struct scsi_device *SDptr);
+ static int ips_sdev_configure(struct scsi_device *SDptr,
+ struct queue_limits *lim);
/*
* Raid Command Formats
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 866950a02965..287e1ba8ddd7 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -422,21 +422,6 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
}
}
-enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- if (state != SCI_DEV_RESETTING) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- }
-
- sci_change_state(sm, SCI_DEV_READY);
- return SCI_SUCCESS;
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
@@ -1694,20 +1679,6 @@ enum sci_status sci_remote_device_abort_requests_pending_abort(
return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev)
-{
- unsigned long flags;
- enum sci_status status;
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset_complete(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- return status;
-}
-
void isci_dev_set_hang_detection_timeout(
struct isci_remote_device *idev,
u32 timeout)
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 3ad681c4c20a..27ae45332704 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -175,19 +175,6 @@ enum sci_status sci_remote_device_reset(
struct isci_remote_device *idev);
/**
- * sci_remote_device_reset_complete() - This method informs the device object
- * that the reset operation is complete and the device can resume operation
- * again.
- * @remote_device: This parameter specifies the device which is to be informed
- * of the reset complete operation.
- *
- * An indication that the device is resuming operation. SCI_SUCCESS the device
- * is resuming operation.
- */
-enum sci_status sci_remote_device_reset_complete(
- struct isci_remote_device *idev);
-
-/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
@@ -364,10 +351,6 @@ enum sci_status isci_remote_device_reset(
struct isci_host *ihost,
struct isci_remote_device *idev);
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev);
-
enum sci_status isci_remote_device_suspend_terminate(
struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c708e1059638..e81f60985193 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1057,8 +1057,8 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int iscsi_sw_tcp_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
struct iscsi_session *session = tcp_sw_host->session;
@@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
- .device_configure = iscsi_sw_tcp_device_configure,
+ .sdev_configure = iscsi_sw_tcp_sdev_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 80be3a936d92..fd1ef06655cb 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2222,13 +2222,13 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
EXPORT_SYMBOL(fc_eh_host_reset);
/**
- * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
+ * fc_sdev_init() - Configure the queue depth of a Scsi_Host
* @sdev: The SCSI device that identifies the SCSI host
*
 * Configures queue depth based on host's cmd_per_lun. If not set
* then we use the libfc default.
*/
-int fc_slave_alloc(struct scsi_device *sdev)
+int fc_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2238,7 +2238,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
return 0;
}
-EXPORT_SYMBOL(fc_slave_alloc);
+EXPORT_SYMBOL(fc_sdev_init);
/**
* fc_fcp_destroy() - Tear down the FCP layer for a given local port
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da11d32840e2..55ce7892f217 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -804,15 +804,14 @@ EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
-int sas_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
- ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
+ ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap);
return 0;
}
@@ -830,7 +829,7 @@ int sas_device_configure(struct scsi_device *scsi_dev,
return 0;
}
-EXPORT_SYMBOL_GPL(sas_device_configure);
+EXPORT_SYMBOL_GPL(sas_sdev_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -1194,14 +1193,14 @@ void sas_task_abort(struct sas_task *task)
}
EXPORT_SYMBOL_GPL(sas_task_abort);
-int sas_slave_alloc(struct scsi_device *sdev)
+int sas_sdev_init(struct scsi_device *sdev)
{
if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
return -ENXIO;
return 0;
}
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
+EXPORT_SYMBOL_GPL(sas_sdev_init);
void sas_target_destroy(struct scsi_target *starget)
{
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..62438e84e52a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1715,18 +1715,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1728,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
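The two conversions above collapse open-coded scan loops into the <linux/cpumask.h> wrap helpers. A minimal sketch of the resulting semantics (illustrative, not part of the patch):

/* next CPU in (mask & cpu_online_mask), scanning from start and
 * wrapping around once; returns >= nr_cpu_ids if the intersection
 * is empty
 */
unsigned int cpu = cpumask_next_and_wrap(start, mask, cpu_online_mask);

/* next present CPU after n, wrapping back to the start of the mask;
 * replaces the cpumask_next()/cpumask_first() fallback pair
 */
unsigned int cpu2 = cpumask_next_wrap(n, cpu_present_mask);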
/**
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 39b504164ecc..0d0213bba35d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -6185,7 +6185,7 @@ const struct attribute_group *lpfc_vport_groups[] = {
**/
static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6244,7 +6244,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6280,14 +6280,14 @@ sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_ctlreg_attr = {
+static const struct bin_attribute sysfs_ctlreg_attr = {
.attr = {
.name = "ctlreg",
.mode = S_IRUSR | S_IWUSR,
},
.size = 256,
- .read = sysfs_ctlreg_read,
- .write = sysfs_ctlreg_write,
+ .read_new = sysfs_ctlreg_read,
+ .write_new = sysfs_ctlreg_write,
};
/**
@@ -6308,7 +6308,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
@@ -6332,20 +6332,20 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
}
-static struct bin_attribute sysfs_mbox_attr = {
+static const struct bin_attribute sysfs_mbox_attr = {
.attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
.size = MAILBOX_SYSFS_MAX,
- .read = sysfs_mbox_read,
- .write = sysfs_mbox_write,
+ .read_new = sysfs_mbox_read,
+ .write_new = sysfs_mbox_write,
};
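For reference, a minimal sketch of the transitional sysfs interface the two attributes above (and ipr's dump attribute earlier) are moved to; the demo_* names are hypothetical:

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	return 0;	/* copy at most count bytes at off into buf */
}

/* .read_new/.write_new accept the const-qualified attribute; once the
 * tree-wide constification finishes, these are presumably the only
 * variants that remain
 */
static const struct bin_attribute demo_attr = {
	.attr	= { .name = "demo", .mode = 0400 },
	.size	= 0,
	.read_new = demo_read,
};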
/**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 1c6b024160da..c8f8496bbdf8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -120,6 +120,16 @@ enum ELX_LOOPBACK_CMD {
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+/* For non-embedded read object command */
+#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128}
+#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192}
+static const struct lpfc_read_object_cmd_scheme {
+ u32 min_hbd_cnt;
+ u32 max_hbd_cnt;
+ u32 cmd_size;
+ u32 payload_word_offset;
+} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1};
+
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;
uint32_t size;
@@ -3539,6 +3549,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
}
/**
+ * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded
+ * READ_OBJECT_V0 mailbox commands
+ * @phba: pointer to lpfc_hba data struct
+ * @pmb_buf: pointer to mailbox buffer
+ * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region
+ * @job: pointer to bsg_job struct
+ * @bsg_reply: pointer to bsg_reply struct
+ *
+ * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies
+ * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job
+ * structure for passing back to application layer.
+ *
+ * Return codes
+ * 0 - successful
+ * -EINVAL - invalid HBD_CNT
+ * -ENODEV - pointer to bsg_job struct is NULL
+ **/
+static int
+lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf,
+ struct lpfc_sli_config_mbox *sli_cfg_mbx,
+ struct bsg_job *job,
+ struct fc_bsg_reply *bsg_reply)
+{
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ u32 hbd_cnt;
+ u32 dma_buf_len;
+ u8 i = 0;
+ size_t extra_bytes;
+ off_t skip = 0;
+
+ if (!job) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2496 NULL job\n");
+ return -ENODEV;
+ }
+
+ if (!bsg_reply) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2498 NULL bsg_reply\n");
+ return -ENODEV;
+ }
+
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
+
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+
+ /* Calculate where the read object's read data payload is located based
+ * on HBD count scheme.
+ */
+ if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) {
+ skip = rd_obj_scheme[0].payload_word_offset * 4;
+ } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) {
+ skip = rd_obj_scheme[1].payload_word_offset * 4;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2497 bad hbd_count 0x%08x\n",
+ hbd_cnt);
+ return -EINVAL;
+ }
+
+ /* Copy SLI_CONFIG command and READ_OBJECT response first */
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, skip);
+
+ /* Copy data from hbds */
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ list) {
+ dma_buf_len = emb0_subsys->hbd[i].buf_len;
+
+ /* Use sg_copy_buffer to specify a skip offset */
+ extra_bytes = sg_copy_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ curr_dmabuf->virt,
+ dma_buf_len, skip, false);
+
+ bsg_reply->reply_payload_rcv_len += extra_bytes;
+
+ skip += extra_bytes;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2499 copied hbd[%d] "
+ "0x%zx bytes\n",
+ i, extra_bytes);
+ i++;
+ }
+
+ return 0;
+}
+
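As a worked example of the scheme table above (values taken from READ_OBJ_EMB0_SCHEME_0/1):

/* hbd_cnt =  5 -> scheme 0 (1..10 HBDs):  skip = 128 words * 4 = 512 bytes
 * hbd_cnt = 20 -> scheme 1 (11..31 HBDs): skip = 192 words * 4 = 768 bytes
 * hbd_cnt = 0, or > LPFC_EMB0_MAX_RD_OBJ_HBD_CNT -> -EINVAL
 */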
+/**
* lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
@@ -3551,10 +3658,10 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
+ struct fc_bsg_reply *bsg_reply = NULL;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
- uint32_t size;
+ u32 size, opcode;
int rc = 0;
struct lpfc_dmabuf *dmabuf;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -3591,6 +3698,24 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ if (job) {
+ rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf,
+ sli_cfg_mbx,
+ job,
+ bsg_reply);
+ bsg_reply->result = rc;
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
}
/* Complete the job if the job is still active */
@@ -3604,12 +3729,14 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* result for successful */
bsg_reply->result = 0;
+done:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
- phba->mbox_ext_buf_ctx.mboxType, size);
+ phba->mbox_ext_buf_ctx.mboxType,
+ job->reply_payload.payload_len);
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType,
@@ -3819,14 +3946,16 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
{
struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ struct list_head *ext_dmabuf_list;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
- uint32_t ext_buf_cnt, ext_buf_index;
+ u32 ext_buf_cnt, ext_buf_index, hbd_cnt;
struct lpfc_dmabuf *ext_dmabuf = NULL;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
- uint8_t *pmbx;
+ u8 *pmbx, opcode;
int rc, i;
mbox_req =
@@ -3836,8 +3965,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
- &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ &emb0_subsys->sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2945 Handled SLI_CONFIG(mse) rd, "
@@ -3847,6 +3977,57 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
rc = -ERANGE;
goto job_error;
}
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2449 SLI_CONFIG(mse) rd non-embedded "
+ "hbd count = %d\n",
+ hbd_cnt);
+
+ ext_dmabuf_list =
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list;
+
+ /* Allocate hbds */
+ for (i = 0; i < hbd_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ ext_dmabuf_list);
+ }
+
+ /* Fill out the physical memory addresses for the
+ * hbds
+ */
+ i = 0;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ ext_dmabuf_list, list) {
+ emb0_subsys->hbd[i].pa_hi =
+ putPaddrHigh(curr_dmabuf->phys);
+ emb0_subsys->hbd[i].pa_lo =
+ putPaddrLow(curr_dmabuf->phys);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2495 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, "
+ "addrLo:x%x\n", i,
+ emb0_subsys->hbd[i].buf_len,
+ emb0_subsys->hbd[i].pa_hi,
+ emb0_subsys->hbd[i].pa_lo);
+ i++;
+ }
+ break;
+ default:
+ break;
+ }
+
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2941 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
@@ -4223,6 +4404,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
case COMN_OPCODE_GET_PROFILE_CONFIG:
case COMN_OPCODE_SET_FEATURES:
+ case COMN_OPCODE_READ_OBJECT:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
@@ -4665,8 +4847,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
- if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
- job->request_payload.payload_len > BSG_MBOX_SIZE) {
+ if (job->request_payload.payload_len > BSG_MBOX_SIZE) {
rc = -ERANGE;
goto job_done;
}
@@ -4737,6 +4918,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
+ /* non-embedded SLI_CONFIG requests already parsed, check others */
+ if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2729 Cmd x%x (x%x/x%x) request has "
+ "out-of-range reply payload length x%x\n",
+ pmb->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, pmboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, pmboxq),
+ job->reply_payload.payload_len);
+ rc = -ERANGE;
+ goto job_done;
+ }
+
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 3c04ca2d7455..27e7a033b53d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -239,12 +239,27 @@ struct lpfc_sli_config_emb0_subsys {
uint32_t timeout; /* comn_set_feature timeout */
uint32_t request_length; /* comn_set_feature request len */
uint32_t version; /* comn_set_feature version */
- uint32_t csf_feature; /* comn_set_feature feature */
+ uint32_t word68; /* comn_set_feature feature */
+#define lpfc_emb0_subcmnd_csf_feat_SHIFT 0
+#define lpfc_emb0_subcmnd_csf_feat_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_csf_feat_WORD word68
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_MASK 0x00ffffff
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_WORD word68
uint32_t word69; /* comn_set_feature parameter len */
uint32_t word70; /* comn_set_feature parameter val0 */
#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0
#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3
#define lpfc_emb0_subcmnd_csf_p0_WORD word70
+ uint32_t reserved71[25];
+ uint32_t word96; /* rd_obj hbd_count */
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_WORD word96
+#define LPFC_EMB0_MAX_RD_OBJ_HBD_CNT 31
+ struct lpfc_sli_config_hbd hbd[LPFC_EMB0_MAX_RD_OBJ_HBD_CNT];
+ uint32_t word190;
+ uint32_t word191;
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 30891ad17e2a..12c67cdd7c19 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1646,14 +1646,12 @@ out:
/* If the caller wanted a synchronous DA_ID completion, signal the
* wait obj and clear flag to reset the vport.
*/
- if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+ if (test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags)) {
if (ndlp->da_id_waitq)
wake_up(ndlp->da_id_waitq);
}
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3e173b5d00e0..3d47dc7458d1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -85,13 +85,13 @@ enum lpfc_fc4_xpt_flags {
NLP_XPT_HAS_HH = 0x10
};
-enum lpfc_nlp_save_flags {
+enum lpfc_nlp_save_flags { /* mask bits */
/* devloss occurred during recovery */
- NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ NLP_IN_RECOV_POST_DEV_LOSS,
/* wait for outstanding LOGO to cmpl */
- NLP_WAIT_FOR_LOGO = 0x2,
+ NLP_WAIT_FOR_LOGO,
/* wait for outstanding DA_ID to finish */
- NLP_WAIT_FOR_DA_ID = 0x4
+ NLP_WAIT_FOR_DA_ID
};
struct lpfc_nodelist {
@@ -154,7 +154,7 @@ struct lpfc_nodelist {
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
- enum lpfc_nlp_save_flags save_flags;
+ unsigned long save_flags;
enum lpfc_fc4_xpt_flags fc4_xpt_flags;
@@ -208,7 +208,6 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
- NLP_TARGET_REMOVE = 28, /* Target remove in process */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
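With the lpfc_disc.h change above, save_flags holds bit numbers for the atomic bitops API rather than OR-able masks, which is what lets the later hunks drop ndlp->lock around flag updates. A minimal sketch of the pattern (identifiers from this series; the wait/wake pairing shown is illustrative):

set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);	/* was: |= under lock */

/* completion side: test and clear atomically, no spinlock needed */
if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
	wake_up(ndlp->logo_waitq);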
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 37f0a930d469..1d7db49a8fe4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2988,12 +2988,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
wake_up_waiter = 1;
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- }
- spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
@@ -3035,19 +3031,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
- if (skip_recovery)
- goto out;
-
- /* The driver sets this flag for an NPIV instance that doesn't want to
- * log into the remote port.
- */
- if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) {
- clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- goto out_rsrc_free;
- }
-
out:
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
@@ -3091,7 +3074,7 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
-out_rsrc_free:
+
/* Driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -4583,6 +4566,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int link_reset = 0, rc;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 rsn_code_exp = 0;
/* Note: cmd_dmabuf may be 0 for internal driver abort
@@ -4798,11 +4782,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case LSRJT_LOGICAL_BSY:
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
if ((cmd == ELS_CMD_PLOGI) ||
(cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = 48;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (cmd == ELS_CMD_PLOGI &&
+ rsn_code_exp == LSEXP_AUTH_REQ)
+ maxretry = 8;
} else if (cmd == ELS_CMD_FDISC) {
/* FDISC retry policy */
maxretry = 48;
@@ -4831,6 +4826,20 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0820 FLOGI (x%x). "
"BBCredit Not Supported\n",
stat.un.lsRjtError);
+ } else if (cmd == ELS_CMD_PLOGI) {
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (rsn_code_exp == LSEXP_AUTH_REQ) {
+ delay = 1000;
+ retry = 1;
+ maxretry = 8;
+ }
}
break;
@@ -10411,8 +10420,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
-
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -11498,15 +11505,13 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) {
/* Wake up lpfc_vport_delete if waiting...*/
if (ndlp->logo_waitq)
wake_up(ndlp->logo_waitq);
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
/* Safe to release resources now. */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4036a9838bb5..36e66df36a18 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -414,12 +414,7 @@ void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
- unsigned long iflags;
-
- spin_lock_irqsave(&ndlp->lock, iflags);
- if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
- ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -427,9 +422,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
"port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
ndlp->nlp_flag, vport->port_state);
- return;
}
- spin_unlock_irqrestore(&ndlp->lock, iflags);
}
/**
@@ -546,9 +539,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags);
return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index d5c15742f7f2..32298285ea5e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -724,6 +724,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_AUTH_REQ 0x48
#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 26e1313ebb21..2dedb273b091 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1907,22 +1907,22 @@ struct lpfc_mbx_query_fw_config {
uint32_t asic_revision;
uint32_t physical_port;
uint32_t function_mode;
-#define LPFC_FCOE_INI_MODE 0x00000040
-#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_FC_INI_MODE 0x00000040
+#define LPFC_FC_TGT_MODE 0x00000080
#define LPFC_DUA_MODE 0x00000800
- uint32_t ulp0_mode;
-#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
-#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
- uint32_t ulp0_nap_words[12];
- uint32_t ulp1_mode;
- uint32_t ulp1_nap_words[12];
+ uint32_t oper_mode;
+ uint32_t rsvd9[2];
+ uint32_t wqid_base;
+ uint32_t wqid_tot;
+ uint32_t rqid_base;
+ uint32_t rqid_tot;
+ uint32_t rsvd15[19];
uint32_t function_capabilities;
uint32_t cqid_base;
uint32_t cqid_tot;
uint32_t eqid_base;
uint32_t eqid_tot;
- uint32_t ulp0_nap2_words[2];
- uint32_t ulp1_nap2_words[2];
+ uint32_t rsvd39[4];
} rsp;
};
@@ -3778,25 +3778,22 @@ struct lpfc_mbx_get_prof_cfg {
struct lpfc_controller_attribute {
uint32_t version_string[8];
uint32_t manufacturer_name[8];
- uint32_t supported_modes;
+ uint32_t rsvd16;
uint32_t word17;
-#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
-#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
-#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
-#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
#define lpfc_cntl_attr_flash_id_SHIFT 16
#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
#define lpfc_cntl_attr_flash_id_WORD word17
- uint32_t mbx_da_struct_ver;
- uint32_t ep_fw_da_struct_ver;
+#define lpfc_cntl_attr_boot_enable_SHIFT 24
+#define lpfc_cntl_attr_boot_enable_MASK 0x00000001
+#define lpfc_cntl_attr_boot_enable_WORD word17
+ uint32_t rsvd18[2];
uint32_t ncsi_ver_str[3];
- uint32_t dflt_ext_timeout;
+ uint32_t rsvd23;
uint32_t model_number[8];
uint32_t description[16];
uint32_t serial_number[8];
- uint32_t ip_ver_str[8];
+ uint32_t ipl_name[5];
+ uint32_t rsvd61[3];
uint32_t fw_ver_str[8];
uint32_t bios_ver_str[8];
uint32_t redboot_ver_str[8];
@@ -3804,53 +3801,31 @@ struct lpfc_controller_attribute {
uint32_t flash_fw_ver_str[8];
uint32_t functionality;
uint32_t word105;
-#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
-#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
-#define lpfc_cntl_attr_max_cbd_len_WORD word105
#define lpfc_cntl_attr_asic_rev_SHIFT 16
#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
#define lpfc_cntl_attr_asic_rev_WORD word105
-#define lpfc_cntl_attr_gen_guid0_SHIFT 24
-#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid0_WORD word105
- uint32_t gen_guid1_12[3];
+ uint32_t rsvd106[3];
uint32_t word109;
-#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
-#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
-#define lpfc_cntl_attr_gen_guid13_14_WORD word109
-#define lpfc_cntl_attr_gen_guid15_SHIFT 16
-#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid15_WORD word109
#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
#define lpfc_cntl_attr_hba_port_cnt_WORD word109
- uint32_t word110;
-#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
-#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
-#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
-#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
-#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
-#define lpfc_cntl_attr_multi_func_dev_WORD word110
+ uint32_t rsvd110;
uint32_t word111;
-#define lpfc_cntl_attr_cache_valid_SHIFT 0
-#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
-#define lpfc_cntl_attr_cache_valid_WORD word111
#define lpfc_cntl_attr_hba_status_SHIFT 8
#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
#define lpfc_cntl_attr_hba_status_WORD word111
-#define lpfc_cntl_attr_max_domain_SHIFT 16
-#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
-#define lpfc_cntl_attr_max_domain_WORD word111
#define lpfc_cntl_attr_lnk_numb_SHIFT 24
#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
#define lpfc_cntl_attr_lnk_numb_WORD word111
#define lpfc_cntl_attr_lnk_type_SHIFT 30
#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
#define lpfc_cntl_attr_lnk_type_WORD word111
- uint32_t fw_post_status;
- uint32_t hba_mtu[8];
+ uint32_t rsvd112[9];
uint32_t word121;
- uint32_t reserved1[3];
+#define lpfc_cntl_attr_asic_gen_SHIFT 8
+#define lpfc_cntl_attr_asic_gen_MASK 0x000000ff
+#define lpfc_cntl_attr_asic_gen_WORD word121
+ uint32_t rsvd122[3];
uint32_t word125;
#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
@@ -3875,15 +3850,7 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
#define lpfc_cntl_attr_pci_fnc_num_WORD word127
-#define lpfc_cntl_attr_inf_type_SHIFT 24
-#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
-#define lpfc_cntl_attr_inf_type_WORD word127
- uint32_t unique_id[2];
- uint32_t word130;
-#define lpfc_cntl_attr_num_netfil_SHIFT 0
-#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
-#define lpfc_cntl_attr_num_netfil_WORD word130
- uint32_t reserved2[4];
+ uint32_t rsvd128[7];
};
struct lpfc_mbx_get_cntl_attributes {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7f57397d91a9..919bf9b7ac26 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -598,7 +598,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
jiffies + msecs_to_jiffies(1000 * timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
return;
}
@@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
@@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (mbx_action == LPFC_MBX_NO_WAIT)
return;
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
@@ -3847,8 +3847,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* Otherwise, let dev_loss take care of
* the node.
*/
- if (!(ndlp->save_flags &
- NLP_IN_RECOV_POST_DEV_LOSS) &&
+ if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS,
+ &ndlp->save_flags) &&
!(ndlp->fc4_xpt_flags &
(NVME_XPT_REGD | SCSI_XPT_REGD)))
lpfc_disc_state_machine
@@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(30 * 1000)) {
+ if (time >= secs_to_jiffies(30)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(15 * 1000) &&
+ if (time >= secs_to_jiffies(15) &&
phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0465 Link down longer than 15 "
@@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
if (!atomic_read(&vport->fc_map_cnt) &&
- time < msecs_to_jiffies(2 * 1000))
+ time < secs_to_jiffies(2))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t)
lpfc_worker_wake_up(phba);
/* restart the timer for the next iteration */
- mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
- LPFC_VMID_TIMER));
+ mod_timer(&phba->inactive_vmid_poll,
+ jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
}
/**
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
@@ -11109,14 +11108,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.fw_func_mode =
mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
- phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
- phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
phba->sli4_hba.physical_port =
mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
- "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
- phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+ "3251 QUERY_FW_CFG: func_mode:x%x\n",
+ phba->sli4_hba.fw_func_mode);
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -12876,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
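The timer conversions in this file are mechanical but easy to misread, so a brief before/after sketch:

/* before: open-coded seconds-to-milliseconds multiply */
mod_timer(&phba->hb_tmofunc,
	  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));

/* after: identical expiry, units stated once */
mod_timer(&phba->hb_tmofunc,
	  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));

Likewise, hrtimer_setup() merely fuses hrtimer_init() with the manual .function assignment; the callback, clock, and mode arguments are unchanged.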
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e98f1c2b2220..fb6dbcb86c09 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2524,8 +2524,10 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
/* addr mode is bit wise inverted value of fcf addr_mode */
- bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
- (~phba->fcf.addr_mode) & 0x3);
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+ (~phba->fcf.addr_mode) & 0x3);
+ }
} else {
/* This is ONLY for NVMET MRQ == 1 */
if (phba->cfg_nvmet_mrq != 1)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4d88cfe71cae..a596b80d03d4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -64,9 +64,6 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
- /* First, we MUST have a RPI registered */
- if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag))
- return 0;
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
@@ -735,6 +732,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ADISC *ap;
uint32_t *lp;
uint32_t cmd;
+ int rc;
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
@@ -759,21 +757,29 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* resume the RPI before the ACC goes out.
*/
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
- GFP_KERNEL);
- if (elsiocb) {
- /* Save info from cmd IOCB used in rsp */
- memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
- sizeof(struct lpfc_iocbq));
-
- /* Save the ELS cmd */
- elsiocb->drvrTimeout = cmd;
-
- if (lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi,
- elsiocb))
- kfree(elsiocb);
- goto out;
+ /* Don't resume an unregistered RPI - unnecessary
+ * mailbox. Just send the ACC when the RPI is not
+ * registered.
+ */
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL);
+ if (elsiocb) {
+ /* Save info from cmd IOCB used in
+ * rsp
+ */
+ memcpy(elsiocb, cmdiocb,
+ sizeof(*elsiocb));
+
+ elsiocb->drvrTimeout = cmd;
+
+ rc = lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb);
+ if (rc)
+ kfree(elsiocb);
+
+ goto out;
+ }
}
}
@@ -815,7 +821,6 @@ out:
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
@@ -906,7 +911,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_printf_vlog(vport, KERN_INFO,
@@ -1332,7 +1337,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
@@ -1936,7 +1941,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
@@ -2255,11 +2260,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_DISCOVERY | LOG_NODE,
+ "6228 Sending LOGO, determined nlp_type "
+ "0x%x nlp_flag x%lx refcnt %u\n",
+ ndlp->nlp_type, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
lpfc_issue_els_logo(vport, ndlp, 0);
-
- ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -2743,7 +2750,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 43dc1da4a156..b1adb9f59097 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
* wait. Print a message if a 10 second wait expires and renew the
* wait. This is unexpected.
*/
- wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO);
while (true) {
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 905026a4782c..055ed632c14d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5136,6 +5136,12 @@ lpfc_info(struct Scsi_Host *host)
goto buffer_done;
}
+ /* Support for BSG ioctls */
+ scnprintf(tmp, sizeof(tmp), " BSG");
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+
/* PCI resettable */
if (!lpfc_check_pci_resettable(phba)) {
scnprintf(tmp, sizeof(tmp), " PCI resettable");
@@ -6120,31 +6126,28 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
/* Issue LOGO, if no LOGO is outstanding */
spin_lock_irqsave(&pnode->lock, flags);
- if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags) &&
!pnode->logo_waitq) {
pnode->logo_waitq = &waitq;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
- pnode->save_flags |= NLP_WAIT_FOR_LOGO;
spin_unlock_irqrestore(&pnode->lock, flags);
+ set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
+ set_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags);
lpfc_unreg_rpi(vport, pnode);
wait_event_timeout(waitq,
- (!(pnode->save_flags &
- NLP_WAIT_FOR_LOGO)),
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags),
msecs_to_jiffies(dev_loss_tmo *
1000));
- if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags))
lpfc_printf_vlog(vport, KERN_ERR, logit,
"0725 SCSI layer TGTRST "
"failed & LOGO TMO (%d, %llu) "
"return x%x\n",
tgt_id, lun_id, status);
- spin_lock_irqsave(&pnode->lock, flags);
- pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
- } else {
- spin_lock_irqsave(&pnode->lock, flags);
- }
+ spin_lock_irqsave(&pnode->lock, flags);
pnode->logo_waitq = NULL;
spin_unlock_irqrestore(&pnode->lock, flags);
status = SUCCESS;
@@ -6226,7 +6229,7 @@ error:
}
/**
- * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * lpfc_sdev_init - scsi_host_template sdev_init entry point
* @sdev: Pointer to scsi_device.
*
* This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
@@ -6239,7 +6242,7 @@ error:
* 0 - Success
**/
static int
-lpfc_slave_alloc(struct scsi_device *sdev)
+lpfc_sdev_init(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6342,8 +6345,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
/**
- * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * lpfc_sdev_configure - scsi_host_template sdev_configure entry point
* @sdev: Pointer to scsi_device.
+ * @lim: Request queue limits.
*
 * This routine configures the following items
* - Tag command queuing support for @sdev if supported.
@@ -6353,7 +6357,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
* 0 - Success
**/
static int
-lpfc_slave_configure(struct scsi_device *sdev)
+lpfc_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6371,13 +6375,13 @@ lpfc_slave_configure(struct scsi_device *sdev)
}
/**
- * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * lpfc_sdev_destroy - sdev_destroy entry point of SHT data structure
* @sdev: Pointer to scsi_device.
*
 * This routine sets @sdev hostdata field to null.
**/
static void
-lpfc_slave_destroy(struct scsi_device *sdev)
+lpfc_sdev_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6422,7 +6426,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
{
struct lpfc_device_data *lun_info;
- int memory_flags;
+ gfp_t memory_flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!(phba->cfg_fof))
@@ -6737,7 +6741,13 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_slave(struct scsi_device *sdev)
+lpfc_init_no_sdev(struct scsi_device *sdev)
+{
+ return -ENODEV;
+}
+
+static int
+lpfc_config_no_sdev(struct scsi_device *sdev, struct queue_limits *lim)
{
return -ENODEV;
}
@@ -6748,8 +6758,8 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .slave_alloc = lpfc_no_slave,
- .slave_configure = lpfc_no_slave,
+ .sdev_init = lpfc_init_no_sdev,
+ .sdev_configure = lpfc_config_no_sdev,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = 1,
@@ -6772,9 +6782,9 @@ struct scsi_host_template lpfc_template = {
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
@@ -6799,9 +6809,9 @@ struct scsi_host_template lpfc_vport_template = {
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = NULL,
.eh_host_reset_handler = NULL,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 874644b31a3e..3fd9723cd271 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
lpfc_sli_mbox_sys_flush(phba);
return;
}
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
/* Disable softirqs, including timers from obtaining phba->hbalock */
local_bh_disable();
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c1e9ec0243ba..9be3da91c923 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -865,8 +865,6 @@ struct lpfc_sli4_hba {
struct lpfc_name wwpn;
uint32_t fw_func_mode; /* FW function protocol mode */
- uint32_t ulp0_mode; /* ULP0 protocol mode */
- uint32_t ulp1_mode; /* ULP1 protocol mode */
/* Optimized Access Storage specific queues/structures */
uint64_t oas_next_lun;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 61fe1220f8ad..c35f7225058e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.6"
+#define LPFC_DRIVER_VERSION "14.4.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index cc3e4736f2fe..14dbfe954e42 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
mod_timer(&vport->phba->inactive_vmid_poll,
jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ secs_to_jiffies(LPFC_VMID_TIMER));
vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9e0e35763377..3d70cc517573 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -492,21 +492,22 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
spin_lock_irq(&ndlp->lock);
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags) &&
!ndlp->logo_waitq) {
ndlp->logo_waitq = &waitq;
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
spin_unlock_irq(&ndlp->lock);
rc = lpfc_issue_els_npiv_logo(vport, ndlp);
if (!rc) {
wait_event_timeout(waitq,
- (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
goto logo_cmpl;
/* LOGO wait failed. Correct status. */
rc = -EINTR;
@@ -516,9 +517,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Error - clean up node flags. */
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
logo_cmpl:
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
@@ -696,19 +695,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = &waitq;
- ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ set_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
if (!rc) {
wait_event_timeout(waitq,
- !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+ !test_bit(NLP_WAIT_FOR_DA_ID,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
"1829 DA_ID issue status %d. "
- "SFlag x%x NState x%x, NFlag x%lx "
+ "SFlag x%lx NState x%x, NFlag x%lx "
"Rpi x%x\n",
rc, ndlp->save_flags, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
@@ -718,8 +718,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
*/
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = NULL;
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
}
issue_logo:
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 38976f94453e..adab151663dd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4551,7 +4551,7 @@ megaraid_shutdown(struct pci_dev *pdev)
__megaraid_shutdown(adapter);
}
-static struct pci_device_id megaraid_pci_tbl[] = {
+static const struct pci_device_id megaraid_pci_tbl[] = {
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index bc867da650b6..60cc3372991f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -199,7 +199,7 @@ MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
* PCI table for all supported controllers.
*/
-static struct pci_device_id pci_id_table_g[] = {
+static const struct pci_device_id pci_id_table_g[] = {
{
PCI_VENDOR_ID_DELL,
PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
@@ -621,7 +621,7 @@ megaraid_io_attach(adapter_t *adapter)
host = scsi_host_alloc(&megaraid_template_g, 8);
if (!host) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid mbox: scsi_register failed\n"));
+ "megaraid mbox: scsi_host_alloc failed\n"));
return -1;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 50f1dcb6d584..d85f990aec88 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -147,7 +146,7 @@ megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
*/
-static struct pci_device_id megasas_pci_table[] = {
+static const struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
/* xscale IOP */
@@ -2068,8 +2067,8 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
}
-static int megasas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int megasas_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
u16 pd_index = 0;
struct megasas_instance *instance;
@@ -2109,7 +2108,7 @@ static int megasas_device_configure(struct scsi_device *sdev,
return 0;
}
-static int megasas_slave_alloc(struct scsi_device *sdev)
+static int megasas_sdev_init(struct scsi_device *sdev)
{
u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
@@ -2154,7 +2153,7 @@ scan_target:
return 0;
}
-static void megasas_slave_destroy(struct scsi_device *sdev)
+static void megasas_sdev_destroy(struct scsi_device *sdev)
{
u16 ld_tgt_id;
struct megasas_instance *instance;
@@ -3193,7 +3192,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
map->nr_queues = instance->msix_vectors - offset;
map->queue_offset = 0;
- blk_mq_pci_map_queues(map, instance->pdev, offset);
+ blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
qoff += map->nr_queues;
offset += map->nr_queues;
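
Note: blk_mq_map_hw_queues() is the bus-agnostic successor to blk_mq_pci_map_queues(); it takes a generic struct device rather than a struct pci_dev and derives IRQ affinity through the device's bus, so PCI and virtio callers share one entry point, and the blk-mq-pci.h include can go. A hedged sketch of a map_queues callback (demo_hba is an assumption):

    #include <linux/blk-mq.h>
    #include <linux/pci.h>
    #include <scsi/scsi_host.h>

    struct demo_hba {
            struct pci_dev *pdev;
    };

    static void demo_map_queues(struct Scsi_Host *shost)
    {
            struct demo_hba *hba = shost_priv(shost);
            struct blk_mq_queue_map *qmap =
                    &shost->tag_set.map[HCTX_TYPE_DEFAULT];

            /* offset 0: no reserved (pre-queue) vectors in this sketch */
            blk_mq_map_hw_queues(qmap, &hba->pdev->dev, 0);
    }
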
@@ -3510,9 +3509,9 @@ static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
- .device_configure = megasas_device_configure,
- .slave_alloc = megasas_slave_alloc,
- .slave_destroy = megasas_slave_destroy,
+ .sdev_configure = megasas_sdev_configure,
+ .sdev_init = megasas_sdev_init,
+ .sdev_destroy = megasas_sdev_destroy,
.queuecommand = megasas_queue_command,
.eh_target_reset_handler = megasas_reset_target,
.eh_abort_handler = megasas_task_abort,
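
Note: the slave_* to sdev_* template renames repeated through the rest of this series are mechanical, but the configure hook also standardizes on the two-argument form: sdev_configure() receives a struct queue_limits * so drivers can adjust limits before the midlayer commits them. A sketch of the post-rename shape (demo_* names are illustrative):

    #include <linux/slab.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static int demo_sdev_init(struct scsi_device *sdev)
    {
            return 0; /* allocate per-LUN state here */
    }

    static int demo_sdev_configure(struct scsi_device *sdev,
                                   struct queue_limits *lim)
    {
            lim->max_hw_sectors = 1024; /* example limit adjustment */
            return 0;
    }

    static void demo_sdev_destroy(struct scsi_device *sdev)
    {
            kfree(sdev->hostdata);
    }

    static const struct scsi_host_template demo_template = {
            .sdev_init      = demo_sdev_init,
            .sdev_configure = demo_sdev_configure,
            .sdev_destroy   = demo_sdev_destroy,
    };
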
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0c3e1ac076b5..0d72b5f1b69d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 10b8e4dc64f8..7589f48aebc8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2951,6 +2951,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
.max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS,
.max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS,
};
+ struct request_queue *q;
device_initialize(bsg_dev);
@@ -2966,14 +2967,17 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
return;
}
- mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
mpi3mr_bsg_request, NULL, 0);
- if (IS_ERR(mrioc->bsg_queue)) {
+ if (IS_ERR(q)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
dev_name(bsg_dev));
device_del(bsg_dev);
put_device(bsg_dev);
+ return;
}
+
+ mrioc->bsg_queue = q;
}
/**
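
Note: the mpi3mr_bsg_init() change above is an error-handling fix as much as a cleanup: bsg_setup_queue() returns an ERR_PTR on failure, and the old code stored that into mrioc->bsg_queue and fell through, leaving a poisoned pointer behind. The general pattern, sketched with hypothetical names (demo_ctx, demo_setup):

    #include <linux/err.h>

    struct request_queue;
    struct demo_ctx {
            struct request_queue *queue;
    };
    struct request_queue *demo_setup(struct demo_ctx *ctx);

    static int demo_attach(struct demo_ctx *ctx)
    {
            struct request_queue *q = demo_setup(ctx);

            if (IS_ERR(q))
                    return PTR_ERR(q); /* never publish an ERR_PTR */

            ctx->queue = q; /* assign only on success */
            return 0;
    }
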
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bef88130d0c..b9a51d3f2024 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
@@ -4465,14 +4465,14 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
}
/**
- * mpi3mr_slave_destroy - Slave destroy callback handler
+ * mpi3mr_sdev_destroy - Slave destroy callback handler
* @sdev: SCSI device reference
*
* Cleanup and free per device(lun) private data.
*
* Return: Nothing.
*/
-static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -4552,7 +4552,7 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
}
/**
- * mpi3mr_device_configure - Slave configure callback handler
+ * mpi3mr_sdev_configure - Slave configure callback handler
* @sdev: SCSI device reference
* @lim: queue limits
*
@@ -4561,8 +4561,8 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
*
* Return: 0 always.
*/
-static int mpi3mr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int mpi3mr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -4599,14 +4599,14 @@ static int mpi3mr_device_configure(struct scsi_device *sdev,
}
/**
- * mpi3mr_slave_alloc -Slave alloc callback handler
+ * mpi3mr_sdev_init - Slave alloc callback handler
* @sdev: SCSI device reference
*
* Allocate per device(lun) private data and initialize it.
*
* Return: 0 on success -ENOMEM on memory allocation failure.
*/
-static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+static int mpi3mr_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -5062,10 +5062,10 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.proc_name = MPI3MR_DRIVER_NAME,
.queuecommand = mpi3mr_qcmd,
.target_alloc = mpi3mr_target_alloc,
- .slave_alloc = mpi3mr_slave_alloc,
- .device_configure = mpi3mr_device_configure,
+ .sdev_init = mpi3mr_sdev_init,
+ .sdev_configure = mpi3mr_sdev_configure,
.target_destroy = mpi3mr_target_destroy,
- .slave_destroy = mpi3mr_slave_destroy,
+ .sdev_destroy = mpi3mr_sdev_destroy,
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
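
Note: these expander identification fields carry fixed-width, space-padded ASCII with no NUL terminator; __nonstring tells the compiler not to treat them as C strings, which matters for newer GCC string warnings and fortification checks. The same annotation recurs in mpt3sas and qla2xxx below. Sketch (demo_ids is illustrative):

    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_ids {
            u8 vendor_id[8] __nonstring; /* space padded, no NUL */
    };

    static void demo_fill(struct demo_ids *ids)
    {
            /* memcpy, not strscpy: the field is raw bytes, not a string */
            memcpy(ids->vendor_id, "ACME    ", sizeof(ids->vendor_id));
    }
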
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..d123d3b740e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -606,7 +606,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 16ac2267c71e..dc43cfa83088 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5627,10 +5627,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
- pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ pr_err("%s: overriding NVDATA EEDPTagMode setting from 0 to 1\n",
ioc->name);
- ioc->manu_pg11.EEDPTagMode &= ~0x3;
- ioc->manu_pg11.EEDPTagMode |= 0x1;
+ ioc->manu_pg11.EEDPTagMode = 0x1;
mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
&ioc->manu_pg11);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f2a55aa5fe65..a456e5ec74d8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,7 +53,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
#include <linux/unaligned.h>
#include "mpt3sas_base.h"
@@ -2026,14 +2025,14 @@ scsih_target_destroy(struct scsi_target *starget)
}
/**
- * scsih_slave_alloc - device add routine
+ * scsih_sdev_init - device add routine
* @sdev: scsi device struct
*
* Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_alloc(struct scsi_device *sdev)
+scsih_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
@@ -2108,11 +2107,11 @@ scsih_slave_alloc(struct scsi_device *sdev)
}
/**
- * scsih_slave_destroy - device destroy routine
+ * scsih_sdev_destroy - device destroy routine
* @sdev: scsi device struct
*/
static void
-scsih_slave_destroy(struct scsi_device *sdev)
+scsih_sdev_destroy(struct scsi_device *sdev)
{
struct MPT3SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
@@ -2497,7 +2496,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}
/**
- * scsih_device_configure - device configure routine.
+ * scsih_sdev_configure - device configure routine.
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -2505,7 +2504,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* the device is ignored.
*/
static int
-scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
+scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
@@ -11905,10 +11904,10 @@ static const struct scsi_host_template mpt2sas_driver_template = {
.proc_name = MPT2SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
@@ -11943,10 +11942,10 @@ static const struct scsi_host_template mpt3sas_driver_template = {
.proc_name = MPT3SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 020037cbf0d9..2c72da6b8cf0 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -609,7 +609,7 @@ static void mvs_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id mvs_pci_table[] = {
+static const struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index d9d366ec17dc..96549e7f5705 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2000,7 +2000,8 @@ static struct mvumi_instance_template mvumi_instance_9580 = {
.reset_host = mvumi_reset_host_9580,
};
-static int mvumi_slave_configure(struct scsi_device *sdev)
+static int mvumi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct mvumi_hba *mhba;
unsigned char bitcount = sizeof(unsigned char) * 8;
@@ -2172,7 +2173,7 @@ static const struct scsi_host_template mvumi_template = {
.module = THIS_MODULE,
.name = "Marvell Storage Controller",
- .slave_configure = mvumi_slave_configure,
+ .sdev_configure = mvumi_sdev_configure,
.queuecommand = mvumi_queue_command,
.eh_timed_out = mvumi_timed_out,
.eh_host_reset_handler = mvumi_host_reset,
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index a7e64b867c8e..dc4bd422b601 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1619,7 +1619,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost,
return myrb_pthru_queuecommand(shost, scmd);
}
-static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+static int myrb_ldev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_ldev_info *ldev_info;
@@ -1627,8 +1627,6 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
enum raid_level level;
ldev_info = cb->ldev_info_buf + ldev_num;
- if (!ldev_info)
- return -ENXIO;
sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!sdev->hostdata)
@@ -1665,7 +1663,7 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+static int myrb_pdev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_pdev_state *pdev_info;
@@ -1701,7 +1699,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_slave_alloc(struct scsi_device *sdev)
+static int myrb_sdev_init(struct scsi_device *sdev)
{
if (sdev->channel > myrb_logical_channel(sdev->host))
return -ENXIO;
@@ -1710,12 +1708,13 @@ static int myrb_slave_alloc(struct scsi_device *sdev)
return -ENXIO;
if (sdev->channel == myrb_logical_channel(sdev->host))
- return myrb_ldev_slave_alloc(sdev);
+ return myrb_ldev_sdev_init(sdev);
- return myrb_pdev_slave_alloc(sdev);
+ return myrb_pdev_sdev_init(sdev);
}
-static int myrb_slave_configure(struct scsi_device *sdev)
+static int myrb_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrb_ldev_info *ldev_info;
@@ -1741,7 +1740,7 @@ static int myrb_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrb_slave_destroy(struct scsi_device *sdev)
+static void myrb_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -2208,9 +2207,9 @@ static const struct scsi_host_template myrb_template = {
.proc_name = "myrb",
.queuecommand = myrb_queuecommand,
.eh_host_reset_handler = myrb_host_reset,
- .slave_alloc = myrb_slave_alloc,
- .slave_configure = myrb_slave_configure,
- .slave_destroy = myrb_slave_destroy,
+ .sdev_init = myrb_sdev_init,
+ .sdev_configure = myrb_sdev_configure,
+ .sdev_destroy = myrb_sdev_destroy,
.bios_param = myrb_biosparam,
.cmd_size = sizeof(struct myrb_cmdblk),
.shost_groups = myrb_shost_groups,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 1469d0c54e45..95af3bb03834 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1786,7 +1786,7 @@ static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
return ldev_num;
}
-static int myrs_slave_alloc(struct scsi_device *sdev)
+static int myrs_sdev_init(struct scsi_device *sdev)
{
struct myrs_hba *cs = shost_priv(sdev->host);
unsigned char status;
@@ -1882,7 +1882,8 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrs_slave_configure(struct scsi_device *sdev)
+static int myrs_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info;
@@ -1910,7 +1911,7 @@ static int myrs_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrs_slave_destroy(struct scsi_device *sdev)
+static void myrs_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -1921,9 +1922,9 @@ static const struct scsi_host_template myrs_template = {
.proc_name = "myrs",
.queuecommand = myrs_queuecommand,
.eh_host_reset_handler = myrs_host_reset,
- .slave_alloc = myrs_slave_alloc,
- .slave_configure = myrs_slave_configure,
- .slave_destroy = myrs_slave_destroy,
+ .sdev_init = myrs_sdev_init,
+ .sdev_configure = myrs_sdev_configure,
+ .sdev_destroy = myrs_sdev_destroy,
.cmd_size = sizeof(struct myrs_cmdblk),
.shost_groups = myrs_shost_groups,
.sdev_groups = myrs_sdev_groups,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 35869b4f9329..14ac81ec0aa0 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7786,7 +7786,7 @@ static void __init ncr_getclock (struct ncb *np, int mult)
/*===================== LINUX ENTRY POINTS SECTION ==========================*/
-static int ncr53c8xx_slave_alloc(struct scsi_device *device)
+static int ncr53c8xx_sdev_init(struct scsi_device *device)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -7796,7 +7796,8 @@ static int ncr53c8xx_slave_alloc(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_slave_configure(struct scsi_device *device)
+static int ncr53c8xx_sdev_configure(struct scsi_device *device,
+ struct queue_limits *lim)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -8093,8 +8094,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
tpnt->shost_groups = ncr53c8xx_host_groups;
tpnt->queuecommand = ncr53c8xx_queue_command;
- tpnt->slave_configure = ncr53c8xx_slave_configure;
- tpnt->slave_alloc = ncr53c8xx_slave_alloc;
+ tpnt->sdev_configure = ncr53c8xx_sdev_configure;
+ tpnt->sdev_init = ncr53c8xx_sdev_init;
tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset;
tpnt->can_queue = SCSI_NCR_CAN_QUEUE;
tpnt->this_id = 7;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b7987019686e..abc4ce9eae74 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -66,7 +66,7 @@ static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
-static struct pci_device_id nsp32_pci_table[] = {
+static const struct pci_device_id nsp32_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 7871e29a820a..4e19d61dffbb 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -90,7 +90,7 @@ enum port_type {
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
-#define PM8001_RESERVE_SLOT 8
+#define PM8001_RESERVE_SLOT 128
#define PM8001_SECTOR_SIZE 512
#define PM8001_PAGE_SIZE_4K 4096
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dec1e2d380f1..42a4eeac24c9 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3472,12 +3472,13 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status, tag, scp);
switch (status) {
case IO_SUCCESS:
- pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
+ pm8001_dbg(pm8001_ha, FAIL, "ABORT IO_SUCCESS for tag %#x\n",
+ tag);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
- pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
+ pm8001_dbg(pm8001_ha, FAIL, "IO_NOT_VALID for tag %#x\n", tag);
ts->resp = TMF_RESP_FUNC_FAILED;
break;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c81e53e93f..599410bcdfea 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (pm8001_ha->number_of_intr > 1) {
- blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
return;
}
@@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
return -EIO;
}
time_remaining = wait_for_completion_timeout(&completion,
- msecs_to_jiffies(60*1000)); // 1 min
+ secs_to_jiffies(60)); // 1 min
if (!time_remaining) {
kfree(payload.func_specific);
pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
@@ -1435,7 +1435,7 @@ err_out_disable:
/* update of pci device, vendor id and driver data with
* unique value for each of the controller
*/
-static struct pci_device_id pm8001_pci_table[] = {
+static const struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
{ PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d80cffd25a6e..183ce00aa671 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -101,6 +101,63 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
return 0;
}
+static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
+ int *ata_tag, bool *task_aborted)
+{
+ unsigned long flags;
+ struct ata_queued_cmd *qc = NULL;
+
+ *ata_op = 0;
+ *ata_tag = -1;
+ *task_aborted = false;
+
+ if (!task)
+ return;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
+ *task_aborted = true;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (task->task_proto == SAS_PROTOCOL_STP) {
+ // sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
+ // This only works for scsi + libsas + libata users.
+ qc = task->uldd_task;
+ if (qc) {
+ *ata_op = qc->tf.command;
+ *ata_tag = qc->tag;
+ }
+ }
+}
+
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *target_pm8001_dev)
+{
+ int i = 0, ata_op = 0, ata_tag = -1;
+ struct pm8001_ccb_info *ccb = NULL;
+ struct sas_task *task = NULL;
+ struct pm8001_device *pm8001_dev = NULL;
+ bool task_aborted;
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+ pm8001_dev = ccb->device;
+ if (target_pm8001_dev && pm8001_dev &&
+ target_pm8001_dev != pm8001_dev)
+ continue;
+ task = ccb->task;
+ pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
+ ccb->ccb_tag,
+ (pm8001_dev ? pm8001_dev->device_id : 0),
+ task, task_aborted,
+ ata_op, ata_tag);
+ }
+}
+
/**
* pm8001_mem_alloc - allocate memory for pm8001.
* @pdev: pci device.
@@ -374,23 +431,6 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
- /* Find the local port id that's attached to this device */
-static int sas_find_local_port_id(struct domain_device *dev)
-{
- struct domain_device *pdev = dev->parent;
-
- /* Directly attached device */
- if (!pdev)
- return dev->port->id;
- while (pdev) {
- struct domain_device *pdev_p = pdev->parent;
- if (!pdev_p)
- return pdev->port->id;
- pdev = pdev->parent;
- }
- return 0;
-}
-
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
@@ -463,10 +503,10 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
if (!internal_abort &&
- (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
if (sas_protocol_ata(task_proto)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 42c7b3f7afbf..315f6a7523f0 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -56,7 +56,6 @@
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -787,6 +786,8 @@ static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
}
void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index e65951dd2024..5b373c53c036 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2246,7 +2246,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 param;
u32 status;
u32 tag;
- int i, j;
+ int i, j, ata_tag = -1;
u8 sata_addr_low[4];
u32 temp_sata_addr_low, temp_sata_addr_hi;
u8 sata_addr_hi[4];
@@ -2256,6 +2256,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
unsigned long flags;
+ struct ata_queued_cmd *qc;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
@@ -2267,8 +2268,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dev = ccb->device;
if (t) {
- if (t->dev && (t->dev->lldd_dev))
+ if (t->dev && (t->dev->lldd_dev)) {
pm8001_dev = t->dev->lldd_dev;
+ qc = t->uldd_task;
+ ata_tag = qc ? qc->tag : -1;
+ }
} else {
pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n",
ccb->ccb_tag);
@@ -2276,16 +2280,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
return;
}
-
if (pm8001_dev && unlikely(!t->lldd_task || !t->dev))
return;
ts = &t->task_status;
-
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
- "IO failed device_id %u status 0x%x tag %d\n",
- pm8001_dev->device_id, status, tag);
+ "IO failed status %#x pm80xx tag %#x ata tag %d\n",
+ status, tag, ata_tag);
}
/* Print sas address of IO failed device */
@@ -2667,13 +2669,19 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+ /* tag value is invalid with this event */
+ pm8001_dbg(pm8001_ha, FAIL, "NCQ ERROR for device %#x tag %#x\n",
+ dev_id, tag);
+
/* find device using device id */
pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
/* send read log extension by aborting the link - libata does what we want */
- if (pm8001_dev)
+ if (pm8001_dev) {
+ pm80xx_show_pending_commands(pm8001_ha, pm8001_dev);
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_XFER_ERROR_ABORTED_NCQ_MODE);
+ }
return;
}
@@ -3336,10 +3344,11 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_id =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, INIT,
- "phy start resp status:0x%x, phyid:0x%x\n",
- status, phy_id);
+ "phy start resp status:0x%x, phyid:0x%x, tag 0x%x\n",
+ status, phy_id, tag);
if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
@@ -3348,6 +3357,8 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
complete(phy->enable_completion);
phy->enable_completion = NULL;
}
+
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3628,8 +3639,10 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phyid =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
- pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
- phyid, status);
+ u32 tag = le32_to_cpu(pPayload->tag);
+
+ pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x tag 0x%x\n", phyid,
+ status, tag);
if (status == PHY_STOP_SUCCESS ||
status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
@@ -3637,6 +3650,7 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->sas_phy.linkrate = SAS_PHY_DISABLED;
}
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3655,10 +3669,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, MSG,
- "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
- status, err_qlfr_pgcd);
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x tag 0x%x\n",
+ status, err_qlfr_pgcd, tag);
pm8001_tag_free(pm8001_ha, tag);
-
return 0;
}
@@ -4632,9 +4645,16 @@ static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTART;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
@@ -4670,9 +4690,16 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTOP;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
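
Note: replacing the hardcoded tag of 0x01 matters because that value can collide with a live command's tag; each PHY start/stop request now owns a freshly allocated tag, released in the matching mpi_phy_start_resp()/mpi_phy_stop_resp() handler shown earlier in this file. The lifecycle in miniature (payload posting elided):

    static int demo_phy_cmd(struct pm8001_hba_info *pm8001_ha)
    {
            u32 tag;
            int ret = pm8001_tag_alloc(pm8001_ha, &tag);

            if (ret)
                    return ret;
            /* build the payload with cpu_to_le32(tag) and post it */
            return 0;
    }

    /* and in the completion handler for this command: */
    /*      pm8001_tag_free(pm8001_ha, le32_to_cpu(pPayload->tag)); */
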
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4c5881917d76..3ba53916fd86 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -113,7 +113,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
/*
* PCI device ids supported by pmcraid driver
*/
-static struct pci_device_id pmcraid_pci_table[] = {
+static const struct pci_device_id pmcraid_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
@@ -125,7 +125,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
/**
- * pmcraid_slave_alloc - Prepare for commands to a device
+ * pmcraid_sdev_init - Prepare for commands to a device
* @scsi_dev: scsi device struct
*
* This function is called by mid-layer prior to sending any command to the new
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
* Return value:
* 0 on success / -ENXIO if device does not exist
*/
-static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+static int pmcraid_sdev_init(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *temp, *res = NULL;
struct pmcraid_instance *pinstance;
@@ -197,7 +197,7 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
}
/**
- * pmcraid_device_configure - Configures a SCSI device
+ * pmcraid_sdev_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
* @lim: queue limits
*
@@ -210,8 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
* Return value:
* 0 on success
*/
-static int pmcraid_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+static int pmcraid_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
@@ -248,17 +248,17 @@ static int pmcraid_device_configure(struct scsi_device *scsi_dev,
}
/**
- * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ * pmcraid_sdev_destroy - Unconfigure a SCSI device before removing it
*
* @scsi_dev: scsi device struct
*
* This is called by mid-layer before removing a device. Pointer assignments
- * done in pmcraid_slave_alloc will be reset to NULL here.
+ * done in pmcraid_sdev_init will be reset to NULL here.
*
* Return value
* none
*/
-static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+static void pmcraid_sdev_destroy(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res;
@@ -3668,9 +3668,9 @@ static const struct scsi_host_template pmcraid_host_template = {
.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
- .slave_alloc = pmcraid_slave_alloc,
- .device_configure = pmcraid_device_configure,
- .slave_destroy = pmcraid_slave_destroy,
+ .sdev_init = pmcraid_sdev_init,
+ .sdev_configure = pmcraid_sdev_configure,
+ .sdev_destroy = pmcraid_sdev_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 90495a832f34..92fe5c5c5bb0 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -61,7 +61,8 @@ enum lv1_atapi_in_out {
};
-static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
+static int ps3rom_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct ps3rom_private *priv = shost_priv(scsi_dev->host);
struct ps3_storage_device *dev = priv->dev;
@@ -325,7 +326,7 @@ done:
static const struct scsi_host_template ps3rom_host_template = {
.name = DEVICE_NAME,
- .slave_configure = ps3rom_slave_configure,
+ .sdev_configure = ps3rom_sdev_configure,
.queuecommand = ps3rom_queuecommand,
.can_queue = 1,
.this_id = 7,
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 8d8c760eee43..769da92ee20d 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -104,7 +104,7 @@ void qedf_capture_grc_dump(struct qedf_ctx *qedf)
static ssize_t
qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
ssize_t ret = 0;
@@ -124,7 +124,7 @@ qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
static ssize_t
qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct fc_lport *lport = NULL;
@@ -160,14 +160,14 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_grcdump_attr = {
+static const struct bin_attribute sysfs_grcdump_attr = {
.attr = {
.name = "grcdump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qedf_sysfs_read_grcdump,
- .write = qedf_sysfs_write_grcdump,
+ .read_new = qedf_sysfs_read_grcdump,
+ .write_new = qedf_sysfs_write_grcdump,
};
static struct sysfs_bin_attrs bin_file_entries[] = {
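
Note: the read_new/write_new members on struct bin_attribute are the migration path to const-qualified callbacks: they take a const struct bin_attribute * while the legacy read/write members are phased out, which is why each handler here gains a const qualifier and the initializer switches members. Minimal sketch:

    #include <linux/sysfs.h>

    static ssize_t demo_read(struct file *filp, struct kobject *kobj,
                             const struct bin_attribute *ba, char *buf,
                             loff_t off, size_t count)
    {
            return 0; /* copy device state into buf here */
    }

    static const struct bin_attribute demo_attr = {
            .attr = { .name = "demo", .mode = 0400 },
            .size = 0,
            .read_new = demo_read,
    };
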
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 5ec2b817c694..eeb6c841dacb 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -100,7 +100,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d2f47dc31dbf..436bd29d5eba 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -982,7 +982,8 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
return SUCCESS;
}
-static int qedf_slave_configure(struct scsi_device *sdev)
+static int qedf_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (qedf_queue_depth) {
scsi_change_queue_depth(sdev, qedf_queue_depth);
@@ -1003,7 +1004,7 @@ static const struct scsi_host_template qedf_host_template = {
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
.eh_host_reset_handler = qedf_eh_host_reset,
- .slave_configure = qedf_slave_configure,
+ .sdev_configure = qedf_sdev_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index fdda12ef13b0..5a1ec4542183 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -91,7 +91,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
int qedi_create_sysfs_attr(struct Scsi_Host *shost,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 628d59dda20c..c9539897048a 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1961,13 +1961,11 @@ static int qedi_cpu_online(unsigned int cpu)
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct task_struct *thread;
- thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "qedi_thread/%d", cpu);
+ thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p,
+ cpu, "qedi_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
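
Note: kthread_create_on_cpu() collapses the create-on-node plus explicit kthread_bind() sequence into one call and formats the CPU number into the thread name itself, which is why the separate cpu format argument disappears above. A sketch of the pattern (demo names are assumptions):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int demo_thread_fn(void *data)
    {
            return 0;
    }

    static int demo_start_percpu_thread(unsigned int cpu)
    {
            struct task_struct *t;

            /* the helper fills the format's %u with 'cpu' and binds to it */
            t = kthread_create_on_cpu(demo_thread_fn, NULL, cpu,
                                      "demo_thread/%u");
            if (IS_ERR(t))
                    return PTR_ERR(t);
            wake_up_process(t);
            return 0;
    }
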
@@ -2869,7 +2867,7 @@ static void qedi_remove(struct pci_dev *pdev)
__qedi_remove(pdev, QEDI_MODE_NORMAL);
}
-static struct pci_device_id qedi_pci_tbl[] = {
+static const struct pci_device_id qedi_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
{ 0 },
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8958547ac111..47d74f881948 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -501,7 +501,7 @@ struct qla_boards {
};
/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
-static struct pci_device_id qla1280_pci_tbl[] = {
+static const struct pci_device_id qla1280_pci_tbl[] = {
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
@@ -1159,7 +1159,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
/**************************************************************************
- * qla1280_slave_configure
+ * qla1280_sdev_configure
*
* Description:
* Determines the queue depth for a given device. There are two ways
@@ -1170,7 +1170,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
* default queue depth (dependent on the number of hardware SCBs).
**************************************************************************/
static int
-qla1280_slave_configure(struct scsi_device *device)
+qla1280_sdev_configure(struct scsi_device *device, struct queue_limits *lim)
{
struct scsi_qla_host *ha;
int default_depth = 3;
@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(upper_32_bits(dma_handle)),
cpu_to_le32(lower_32_bits(dma_handle)),
- cpu_to_le32(sg_dma_len(sg_next(s))));
+ cpu_to_le32(sg_dma_len(s)));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
@@ -4121,7 +4121,7 @@ static const struct scsi_host_template qla1280_driver_template = {
.proc_name = "qla1280",
.name = "Qlogic ISP 1280/12160",
.info = qla1280_info,
- .slave_configure = qla1280_slave_configure,
+ .sdev_configure = qla1280_sdev_configure,
.queuecommand = qla1280_queuecommand,
.eh_abort_handler = qla1280_eh_abort,
.eh_device_reset_handler= qla1280_eh_device_reset,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e6ece30c4348..dcb0c2af1fa7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -17,7 +17,7 @@ static int qla24xx_vport_disable(struct fc_vport *, bool);
static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -58,7 +58,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -168,19 +168,19 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_fw_dump,
- .write = qla2x00_sysfs_write_fw_dump,
+ .read_new = qla2x00_sysfs_read_fw_dump,
+ .write_new = qla2x00_sysfs_write_fw_dump,
};
static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -220,7 +220,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -282,19 +282,19 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_nvram_attr = {
+static const struct bin_attribute sysfs_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUSR | S_IWUSR,
},
.size = 512,
- .read = qla2x00_sysfs_read_nvram,
- .write = qla2x00_sysfs_write_nvram,
+ .read_new = qla2x00_sysfs_read_nvram,
+ .write_new = qla2x00_sysfs_write_nvram,
};
static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -318,7 +318,7 @@ out:
static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -344,19 +344,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_optrom_attr = {
+static const struct bin_attribute sysfs_optrom_attr = {
.attr = {
.name = "optrom",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_optrom,
- .write = qla2x00_sysfs_write_optrom,
+ .read_new = qla2x00_sysfs_read_optrom,
+ .write_new = qla2x00_sysfs_write_optrom,
};
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -529,18 +529,18 @@ out:
return rval;
}
-static struct bin_attribute sysfs_optrom_ctl_attr = {
+static const struct bin_attribute sysfs_optrom_ctl_attr = {
.attr = {
.name = "optrom_ctl",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_optrom_ctl,
+ .write_new = qla2x00_sysfs_write_optrom_ctl,
};
static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -587,7 +587,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -642,19 +642,19 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_vpd_attr = {
+static const struct bin_attribute sysfs_vpd_attr = {
.attr = {
.name = "vpd",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_vpd,
- .write = qla2x00_sysfs_write_vpd,
+ .read_new = qla2x00_sysfs_read_vpd,
+ .write_new = qla2x00_sysfs_write_vpd,
};
static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -679,18 +679,18 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_sfp_attr = {
+static const struct bin_attribute sysfs_sfp_attr = {
.attr = {
.name = "sfp",
.mode = S_IRUSR | S_IWUSR,
},
.size = SFP_DEV_SIZE,
- .read = qla2x00_sysfs_read_sfp,
+ .read_new = qla2x00_sysfs_read_sfp,
};
static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -823,19 +823,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_reset_attr = {
+static const struct bin_attribute sysfs_reset_attr = {
.attr = {
.name = "reset",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_reset,
+ .write_new = qla2x00_sysfs_write_reset,
};
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -866,18 +866,18 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_issue_logo_attr = {
+static const struct bin_attribute sysfs_issue_logo_attr = {
.attr = {
.name = "issue_logo",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_issue_logo,
+ .write_new = qla2x00_issue_logo,
};
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -929,18 +929,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_xgmac_stats_attr = {
+static const struct bin_attribute sysfs_xgmac_stats_attr = {
.attr = {
.name = "xgmac_stats",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_xgmac_stats,
+ .read_new = qla2x00_sysfs_read_xgmac_stats,
};
static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -987,18 +987,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_dcbx_tlv_attr = {
+static const struct bin_attribute sysfs_dcbx_tlv_attr = {
.attr = {
.name = "dcbx_tlv",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_dcbx_tlv,
+ .read_new = qla2x00_sysfs_read_dcbx_tlv,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
int type;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 15066c112817..cb95b7b12051 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4098,6 +4098,8 @@ struct qla_hw_data {
uint32_t npiv_supported :1;
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
+ uint32_t user_enabled_fce :1;
+ uint32_t fce_dump_buf_alloced :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a1545dad0c0c..08273520c777 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
mutex_lock(&ha->fce_mutex);
- seq_puts(s, "FCE Trace Buffer\n");
- seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
- seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
- seq_puts(s, "FCE Enable Registers\n");
- seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
- ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
- ha->fce_mb[5], ha->fce_mb[6]);
-
- fce = (uint32_t *) ha->fce;
- fce_start = (unsigned long long) ha->fce_dma;
- for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
- if (cnt % 8 == 0)
- seq_printf(s, "\n%llx: ",
- (unsigned long long)((cnt * 4) + fce_start));
- else
- seq_putc(s, ' ');
- seq_printf(s, "%08x", *fce++);
- }
+ if (ha->flags.user_enabled_fce) {
+ seq_puts(s, "FCE Trace Buffer\n");
+ seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+ seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
+ seq_puts(s, "FCE Enable Registers\n");
+ seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+ ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+ ha->fce_mb[5], ha->fce_mb[6]);
+
+ fce = (uint32_t *)ha->fce;
+ fce_start = (unsigned long long)ha->fce_dma;
+ for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+ if (cnt % 8 == 0)
+ seq_printf(s, "\n%llx: ",
+ (unsigned long long)((cnt * 4) + fce_start));
+ else
+ seq_putc(s, ' ');
+ seq_printf(s, "%08x", *fce++);
+ }
- seq_puts(s, "\nEnd\n");
+ seq_puts(s, "\nEnd\n");
+ } else {
+ seq_puts(s, "FCE Trace is currently not enabled\n");
+ seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
+ }
mutex_unlock(&ha->fce_mutex);
@@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
struct qla_hw_data *ha = vha->hw;
int rval;
- if (ha->flags.fce_enabled)
+ if (ha->flags.fce_enabled || !ha->fce)
goto out;
mutex_lock(&ha->fce_mutex);
@@ -488,11 +493,88 @@ out:
return single_release(inode, file);
}
+static ssize_t
+qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ char *buf;
+ int rc = 0;
+ unsigned long enable;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0xd034,
+ "this adapter does not support FCE.");
+ return -EINVAL;
+ }
+
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf)) {
+ ql_dbg(ql_dbg_user, vha, 0xd037,
+ "fail to copy user buffer.");
+ return PTR_ERR(buf);
+ }
+
+ rc = kstrtoul(buf, 0, &enable);
+ if (rc)
+ goto out_free;
+ rc = count;
+
+ mutex_lock(&ha->fce_mutex);
+
+ if (enable) {
+ if (ha->flags.user_enabled_fce) {
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+ ha->flags.user_enabled_fce = 1;
+ if (!ha->fce) {
+ rc = qla2x00_alloc_fce_trace(vha);
+ if (rc) {
+ ha->flags.user_enabled_fce = 0;
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+
+ /* adjust fw dump buffer to take into account of this feature */
+ if (!ha->flags.fce_dump_buf_alloced)
+ qla2x00_alloc_fw_dump(vha);
+ }
+
+ if (!ha->flags.fce_enabled)
+ qla_enable_fce_trace(vha);
+
+ ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE.\n");
+ } else {
+ if (!ha->flags.user_enabled_fce) {
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+ ha->flags.user_enabled_fce = 0;
+ if (ha->flags.fce_enabled) {
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
+ ha->flags.fce_enabled = 0;
+ }
+
+ qla2x00_free_fce_trace(ha);
+ /* no need to re-adjust fw dump buffer */
+
+ ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE.\n");
+ }
+
+ mutex_unlock(&ha->fce_mutex);
+out_free:
+ kfree(buf);
+ return rc;
+}
+
static const struct file_operations dfs_fce_ops = {
.open = qla2x00_dfs_fce_open,
.read = seq_read,
.llseek = seq_lseek,
.release = qla2x00_dfs_fce_release,
+ .write = qla2x00_dfs_fce_write,
};
static int
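
Note: with the kstrtoul() call corrected above (status in the return value, parsed number through the pointer), the handler follows the standard debugfs write shape: memdup_user_nul() to copy and NUL-terminate the user buffer, parse, act, and return the byte count on success. The skeleton reduced to its essentials (demo_write is illustrative):

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static ssize_t demo_write(struct file *file, const char __user *ubuf,
                              size_t count, loff_t *pos)
    {
            unsigned long val;
            char *buf = memdup_user_nul(ubuf, count);
            int rc;

            if (IS_ERR(buf))
                    return PTR_ERR(buf);

            rc = kstrtoul(buf, 0, &val); /* rc is status, val gets the number */
            kfree(buf);
            if (rc)
                    return rc;
            /* ... act on val ... */
            return count;
    }
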
@@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto out;
- if (!ha->fce)
- goto out;
if (qla2x00_dfs_root)
goto create_dir;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index cededfda9d0e..e556f57c91af 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -11,6 +11,9 @@
/*
* Global Function Prototypes in qla_init.c source file.
*/
+int qla2x00_alloc_fce_trace(scsi_qla_host_t *);
+void qla2x00_free_fce_trace(struct qla_hw_data *ha);
+void qla_enable_fce_trace(scsi_qla_host_t *);
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 31fc6a0eca3e..79cdfec2bca3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2681,7 +2681,7 @@ exit:
return rval;
}
-static void qla_enable_fce_trace(scsi_qla_host_t *vha)
+void qla_enable_fce_trace(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
@@ -3717,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-static void
-qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
if (!IS_FWI2_CAPABLE(ha))
- return;
+ return -EINVAL;
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- return;
+ return -EINVAL;
if (ha->fce) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
"%s: FCE Mem is already allocated.\n",
__func__);
- return;
+ return -EIO;
}
/* Allocate memory for Fibre Channel Event Buffer. */
@@ -3745,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
FCE_SIZE / 1024);
- return;
+ return -ENOMEM;
}
ql_dbg(ql_dbg_init, vha, 0x00c0,
@@ -3754,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
ha->fce_dma = tc_dma;
ha->fce = tc;
ha->fce_bufs = FCE_NUM_BUFFERS;
+ return 0;
+}
+
+void qla2x00_free_fce_trace(struct qla_hw_data *ha)
+{
+ if (!ha->fce)
+ return;
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma);
+ ha->fce = NULL;
+ ha->fce_dma = 0;
}
static void
@@ -3844,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- qla2x00_alloc_fce_trace(vha);
- if (ha->fce)
+ if (ha->fce) {
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ ha->flags.fce_dump_buf_alloced = 1;
+ }
qla2x00_alloc_eft_trace(vha);
if (ha->eft)
eft_size = EFT_SIZE;
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8f4cc136a9c9..8ee2e337c9e1 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
{
struct scsi_qla_host *vha = lport->private;
- blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7ab717ed7232..6b9b8218b512 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
@@ -1934,7 +1933,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
}
static int
-qla2xxx_slave_alloc(struct scsi_device *sdev)
+qla2xxx_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -1947,7 +1946,7 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
}
static int
-qla2xxx_slave_configure(struct scsi_device *sdev)
+qla2xxx_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
@@ -1957,7 +1956,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
}
static void
-qla2xxx_slave_destroy(struct scsi_device *sdev)
+qla2xxx_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
+ vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
@@ -8087,10 +8087,10 @@ struct scsi_host_template qla2xxx_driver_template = {
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
.eh_host_reset_handler = qla2xxx_eh_host_reset,
- .slave_configure = qla2xxx_slave_configure,
+ .sdev_configure = qla2xxx_sdev_configure,
- .slave_alloc = qla2xxx_slave_alloc,
- .slave_destroy = qla2xxx_slave_destroy,
+ .sdev_init = qla2xxx_sdev_init,
+ .sdev_destroy = qla2xxx_sdev_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
@@ -8116,7 +8116,7 @@ static const struct pci_error_handlers qla2xxx_err_handler = {
.reset_done = qla_pci_reset_done,
};
-static struct pci_device_id qla2xxx_pci_tbl[] = {
+static const struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
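blk_mq_map_hw_queues() is the bus-agnostic successor to blk_mq_pci_map_queues() (and to the virtio variant converted later in this section): it derives the queue mapping from a generic struct device's IRQ affinity, which is why the <linux/blk-mq-pci.h> include can be dropped. A conversion sketch under assumed names (mydrv_hba and its fields are hypothetical):

static void mydrv_map_queues(struct Scsi_Host *shost)
{
	struct mydrv_hba *hba = shost_priv(shost);
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	if (hba->use_software_mapping)
		blk_mq_map_queues(qmap);	/* fallback software mapping */
	else
		blk_mq_map_hw_queues(qmap, &hba->pdev->dev, hba->irq_offset);
}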
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index abfa6ef60480..e3f85d6ea0db 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -10,7 +10,7 @@
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -28,7 +28,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -104,19 +104,19 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla4_8xxx_sysfs_read_fw_dump,
- .write = qla4_8xxx_sysfs_write_fw_dump,
+ .read_new = qla4_8xxx_sysfs_read_fw_dump,
+ .write_new = qla4_8xxx_sysfs_write_fw_dump,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr },
{ NULL },
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index d91f54a6e752..6b0e6b4cd8af 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -160,7 +160,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
-static int qla4xxx_slave_alloc(struct scsi_device *device);
+static int qla4xxx_sdev_init(struct scsi_device *device);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
@@ -234,7 +234,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.eh_host_reset_handler = qla4xxx_eh_host_reset,
.eh_timed_out = qla4xxx_eh_cmd_timed_out,
- .slave_alloc = qla4xxx_slave_alloc,
+ .sdev_init = qla4xxx_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
@@ -7189,7 +7189,8 @@ exit_new_nt_list:
* 1: if flashnode entry is non-persistent
* 0: if flashnode entry is persistent
**/
-static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev,
+ const void *data)
{
struct iscsi_bus_flash_session *fnode_sess;
@@ -9052,7 +9053,7 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
}
}
-static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+static int qla4xxx_sdev_init(struct scsi_device *sdev)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -9846,7 +9847,7 @@ static const struct pci_error_handlers qla4xxx_err_handler = {
.resume = qla4xxx_pci_resume,
};
-static struct pci_device_id qla4xxx_pci_tbl[] = {
+static const struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4010,
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 74866b9f2b14..c9984ef57f26 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -975,7 +975,8 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
-static int qlogicpti_slave_configure(struct scsi_device *sdev)
+static int qlogicpti_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct qlogicpti *qpti = shost_priv(sdev->host);
int tgt = sdev->id;
@@ -1292,7 +1293,7 @@ static const struct scsi_host_template qpti_template = {
.name = "qlogicpti",
.info = qlogicpti_info,
.queuecommand = qlogicpti_queuecommand,
- .slave_configure = qlogicpti_slave_configure,
+ .sdev_configure = qlogicpti_sdev_configure,
.eh_abort_handler = qlogicpti_abort,
.eh_host_reset_handler = qlogicpti_reset,
.can_queue = QLOGICPTI_REQ_QUEUE_LEN,
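The pattern repeated across the remaining SCSI drivers in this section is the scsi_host_template callback rename: ->slave_alloc/->slave_configure/->slave_destroy become ->sdev_init/->sdev_configure/->sdev_destroy, with ->sdev_configure additionally receiving the queue_limits being staged for the device so limits are adjusted before the midlayer commits them. A conversion sketch with hypothetical mydrv_* names:

static int mydrv_sdev_init(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;		/* set up per-LU state here */
	return 0;
}

static int mydrv_sdev_configure(struct scsi_device *sdev,
				struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;	/* tweak the staged limits directly */
	return 0;
}

static void mydrv_sdev_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;		/* release per-LU state here */
}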
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 680ba180a672..fe5c30bb2639 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5879,23 +5879,24 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
return open_devip;
}
-static int scsi_debug_slave_alloc(struct scsi_device *sdp)
+static int scsi_debug_sdev_init(struct scsi_device *sdp)
{
if (sdebug_verbose)
- pr_info("slave_alloc <%u %u %u %llu>\n",
+ pr_info("sdev_init <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
return 0;
}
-static int scsi_debug_slave_configure(struct scsi_device *sdp)
+static int scsi_debug_sdev_configure(struct scsi_device *sdp,
+ struct queue_limits *lim)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct dentry *dentry;
if (sdebug_verbose)
- pr_info("slave_configure <%u %u %u %llu>\n",
+ pr_info("sdev_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
@@ -5927,14 +5928,14 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
return 0;
}
-static void scsi_debug_slave_destroy(struct scsi_device *sdp)
+static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct sdebug_err_inject *err;
if (sdebug_verbose)
- pr_info("slave_destroy <%u %u %u %llu>\n",
+ pr_info("sdev_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (!devip)
@@ -6383,8 +6384,8 @@ static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
sd_dp = &sqcp->sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
sqcp->scmd = scmd;
@@ -8706,15 +8707,15 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
return 0;
}
-static struct scsi_host_template sdebug_driver_template = {
+static const struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
.proc_name = sdebug_proc_name,
.name = "SCSI DEBUG",
.info = scsi_debug_info,
- .slave_alloc = scsi_debug_slave_alloc,
- .slave_configure = scsi_debug_slave_configure,
- .slave_destroy = scsi_debug_slave_destroy,
+ .sdev_init = scsi_debug_sdev_init,
+ .sdev_configure = scsi_debug_sdev_configure,
+ .sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
@@ -8732,6 +8733,7 @@ static struct scsi_host_template sdebug_driver_template = {
.max_sectors = -1U,
.max_segment_size = -1U,
.module = THIS_MODULE,
+ .skip_settle_delay = 1,
.track_queue_depth = 1,
.cmd_size = sizeof(struct sdebug_scsi_cmd),
.init_cmd_priv = sdebug_init_cmd_priv,
@@ -8748,17 +8750,17 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = dev_to_sdebug_host(dev);
- sdebug_driver_template.can_queue = sdebug_max_queue;
- sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
- if (!sdebug_clustering)
- sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
-
hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
return error;
}
+ hpnt->can_queue = sdebug_max_queue;
+ hpnt->cmd_per_lun = sdebug_max_queue;
+ if (!sdebug_clustering)
+ hpnt->dma_boundary = PAGE_SIZE - 1;
+
if (submit_queues > nr_cpu_ids) {
pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
my_name, submit_queues, nr_cpu_ids);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 10154d78e336..815e7d63f3e2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2363,14 +2363,14 @@ int scsi_error_handler(void *data)
return 0;
}
-/*
- * Function: scsi_report_bus_reset()
+/**
+ * scsi_report_bus_reset() - report bus reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a bus reset on the bus being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a bus reset on the bus being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed.
+ * @shost: Host in question
+ * @channel: channel on which reset was observed.
*
* Returns: Nothing
*
@@ -2395,15 +2395,15 @@ void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
}
EXPORT_SYMBOL(scsi_report_bus_reset);
-/*
- * Function: scsi_report_device_reset()
+/**
+ * scsi_report_device_reset() - report device reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a device reset on the device being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a device reset on the device being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed
- * target - target on which reset was observed
+ * @shost: Host in question
+ * @channel: channel on which reset was observed
+ * @target: target on which reset was observed
*
* Returns: Nothing
*
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 6f6c5973c3ea..2fa45556e1ea 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -37,8 +37,10 @@
* @host: host to identify
* @buffer: userspace buffer for identification
*
- * Return an identifying string at @buffer, if @buffer is non-NULL, filling
- * to the length stored at * (int *) @buffer.
+ * Return:
+ * * if successful, %1 and an identifying string at @buffer, if @buffer
+ * is non-NULL, filling to the length stored at * (int *) @buffer.
+ * * <0 error code on failure.
*/
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
@@ -121,6 +123,16 @@ out:
return result;
}
+/**
+ * scsi_set_medium_removal() - send command to allow or prevent medium removal
+ * @sdev: target scsi device
+ * @state: removal state to set (prevent or allow)
+ *
+ * Returns:
+ * * %0 if @sdev is not removable or not lockable or successful.
+ * * non-%0 is a SCSI result code if > 0 or kernel error code if < 0.
+ * * Sets @sdev->locked to the new state on success.
+ */
int scsi_set_medium_removal(struct scsi_device *sdev, char state)
{
char scsi_cmd[MAX_COMMAND_SIZE];
@@ -242,11 +254,15 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data)
NORMAL_RETRIES);
}
-/*
- * Check if the given command is allowed.
+/**
+ * scsi_cmd_allowed() - Check if the given command is allowed.
+ * @cmd: SCSI command to check
+ * @open_for_write: is the file / block device opened for writing?
*
* Only a subset of commands are allowed for unprivileged users. Commands used
* to format the media, update the firmware, etc. are not permitted.
+ *
+ * Return: %true if the cmd is allowed, otherwise %false.

*/
bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write)
{
@@ -859,6 +875,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write,
* Description: The scsi_ioctl() function differs from most ioctls in that it
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
+ *
+ * Return: varies depending on the @cmd
*/
int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
void __user *arg)
@@ -941,8 +959,15 @@ int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
}
EXPORT_SYMBOL(scsi_ioctl);
-/*
+/**
+ * scsi_ioctl_block_when_processing_errors - prevent commands from being queued
+ * @sdev: target scsi device
+ * @cmd: which ioctl is it
+ * @ndelay: no delay (non-blocking)
+ *
* We can process a reset even when a device isn't fully operable.
+ *
+ * Return: %0 on success, <0 error code.
*/
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd,
bool ndelay)
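Given the return convention now documented for scsi_set_medium_removal(), a hedged caller sketch (lock_tray() is illustrative, not an existing helper):

static int lock_tray(struct scsi_device *sdev)
{
	int ret = scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);

	if (ret < 0)
		return ret;	/* kernel error code */
	if (ret > 0)
		return -EIO;	/* positive SCSI result code */
	return 0;		/* success; sdev->locked now set */
}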
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index adee6f60c966..f1cfe0bb89b2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,6 +184,10 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+/**
+ * scsi_failures_reset_retries - reset all failures to zero
+ * @failures: &struct scsi_failures with specific failure modes set
+ */
void scsi_failures_reset_retries(struct scsi_failures *failures)
{
struct scsi_failure *failure;
@@ -210,6 +214,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_sense_hdr sshdr;
enum sam_status status;
+ if (!scmd->result)
+ return 0;
+
if (!failures)
return 0;
@@ -865,13 +872,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
- case 0x24: /* depopulation in progress */
- case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
action = ACTION_DELAYED_REPREP;
break;
+ /*
+ * Depopulation might take many hours,
+ * thus it is not worthwhile to retry.
+ */
+ case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
+ fallthrough;
default:
action = ACTION_FAIL;
break;
@@ -1214,6 +1226,15 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+/**
+ * scsi_alloc_request - allocate a block request and partially
+ * initialize its &scsi_cmnd
+ * @q: the device's request queue
+ * @opf: the request operation code
+ * @flags: block layer allocation flags
+ *
+ * Return: &struct request pointer on success or %NULL on failure
+ */
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
@@ -1648,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Only clear the driver-private command data if the LLD does not supply
- * a function to initialize that data.
- */
- if (!shost->hostt->init_cmd_priv)
- memset(cmd + 1, 0, shost->hostt->cmd_size);
-
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1821,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
@@ -2065,9 +2086,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
- tag_set->flags |=
- BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ if (shost->hostt->tag_alloc_policy_rr)
+ tag_set->flags |= BLK_MQ_F_TAG_RR;
if (shost->queuecommand_may_block)
tag_set->flags |= BLK_MQ_F_BLOCKING;
tag_set->driver_data = shost;
@@ -2721,6 +2741,7 @@ int
scsi_device_quiesce(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
+ unsigned int memflags;
int err;
/*
@@ -2735,7 +2756,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
blk_set_pm_only(q);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
/*
* Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
@@ -2743,7 +2764,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
* was called. See also https://lwn.net/Articles/573497/.
*/
synchronize_rcu();
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
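blk_mq_freeze_queue() now returns the memalloc flags it saved when the freeze entered its implicit noio scope, and blk_mq_unfreeze_queue() takes them back, which is why every caller in this series threads a memflags cookie through the frozen section. The generic pattern:

	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);
	/* ... mutate queue state with no requests in flight ... */
	blk_mq_unfreeze_queue(q, memflags);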
@@ -3365,14 +3386,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
-/*
+/**
* scsi_vpd_tpg_id - return a target port group identifier
* @sdev: SCSI device
+ * @rel_id: pointer to return relative target port in if not %NULL
*
* Returns the Target Port Group identifier from the information
- * froom VPD page 0x83 of the device.
+ * from VPD page 0x83 of the device.
+ * Optionally sets @rel_id to the relative target port on success.
*
- * Returns the identifier or error on failure.
+ * Return: the identifier or error on failure.
*/
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
index 99834426a100..ae8af0e0047a 100644
--- a/drivers/scsi/scsi_lib_test.c
+++ b/drivers/scsi/scsi_lib_test.c
@@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test)
};
int i;
+ /* Success */
+ sc.result = 0;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
+ /* Command failed but caller did not pass in a failures array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
/* Match end of array */
scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 042329b74c6e..96d7e1a9a7c7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -220,6 +220,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int new_shift = sbitmap_calculate_shift(depth);
bool need_alloc = !sdev->budget_map.map;
bool need_free = false;
+ unsigned int memflags;
int ret;
struct sbitmap sb_backup;
@@ -227,7 +228,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
/*
* realloc if new shift is calculated, which is caused by setting
- * up one new default queue depth after calling ->device_configure
+ * up one new default queue depth after calling ->sdev_configure
*/
if (!need_alloc && new_shift != sdev->budget_map.shift)
need_alloc = need_free = true;
@@ -240,12 +241,12 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* and here disk isn't added yet, so freezing is pretty fast
*/
if (need_free) {
- blk_mq_freeze_queue(sdev->request_queue);
+ memflags = blk_mq_freeze_queue(sdev->request_queue);
sb_backup = sdev->budget_map;
}
ret = sbitmap_init_node(&sdev->budget_map,
scsi_device_max_queue_depth(sdev),
- new_shift, GFP_KERNEL,
+ new_shift, GFP_NOIO,
sdev->request_queue->node, false, true);
if (!ret)
sbitmap_resize(&sdev->budget_map, depth);
@@ -256,7 +257,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
else
sbitmap_free(&sb_backup);
ret = 0;
- blk_mq_unfreeze_queue(sdev->request_queue);
+ blk_mq_unfreeze_queue(sdev->request_queue, memflags);
}
return ret;
}
@@ -265,7 +266,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
* @lun: which lun
- * @hostdata: usually NULL and set by ->slave_alloc instead
+ * @hostdata: usually NULL and set by ->sdev_init instead
*
* Description:
* Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -312,11 +313,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_gendev.parent = get_device(&starget->dev);
sdev->sdev_target = starget;
- /* usually NULL and set by ->slave_alloc instead */
+ /* usually NULL and set by ->sdev_init instead */
sdev->hostdata = hostdata;
/* if the device needs this changing, it may do so in the
- * slave_configure function */
+ * sdev_configure function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
/*
@@ -363,8 +364,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_sysfs_device_initialize(sdev);
- if (shost->hostt->slave_alloc) {
- ret = shost->hostt->slave_alloc(sdev);
+ if (shost->hostt->sdev_init) {
+ ret = shost->hostt->sdev_init(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -1074,10 +1075,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_MAX_1024)
lim.max_hw_sectors = 1024;
- if (hostt->device_configure)
- ret = hostt->device_configure(sdev, &lim);
- else if (hostt->slave_configure)
- ret = hostt->slave_configure(sdev);
+ if (hostt->sdev_configure)
+ ret = hostt->sdev_configure(sdev, &lim);
if (ret) {
queue_limits_cancel_update(sdev->request_queue);
/*
@@ -1097,12 +1096,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
/*
- * The queue_depth is often changed in ->device_configure.
+ * The queue_depth is often changed in ->sdev_configure.
*
* Set up budget map again since memory consumption of the map depends
* on actual queue depth.
*/
- if (hostt->device_configure || hostt->slave_configure)
+ if (hostt->sdev_configure)
scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
if (sdev->scsi_level >= SCSI_3)
@@ -1636,6 +1635,24 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
EXPORT_SYMBOL(__scsi_add_device);
+/**
+ * scsi_add_device - creates a new SCSI (LU) instance
+ * @host: the &Scsi_Host instance where the device is located
+ * @channel: target channel number (rarely other than %0)
+ * @target: target id number
+ * @lun: LUN of target device
+ *
+ * Probe for a specific LUN and add it if found.
+ *
+ * Notes: This call is usually performed internally during a SCSI
+ * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
+ * should only be called if the HBA becomes aware of a new SCSI
+ * device (LU) after scsi_scan_host() has completed. If successful
+ * this call can lead to sdev_init() and sdev_configure() callbacks
+ * into the LLD.
+ *
+ * Return: %0 on success or negative error code on failure
+ */
int scsi_add_device(struct Scsi_Host *host, uint channel,
uint target, u64 lun)
{
@@ -2027,6 +2044,8 @@ static void do_scan_async(void *_data, async_cookie_t c)
/**
* scsi_scan_host - scan the given adapter
* @shost: adapter to scan
+ *
+ * Notes: Should be called after scsi_add_host()
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 093774d77534..be4aef0f4f99 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -12,7 +12,7 @@
#include "scsi_priv.h"
-static struct ctl_table scsi_table[] = {
+static const struct ctl_table scsi_table[] = {
{ .procname = "logging_level",
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f3a1ecb42128..d772258e29ad 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -898,7 +898,7 @@ static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
#define sdev_vpd_pg_attr(_page) \
static ssize_t \
show_vpd_##_page(struct file *filp, struct kobject *kobj, \
- struct bin_attribute *bin_attr, \
+ const struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
@@ -914,10 +914,10 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
rcu_read_unlock(); \
return ret; \
} \
-static struct bin_attribute dev_attr_vpd_##_page = { \
+static const struct bin_attribute dev_attr_vpd_##_page = { \
.attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
.size = 0, \
- .read = show_vpd_##_page, \
+ .read_new = show_vpd_##_page, \
};
sdev_vpd_pg_attr(pg83);
@@ -930,7 +930,7 @@ sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -943,13 +943,13 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
sdev->inquiry_len);
}
-static struct bin_attribute dev_attr_inquiry = {
+static const struct bin_attribute dev_attr_inquiry = {
.attr = {
.name = "inquiry",
.mode = S_IRUGO,
},
.size = 0,
- .read = show_inquiry,
+ .read_new = show_inquiry,
};
static ssize_t
@@ -1348,7 +1348,7 @@ static struct attribute *scsi_sdev_attrs[] = {
NULL
};
-static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+static const struct bin_attribute *const scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
@@ -1362,7 +1362,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
};
static struct attribute_group scsi_sdev_attr_group = {
.attrs = scsi_sdev_attrs,
- .bin_attrs = scsi_sdev_bin_attrs,
+ .bin_attrs_new = scsi_sdev_bin_attrs,
.is_visible = scsi_sdev_attr_is_visible,
.is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
@@ -1513,8 +1513,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->slave_destroy)
- sdev->host->hostt->slave_destroy(sdev);
+ if (sdev->host->hostt->sdev_destroy)
+ sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
/*
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fde7de3b1e55..9c347c64c315 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1324,7 +1324,7 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
* 1 on success
* 0 on failure
*/
-static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data)
{
return dev->bus == &iscsi_flashnode_bus;
}
@@ -1335,7 +1335,7 @@ static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
return 0;
}
-static int flashnode_match_index(struct device *dev, void *data)
+static int flashnode_match_index(struct device *dev, const void *data)
{
struct iscsi_bus_flash_session *fnode_sess = NULL;
int ret = 0;
@@ -1344,7 +1344,7 @@ static int flashnode_match_index(struct device *dev, void *data)
goto exit_match_index;
fnode_sess = iscsi_dev_to_flash_session(dev);
- ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+ ret = (fnode_sess->target_id == *((const int *)data)) ? 1 : 0;
exit_match_index:
return ret;
@@ -1389,8 +1389,8 @@ iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
* %NULL on failure
*/
struct device *
-iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
- int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
+ device_match_t fn)
{
return device_find_child(&shost->shost_gendev, data, fn);
}
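iscsi_find_flashnode_sess() now takes the common device_match_t type, which is why the matcher callbacks above gain const on their data argument. A matcher under the new signature, sketched guard-first in the style of flashnode_match_index() (match_target_id() is hypothetical and assumes the file's iscsi_is_flashnode_session_dev() helper):

static int match_target_id(struct device *dev, const void *data)
{
	if (!iscsi_is_flashnode_session_dev(dev))
		return 0;

	return iscsi_dev_to_flash_session(dev)->target_id ==
	       *(const int *)data;
}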
@@ -2122,33 +2122,6 @@ destroy_wq:
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
-/**
- * iscsi_create_session - create iscsi class session
- * @shost: scsi host
- * @transport: iscsi transport
- * @dd_size: private driver data size
- * @target_id: which target
- *
- * This can be called from a LLD or iscsi_transport.
- */
-struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
- int dd_size, unsigned int target_id)
-{
- struct iscsi_cls_session *session;
-
- session = iscsi_alloc_session(shost, transport, dd_size);
- if (!session)
- return NULL;
-
- if (iscsi_add_session(session, target_id)) {
- iscsi_free_session(session);
- return NULL;
- }
- return session;
-}
-EXPORT_SYMBOL_GPL(iscsi_create_session);
-
static void iscsi_conn_release(struct device *dev)
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
@@ -4104,7 +4077,7 @@ iscsi_if_rx(struct sk_buff *skb)
}
do {
/*
- * special case for GET_STATS:
+ * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
* on success - sending reply and stats from
* inside of if_recv_msg(),
* on error - fall through.
@@ -4113,6 +4086,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
+ break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4e33f1661e4c..351b028ef893 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -888,7 +888,8 @@ static void sas_port_delete_link(struct sas_port *port,
sysfs_remove_link(&phy->dev.kobj, "port");
}
-/** sas_port_alloc - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc - allocate and initialize a SAS port structure
*
* @parent: parent device
* @port_id: port number
@@ -897,7 +898,7 @@ static void sas_port_delete_link(struct sas_port *port,
* below the device specified by @parent which must be either a Scsi_Host
* or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc(struct device *parent, int port_id)
{
@@ -932,7 +933,8 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
}
EXPORT_SYMBOL(sas_port_alloc);
-/** sas_port_alloc_num - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc_num - allocate and initialize a SAS port structure
*
* @parent: parent device
*
@@ -942,7 +944,7 @@ EXPORT_SYMBOL(sas_port_alloc);
* the device tree below the device specified by @parent which must be
* either a Scsi_Host or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc_num(struct device *parent)
{
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 64852e6df3e3..fe47850a8258 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -985,7 +985,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
}
-/** spi_dv_device - Do Domain Validation on the device
+/**
+ * spi_dv_device - Do Domain Validation on the device
* @sdev: scsi device to validate
*
* Performs the domain validation on the given device in the
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8947dab132d7..950d8c9fb884 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -177,9 +177,8 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_set_flush_flag(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
+ &lim);
if (ret)
return ret;
return count;
@@ -483,9 +482,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_discard(sdkp, &lim, mode);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -594,9 +591,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_write_same(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -814,14 +809,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
}
if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
scmd->prot_flags |= SCSI_PROT_REF_CHECK;
}
@@ -996,6 +991,7 @@ static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
lim->atomic_write_hw_boundary = 0;
lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
@@ -3803,9 +3799,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_config_write_same(sdkp, &lim);
kfree(buffer);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
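queue_limits_commit_update_frozen() folds the freeze/commit/unfreeze triple into a single call, which is what lets each attribute store above shrink to one line. It pairs with queue_limits_start_update():

	lim = queue_limits_start_update(q);
	lim.logical_block_size = sector_size;
	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;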
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 94127868bedf..effb7e768165 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1639,7 +1639,7 @@ MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
-static struct ctl_table sg_sysctls[] = {
+static const struct ctl_table sg_sysctls[] = {
{
.procname = "sg-big-buff",
.data = &sg_big_buff,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 870f37b70546..0da7be40c925 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,6 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6490,7 +6489,7 @@ out:
return SUCCESS;
}
-static int pqi_slave_alloc(struct scsi_device *sdev)
+static int pqi_sdev_init(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
unsigned long flags;
@@ -6547,10 +6546,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
if (!ctrl_info->disable_managed_interrupts)
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ &ctrl_info->pci_dev->dev, 0);
else
- return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -6558,7 +6557,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
-static int pqi_slave_configure(struct scsi_device *sdev)
+static int pqi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
int rc = 0;
struct pqi_scsi_dev *device;
@@ -6574,7 +6574,7 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
-static void pqi_slave_destroy(struct scsi_device *sdev)
+static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
@@ -7549,9 +7549,9 @@ static const struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
- .slave_alloc = pqi_slave_alloc,
- .slave_configure = pqi_slave_configure,
- .slave_destroy = pqi_slave_destroy,
+ .sdev_init = pqi_sdev_init,
+ .sdev_configure = pqi_sdev_configure,
+ .sdev_destroy = pqi_sdev_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 9be3f0193145..1c24517e4e65 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -21,7 +21,7 @@
#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
/* Supported devices by snic module */
-static struct pci_device_id snic_id_table[] = {
+static const struct pci_device_id snic_id_table[] = {
{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
{ 0, } /* end of table */
};
@@ -42,11 +42,11 @@ module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
/*
- * snic_slave_alloc : callback function to SCSI Mid Layer, called on
+ * snic_sdev_init : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_alloc(struct scsi_device *sdev)
+snic_sdev_init(struct scsi_device *sdev)
{
struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
@@ -57,11 +57,11 @@ snic_slave_alloc(struct scsi_device *sdev)
}
/*
- * snic_slave_configure : callback function to SCSI Mid Layer, called on
+ * snic_sdev_configure : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_configure(struct scsi_device *sdev)
+snic_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct snic *snic = shost_priv(sdev->host);
u32 qdepth = 0, max_ios = 0;
@@ -107,8 +107,8 @@ static const struct scsi_host_template snic_host_template = {
.eh_abort_handler = snic_abort_cmd,
.eh_device_reset_handler = snic_device_reset,
.eh_host_reset_handler = snic_host_reset,
- .slave_alloc = snic_slave_alloc,
- .slave_configure = snic_slave_configure,
+ .sdev_init = snic_sdev_init,
+ .sdev_configure = snic_sdev_configure,
.change_queue_depth = snic_change_queue_depth,
.this_id = -1,
.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 198bec87bb8e..b17796d5ee66 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -797,10 +797,7 @@ static int get_sectorsize(struct scsi_cd *cd)
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
- blk_mq_freeze_queue(q);
- err = queue_limits_commit_update(q, &lim);
- blk_mq_unfreeze_queue(q);
- return err;
+ return queue_limits_commit_update_frozen(q, &lim);
}
static int get_capabilities(struct scsi_cd *cd)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e8ef27d7ef61..ebbd50ec0cda 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1030,6 +1030,11 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
break;
}
+ if (STp->first_tur) {
+ /* Don't set pos_unknown right after device recognition */
+ STp->pos_unknown = 0;
+ STp->first_tur = 0;
+ }
if (SRpnt != NULL)
st_release_request(SRpnt);
@@ -4328,6 +4333,7 @@ static int st_probe(struct device *dev)
blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
tpnt->long_timeout = ST_LONG_TIMEOUT;
tpnt->try_dio = try_direct_io;
+ tpnt->first_tur = 1;
for (i = 0; i < ST_NBR_MODES; i++) {
STm = &(tpnt->modes[i]);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 7a68eaba7e81..1aaaf5369a40 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -170,6 +170,7 @@ struct scsi_tape {
unsigned char rew_at_close; /* rewind necessary at close */
unsigned char inited;
unsigned char cleaning_req; /* cleaning requested? */
+ unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
int min_block;
int max_block;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 0e81125df8c7..63ed7f9aaa93 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -584,7 +584,7 @@ static void return_abnormal_state(struct st_hba *hba, int status)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static int
-stex_slave_config(struct scsi_device *sdev)
+stex_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
@@ -1481,14 +1481,14 @@ static const struct scsi_host_template driver_template = {
.proc_name = DRV_NAME,
.bios_param = stex_biosparam,
.queuecommand = stex_queuecommand,
- .slave_configure = stex_slave_config,
+ .sdev_configure = stex_sdev_configure,
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
.dma_boundary = PAGE_SIZE - 1,
};
-static struct pci_device_id stex_pci_tbl[] = {
+static const struct pci_device_id stex_pci_tbl[] = {
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d0b55c1fa908..a8614e54544e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -171,6 +171,12 @@ do { \
dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
} while (0)
+#define storvsc_log_ratelimited(dev, level, fmt, ...) \
+do { \
+ if (do_logging(level)) \
+ dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__); \
+} while (0)
+
struct vmscsi_request {
u16 length;
u8 srb_status;
@@ -917,14 +923,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
/*
* Allocate state to manage the sub-channels.
- * We allocate an array based on the numbers of possible CPUs
- * (Hyper-V does not support cpu online/offline).
- * This Array will be sparseley populated with unique
- * channels - primary + sub-channels.
- * We will however populate all the slots to evenly distribute
- * the load.
+ * We allocate an array based on the number of CPU ids. This array
+ * is initially sparsely populated for the CPUs assigned to channels:
+ * primary + sub-channels. As I/Os are initiated by different CPUs,
+ * the slots for all online CPUs are populated to evenly distribute
+ * the load across all channels.
*/
- stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
+ stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *),
GFP_KERNEL);
if (stor_device->stor_chns == NULL)
return -ENOMEM;
@@ -1177,7 +1182,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
- storvsc_log(device, loglevel,
+ storvsc_log_ratelimited(device, loglevel,
"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
@@ -1579,7 +1584,8 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
return 0;
}
-static int storvsc_device_configure(struct scsi_device *sdevice)
+static int storvsc_sdev_configure(struct scsi_device *sdevice,
+ struct queue_limits *lim)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1794,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload->range.len = 0;
payload_sz = 0;
if (scsi_sg_count(scmnd)) {
@@ -1880,8 +1887,8 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
- .slave_alloc = storvsc_device_alloc,
- .slave_configure = storvsc_device_configure,
+ .sdev_init = storvsc_device_alloc,
+ .sdev_configure = storvsc_sdev_configure,
.cmd_per_lun = 2048,
.this_id = -1,
/* Ensure there are no gaps in presented sgls */
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index a2560cc807d3..212d89d0d23e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -765,7 +765,7 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
}
}
-static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
+static int sym53c8xx_sdev_init(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -825,7 +825,8 @@ out:
/*
* Linux entry point for device queue sizing.
*/
-static int sym53c8xx_slave_configure(struct scsi_device *sdev)
+static int sym53c8xx_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -861,14 +862,14 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+static void sym53c8xx_sdev_destroy(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
- /* if slave_alloc returned before allocating a sym_lcb, return */
+ /* if sdev_init returned before allocating a sym_lcb, return */
if (!lp)
return;
@@ -1684,9 +1685,9 @@ static const struct scsi_host_template sym2_template = {
.info = sym53c8xx_info,
.cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
- .slave_alloc = sym53c8xx_slave_alloc,
- .slave_configure = sym53c8xx_slave_configure,
- .slave_destroy = sym53c8xx_slave_destroy,
+ .sdev_init = sym53c8xx_sdev_init,
+ .sdev_configure = sym53c8xx_sdev_configure,
+ .sdev_destroy = sym53c8xx_sdev_destroy,
.eh_abort_handler = sym53c8xx_eh_abort_handler,
.eh_target_reset_handler = sym53c8xx_eh_target_reset_handler,
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
@@ -2030,7 +2031,7 @@ static struct spi_function_template sym2_transport_functions = {
.get_signalling = sym2_get_signalling,
};
-static struct pci_device_id sym2_id_table[] = {
+static const struct pci_device_id sym2_id_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8471f38b730e..21ce3e940192 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
#include "sd.h"
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(map);
else
- blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
}
}
@@ -801,7 +800,7 @@ static const struct scsi_host_template virtscsi_host_template = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
+ .sdev_init = virtscsi_device_alloc,
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9ec55ddc1204..924025305753 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -735,7 +735,8 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}
-static int scsifront_sdev_configure(struct scsi_device *sdev)
+static int scsifront_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
@@ -776,8 +777,8 @@ static const struct scsi_host_template scsifront_sht = {
.queuecommand = scsifront_queuecommand,
.eh_abort_handler = scsifront_eh_abort_handler,
.eh_device_reset_handler = scsifront_dev_reset_handler,
- .slave_configure = scsifront_sdev_configure,
- .slave_destroy = scsifront_sdev_destroy,
+ .sdev_configure = scsifront_sdev_configure,
+ .sdev_destroy = scsifront_sdev_destroy,
.cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
.can_queue = VSCSIIF_MAX_REQS,
.this_id = -1,
@@ -1074,8 +1075,8 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
continue;
/*
- * Front device state path, used in slave_configure called
- * on successfull scsi_add_device, and in slave_destroy called
+ * Front device state path, used in sdev_configure called
+ * on successful scsi_add_device, and in sdev_destroy called

* on remove of a device.
*/
snprintf(info->dev_state_path, sizeof(info->dev_state_path),
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 65e5515f7555..005fa2ef100f 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -328,7 +328,8 @@ void slim_report_absent(struct slim_device *sbdev)
}
EXPORT_SYMBOL_GPL(slim_report_absent);
-static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
+static bool slim_eaddr_equal(const struct slim_eaddr *a,
+ const struct slim_eaddr *b)
{
return (a->manf_id == b->manf_id &&
a->prod_code == b->prod_code &&
@@ -336,9 +337,9 @@ static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
a->instance == b->instance);
}
-static int slim_match_dev(struct device *dev, void *data)
+static int slim_match_dev(struct device *dev, const void *data)
{
- struct slim_eaddr *e_addr = data;
+ const struct slim_eaddr *e_addr = data;
struct slim_device *sbdev = to_slim_device(dev);
return slim_eaddr_equal(&sbdev->e_addr, e_addr);
@@ -384,21 +385,13 @@ struct slim_device *slim_get_device(struct slim_controller *ctrl,
}
EXPORT_SYMBOL_GPL(slim_get_device);
-static int of_slim_match_dev(struct device *dev, void *data)
-{
- struct device_node *np = data;
- struct slim_device *sbdev = to_slim_device(dev);
-
- return (sbdev->dev.of_node == np);
-}
-
static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
struct device_node *np)
{
struct slim_device *sbdev;
struct device *dev;
- dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
+ dev = device_find_child(ctrl->dev, np, device_match_of_node);
if (dev) {
sbdev = to_slim_device(dev);
return sbdev;
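device_find_child() with the stock device_match_of_node predicate replaces the hand-rolled of_slim_match_dev(); as before, the returned device carries a reference the caller must eventually drop. The general shape, sketched:

	dev = device_find_child(ctrl->dev, np, device_match_of_node);
	if (dev) {
		sbdev = to_slim_device(dev);
		/* ... use sbdev, then put_device(dev) when done ... */
	}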
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 242570a5e565..6f01d944f9c6 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -13,8 +13,8 @@
*
* @ctrl: Controller handle
* @reply: Reply received from the device
- * @len: Length of the reply
* @tid: Transaction ID received with which framework can associate reply.
+ * @len: Length of the reply
*
* Called by controller to inform framework about the response received.
* This helps in making the API asynchronous, and controller-driver doesn't need
@@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
}
ret = ctrl->xfer_msg(ctrl, txn);
-
- if (!ret && need_tid && !txn->msg->comp) {
+ if (ret == -ETIMEDOUT) {
+ slim_free_txn_tid(ctrl, txn);
+ } else if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
time_left = wait_for_completion_timeout(txn->comp,
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 2a42b28931c9..298b542dd1c0 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -399,7 +399,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
static int __init atmel_soc_device_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
if (!of_match_node(at91_soc_allowed_list, np))
return 0;
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..f4d3c2146f4f 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -108,14 +108,12 @@ static int on_all_cpus(int (*fn)(void))
.fn = fn,
.started = ATOMIC_INIT(0)
};
- struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
- "hotpotato%d", cpu);
+ struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+ cpu, "hotpotato%d");
int ret;
if (IS_ERR(k))
return -ENOMEM;
- kthread_bind(k, cpu);
- wake_up_process(k);
/*
* If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
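kthread_run_on_cpu() collapses the kthread_create()/kthread_bind()/wake_up_process() triple into one call; the cpu argument is both the binding target and the value substituted into the printf-style name. Equivalent forms, sketched:

	/* before */
	k = kthread_create(bstrap_fn, &bstrap, "hotpotato%d", cpu);
	kthread_bind(k, cpu);
	wake_up_process(k);

	/* after: created bound to @cpu and immediately woken */
	k = kthread_run_on_cpu(bstrap_fn, &bstrap, cpu, "hotpotato%d");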
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 8aa8dec14911..444a8f59b7da 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1539,8 +1539,8 @@ static ssize_t used_types_show(struct kobject *kobj,
u16 i;
for (i = 0; i < hdev->used_type_num - 1; i++)
- len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
- len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
return len;
}
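sysfs_emit() WARNs and returns 0 when handed a pointer that is not the page-aligned start of the sysfs buffer, so the old appending at &buf[len] only worked while len was still zero; sysfs_emit_at() exists for exactly this append-in-a-loop case. A self-contained sketch (names[] and n are hypothetical):

static ssize_t names_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < n; i++)
		len += sysfs_emit_at(buf, len, "%s%c", names[i],
				     i == n - 1 ? '\n' : ' ');
	return len;
}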
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 3ad321ca608a..ca6a5fa1618f 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -3,4 +3,4 @@ ifeq ($(CONFIG_ARM),y)
obj-$(CONFIG_ARCH_MXC) += soc-imx.o
endif
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_SOC_IMX9) += imx93-src.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o soc-imx9.o
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 8ac7658e3d52..3ed8161d7d28 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -192,9 +192,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
"unknown"
+static void imx8m_unregister_soc(void *data)
+{
+ soc_device_unregister(data);
+}
+
+static void imx8m_unregister_cpufreq(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
+ struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
@@ -239,11 +250,22 @@ static int imx8m_soc_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
+ ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
+ if (ret)
+ return ret;
+
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
- if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
- platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
+ cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
+ "Failed to register imx-cpufreq-dev device\n");
+ ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
+ if (ret)
+ return ret;
+ }
return 0;
}
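[Note: the devm_add_action() calls above tie both unregistrations to the probing device, removing the need for a .remove() callback. One caveat worth noting: devm_add_action() can itself fail after the resource already exists, leaving it dangling on that error path; devm_add_action_or_reset() runs the action immediately in that case. A sketch of the safer variant under that assumption:

	static void unregister_pdev(void *data)
	{
		platform_device_unregister(data);
	}

	/* in probe(): */
	cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt",
						      -1, NULL, 0);
	if (IS_ERR(cpufreq_dev))
		return PTR_ERR(cpufreq_dev);

	/* on failure this unregisters cpufreq_dev before returning */
	ret = devm_add_action_or_reset(dev, unregister_pdev, cpufreq_dev);
	if (ret)
		return ret;
]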
diff --git a/drivers/soc/imx/soc-imx9.c b/drivers/soc/imx/soc-imx9.c
new file mode 100644
index 000000000000..b46d22cf0212
--- /dev/null
+++ b/drivers/soc/imx/soc-imx9.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+#define SOC_ID(x) (((x) & 0xFFFF) >> 8)
+#define SOC_REV_MAJOR(x) ((((x) >> 28) & 0xF) - 0x9)
+#define SOC_REV_MINOR(x) (((x) >> 24) & 0xF)
+
+static int imx9_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *attr;
+ struct arm_smccc_res res;
+ struct soc_device *sdev;
+ u32 soc_id, rev_major, rev_minor;
+ u64 uid127_64, uid63_0;
+ int err;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ err = of_property_read_string(of_root, "model", &attr->machine);
+ if (err) {
+ pr_err("%s: missing model property: %d\n", __func__, err);
+ goto attr;
+ }
+
+ attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
+
+ /*
+ * Retrieve the soc id, rev & uid info:
+ * res.a1[31:16]: soc revision;
+ * res.a1[15:0]: soc id;
+ * res.a2: uid[127:64];
+ * res.a3: uid[63:0];
+ */
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_err("%s: SMC failed: 0x%lx\n", __func__, res.a0);
+ err = -EINVAL;
+ goto family;
+ }
+
+ soc_id = SOC_ID(res.a1);
+ rev_major = SOC_REV_MAJOR(res.a1);
+ rev_minor = SOC_REV_MINOR(res.a1);
+
+ attr->soc_id = kasprintf(GFP_KERNEL, "i.MX%2x", soc_id);
+ attr->revision = kasprintf(GFP_KERNEL, "%d.%d", rev_major, rev_minor);
+
+ uid127_64 = res.a2;
+ uid63_0 = res.a3;
+ attr->serial_number = kasprintf(GFP_KERNEL, "%016llx%016llx", uid127_64, uid63_0);
+
+ sdev = soc_device_register(attr);
+ if (IS_ERR(sdev)) {
+ err = PTR_ERR(sdev);
+ pr_err("%s failed to register SoC as a device: %d\n", __func__, err);
+ goto serial_number;
+ }
+
+ return 0;
+
+serial_number:
+ kfree(attr->serial_number);
+ kfree(attr->revision);
+ kfree(attr->soc_id);
+family:
+ kfree(attr->family);
+attr:
+ kfree(attr);
+ return err;
+}
+
+static __maybe_unused const struct of_device_id imx9_soc_match[] = {
+ { .compatible = "fsl,imx93", },
+ { .compatible = "fsl,imx95", },
+ { }
+};
+
+#define IMX_SOC_DRIVER "imx9-soc"
+
+static struct platform_driver imx9_soc_driver = {
+ .probe = imx9_soc_probe,
+ .driver = {
+ .name = IMX_SOC_DRIVER,
+ },
+};
+
+static int __init imx9_soc_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /* No match means it is not an i.MX 9 series SoC, do nothing. */
+ if (!of_match_node(imx9_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx9_soc_driver);
+ if (ret) {
+ pr_err("failed to register imx9_soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple(IMX_SOC_DRIVER, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("failed to register imx9_soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx9_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(imx9_soc_init);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX9 SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index d08bfc8ef7be..104a5f9bfd26 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -69,14 +69,11 @@ static int litex_check_csr_access(void __iomem *reg_addr)
struct litex_soc_ctrl_device {
void __iomem *base;
- struct notifier_block reset_nb;
};
-static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int litex_reset_handler(struct sys_off_data *data)
{
- struct litex_soc_ctrl_device *soc_ctrl_dev =
- container_of(this, struct litex_soc_ctrl_device, reset_nb);
+ struct litex_soc_ctrl_device *soc_ctrl_dev = data->cb_data;
litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
return NOTIFY_DONE;
@@ -105,11 +102,9 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, soc_ctrl_dev);
-
- soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
- soc_ctrl_dev->reset_nb.priority = 128;
- error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ error = devm_register_restart_handler(&pdev->dev,
+ litex_reset_handler,
+ soc_ctrl_dev);
if (error) {
dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
error);
@@ -118,20 +113,12 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
return 0;
}
-static void litex_soc_ctrl_remove(struct platform_device *pdev)
-{
- struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
-
- unregister_restart_handler(&soc_ctrl_dev->reset_nb);
-}
-
static struct platform_driver litex_soc_ctrl_driver = {
.driver = {
.name = "litex-soc-controller",
.of_match_table = litex_soc_ctrl_of_match,
},
.probe = litex_soc_ctrl_probe,
- .remove = litex_soc_ctrl_remove,
};
module_platform_driver(litex_soc_ctrl_driver);
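[Note: devm_register_restart_handler() sits on the sys-off infrastructure: the callback receives a struct sys_off_data whose cb_data field carries the pointer given at registration, and the handler is unregistered automatically when the device is unbound, which is what lets the .remove() callback above disappear. A sketch of the shape, with hypothetical ctx/register names:

	static int my_restart(struct sys_off_data *data)
	{
		struct my_ctx *ctx = data->cb_data;	/* as registered below */

		writel(MY_RESET_VALUE, ctx->base + MY_RESET_REG);
		return NOTIFY_DONE;
	}

	/* in probe(): */
	err = devm_register_restart_handler(&pdev->dev, my_restart, ctx);
]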
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index ae42e3a9127f..16913c3ef65c 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
of_node_put(root);
- if (machine)
+ if (machine) {
soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine)
+ return -ENOMEM;
+ }
svr = loongson2_guts_get_svr();
soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 0a05ee87a0fc..455221e8de24 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -524,23 +524,5 @@ int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_eoc);
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
-{
- struct cmdq_instruction inst = { {0} };
- int err;
-
- /* insert EOC and generate IRQ for each command iteration */
- err = cmdq_pkt_eoc(pkt);
- if (err < 0)
- return err;
-
- /* JUMP to end */
- inst.op = CMDQ_CODE_JUMP;
- inst.value = CMDQ_JUMP_PASS >>
- cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
- return cmdq_pkt_append_command(pkt, inst);
-}
-EXPORT_SYMBOL(cmdq_pkt_finalize);
-
MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index 2a1adcb87d4e..f54c966138b5 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -273,23 +273,31 @@ static int mtk_devapc_probe(struct platform_device *pdev)
return -EINVAL;
devapc_irq = irq_of_parse_and_map(node, 0);
- if (!devapc_irq)
- return -EINVAL;
+ if (!devapc_irq) {
+ ret = -EINVAL;
+ goto err;
+ }
ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
- if (IS_ERR(ctx->infra_clk))
- return -EINVAL;
+ if (IS_ERR(ctx->infra_clk)) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
if (ret)
- return ret;
+ goto err;
platform_set_drvdata(pdev, ctx);
start_devapc(ctx);
return 0;
+
+err:
+ iounmap(ctx->infra_base);
+ return ret;
}
static void mtk_devapc_remove(struct platform_device *pdev)
@@ -297,6 +305,7 @@ static void mtk_devapc_remove(struct platform_device *pdev)
struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
stop_devapc(ctx);
+ iounmap(ctx->infra_base);
}
static struct platform_driver mtk_devapc_driver = {
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 9fdc0ef79202..0bcd85826375 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -2518,8 +2518,8 @@ static int pwrap_probe(struct platform_device *pdev)
}
}
- ret = devm_clk_bulk_get_all_enable(wrp->dev, &clk);
- if (ret)
+ ret = devm_clk_bulk_get_all_enabled(wrp->dev, &clk);
+ if (ret < 0)
return dev_err_probe(wrp->dev, ret,
"failed to get clocks\n");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 74b9121240f8..58e63cf0036b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -139,7 +139,7 @@ config QCOM_RAMP_CTRL
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select QCOM_SCM
help
The Qualcomm remote filesystem memory driver is used for allocating
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 32c3bc887cef..56823b6a2fac 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -142,6 +142,7 @@ struct qcom_llcc_config {
bool skip_llcc_cfg;
bool no_edac;
bool irq_configured;
+ bool no_broadcast_register;
};
struct qcom_sct_config {
@@ -154,6 +155,38 @@ enum llcc_reg_offset {
LLCC_COMMON_STATUS0,
};
+static const struct llcc_slice_config ipq5424_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xFFFF,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ .alloc_oneway_en = true,
+ .ovcap_en = true,
+ .ovcap_prio = true,
+ .vict_prio = true,
+ },
+ {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 256,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xF000,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ },
+};
+
static const struct llcc_slice_config sa8775p_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3004,6 +3037,7 @@ static const struct llcc_slice_config x1e80100_data[] = {
.fixed_size = true,
.bonus_ways = 0xfff,
.cache_mode = 0,
+ .activate_on_init = true,
}, {
.usecase_id = LLCC_CAMEXP0,
.slice_id = 4,
@@ -3185,6 +3219,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = {
},
};
+static const struct qcom_llcc_config ipq5424_cfg[] = {
+ {
+ .sct_data = ipq5424_data,
+ .size = ARRAY_SIZE(ipq5424_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .no_broadcast_register = true,
+ },
+};
+
static const struct qcom_llcc_config sa8775p_cfg[] = {
{
.sct_data = sa8775p_data,
@@ -3360,6 +3404,11 @@ static const struct qcom_sct_config qdu1000_cfgs = {
.num_config = ARRAY_SIZE(qdu1000_cfg),
};
+static const struct qcom_sct_config ipq5424_cfgs = {
+ .llcc_config = ipq5424_cfg,
+ .num_config = ARRAY_SIZE(ipq5424_cfg),
+};
+
static const struct qcom_sct_config sa8775p_cfgs = {
.llcc_config = sa8775p_cfg,
.num_config = ARRAY_SIZE(sa8775p_cfg),
@@ -3957,8 +4006,12 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
if (IS_ERR(drv_data->bcast_regmap)) {
- ret = PTR_ERR(drv_data->bcast_regmap);
- goto err;
+ if (cfg->no_broadcast_register) {
+ drv_data->bcast_regmap = regmap;
+ } else {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
}
/* Extract version of the IP */
@@ -4029,6 +4082,7 @@ err:
}
static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
{ .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
{ .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 328b6153b2be..71be378d2e43 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
- struct pdr_service *pds;
mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
@@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->lock);
/* Service pending lookup requests */
- mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (pds->need_locator_lookup)
- schedule_work(&pdr->locator_work);
- }
- mutex_unlock(&pdr->list_lock);
+ schedule_work(&pdr->locator_work);
return 0;
}
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index caf3f63d940e..cde19cdfd3c7 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -100,15 +101,13 @@ void pmic_glink_client_register(struct pmic_glink_client *client)
struct pmic_glink *pg = client->pg;
unsigned long flags;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
spin_lock_irqsave(&pg->client_lock, flags);
list_add(&client->node, &pg->clients);
client->pdr_notify(client->priv, pg->client_state);
spin_unlock_irqrestore(&pg->client_lock, flags);
- mutex_unlock(&pg->state_lock);
-
}
EXPORT_SYMBOL_GPL(pmic_glink_client_register);
@@ -119,26 +118,25 @@ int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
unsigned long start;
int ret;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
if (!pg->ept) {
- ret = -ECONNRESET;
- } else {
- start = jiffies;
- for (;;) {
- ret = rpmsg_send(pg->ept, data, len);
- if (ret != -EAGAIN)
- break;
-
- if (timeout_reached) {
- ret = -ETIMEDOUT;
- break;
- }
-
- usleep_range(1000, 5000);
- timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ return -ECONNRESET;
+ }
+
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
}
+
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
}
- mutex_unlock(&pg->state_lock);
return ret;
}
@@ -227,51 +225,42 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
{
struct pmic_glink *pg = priv;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->pdr_state = state;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
}
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
- struct pmic_glink *pg = __pmic_glink;
- int ret = 0;
+ struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
- if (!pg) {
- ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
- goto out_unlock;
- }
+ guard(mutex)(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
dev_set_drvdata(&rpdev->dev, pg);
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = rpdev->ept;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
- return ret;
+ return 0;
}
static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
if (!pg)
- goto out_unlock;
+ return;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = NULL;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
}
static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
@@ -378,9 +367,8 @@ static void pmic_glink_remove(struct platform_device *pdev)
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
__pmic_glink = NULL;
- mutex_unlock(&__pmic_glink_lock);
}
static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
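[Note: guard(mutex)(&lock), from linux/cleanup.h, acquires the mutex and schedules the unlock for scope exit, which is what makes the new early returns above safe without a goto ladder. A minimal sketch, with a hypothetical context struct:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static int set_state(struct my_ctx *c, int state)
	{
		guard(mutex)(&c->lock);	/* unlocked automatically at any return */

		if (!c->ready)
			return -ECONNRESET;	/* lock released here too */

		c->state = state;
		return 0;
	}

The same mechanism covers the nested case in pmic_glink_rpmsg_probe(): two guards in one scope release in reverse order of acquisition.]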
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 463b1c528831..bd06ce161804 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -5,6 +5,7 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -114,7 +115,7 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
* The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
* one ack at a time.
*/
- mutex_lock(&altmode->lock);
+ guard(mutex)(&altmode->lock);
req.hdr.owner = cpu_to_le32(altmode->owner_id);
req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
@@ -125,18 +126,16 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
ret = pmic_glink_send(altmode->client, &req, sizeof(req));
if (ret) {
dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
- goto out_unlock;
+ return ret;
}
left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
if (!left) {
dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
- ret = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
-out_unlock:
- mutex_unlock(&altmode->lock);
- return ret;
+ return 0;
}
static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 6e30f08761aa..154ca5beb471 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -553,6 +553,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7225", .data = sm6350_domains, },
{ .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
{ .compatible = "qcom,sm8250", .data = sm8250_domains, },
@@ -561,6 +562,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
{ .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
+ { .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
{},
};
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 33603b8fd8f3..1b32469f2789 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -125,7 +125,7 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct class rmtfs_class = {
+static const struct class rmtfs_class = {
.name = "rmtfs",
};
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d9bfac6c54fb..cc5be8019b6a 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -112,7 +112,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (args.args_count != 1) {
dev_err(dev, "invalid #qcom,smem-state-cells\n");
- return ERR_PTR(-EINVAL);
+ state = ERR_PTR(-EINVAL);
+ goto put;
}
state = of_node_to_state(args.np);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4783ab1adb8d..a3e88ced328a 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -365,7 +365,7 @@ static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
- seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
+ seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
}
static struct irq_chip smp2p_irq_chip = {
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 62fadfe44a09..18d7f1be9093 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -451,6 +451,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCS9100) },
{ qcom_board_id(QCS8300) },
{ qcom_board_id(QCS8275) },
+ { qcom_board_id(QCS9075) },
{ qcom_board_id(QCS615) },
};
@@ -796,7 +797,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
if (!qs->attr.soc_id || !qs->attr.revision)
return -ENOMEM;
- if (offsetof(struct socinfo, serial_num) <= item_size) {
+ if (offsetofend(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
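[Note: the socinfo change above deserves a gloss: offsetof() is the byte offset where serial_num starts, so the old test accepted items that end mid-field; offsetofend() (start plus size) is what must fit within item_size before the field can be read. A tiny illustration with made-up offsets:

	/* suppose serial_num sits at offset 8 and is 4 bytes wide */
	/* item_size == 8:                                         */
	/*   offsetof(...)    <= 8  ->  8 <= 8, passes (wrong)     */
	/*   offsetofend(...) <= 8  -> 12 <= 8, fails  (right)     */
]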
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 9f7fe02310b9..6d2e135eed89 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -345,6 +345,11 @@ config ARCH_R9A09G011
help
This enables support for the Renesas RZ/V2M SoC.
+config ARCH_R9A09G047
+ bool "ARM64 Platform support for RZ/G3E"
+ help
+ This enables support for the Renesas RZ/G3E SoC variants.
+
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d8c53cec7f37..dd5256e5aae1 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -126,7 +126,7 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
if (ret)
return ret;
}
- return ret;
+ return 0;
}
static bool tensor_is_atomic(unsigned int reg)
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
index 84ab46c9d9f5..6215c6a84fbe 100644
--- a/drivers/soc/tegra/cbb/tegra-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -69,19 +69,12 @@ static int tegra_cbb_err_show(struct seq_file *file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(tegra_cbb_err);
-static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+static void tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
{
static struct dentry *root;
- if (!root) {
+ if (!root)
root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
- if (IS_ERR_OR_NULL(root)) {
- pr_err("%s(): could not create debugfs node\n", __func__);
- return PTR_ERR(root);
- }
- }
-
- return 0;
}
void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
@@ -148,13 +141,8 @@ int tegra_cbb_register(struct tegra_cbb *cbb)
{
int ret;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- ret = tegra_cbb_err_debugfs_init(cbb);
- if (ret) {
- dev_err(cbb->dev, "failed to create debugfs\n");
- return ret;
- }
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_cbb_err_debugfs_init(cbb);
/* register interrupt handler for errors due to different initiators */
ret = cbb->ops->interrupt_enable(cbb);
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index 5cf0e8c34164..c74629af9bb5 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -277,7 +277,7 @@ static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234
* which timed out.
* a) Get block number from the index of set bit in
* <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
- * b) Get address of register repective to block number i.e.
+ * b) Get address of register respective to block number i.e.
* <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
* c) Read the register in above step to get client_id which
* timed out as per the set bits.
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index eb14e5ff5a0a..e24ab5f7d2bf 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -647,15 +647,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
};
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
- { .start = 0x01c, .end = 0x0c8 },
- { .start = 0x12c, .end = 0x184 },
+ { .start = 0x01c, .end = 0x064 },
+ { .start = 0x084, .end = 0x0a0 },
+ { .start = 0x0a4, .end = 0x0c8 },
+ { .start = 0x12c, .end = 0x164 },
+ { .start = 0x16c, .end = 0x184 },
{ .start = 0x190, .end = 0x198 },
{ .start = 0x1a0, .end = 0x204 },
- { .start = 0x21c, .end = 0x250 },
- { .start = 0x25c, .end = 0x2f0 },
+ { .start = 0x21c, .end = 0x2f0 },
{ .start = 0x310, .end = 0x3d8 },
- { .start = 0x400, .end = 0x4f0 },
- { .start = 0x4f8, .end = 0x7e8 },
+ { .start = 0x400, .end = 0x420 },
+ { .start = 0x444, .end = 0x490 },
+ { .start = 0x4bc, .end = 0x4f0 },
+ { .start = 0x4f8, .end = 0x54c },
+ { .start = 0x57c, .end = 0x7e8 },
{ .start = 0x8d0, .end = 0x8d8 },
{ .start = 0xacc, .end = 0xf00 }
};
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index c36364522157..193266f5e3f9 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -103,19 +103,15 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0) {
- ret = nvec;
- goto unlock;
- }
+ if (nvec <= 0)
+ return nvec;
/* Use alloc ALL as it's unclear whether there are gaps in the indices */
ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
-unlock:
- msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 96a7f9709720..5a54b10daf77 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -384,7 +384,7 @@ static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
return slave_stat;
}
-static int amd_sdw_compute_params(struct sdw_bus *bus)
+static int amd_sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct sdw_transport_data t_data = {0};
struct sdw_master_runtime *m_rt;
@@ -410,7 +410,7 @@ static int amd_sdw_compute_params(struct sdw_bus *bus)
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
port_bo, port_bo >> 8, hstart, hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -1190,6 +1190,7 @@ static int __maybe_unused amd_resume_runtime(struct device *dev)
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
return amd_sdw_clock_stop_exit(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
if (val) {
val |= AMD_SDW_CLK_RESUME_REQ;
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index d1dc62c34f1c..9b295fc9acd5 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -813,6 +813,16 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
}
EXPORT_SYMBOL(sdw_extract_slave_id);
+bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
+{
+ /*
+ * Dynamic scaling is defined by SDCA. However, some devices expose the class ID but
+ * can't support dynamic scaling. We might need a quirk to handle such devices.
+ */
+ return slave->id.class_id;
+}
+EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
+
static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
@@ -1276,23 +1286,12 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
return ret;
}
-static int sdw_slave_set_frequency(struct sdw_slave *slave)
+int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
{
u32 mclk_freq = slave->bus->prop.mclk_freq;
u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
unsigned int scale;
u8 scale_index;
- u8 base;
- int ret;
-
- /*
- * frequency base and scale registers are required for SDCA
- * devices. They may also be used for 1.2+/non-SDCA devices.
- * Driver can set the property, we will need a DisCo property
- * to discover this case from platform firmware.
- */
- if (!slave->id.class_id && !slave->prop.clock_reg_supported)
- return 0;
if (!mclk_freq) {
dev_err(&slave->dev,
@@ -1311,19 +1310,19 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
*/
if (!(19200000 % mclk_freq)) {
mclk_freq = 19200000;
- base = SDW_SCP_BASE_CLOCK_19200000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_19200000_HZ;
} else if (!(22579200 % mclk_freq)) {
mclk_freq = 22579200;
- base = SDW_SCP_BASE_CLOCK_22579200_HZ;
+ *base = SDW_SCP_BASE_CLOCK_22579200_HZ;
} else if (!(24576000 % mclk_freq)) {
mclk_freq = 24576000;
- base = SDW_SCP_BASE_CLOCK_24576000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_24576000_HZ;
} else if (!(32000000 % mclk_freq)) {
mclk_freq = 32000000;
- base = SDW_SCP_BASE_CLOCK_32000000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_32000000_HZ;
} else if (!(96000000 % mclk_freq)) {
mclk_freq = 24000000;
- base = SDW_SCP_BASE_CLOCK_24000000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_24000000_HZ;
} else {
dev_err(&slave->dev,
"Unsupported clock base, mclk %d\n",
@@ -1354,6 +1353,34 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
}
scale_index++;
+ dev_dbg(&slave->dev,
+ "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
+ *base, scale_index, mclk_freq, curr_freq);
+
+ return scale_index;
+}
+EXPORT_SYMBOL(sdw_slave_get_scale_index);
+
+static int sdw_slave_set_frequency(struct sdw_slave *slave)
+{
+ int scale_index;
+ u8 base;
+ int ret;
+
+ /*
+ * frequency base and scale registers are required for SDCA
+ * devices. They may also be used for 1.2+/non-SDCA devices.
+ * Driver can set the property directly, for now there's no
+ * DisCo property to discover support for the scaling registers
+ * from platform firmware.
+ */
+ if (!slave->id.class_id && !slave->prop.clock_reg_supported)
+ return 0;
+
+ scale_index = sdw_slave_get_scale_index(slave, &base);
+ if (scale_index < 0)
+ return scale_index;
+
ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
if (ret < 0) {
dev_err(&slave->dev,
@@ -1373,10 +1400,6 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
dev_err(&slave->dev,
"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
- dev_dbg(&slave->dev,
- "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
- base, scale_index, mclk_freq, curr_freq);
-
return ret;
}
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index fda6b24ac2da..fc990171b3f7 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -90,6 +90,7 @@ int sdw_find_col_index(int col);
* @transport_params: Transport parameters
* @port_params: Port parameters
* @port_node: List node for Master or Slave port_list
+ * @lane: Which lane is used
*
* SoundWire spec has no mention of ports for Master interface but the
* concept is logically extended.
@@ -100,6 +101,7 @@ struct sdw_port_runtime {
struct sdw_transport_params transport_params;
struct sdw_port_params port_params;
struct list_head port_node;
+ unsigned int lane;
};
/**
@@ -149,6 +151,7 @@ struct sdw_transport_data {
int hstop;
int block_offset;
int sub_block_offset;
+ unsigned int lane;
};
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 77dc094075e1..e98d5db81b1c 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -167,9 +167,6 @@ static int sdw_drv_remove(struct device *dev)
slave->probed = false;
- if (slave->prop.use_domain_irq)
- sdw_irq_dispose_mapping(slave);
-
mutex_unlock(&slave->sdw_dev_lock);
if (drv->remove)
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index b9316207c3ab..59965f43c2fb 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -18,6 +18,7 @@
struct sdw_group_params {
unsigned int rate;
+ unsigned int lane;
int full_bw;
int payload_bw;
int hwidth;
@@ -27,6 +28,7 @@ struct sdw_group {
unsigned int count;
unsigned int max_size;
unsigned int *rates;
+ unsigned int *lanes;
};
void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
@@ -48,6 +50,9 @@ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
slave_total_ch = 0;
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ if (p_rt->lane != t_data->lane)
+ continue;
+
ch = hweight32(p_rt->ch_mask);
sdw_fill_xport_params(&p_rt->transport_params,
@@ -56,7 +61,7 @@ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
sample_int, port_bo, port_bo >> 8,
t_data->hstart,
t_data->hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -105,11 +110,13 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
t_data.hstart = hstart;
list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ if (p_rt->lane != params->lane)
+ continue;
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
*port_bo, (*port_bo) >> 8, hstart, hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -131,6 +138,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
(*port_bo) += bps * ch;
}
+ t_data.lane = params->lane;
sdw_compute_slave_ports(m_rt, &t_data);
}
@@ -138,69 +146,107 @@ static void _sdw_compute_port_params(struct sdw_bus *bus,
struct sdw_group_params *params, int count)
{
struct sdw_master_runtime *m_rt;
- int hstop = bus->params.col - 1;
- int port_bo, i;
+ int port_bo, i, l;
+ int hstop;
/* Run loop for all groups to compute transport parameters */
- for (i = 0; i < count; i++) {
- port_bo = 1;
+ for (l = 0; l < SDW_MAX_LANES; l++) {
+ if (l > 0 && !bus->lane_used_bandwidth[l])
+ continue;
+ /* reset hstop for each lane */
+ hstop = bus->params.col - 1;
+ for (i = 0; i < count; i++) {
+ if (params[i].lane != l)
+ continue;
+ port_bo = 1;
- list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
- }
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
+ }
- hstop = hstop - params[i].hwidth;
+ hstop = hstop - params[i].hwidth;
+ }
}
}
static int sdw_compute_group_params(struct sdw_bus *bus,
+ struct sdw_stream_runtime *stream,
struct sdw_group_params *params,
- int *rates, int count)
+ struct sdw_group *group)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
int sel_col = bus->params.col;
unsigned int rate, bps, ch;
- int i, column_needed = 0;
+ int i, l, column_needed;
/* Calculate bandwidth per group */
- for (i = 0; i < count; i++) {
- params[i].rate = rates[i];
+ for (i = 0; i < group->count; i++) {
+ params[i].rate = group->rates[i];
+ params[i].lane = group->lanes[i];
params[i].full_bw = bus->params.curr_dr_freq / params[i].rate;
}
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- rate = m_rt->stream->params.rate;
- bps = m_rt->stream->params.bps;
- ch = m_rt->ch_count;
+ if (m_rt->stream == stream) {
+ /* Only runtime during prepare should be added */
+ if (stream->state != SDW_STREAM_CONFIGURED)
+ continue;
+ } else {
+ /*
+ * Include runtimes with running (ENABLED state) and paused (DISABLED state)
+ * streams
+ */
+ if (m_rt->stream->state != SDW_STREAM_ENABLED &&
+ m_rt->stream->state != SDW_STREAM_DISABLED)
+ continue;
+ }
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ rate = m_rt->stream->params.rate;
+ bps = m_rt->stream->params.bps;
+ ch = hweight32(p_rt->ch_mask);
- for (i = 0; i < count; i++) {
- if (rate == params[i].rate)
- params[i].payload_bw += bps * ch;
+ for (i = 0; i < group->count; i++) {
+ if (rate == params[i].rate && p_rt->lane == params[i].lane)
+ params[i].payload_bw += bps * ch;
+ }
}
}
- for (i = 0; i < count; i++) {
- params[i].hwidth = (sel_col *
- params[i].payload_bw + params[i].full_bw - 1) /
- params[i].full_bw;
+ for (l = 0; l < SDW_MAX_LANES; l++) {
+ if (l > 0 && !bus->lane_used_bandwidth[l])
+ continue;
+ /* reset column_needed for each lane */
+ column_needed = 0;
+ for (i = 0; i < group->count; i++) {
+ if (params[i].lane != l)
+ continue;
+
+ params[i].hwidth = (sel_col * params[i].payload_bw +
+ params[i].full_bw - 1) / params[i].full_bw;
- column_needed += params[i].hwidth;
+ column_needed += params[i].hwidth;
+ /* There is no control column for lane 1 and above */
+ if (column_needed > sel_col)
+ return -EINVAL;
+ /* Column 0 is control column on lane 0 */
+ if (params[i].lane == 0 && column_needed > sel_col - 1)
+ return -EINVAL;
+ }
}
- if (column_needed > sel_col - 1)
- return -EINVAL;
return 0;
}
static int sdw_add_element_group_count(struct sdw_group *group,
- unsigned int rate)
+ unsigned int rate, unsigned int lane)
{
int num = group->count;
int i;
for (i = 0; i <= num; i++) {
- if (rate == group->rates[i])
+ if (rate == group->rates[i] && lane == group->lanes[i])
break;
if (i != num)
@@ -208,6 +254,7 @@ static int sdw_add_element_group_count(struct sdw_group *group,
if (group->count >= group->max_size) {
unsigned int *rates;
+ unsigned int *lanes;
group->max_size += 1;
rates = krealloc(group->rates,
@@ -215,10 +262,20 @@ static int sdw_add_element_group_count(struct sdw_group *group,
GFP_KERNEL);
if (!rates)
return -ENOMEM;
+
group->rates = rates;
+
+ lanes = krealloc(group->lanes,
+ (sizeof(int) * group->max_size),
+ GFP_KERNEL);
+ if (!lanes)
+ return -ENOMEM;
+
+ group->lanes = lanes;
}
- group->rates[group->count++] = rate;
+ group->rates[group->count] = rate;
+ group->lanes[group->count++] = lane;
}
return 0;
@@ -228,6 +285,7 @@ static int sdw_get_group_count(struct sdw_bus *bus,
struct sdw_group *group)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
unsigned int rate;
int ret = 0;
@@ -237,17 +295,32 @@ static int sdw_get_group_count(struct sdw_bus *bus,
if (!group->rates)
return -ENOMEM;
+ group->lanes = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
+ if (!group->lanes) {
+ kfree(group->rates);
+ group->rates = NULL;
+ return -ENOMEM;
+ }
+
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ if (m_rt->stream->state == SDW_STREAM_DEPREPARED)
+ continue;
+
rate = m_rt->stream->params.rate;
if (m_rt == list_first_entry(&bus->m_rt_list,
struct sdw_master_runtime,
bus_node)) {
group->rates[group->count++] = rate;
-
- } else {
- ret = sdw_add_element_group_count(group, rate);
+ }
+ /*
+ * Different ports could use different lane, add group element
+ * even if m_rt is the first entry
+ */
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ ret = sdw_add_element_group_count(group, rate, p_rt->lane);
if (ret < 0) {
kfree(group->rates);
+ kfree(group->lanes);
return ret;
}
}
@@ -260,8 +333,9 @@ static int sdw_get_group_count(struct sdw_bus *bus,
* sdw_compute_port_params: Compute transport and port parameters
*
* @bus: SDW Bus instance
+ * @stream: Soundwire stream
*/
-static int sdw_compute_port_params(struct sdw_bus *bus)
+static int sdw_compute_port_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct sdw_group_params *params = NULL;
struct sdw_group group;
@@ -281,8 +355,7 @@ static int sdw_compute_port_params(struct sdw_bus *bus)
}
/* Compute transport parameters for grouped streams */
- ret = sdw_compute_group_params(bus, params,
- &group.rates[0], group.count);
+ ret = sdw_compute_group_params(bus, stream, params, &group);
if (ret < 0)
goto free_params;
@@ -292,6 +365,7 @@ free_params:
kfree(params);
out:
kfree(group.rates);
+ kfree(group.lanes);
return ret;
}
@@ -299,7 +373,6 @@ out:
static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
{
struct sdw_master_prop *prop = &bus->prop;
- int frame_int, frame_freq;
int r, c;
for (c = 0; c < SDW_FRAME_COLS; c++) {
@@ -308,11 +381,8 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
sdw_cols[c] != prop->default_col)
continue;
- frame_int = sdw_rows[r] * sdw_cols[c];
- frame_freq = clk_freq / frame_int;
-
- if ((clk_freq - (frame_freq * SDW_FRAME_CTRL_BITS)) <
- bus->params.bandwidth)
+ if (clk_freq * (sdw_cols[c] - 1) <
+ bus->params.bandwidth * sdw_cols[c])
continue;
bus->params.row = sdw_rows[r];
@@ -324,6 +394,95 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
return -EINVAL;
}
+static bool is_clock_scaling_supported(struct sdw_bus *bus)
+{
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node)
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node)
+ if (!is_clock_scaling_supported_by_slave(s_rt->slave))
+ return false;
+
+ return true;
+}
+
+/**
+ * is_lane_connected_to_all_peripherals: Check if the given manager lane connects to all
+ * peripherals, so that all peripherals can use the manager lane.
+ *
+ * @m_rt: Manager runtime
+ * @lane: Lane number
+ */
+static bool is_lane_connected_to_all_peripherals(struct sdw_master_runtime *m_rt, unsigned int lane)
+{
+ struct sdw_slave_prop *slave_prop;
+ struct sdw_slave_runtime *s_rt;
+ int i;
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave_prop = &s_rt->slave->prop;
+ for (i = 1; i < SDW_MAX_LANES; i++) {
+ if (slave_prop->lane_maps[i] == lane) {
+ dev_dbg(&s_rt->slave->dev,
+ "M lane %d is connected to P lane %d\n",
+ lane, i);
+ break;
+ }
+ }
+ if (i == SDW_MAX_LANES) {
+ dev_dbg(&s_rt->slave->dev, "M lane %d is not connected\n", lane);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int get_manager_lane(struct sdw_bus *bus, struct sdw_master_runtime *m_rt,
+ struct sdw_slave_runtime *s_rt, unsigned int curr_dr_freq)
+{
+ struct sdw_slave_prop *slave_prop = &s_rt->slave->prop;
+ struct sdw_port_runtime *m_p_rt;
+ unsigned int required_bandwidth;
+ int m_lane;
+ int l;
+
+ for (l = 1; l < SDW_MAX_LANES; l++) {
+ if (!slave_prop->lane_maps[l])
+ continue;
+
+ required_bandwidth = 0;
+ list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
+ required_bandwidth += m_rt->stream->params.rate *
+ hweight32(m_p_rt->ch_mask) *
+ m_rt->stream->params.bps;
+ }
+ if (required_bandwidth <=
+ curr_dr_freq - bus->lane_used_bandwidth[l]) {
+ /* Check if m_lane is connected to all Peripherals */
+ if (!is_lane_connected_to_all_peripherals(m_rt,
+ slave_prop->lane_maps[l])) {
+ dev_dbg(bus->dev,
+ "Not all Peripherals are connected to M lane %d\n",
+ slave_prop->lane_maps[l]);
+ continue;
+ }
+ m_lane = slave_prop->lane_maps[l];
+ dev_dbg(&s_rt->slave->dev, "M lane %d is used\n", m_lane);
+ bus->lane_used_bandwidth[l] += required_bandwidth;
+ /*
+ * Use non-zero manager lane, subtract the lane 0
+ * bandwidth that is already calculated
+ */
+ bus->params.bandwidth -= required_bandwidth;
+ return m_lane;
+ }
+ }
+
+ /* No available multi lane found, only lane 0 can be used */
+ return 0;
+}
+
/**
* sdw_compute_bus_params: Compute bus parameters
*
@@ -331,10 +490,16 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
*/
static int sdw_compute_bus_params(struct sdw_bus *bus)
{
- unsigned int curr_dr_freq = 0;
struct sdw_master_prop *mstr_prop = &bus->prop;
- int i, clk_values, ret;
+ struct sdw_slave_prop *slave_prop;
+ struct sdw_port_runtime *m_p_rt;
+ struct sdw_port_runtime *s_p_rt;
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ unsigned int curr_dr_freq = 0;
+ int i, l, clk_values, ret;
bool is_gear = false;
+ int m_lane = 0;
u32 *clk_buf;
if (mstr_prop->num_clk_gears) {
@@ -349,6 +514,10 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
clk_buf = NULL;
}
+ /* If dynamic scaling is not supported, don't try higher freq */
+ if (!is_clock_scaling_supported(bus))
+ clk_values = 1;
+
for (i = 0; i < clk_values; i++) {
if (!clk_buf)
curr_dr_freq = bus->params.max_dr_freq;
@@ -357,10 +526,26 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
(bus->params.max_dr_freq >> clk_buf[i]) :
clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;
- if (curr_dr_freq <= bus->params.bandwidth)
- continue;
+ if (curr_dr_freq * (mstr_prop->default_col - 1) >=
+ bus->params.bandwidth * mstr_prop->default_col)
+ break;
- break;
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ /*
+ * Use the first s_rt to find an available lane. No need to check all
+ * Peripherals because we can't use multi-lane if we can't find any
+ * available lane for the first Peripheral.
+ */
+ s_rt = list_first_entry(&m_rt->slave_rt_list,
+ struct sdw_slave_runtime, m_rt_node);
+
+ /*
+ * Find the available Manager lane that is connected to the first Peripheral.
+ */
+ m_lane = get_manager_lane(bus, m_rt, s_rt, curr_dr_freq);
+ if (m_lane > 0)
+ goto out;
+ }
/*
* TODO: Check all the Slave(s) port(s) audio modes and find
@@ -374,6 +559,38 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
__func__, bus->params.bandwidth);
return -EINVAL;
}
+out:
+ /* multilane can be used */
+ if (m_lane > 0) {
+ /* Set Peripheral lanes */
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave_prop = &s_rt->slave->prop;
+ for (l = 1; l < SDW_MAX_LANES; l++) {
+ if (slave_prop->lane_maps[l] == m_lane) {
+ list_for_each_entry(s_p_rt, &s_rt->port_list, port_node) {
+ s_p_rt->lane = l;
+ dev_dbg(&s_rt->slave->dev,
+ "Set P lane %d for port %d\n",
+ l, s_p_rt->num);
+ }
+ break;
+ }
+ }
+ }
+ /*
+ * Set Manager lanes. Configure the last m_rt in bus->m_rt_list only since
+ * we don't want to touch other m_rts that are already working.
+ */
+ list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
+ m_p_rt->lane = m_lane;
+ }
+ }
+
+ if (!mstr_prop->default_frame_rate || !mstr_prop->default_row)
+ return -EINVAL;
+
+ mstr_prop->default_col = curr_dr_freq / mstr_prop->default_frame_rate /
+ mstr_prop->default_row;
ret = sdw_select_row_col(bus, curr_dr_freq);
if (ret < 0) {
@@ -390,8 +607,9 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
* sdw_compute_params: Compute bus, transport and port parameters
*
* @bus: SDW Bus instance
+ * @stream: Soundwire stream
*/
-int sdw_compute_params(struct sdw_bus *bus)
+int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
int ret;
@@ -401,7 +619,7 @@ int sdw_compute_params(struct sdw_bus *bus)
return ret;
/* Compute transport and port params */
- ret = sdw_compute_port_params(bus);
+ ret = sdw_compute_port_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
return ret;
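[Note: the reworked clock checks above (in sdw_select_row_col() and sdw_compute_bus_params()) replace the old frame-frequency division with a cross-multiplication. A hedged reconstruction of the arithmetic: one column out of c carries frame control, so a double-rate clock f leaves f(c-1)/c for payload (exact for 48-row frames, where one column is exactly the 48 control bits; taller frames only make the test stricter), and a geometry is viable iff

\[
  f \cdot \frac{c-1}{c} \;\ge\; BW
  \quad\Longleftrightarrow\quad
  f\,(c-1) \;\ge\; BW \cdot c .
\]

The hunks' continue/break conditions are this inequality and its negation, evaluated with no truncating integer division in between.]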
diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c
index 0c08cebb1235..c237e6d0766b 100644
--- a/drivers/soundwire/irq.c
+++ b/drivers/soundwire/irq.c
@@ -46,14 +46,18 @@ void sdw_irq_delete(struct sdw_bus *bus)
irq_domain_remove(bus->domain);
}
+static void sdw_irq_dispose_mapping(void *data)
+{
+ struct sdw_slave *slave = data;
+
+ irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+}
+
void sdw_irq_create_mapping(struct sdw_slave *slave)
{
slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
if (!slave->irq)
dev_warn(&slave->dev, "Failed to map IRQ\n");
-}
-void sdw_irq_dispose_mapping(struct sdw_slave *slave)
-{
- irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+ devm_add_action_or_reset(&slave->dev, sdw_irq_dispose_mapping, slave);
}
diff --git a/drivers/soundwire/irq.h b/drivers/soundwire/irq.h
index 58a58046d92b..86e2318409da 100644
--- a/drivers/soundwire/irq.h
+++ b/drivers/soundwire/irq.h
@@ -16,7 +16,6 @@ int sdw_irq_create(struct sdw_bus *bus,
struct fwnode_handle *fwnode);
void sdw_irq_delete(struct sdw_bus *bus);
void sdw_irq_create_mapping(struct sdw_slave *slave);
-void sdw_irq_dispose_mapping(struct sdw_slave *slave);
#else /* CONFIG_IRQ_DOMAIN */
@@ -34,10 +33,6 @@ static inline void sdw_irq_create_mapping(struct sdw_slave *slave)
{
}
-static inline void sdw_irq_dispose_mapping(struct sdw_slave *slave)
-{
-}
-
#endif /* CONFIG_IRQ_DOMAIN */
#endif /* __SDW_IRQ_H */
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 9d59f486edbe..65afb28ef8fa 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -366,6 +366,44 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
return 0;
}
+/*
+ * In MIPI DisCo spec for SoundWire, lane mapping for a slave device is done with
+ * mipi-sdw-lane-x-mapping properties, where x is 1..7, and the values for those
+ * properties are mipi-sdw-manager-lane-x or mipi-sdw-peripheral-link-y, where x
+ * is an integer between 1 to 7 if the lane is connected to a manager lane, y is a
+ * character between A to E if the lane is connected to another peripheral lane.
+ */
+int sdw_slave_read_lane_mapping(struct sdw_slave *slave)
+{
+ struct sdw_slave_prop *prop = &slave->prop;
+ struct device *dev = &slave->dev;
+ char prop_name[30];
+ const char *prop_val;
+ size_t len;
+ int ret, i;
+ u8 lane;
+
+ for (i = 0; i < SDW_MAX_LANES; i++) {
+ snprintf(prop_name, sizeof(prop_name), "mipi-sdw-lane-%d-mapping", i);
+ ret = device_property_read_string(dev, prop_name, &prop_val);
+ if (ret)
+ continue;
+
+ len = strlen(prop_val);
+ if (len < 1)
+ return -EINVAL;
+
+ /* The last character is enough to identify the connection */
+ ret = kstrtou8(&prop_val[len - 1], 10, &lane);
+ if (ret)
+ return ret;
+ if (in_range(lane, 1, SDW_MAX_LANES - 1))
+ prop->lane_maps[i] = lane;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sdw_slave_read_lane_mapping);
+
/**
* sdw_slave_read_prop() - Read Slave properties
* @slave: SDW Slave
@@ -486,6 +524,6 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
prop->sink_ports, "sink");
- return 0;
+ return sdw_slave_read_lane_mapping(slave);
}
EXPORT_SYMBOL(sdw_slave_read_prop);
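[Note: a worked reading of the lane-mapping parser above, using a hypothetical firmware property:

	/* mipi-sdw-lane-1-mapping = "mipi-sdw-manager-lane-2";          */
	/* last character: '2'                                           */
	/* kstrtou8("2", 10, &lane)            -> lane = 2               */
	/* in_range(2, 1, SDW_MAX_LANES - 1)   -> prop->lane_maps[1] = 2 */

Peripheral-link values such as "mipi-sdw-peripheral-link-B" end in a letter, so kstrtou8() fails and, as written, the function returns that error rather than skipping the entry.]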
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index e00c5ac496a6..0f45e3404756 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1072,7 +1072,7 @@ static const struct sdw_master_ops qcom_swrm_ops = {
.pre_bank_switch = qcom_swrm_pre_bank_switch,
};
-static int qcom_swrm_compute_params(struct sdw_bus *bus)
+static int qcom_swrm_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
struct sdw_master_runtime *m_rt;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 7aa4900dcf31..e9df503332bb 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -629,8 +629,44 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
static int sdw_program_params(struct sdw_bus *bus, bool prepare)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_slave *slave;
int ret = 0;
+ u32 addr1;
+
+ /* Check if all Peripherals comply with SDCA */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num_sticky)
+ continue;
+ if (!is_clock_scaling_supported_by_slave(slave)) {
+ dev_dbg(&slave->dev, "The Peripheral doesn't comply with SDCA\n");
+ goto manager_runtime;
+ }
+ }
+
+ if (bus->params.next_bank)
+ addr1 = SDW_SCP_BUSCLOCK_SCALE_B1;
+ else
+ addr1 = SDW_SCP_BUSCLOCK_SCALE_B0;
+
+ /* Program SDW_SCP_BUSCLOCK_SCALE if all Peripherals comply with SDCA */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ int scale_index;
+ u8 base;
+
+ if (!slave->dev_num_sticky)
+ continue;
+ scale_index = sdw_slave_get_scale_index(slave, &base);
+ if (scale_index < 0)
+ return scale_index;
+
+ ret = sdw_write_no_pm(slave, addr1, scale_index);
+ if (ret < 0) {
+ dev_err(&slave->dev, "SDW_SCP_BUSCLOCK_SCALE register write failed\n");
+ return ret;
+ }
+ }
+manager_runtime:
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
/*
@@ -1383,7 +1419,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
/* Compute params */
if (bus->compute_params) {
- ret = bus->compute_params(bus);
+ ret = bus->compute_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute params failed: %d\n",
ret);
@@ -1642,9 +1678,19 @@ EXPORT_SYMBOL(sdw_disable_stream);
static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned int multi_lane_bandwidth;
+ unsigned int bandwidth;
struct sdw_bus *bus;
+ int state = stream->state;
int ret = 0;
+ /*
+ * first mark the state as DEPREPARED so that it is not taken into account
+ * for bit allocation
+ */
+ stream->state = SDW_STREAM_DEPREPARED;
+
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
bus = m_rt->bus;
/* De-prepare port(s) */
@@ -1652,19 +1698,34 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
if (ret < 0) {
dev_err(bus->dev,
"De-prepare port(s) failed: %d\n", ret);
+ stream->state = state;
return ret;
}
+ multi_lane_bandwidth = 0;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ if (!p_rt->lane)
+ continue;
+
+ bandwidth = m_rt->stream->params.rate * hweight32(p_rt->ch_mask) *
+ m_rt->stream->params.bps;
+ multi_lane_bandwidth += bandwidth;
+ bus->lane_used_bandwidth[p_rt->lane] -= bandwidth;
+ if (!bus->lane_used_bandwidth[p_rt->lane])
+ p_rt->lane = 0;
+ }
/* TODO: Update this during Device-Device support */
- bus->params.bandwidth -= m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ bandwidth = m_rt->stream->params.rate * m_rt->ch_count * m_rt->stream->params.bps;
+ bus->params.bandwidth -= bandwidth - multi_lane_bandwidth;
/* Compute params */
if (bus->compute_params) {
- ret = bus->compute_params(bus);
+ ret = bus->compute_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute params failed: %d\n",
ret);
+ stream->state = state;
return ret;
}
}
@@ -1673,11 +1734,11 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
+ stream->state = state;
return ret;
}
}
- stream->state = SDW_STREAM_DEPREPARED;
return do_bank_switch(stream);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f51f9466e518..ea8a31032927 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -542,6 +542,18 @@ config SPI_JCORE
This enables support for the SPI master controller in the J-Core
synthesizable, open source SoC.
+config SPI_KSPI2
+ tristate "Support for KEBA SPI master type 2 hardware"
+ depends on HAS_IOMEM
+ depends on KEBA_CP500 || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This driver supports the KEBA SPI master type 2 FPGA implementation,
+ as found on CP500 devices for example.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-kspi2.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aea5e54de195..9db7554c1864 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_KSPI2) += spi-kspi2.o
obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 316bce577081..244ac0106862 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -11,11 +11,15 @@
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -34,6 +38,7 @@
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */
+#define QSPI_SR2 0x0024 /* SAMA7G5 Status Register */
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
@@ -44,16 +49,32 @@
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+#define QSPI_REFRESH 0x0050 /* Refresh Register */
+#define QSPI_WRACNT 0x0054 /* Write Access Counter Register */
+#define QSPI_DLLCFG 0x0058 /* DLL Configuration Register */
+#define QSPI_PCALCFG 0x005C /* Pad Calibration Configuration Register */
+#define QSPI_PCALBP 0x0060 /* Pad Calibration Bypass Register */
+#define QSPI_TOUT 0x0064 /* Timeout Register */
+
#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
#define QSPI_VERSION 0x00FC /* Version Register */
+#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000
+#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000
/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_DLLON BIT(2)
+#define QSPI_CR_DLLOFF BIT(3)
+#define QSPI_CR_STPCAL BIT(4)
+#define QSPI_CR_SRFRSH BIT(5)
#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_UPDCFG BIT(8)
+#define QSPI_CR_STTFR BIT(9)
+#define QSPI_CR_RTOUT BIT(10)
#define QSPI_CR_LASTXFER BIT(24)
/* Bitfields in QSPI_MR (Mode Register) */
@@ -61,12 +82,14 @@
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_DQSDLYEN BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_OENSD BIT(15)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
@@ -80,6 +103,13 @@
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_LWRA BIT(11)
+#define QSPI_SR_QITF BIT(12)
+#define QSPI_SR_QITR BIT(13)
+#define QSPI_SR_CSFA BIT(14)
+#define QSPI_SR_CSRA BIT(15)
+#define QSPI_SR_RFRSHD BIT(16)
+#define QSPI_SR_TOUT BIT(17)
#define QSPI_SR_QSPIENS BIT(24)
#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
@@ -92,9 +122,22 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+/* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
+#define QSPI_SR2_SYNCBSY BIT(0)
+#define QSPI_SR2_QSPIENS BIT(1)
+#define QSPI_SR2_CSS BIT(2)
+#define QSPI_SR2_RBUSY BIT(3)
+#define QSPI_SR2_HIDLE BIT(4)
+#define QSPI_SR2_DLOCK BIT(5)
+#define QSPI_SR2_CALBSY BIT(6)
+
+/* Bitfields in QSPI_IAR (Instruction Address Register) */
+#define QSPI_IAR_ADDR GENMASK(31, 0)
+
/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_INST_MASK_SAMA7G5 GENMASK(15, 0)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
@@ -107,6 +150,9 @@
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_WIDTH_OCT_OUTPUT (7 << 0)
+#define QSPI_IFR_WIDTH_OCT_IO (8 << 0)
+#define QSPI_IFR_WIDTH_OCT_CMD (9 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
@@ -117,19 +163,60 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_ADDRL_SAMA7G5 GENMASK(11, 10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_DDREN BIT(15)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_END BIT(22)
+#define QSPI_IFR_SMRM BIT(23)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+#define QSPI_IFR_DQSEN BIT(25)
+#define QSPI_IFR_DDRCMDEN BIT(26)
+#define QSPI_IFR_HFWBEN BIT(27)
+#define QSPI_IFR_PROTTYP GENMASK(29, 28)
+#define QSPI_IFR_PROTTYP_STD_SPI 0
+#define QSPI_IFR_PROTTYP_TWIN_QUAD 1
+#define QSPI_IFR_PROTTYP_OCTAFLASH 2
+#define QSPI_IFR_PROTTYP_HYPERFLASH 3
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)
+#define QSPI_SMR_SCRKL BIT(2)
+
+/* Bitfields in QSPI_REFRESH (Refresh Register) */
+#define QSPI_REFRESH_DELAY_COUNTER GENMASK(31, 0)
+
+/* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
+#define QSPI_WRACNT_NBWRA GENMASK(31, 0)
+
+/* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
+#define QSPI_DLLCFG_RANGE BIT(0)
+
+/* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
+#define QSPI_PCALCFG_AAON BIT(0)
+#define QSPI_PCALCFG_DAPCAL BIT(1)
+#define QSPI_PCALCFG_DIFFPM BIT(2)
+#define QSPI_PCALCFG_CLKDIV GENMASK(6, 4)
+#define QSPI_PCALCFG_CALCNT GENMASK(16, 8)
+#define QSPI_PCALCFG_CALP GENMASK(27, 24)
+#define QSPI_PCALCFG_CALN GENMASK(31, 28)
+
+/* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
+#define QSPI_PCALBP_BPEN BIT(0)
+#define QSPI_PCALBP_CALPBP GENMASK(11, 8)
+#define QSPI_PCALBP_CALNBP GENMASK(19, 16)
+
+/* Bitfields in QSPI_TOUT (Timeout Register) */
+#define QSPI_TOUT_TCNTM GENMASK(15, 0)
/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPITEN BIT(1)
+#define QSPI_WPMR_WPCREN BIT(2)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
@@ -138,23 +225,74 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+#define ATMEL_QSPI_TIMEOUT 1000 /* ms */
+#define ATMEL_QSPI_SYNC_TIMEOUT 300 /* ms */
+#define QSPI_DLLCFG_THRESHOLD_FREQ 90000000U
+#define QSPI_CALIB_TIME 2000 /* 2 us */
+
+/* Use PIO for small transfers. */
+#define ATMEL_QSPI_DMA_MIN_BYTES 16
+/**
+ * struct atmel_qspi_pcal - Pad Calibration Clock Division
+ * @pclk_rate: peripheral clock rate.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ * cell is divided by pclk_div + 1.
+ */
+struct atmel_qspi_pcal {
+ u32 pclk_rate;
+ u8 pclk_div;
+};
+
+#define ATMEL_QSPI_PCAL_ARRAY_SIZE 8
+static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
+ {25000000, 0},
+ {50000000, 1},
+ {75000000, 2},
+ {100000000, 3},
+ {125000000, 4},
+ {150000000, 5},
+ {175000000, 6},
+ {200000000, 7},
+};
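
Each entry in this table divides the peripheral clock down to the same 25 MHz calibration clock: the calibration cell sees pclk_rate / (pclk_div + 1), so, for instance, the 150 MHz row yields 150 MHz / (5 + 1) = 25 MHz. atmel_qspi_set_pad_calibration() below walks the table for the first sufficient divider and falls back to the largest one for faster clocks.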
+
struct atmel_qspi_caps {
+ u32 max_speed_hz;
bool has_qspick;
+ bool has_gclk;
bool has_ricr;
+ bool octal;
+ bool has_dma;
};
+struct atmel_qspi_ops;
+
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
struct clk *pclk;
struct clk *qspick;
+ struct clk *gclk;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ const struct atmel_qspi_ops *ops;
resource_size_t mmap_size;
u32 pending;
+ u32 irq_mask;
u32 mr;
u32 scr;
+ u32 target_max_speed_hz;
struct completion cmd_completion;
+ struct completion dma_completion;
+ dma_addr_t mmap_phys_base;
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+};
+
+struct atmel_qspi_ops {
+ int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
+ u32 *offset);
+ int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
+ u32 offset);
};
struct atmel_qspi_mode {
@@ -174,6 +312,19 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
+static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+ { 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
+ { 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
+ { 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
+};
+
#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
@@ -196,6 +347,8 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "IMR";
case QSPI_SCR:
return "SCR";
+ case QSPI_SR2:
+ return "SR2";
case QSPI_IAR:
return "IAR";
case QSPI_ICR:
@@ -208,6 +361,18 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
return "SMR";
case QSPI_SKR:
return "SKR";
+ case QSPI_REFRESH:
+ return "REFRESH";
+ case QSPI_WRACNT:
+ return "WRACNT";
+ case QSPI_DLLCFG:
+ return "DLLCFG";
+ case QSPI_PCALCFG:
+ return "PCALCFG";
+ case QSPI_PCALBP:
+ return "PCALBP";
+ case QSPI_TOUT:
+ return "TOUT";
case QSPI_WPMR:
return "WPMR";
case QSPI_WPSR:
@@ -249,6 +414,28 @@ static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
writel_relaxed(value, aq->regs + offset);
}
+static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_SYNCBSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ return ret;
+}
+
+static int atmel_qspi_update_config(struct atmel_qspi *aq)
+{
+ int ret;
+
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
+ return atmel_qspi_reg_sync(aq);
+}
+
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
@@ -275,12 +462,31 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
return -EOPNOTSUPP;
}
+static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
+ return i;
+
+ return -EOPNOTSUPP;
+}
+
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
if (!spi_mem_default_supports_op(mem, op))
return false;
+ if (aq->caps->octal) {
+ if (atmel_qspi_sama7g5_find_mode(op) < 0)
+ return false;
+ else
+ return true;
+ }
+
if (atmel_qspi_find_mode(op) < 0)
return false;
@@ -292,6 +498,25 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
+/*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
+{
+ int ret = 0;
+
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
+
+ if (aq->caps->has_gclk)
+ ret = atmel_qspi_update_config(aq);
+ }
+
+ return ret;
+}
+
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
const struct spi_mem_op *op, u32 *offset)
{
@@ -371,14 +596,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
ifr |= QSPI_IFR_TFRTYP_MEM;
}
- /*
- * If the QSPI controller is set in regular SPI mode, set it in
- * Serial Memory Mode (SMM).
- */
- if (!(aq->mr & QSPI_MR_SMM)) {
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
- }
+ mode = atmel_qspi_set_serial_memory_mode(aq);
+ if (mode < 0)
+ return mode;
/* Clear pending interrupts */
(void)atmel_qspi_read(aq, QSPI_SR);
@@ -404,10 +624,323 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
return 0;
}
+static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
+{
+ int err = 0;
+ u32 sr;
+
+ /* Poll INSTRuction End status */
+ sr = atmel_qspi_read(aq, QSPI_SR);
+ if ((sr & irq_mask) == irq_mask)
+ return 0;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & irq_mask;
+ aq->irq_mask = irq_mask;
+ atmel_qspi_write(irq_mask, aq, QSPI_IER);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
+ err = -ETIMEDOUT;
+ atmel_qspi_write(irq_mask, aq, QSPI_IDR);
+
+ return err;
+}
+
+static int atmel_qspi_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+
+ /* Skip to the final steps if there is no data */
+ if (!op->data.nbytes)
+ return atmel_qspi_wait_for_completion(aq,
+ QSPI_SR_CMD_COMPLETED);
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ rmb();
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Synchronize AHB and APB accesses again */
+ wmb();
+ }
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
+}
+
+static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
+{
+ u32 iar, icr, ifr;
+ int mode, ret;
+
+ iar = 0;
+ icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
+
+ mode = atmel_qspi_sama7g5_find_mode(op);
+ if (mode < 0)
+ return mode;
+ ifr |= atmel_qspi_sama7g5_modes[mode].config;
+
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ (2 * op->dummy.buswidth));
+ else
+ ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ }
+
+ if (op->addr.buswidth && op->addr.nbytes) {
+ ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
+ QSPI_IFR_ADDREN;
+ iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
+ }
+
+ if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
+ ifr |= QSPI_IFR_DDREN;
+ if (op->cmd.dtr)
+ ifr |= QSPI_IFR_DDRCMDEN;
+
+ ifr |= QSPI_IFR_DQSEN;
+ }
+
+ if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
+ op->data.buswidth == 8)
+ ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
+
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
+ /* Set data enable */
+ if (op->data.nbytes) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ if (op->addr.nbytes)
+ ifr |= QSPI_IFR_TFRTYP_MEM;
+ }
+
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Clear pending interrupts */
+ (void)atmel_qspi_read(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ if (op->addr.nbytes && !op->data.nbytes)
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ atmel_qspi_write(icr, aq, QSPI_RICR);
+ } else {
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ if (op->data.nbytes)
+ atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
+ op->data.nbytes),
+ aq, QSPI_WRACNT);
+ }
+
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+
+ return atmel_qspi_update_config(aq);
+}
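
Note that QSPI_IFR_NBDUM counts dummy clock cycles, not bytes, and a DTR transfer moves two bits per pad per cycle, which halves the cycle count. As a worked example: eight dummy bytes on an 8-bit bus take 8 * 8 / 8 = 8 cycles in SDR mode, but only 8 * 8 / (2 * 8) = 4 cycles when the address, dummy and data phases are all DTR.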
+
+static void atmel_qspi_dma_callback(void *param)
+{
+ struct atmel_qspi *aq = param;
+
+ complete(&aq->dma_completion);
+}
+
+static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ unsigned int len)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
+ return -EIO;
+ }
+
+ reinit_completion(&aq->dma_completion);
+ tx->callback = atmel_qspi_dma_callback;
+ tx->callback_param = aq;
+ cookie = tx->tx_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
+ return ret;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&aq->dma_completion,
+ msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
+ if (ret == 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_src;
+ unsigned int i, len;
+ int ret;
+
+ dma_src = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
+ dma_src, len);
+ if (ret)
+ return ret;
+ dma_src += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt, loff_t loff)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ struct scatterlist *sg;
+ dma_addr_t dma_dst;
+ unsigned int i, len;
+ int ret;
+
+ dma_dst = aq->mmap_phys_base + loff;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ len = sg_dma_len(sg);
+ ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
+ sg_dma_address(sg), len);
+ if (ret)
+ return ret;
+ dma_dst += len;
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_dma_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, loff_t loff)
+{
+ struct sg_table sgt;
+ int ret;
+
+ ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
+ &sgt);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
+ else
+ ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
+
+ spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 offset)
+{
+ struct atmel_qspi *aq =
+ spi_controller_get_devdata(mem->spi->controller);
+ u32 val;
+ int ret;
+
+ if (!op->data.nbytes) {
+ /* Start the transfer. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+ }
+
+ /* Send/Receive data. */
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (aq->rx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+ }
+
+ if (op->addr.nbytes) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (aq->tx_chan && op->addr.nbytes &&
+ op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+ ret = atmel_qspi_dma_transfer(mem, op, offset);
+ if (ret)
+ return ret;
+ } else {
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+ }
+
+ ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
+ if (ret)
+ return ret;
+ }
+
+ /* Release the chip-select. */
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+ return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+}
+
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
- u32 sr, offset;
+ u32 offset;
int err;
/*
@@ -416,46 +949,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* when the flash memories overrun the controller's memory space.
*/
if (op->addr.val + op->data.nbytes > aq->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+
+ if (op->addr.nbytes > 4)
+ return -EOPNOTSUPP;
err = pm_runtime_resume_and_get(&aq->pdev->dev);
if (err < 0)
return err;
- err = atmel_qspi_set_cfg(aq, op, &offset);
+ err = aq->ops->set_cfg(aq, op, &offset);
if (err)
goto pm_runtime_put;
- /* Skip to the final steps if there is no data */
- if (op->data.nbytes) {
- /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)atmel_qspi_read(aq, QSPI_IFR);
-
- /* Send/Receive data */
- if (op->data.dir == SPI_MEM_DATA_IN)
- memcpy_fromio(op->data.buf.in, aq->mem + offset,
- op->data.nbytes);
- else
- memcpy_toio(aq->mem + offset, op->data.buf.out,
- op->data.nbytes);
-
- /* Release the chip-select */
- atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
- }
-
- /* Poll INSTRuction End status */
- sr = atmel_qspi_read(aq, QSPI_SR);
- if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
- goto pm_runtime_put;
-
- /* Wait for INSTRuction End interrupt */
- reinit_completion(&aq->cmd_completion);
- aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
- if (!wait_for_completion_timeout(&aq->cmd_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
+ err = aq->ops->transfer(mem, op, offset);
pm_runtime_put:
pm_runtime_mark_last_busy(&aq->pdev->dev);
@@ -474,6 +981,159 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
.get_name = atmel_qspi_get_name
};
+static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
+{
+ unsigned long pclk_rate;
+ u32 status, val;
+ int i, ret;
+ u8 pclk_div = 0;
+
+ pclk_rate = clk_get_rate(aq->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
+ if (pclk_rate <= pcal[i].pclk_rate) {
+ pclk_div = pcal[i].pclk_div;
+ break;
+ }
+ }
+
+ /*
+	 * Use the largest divider in case the peripheral clock exceeds
+	 * 200 MHz.
+ */
+ if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
+ pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
+
+ /* Disable QSPI while configuring the pad calibration. */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_QSPIENS) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ }
+
+ /*
+ * The analog circuitry is not shut down at the end of the calibration
+ * and the start-up time is only required for the first calibration
+	 * sequence, thus increasing performance. Set the delay between
+	 * enabling the pad calibration analog circuitry and the calibration
+	 * request to 2 us.
+ */
+ atmel_qspi_write(QSPI_PCALCFG_AAON |
+ FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
+ FIELD_PREP(QSPI_PCALCFG_CALCNT,
+ 2 * (pclk_rate / 1000000)),
+ aq, QSPI_PCALCFG);
+
+ /* DLL On + start calibration. */
+ atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+
+ /* Check synchronization status before updating configuration. */
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK) &&
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+
+	/* Refresh the analog blocks every 1 ms. */
+ atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
+ aq->target_max_speed_hz / 1000),
+ aq, QSPI_REFRESH);
+
+ return ret;
+}
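
Both delay fields above are programmed in clock ticks. QSPI_PCALCFG_CALCNT holds the 2 us start-up delay in pclk cycles, i.e. 2 * (pclk_rate / 1000000), which works out to 400 ticks at a 200 MHz pclk; QSPI_REFRESH holds the 1 ms refresh period in serial clock cycles, i.e. target_max_speed_hz / 1000, or 200000 ticks at the 200 MHz SAMA7G5 maximum.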
+
+static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
+{
+ u32 status, val;
+ int ret;
+
+ /* Disable DLL before setting GCLK */
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_DLOCK) {
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
+ atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+ else
+ atmel_qspi_write(0, aq, QSPI_DLLCFG);
+
+ ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
+ if (ret) {
+ dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
+ return ret;
+ }
+
+ /* Enable the QSPI generic clock */
+ ret = clk_prepare_enable(aq->gclk);
+ if (ret)
+ dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
+
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
+{
+ u32 val;
+ int ret;
+
+ ret = atmel_qspi_set_gclk(aq);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = atmel_qspi_set_pad_calibration(aq);
+ if (ret)
+ return ret;
+ } else {
+ atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ aq->mr |= QSPI_MR_DQSDLYEN;
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the QSPI controller. */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ val & QSPI_SR2_QSPIENS, 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (aq->caps->octal) {
+ ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
+ val & QSPI_SR_RFRSHD, 40,
+ ATMEL_QSPI_TIMEOUT);
+ }
+
+ atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
+ return ret;
+}
+
+static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
+
+ /* The controller can communicate with a single peripheral device (target). */
+ aq->target_max_speed_hz = spi->max_speed_hz;
+
+ return atmel_qspi_sama7g5_init(aq);
+}
+
static int atmel_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
@@ -488,6 +1148,9 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_setup(spi);
+
src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@@ -573,17 +1236,29 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
return 0;
}
-static void atmel_qspi_init(struct atmel_qspi *aq)
+static int atmel_qspi_init(struct atmel_qspi *aq)
{
+ int ret;
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_reg_sync(aq);
+ if (ret)
+ return ret;
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+ return 0;
+ }
+
/* Reset the QSPI controller */
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- aq->mr |= QSPI_MR_SMM;
- atmel_qspi_write(aq->mr, aq, QSPI_MR);
+ ret = atmel_qspi_set_serial_memory_mode(aq);
+ if (ret < 0)
+ return ret;
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+ return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -599,12 +1274,65 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
aq->pending |= pending;
- if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ if ((aq->pending & aq->irq_mask) == aq->irq_mask)
complete(&aq->cmd_completion);
return IRQ_HANDLED;
}
+static int atmel_qspi_dma_init(struct spi_controller *ctrl)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+ aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
+ if (IS_ERR(aq->rx_chan)) {
+ aq->rx_chan = NULL;
+ return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
+ "RX DMA channel is not available\n");
+ }
+
+ aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
+ if (IS_ERR(aq->tx_chan)) {
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
+ "TX DMA channel is not available\n");
+ goto release_rx_chan;
+ }
+
+ ctrl->dma_rx = aq->rx_chan;
+ ctrl->dma_tx = aq->tx_chan;
+ init_completion(&aq->dma_completion);
+
+ dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
+
+ return 0;
+
+release_rx_chan:
+ dma_release_channel(aq->rx_chan);
+ aq->rx_chan = NULL;
+ aq->tx_chan = NULL;
+ return ret;
+}
+
+static void atmel_qspi_dma_release(struct atmel_qspi *aq)
+{
+ if (aq->rx_chan)
+ dma_release_channel(aq->rx_chan);
+ if (aq->tx_chan)
+ dma_release_channel(aq->tx_chan);
+}
+
+static const struct atmel_qspi_ops atmel_qspi_ops = {
+ .set_cfg = atmel_qspi_set_cfg,
+ .transfer = atmel_qspi_transfer,
+};
+
+static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
+ .set_cfg = atmel_qspi_sama7g5_set_cfg,
+ .transfer = atmel_qspi_sama7g5_transfer,
+};
+
static int atmel_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -616,7 +1344,27 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (!ctrl)
return -ENOMEM;
+ aq = spi_controller_get_devdata(ctrl);
+
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ return -EINVAL;
+ }
+
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ if (aq->caps->octal)
+ ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+
+ if (aq->caps->has_gclk)
+ aq->ops = &atmel_qspi_sama7g5_ops;
+ else
+ aq->ops = &atmel_qspi_ops;
+
+ ctrl->max_speed_hz = aq->caps->max_speed_hz;
ctrl->setup = atmel_qspi_setup;
ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
@@ -625,11 +1373,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctrl);
- aq = spi_controller_get_devdata(ctrl);
-
- init_completion(&aq->cmd_completion);
- aq->pdev = pdev;
-
/* Map the registers */
aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
if (IS_ERR(aq->regs))
@@ -644,57 +1387,52 @@ static int atmel_qspi_probe(struct platform_device *pdev)
"missing AHB memory\n");
aq->mmap_size = resource_size(res);
+ aq->mmap_phys_base = (dma_addr_t)res->start;
/* Get the peripheral clock */
- aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
- aq->pclk = devm_clk_get(&pdev->dev, NULL);
+ aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(aq->pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
"missing peripheral clock\n");
- /* Enable the peripheral clock */
- err = clk_prepare_enable(aq->pclk);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to enable the peripheral clock\n");
-
- aq->caps = of_device_get_match_data(&pdev->dev);
- if (!aq->caps) {
- dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
- err = -EINVAL;
- goto disable_pclk;
- }
-
if (aq->caps->has_qspick) {
/* Get the QSPI system clock */
- aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
if (IS_ERR(aq->qspick)) {
dev_err(&pdev->dev, "missing system clock\n");
err = PTR_ERR(aq->qspick);
- goto disable_pclk;
+ return err;
}
- /* Enable the QSPI system clock */
- err = clk_prepare_enable(aq->qspick);
- if (err) {
- dev_err(&pdev->dev,
- "failed to enable the QSPI system clock\n");
- goto disable_pclk;
+ } else if (aq->caps->has_gclk) {
+ /* Get the QSPI generic clock */
+ aq->gclk = devm_clk_get(&pdev->dev, "gclk");
+ if (IS_ERR(aq->gclk)) {
+ dev_err(&pdev->dev, "missing Generic clock\n");
+ err = PTR_ERR(aq->gclk);
+ return err;
}
}
+ if (aq->caps->has_dma) {
+ err = atmel_qspi_dma_init(ctrl);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
- goto disable_qspick;
+ goto dma_release;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
- goto disable_qspick;
+ goto dma_release;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -702,7 +1440,9 @@ static int atmel_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- atmel_qspi_init(aq);
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto dma_release;
err = spi_register_controller(ctrl);
if (err) {
@@ -710,21 +1450,57 @@ static int atmel_qspi_probe(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- goto disable_qspick;
+ goto dma_release;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
-disable_qspick:
- clk_disable_unprepare(aq->qspick);
-disable_pclk:
- clk_disable_unprepare(aq->pclk);
+dma_release:
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
return err;
}
+static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_RBUSY) &&
+ (val & QSPI_SR2_HIDLE), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_QSPIENS), 40,
+ ATMEL_QSPI_SYNC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(aq->gclk);
+
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void atmel_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
@@ -735,9 +1511,17 @@ static void atmel_qspi_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0) {
+ if (aq->caps->has_dma)
+ atmel_qspi_dma_release(aq);
+
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
+ return;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
- clk_disable(aq->qspick);
- clk_disable(aq->pclk);
} else {
/*
* atmel_qspi_runtime_{suspend,resume} just disable and enable
@@ -747,9 +1531,6 @@ static void atmel_qspi_remove(struct platform_device *pdev)
dev_warn(&pdev->dev, "Failed to resume device on remove\n");
}
- clk_unprepare(aq->qspick);
- clk_unprepare(aq->pclk);
-
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -765,6 +1546,12 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
if (ret < 0)
return ret;
+ if (aq->caps->has_gclk) {
+ ret = atmel_qspi_sama7g5_suspend(aq);
+ clk_disable_unprepare(aq->pclk);
+ return ret;
+ }
+
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
pm_runtime_mark_last_busy(dev);
@@ -792,6 +1579,9 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
return ret;
}
+ if (aq->caps->has_gclk)
+ return atmel_qspi_sama7g5_init(aq);
+
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
@@ -847,6 +1637,19 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_ricr = true,
};
+static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .octal = true,
+ .has_dma = true,
+};
+
+static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .has_dma = true,
+};
+
static const struct of_device_id atmel_qspi_dt_ids[] = {
{
.compatible = "atmel,sama5d2-qspi",
@@ -856,6 +1659,15 @@ static const struct of_device_id atmel_qspi_dt_ids[] = {
.compatible = "microchip,sam9x60-qspi",
.data = &atmel_sam9x60_qspi_caps,
},
+ {
+ .compatible = "microchip,sama7g5-ospi",
+ .data = &atmel_sama7g5_ospi_caps,
+ },
+ {
+ .compatible = "microchip,sama7g5-qspi",
+ .data = &atmel_sama7g5_qspi_caps,
+ },
+
{ /* sentinel */ }
};
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index d30a21b0b05f..c85997478b81 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -298,19 +298,16 @@ static const struct amd_spi_freq amd_spi_freq[] = {
{ AMD_SPI_MIN_HZ, F_800KHz, 0},
};
-static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
- if (speed_hz < AMD_SPI_MIN_HZ)
- return -EINVAL;
-
for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
- return 0;
+ return;
amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
@@ -329,8 +326,6 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
AMD_SPI_SPD7_MASK);
}
-
- return 0;
}
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
@@ -479,6 +474,9 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
return false;
}
+ if (op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
return spi_mem_default_supports_op(mem, op);
}
@@ -672,13 +670,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct amd_spi *amd_spi;
- int ret;
amd_spi = spi_controller_get_devdata(mem->spi->controller);
- ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
- if (ret)
- return ret;
+ amd_set_spi_freq(amd_spi, op->max_freq);
if (amd_spi->version == AMD_SPI_V2)
amd_set_spi_addr_mode(amd_spi, op);
@@ -693,10 +688,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
amd_spi_mem_data_out(amd_spi, op);
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- return ret;
+ return 0;
}
static const struct spi_controller_mem_ops amd_spi_mem_ops = {
@@ -705,6 +700,10 @@ static const struct spi_controller_mem_ops amd_spi_mem_ops = {
.supports_op = amd_spi_supports_op,
};
+static const struct spi_controller_mem_caps amd_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
@@ -782,6 +781,7 @@ static int amd_spi_probe(struct platform_device *pdev)
host->setup = amd_spi_host_setup;
host->transfer_one_message = amd_spi_host_transfer;
host->mem_ops = &amd_spi_mem_ops;
+ host->mem_caps = &amd_spi_mem_caps;
host->max_transfer_size = amd_spi_max_transfer_size;
host->max_message_size = amd_spi_max_transfer_size;
diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c
index fadf6667cd51..18c9aa2cbc29 100644
--- a/drivers/spi/spi-amlogic-spifc-a1.c
+++ b/drivers/spi/spi-amlogic-spifc-a1.c
@@ -259,7 +259,7 @@ static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
size_t data_size = op->data.nbytes;
int ret;
- ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
+ ret = amlogic_spifc_a1_set_freq(spifc, op->max_freq);
if (ret)
return ret;
@@ -320,6 +320,10 @@ static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
.adjust_op_size = amlogic_spifc_a1_adjust_op_size,
};
+static const struct spi_controller_mem_caps amlogic_spifc_a1_mem_caps = {
+ .per_op_freq = true,
+};
+
static int amlogic_spifc_a1_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
@@ -356,6 +360,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->auto_runtime_pm = true;
ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;
+ ctrl->mem_caps = &amlogic_spifc_a1_mem_caps;
ctrl->min_speed_hz = SPIFC_A1_MIN_HZ;
ctrl->max_speed_hz = SPIFC_A1_MAX_HZ;
ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL |
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index a031ecb358e0..0cd37a7436d5 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -43,10 +43,13 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
-#define CQSPI_DISABLE_STIG_MODE BIT(7)
+#define CQSPI_DMA_SET_MASK BIT(7)
+#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
+#define CQSPI_DISABLE_STIG_MODE BIT(9)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
+#define CQSPI_SUPPORTS_QUAD BIT(1)
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
@@ -111,7 +114,7 @@ struct cqspi_st {
struct cqspi_driver_platdata {
u32 hwcaps_mask;
- u8 quirks;
+ u16 quirks;
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
@@ -146,6 +149,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5)
+#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6)
#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
@@ -832,6 +837,25 @@ failrd:
return ret;
}
+static void cqspi_device_reset(struct cqspi_st *cqspi)
+{
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ /*
+ * NOTE: Delay timing implementation is derived from
+ * spi_nor_hw_reset()
+ */
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1, 5);
+ writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(100, 150);
+ writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
+ usleep_range(1000, 1200);
+}
+
static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
void __iomem *reg_base = cqspi->iobase;
@@ -1409,7 +1433,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
struct cqspi_flash_pdata *f_pdata;
f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
- cqspi_configure(f_pdata, mem->spi->max_speed_hz);
+ cqspi_configure(f_pdata, op->max_freq);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
/*
@@ -1658,6 +1682,7 @@ static const struct spi_controller_mem_ops cqspi_mem_ops = {
static const struct spi_controller_mem_caps cqspi_mem_caps = {
.dtr = true,
+ .per_op_freq = true,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
@@ -1865,6 +1890,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
+ host->mode_bits |= SPI_TX_QUAD;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
cqspi->use_direct_mode_wr = true;
@@ -1886,8 +1913,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
cqspi->disable_stig_mode = true;
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,versal-ospi-1.0")) {
+ if (ddata->quirks & CQSPI_DMA_SET_MASK) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto probe_reset_failed;
@@ -1917,6 +1943,9 @@ static int cqspi_probe(struct platform_device *pdev)
host->num_chipselect = cqspi->num_chipselect;
+ if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)
+ cqspi_device_reset(cqspi);
+
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
@@ -2037,7 +2066,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
};
static const struct cqspi_driver_platdata am654_ospi = {
- .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
@@ -2054,7 +2083,17 @@ static const struct cqspi_driver_platdata socfpga_qspi = {
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK,
+ .indirect_read_dma = cqspi_versal_indirect_read_dma,
+ .get_dma_status = cqspi_get_versal_dma_status,
+};
+
+static const struct cqspi_driver_platdata versal2_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
+ | CQSPI_DMA_SET_MASK
+ | CQSPI_SUPPORT_DEVICE_RESET,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
@@ -2111,6 +2150,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "mobileye,eyeq5-ospi",
.data = &mobileye_eyeq5_ospi,
},
+ {
+ .compatible = "amd,versal2-ospi",
+ .data = &versal2_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ea517af9435f..941ecc6f59f8 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -677,7 +677,7 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
* operation. Transmit-only mode is suitable for the rest of them.
*/
cfg.dfs = 8;
- cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
+ cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq);
if (op->data.dir == SPI_MEM_DATA_IN) {
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
@@ -894,6 +894,10 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
+static const struct spi_controller_mem_caps dw_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_controller *host;
@@ -941,8 +945,10 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
host->set_cs = dw_spi_set_cs;
host->transfer_one = dw_spi_transfer_one;
host->handle_err = dw_spi_handle_err;
- if (dws->mem_ops.exec_op)
+ if (dws->mem_ops.exec_op) {
host->mem_ops = &dws->mem_ops;
+ host->mem_caps = &dw_spi_mem_caps;
+ }
host->max_speed_hz = dws->max_freq;
host->flags = SPI_CONTROLLER_GPIO_SS;
host->auto_runtime_pm = true;
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 9ec53bf0dda8..355e6a39fb41 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -522,9 +522,10 @@ static void fsl_qspi_invalidate(struct fsl_qspi *q)
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
-static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
if (q->selected == spi_get_chipselect(spi, 0))
@@ -652,7 +653,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
- fsl_qspi_select_mem(q, mem->spi);
+ fsl_qspi_select_mem(q, mem->spi, op);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
@@ -839,6 +840,10 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
.get_name = fsl_qspi_get_name,
};
+static const struct spi_controller_mem_caps fsl_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -923,6 +928,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
+ ctlr->mem_caps = &fsl_qspi_mem_caps;
fsl_qspi_default_setup(q);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 856a4a9def66..2f2082652a1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -618,7 +618,7 @@ static struct spi_controller *fsl_spi_probe(struct device *dev,
if (ret < 0)
goto err_probe;
- dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
+ dev_info(dev, "at MMIO %pa (irq = %d), %s mode\n", &mem->start,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return host;
diff --git a/drivers/spi/spi-kspi2.c b/drivers/spi/spi-kspi2.c
new file mode 100644
index 000000000000..ca73ec52ce63
--- /dev/null
+++ b/drivers/spi/spi-kspi2.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation GmbH 2024
+ *
+ * Driver for KEBA SPI host controller type 2 FPGA IP core
+ */
+
+#include <linux/iopoll.h>
+#include <linux/misc/keba.h>
+#include <linux/spi/spi.h>
+
+#define KSPI2 "kspi2"
+
+#define KSPI2_CLK_FREQ_REG 0x03
+#define KSPI2_CLK_FREQ_MASK 0x0f
+#define KSPI2_CLK_FREQ_62_5M 0x0
+#define KSPI2_CLK_FREQ_33_3M 0x1
+#define KSPI2_CLK_FREQ_125M 0x2
+#define KSPI2_CLK_FREQ_50M 0x3
+#define KSPI2_CLK_FREQ_100M 0x4
+
+#define KSPI2_CONTROL_REG 0x04
+#define KSPI2_CONTROL_CLK_DIV_MAX 0x0f
+#define KSPI2_CONTROL_CLK_DIV_MASK 0x0f
+#define KSPI2_CONTROL_CPHA 0x10
+#define KSPI2_CONTROL_CPOL 0x20
+#define KSPI2_CONTROL_CLK_MODE_MASK 0x30
+#define KSPI2_CONTROL_INIT KSPI2_CONTROL_CLK_DIV_MAX
+
+#define KSPI2_STATUS_REG 0x08
+#define KSPI2_STATUS_IN_USE 0x01
+#define KSPI2_STATUS_BUSY 0x02
+
+#define KSPI2_DATA_REG 0x0c
+
+#define KSPI2_CS_NR_REG 0x10
+#define KSPI2_CS_NR_NONE 0xff
+
+#define KSPI2_MODE_BITS (SPI_CPHA | SPI_CPOL)
+#define KSPI2_NUM_CS 255
+
+#define KSPI2_SPEED_HZ_MIN(kspi) (kspi->base_speed_hz / 65536)
+#define KSPI2_SPEED_HZ_MAX(kspi) (kspi->base_speed_hz / 2)
+
+/* the timeout is 10 times the time to transfer one byte at the slowest clock */
+#define KSPI2_XFER_TIMEOUT_US(kspi) (USEC_PER_SEC / \
+ KSPI2_SPEED_HZ_MIN(kspi) * 8 * 10)
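
For the slowest configuration this is a generous budget: with a 62.5 MHz base clock the minimum bit rate is 62500000 / 65536, roughly 954 Hz, so one byte takes about 8 / 954 s = 8.4 ms and the resulting poll timeout is on the order of 84 ms.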
+
+#define KSPI2_INUSE_SLEEP_US (2 * USEC_PER_MSEC)
+#define KSPI2_INUSE_TIMEOUT_US (10 * USEC_PER_SEC)
+
+struct kspi2 {
+ struct keba_spi_auxdev *auxdev;
+ void __iomem *base;
+ struct spi_controller *host;
+
+ u32 base_speed_hz; /* SPI base clock frequency in HZ */
+ u8 control_shadow;
+
+ struct spi_device **device;
+ int device_size;
+};
+
+static int kspi2_inuse_lock(struct kspi2 *kspi)
+{
+ u8 sts;
+ int ret;
+
+ /*
+ * The SPI controller has an IN_USE bit for locking access to the
+	 * controller. This enables the use of the SPI controller by other,
+	 * non-Linux processors.
+ *
+ * If the SPI controller is free, then the first read returns
+ * IN_USE == 0. After that the SPI controller is locked and further
+ * reads of IN_USE return 1.
+ *
+ * The SPI controller is unlocked by writing 1 into IN_USE.
+ *
+ * The IN_USE bit acts as a hardware semaphore for the SPI controller.
+	 * Poll for the semaphore, but sleep between polls to free the CPU.
+ */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_IN_USE) == 0,
+ KSPI2_INUSE_SLEEP_US, KSPI2_INUSE_TIMEOUT_US);
+ if (ret != 0)
+ dev_warn(&kspi->auxdev->auxdev.dev, "%s err!\n", __func__);
+
+ return ret;
+}
+
+static void kspi2_inuse_unlock(struct kspi2 *kspi)
+{
+ /* unlock the controller by writing 1 into IN_USE */
+ iowrite8(KSPI2_STATUS_IN_USE, kspi->base + KSPI2_STATUS_REG);
+}
+
+static int kspi2_prepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* lock hardware semaphore before actual use of controller */
+ return kspi2_inuse_lock(kspi);
+}
+
+static int kspi2_unprepare_hardware(struct spi_controller *host)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+ /* unlock hardware semaphore after actual use of controller */
+ kspi2_inuse_unlock(kspi);
+
+ return 0;
+}
+
+static u8 kspi2_calc_minimal_divider(struct kspi2 *kspi, u32 max_speed_hz)
+{
+ u8 div;
+
+ /*
+ * Divider values 2, 4, 8, 16, ..., 65536 are possible. They are coded
+	 * as 0, 1, 2, 3, ..., 15 in the CONTROL_CLK_DIV bit field.
+ */
+ for (div = 0; div < KSPI2_CONTROL_CLK_DIV_MAX; div++) {
+ if ((kspi->base_speed_hz >> (div + 1)) <= max_speed_hz)
+ return div;
+ }
+
+ /* return divider for slowest clock if loop fails to find one */
+ return KSPI2_CONTROL_CLK_DIV_MAX;
+}
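
As a worked example: with a 50 MHz base clock and a device limited to 10 MHz, the loop tries 25 MHz (div 0) and 12.5 MHz (div 1) before settling on 6.25 MHz, so it returns div 2, i.e. an actual divider of 8.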
+
+static void kspi2_write_control_reg(struct kspi2 *kspi, u8 val, u8 mask)
+{
+ /* write control register only when necessary to improve performance */
+ if (val != (kspi->control_shadow & mask)) {
+ kspi->control_shadow = (kspi->control_shadow & ~mask) | val;
+ iowrite8(kspi->control_shadow, kspi->base + KSPI2_CONTROL_REG);
+ }
+}
+
+static int kspi2_txrx_byte(struct kspi2 *kspi, u8 tx, u8 *rx)
+{
+ u8 sts;
+ int ret;
+
+ /* start transfer by writing TX byte */
+ iowrite8(tx, kspi->base + KSPI2_DATA_REG);
+
+ /* wait till finished (BUSY == 0) */
+ ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG,
+ sts, (sts & KSPI2_STATUS_BUSY) == 0,
+ 0, KSPI2_XFER_TIMEOUT_US(kspi));
+ if (ret != 0)
+ return ret;
+
+ /* read RX byte */
+ if (rx)
+ *rx = ioread8(kspi->base + KSPI2_DATA_REG);
+
+ return 0;
+}
+
+static int kspi2_process_transfer(struct kspi2 *kspi, struct spi_transfer *t)
+{
+ u8 tx = 0;
+ u8 rx;
+ int i;
+ int ret;
+
+ for (i = 0; i < t->len; i++) {
+ if (t->tx_buf)
+ tx = ((const u8 *)t->tx_buf)[i];
+
+ ret = kspi2_txrx_byte(kspi, tx, &rx);
+ if (ret)
+ return ret;
+
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = rx;
+ }
+
+ return 0;
+}
+
+static int kspi2_setup_transfer(struct kspi2 *kspi,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u32 max_speed_hz = spi->max_speed_hz;
+ u8 clk_div;
+
+ /*
+	 * spi_device (spi) carries the default parameters. Some of these can
+	 * be overridden by per-transfer parameters in spi_transfer (t).
+ */
+ if (t->bits_per_word && ((t->bits_per_word % 8) != 0)) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ t->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if (t->speed_hz && (t->speed_hz < max_speed_hz))
+ max_speed_hz = t->speed_hz;
+
+ clk_div = kspi2_calc_minimal_divider(kspi, max_speed_hz);
+ kspi2_write_control_reg(kspi, clk_div, KSPI2_CONTROL_CLK_DIV_MASK);
+
+ return 0;
+}
+
+static int kspi2_transfer_one(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ int ret;
+
+ ret = kspi2_setup_transfer(kspi, spi, t);
+ if (ret != 0)
+ return ret;
+
+ if (t->len) {
+ ret = kspi2_process_transfer(kspi, t);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_controller *host = spi->controller;
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+
+	/* the controller uses active-low chip select signals by design */
+ if (!enable)
+ iowrite8(spi_get_chipselect(spi, 0), kspi->base + KSPI2_CS_NR_REG);
+ else
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(host);
+ struct spi_device *spi = msg->spi;
+ u8 mode = 0;
+
+ /* setup SPI clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ mode |= KSPI2_CONTROL_CPHA;
+ if (spi->mode & SPI_CPOL)
+ mode |= KSPI2_CONTROL_CPOL;
+ kspi2_write_control_reg(kspi, mode, KSPI2_CONTROL_CLK_MODE_MASK);
+
+ return 0;
+}
+
+static int kspi2_setup(struct spi_device *spi)
+{
+ struct kspi2 *kspi = spi_controller_get_devdata(spi->controller);
+
+ /*
+	 * Only check the parameters here. The actual setup is done in
+	 * kspi2_prepare_message() and directly before the SPI transfer starts.
+ */
+
+ if (spi->mode & ~KSPI2_MODE_BITS) {
+ dev_err(&spi->dev, "Mode %d not supported!\n", spi->mode);
+
+ return -EINVAL;
+ }
+
+ if ((spi->bits_per_word % 8) != 0) {
+ dev_err(&spi->dev, "Word width %d not supported!\n",
+ spi->bits_per_word);
+
+ return -EINVAL;
+ }
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > KSPI2_SPEED_HZ_MAX(kspi)))
+ spi->max_speed_hz = KSPI2_SPEED_HZ_MAX(kspi);
+
+ if (spi->max_speed_hz < KSPI2_SPEED_HZ_MIN(kspi)) {
+ dev_err(&spi->dev, "Requested speed of %d Hz is too low!\n",
+ spi->max_speed_hz);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
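
With the divider range above, a 100 MHz base clock gives a usable window of roughly 1.5 kHz (100 MHz / 65536) up to 50 MHz (100 MHz / 2): requests above the window are clamped to the maximum, while requests below it are rejected with -EINVAL.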
+
+static void kspi2_unregister_devices(struct kspi2 *kspi)
+{
+ int i;
+
+ for (i = 0; i < kspi->device_size; i++) {
+ struct spi_device *device = kspi->device[i];
+
+ if (device)
+ spi_unregister_device(device);
+ }
+}
+
+static int kspi2_register_devices(struct kspi2 *kspi)
+{
+ struct spi_board_info *info = kspi->auxdev->info;
+ int i;
+
+ /* register all known SPI devices */
+ for (i = 0; i < kspi->auxdev->info_size; i++) {
+ struct spi_device *device = spi_new_device(kspi->host, &info[i]);
+
+ if (!device) {
+ kspi2_unregister_devices(kspi);
+
+ return -ENODEV;
+ }
+ kspi->device[i] = device;
+ }
+
+ return 0;
+}
+
+static void kspi2_init(struct kspi2 *kspi)
+{
+ iowrite8(KSPI2_CONTROL_INIT, kspi->base + KSPI2_CONTROL_REG);
+ kspi->control_shadow = KSPI2_CONTROL_INIT;
+
+ iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG);
+}
+
+static int kspi2_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &auxdev->dev;
+ struct spi_controller *host;
+ struct kspi2 *kspi;
+ u8 clk_reg;
+ int ret;
+
+ host = devm_spi_alloc_host(dev, sizeof(struct kspi2));
+ if (!host)
+ return -ENOMEM;
+ kspi = spi_controller_get_devdata(host);
+ kspi->auxdev = container_of(auxdev, struct keba_spi_auxdev, auxdev);
+ kspi->host = host;
+ kspi->device = devm_kcalloc(dev, kspi->auxdev->info_size,
+ sizeof(*kspi->device), GFP_KERNEL);
+ if (!kspi->device)
+ return -ENOMEM;
+ kspi->device_size = kspi->auxdev->info_size;
+ auxiliary_set_drvdata(auxdev, kspi);
+
+ kspi->base = devm_ioremap_resource(dev, &kspi->auxdev->io);
+ if (IS_ERR(kspi->base))
+ return PTR_ERR(kspi->base);
+
+ /* read the SPI base clock frequency */
+ clk_reg = ioread8(kspi->base + KSPI2_CLK_FREQ_REG);
+ switch (clk_reg & KSPI2_CLK_FREQ_MASK) {
+ case KSPI2_CLK_FREQ_62_5M:
+ kspi->base_speed_hz = 62500000; break;
+ case KSPI2_CLK_FREQ_33_3M:
+ kspi->base_speed_hz = 33333333; break;
+ case KSPI2_CLK_FREQ_125M:
+ kspi->base_speed_hz = 125000000; break;
+ case KSPI2_CLK_FREQ_50M:
+ kspi->base_speed_hz = 50000000; break;
+ case KSPI2_CLK_FREQ_100M:
+ kspi->base_speed_hz = 100000000; break;
+ default:
+ dev_err(dev, "Undefined SPI base clock frequency!\n");
+ return -ENODEV;
+ }
+
+ kspi2_init(kspi);
+
+ host->bus_num = -1;
+ host->num_chipselect = KSPI2_NUM_CS;
+ host->mode_bits = KSPI2_MODE_BITS;
+ host->setup = kspi2_setup;
+ host->prepare_transfer_hardware = kspi2_prepare_hardware;
+ host->unprepare_transfer_hardware = kspi2_unprepare_hardware;
+ host->prepare_message = kspi2_prepare_message;
+ host->set_cs = kspi2_set_cs;
+ host->transfer_one = kspi2_transfer_one;
+ ret = devm_spi_register_controller(dev, host);
+ if (ret) {
+ dev_err(dev, "Failed to register host (%d)!\n", ret);
+ return ret;
+ }
+
+ ret = kspi2_register_devices(kspi);
+ if (ret) {
+ dev_err(dev, "Failed to register devices (%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kspi2_remove(struct auxiliary_device *auxdev)
+{
+ struct kspi2 *kspi = auxiliary_get_drvdata(auxdev);
+
+ kspi2_unregister_devices(kspi);
+}
+
+static const struct auxiliary_device_id kspi2_devtype_aux[] = {
+ { .name = "keba.spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(auxiliary, kspi2_devtype_aux);
+
+static struct auxiliary_driver kspi2_driver_aux = {
+ .name = KSPI2,
+ .id_table = kspi2_devtype_aux,
+ .probe = kspi2_probe,
+ .remove = kspi2_remove,
+};
+module_auxiliary_driver(kspi2_driver_aux);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA SPI host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index abc6792e738c..a9f0f47f4759 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -187,6 +187,16 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
return false;
}
+ if (op->max_freq && mem->spi->controller->min_speed_hz &&
+ op->max_freq < mem->spi->controller->min_speed_hz)
+ return false;
+
+ if (op->max_freq &&
+ op->max_freq < mem->spi->max_speed_hz) {
+ if (!spi_mem_controller_is_capable(ctlr, per_op_freq))
+ return false;
+ }
+
return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
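Taken together, the two new checks mean a controller only sees per-op frequencies it has opted into: ops slower than the controller minimum are rejected outright, and ops slower than the device default are only accepted when the controller advertises the capability. The driver-side wiring, repeated by several controllers later in this series, looks like this (a minimal sketch; the foo_* names are placeholders, not a real driver):

	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
	{
		struct foo *priv = spi_controller_get_devdata(mem->spi->controller);

		/* program the bus clock from the per-op limit, not spi->max_speed_hz */
		foo_set_clk(priv, op->max_freq);
		return foo_run_op(priv, op);
	}

	static const struct spi_controller_mem_caps foo_mem_caps = {
		.per_op_freq = true,
	};

	/* in probe: */
	ctlr->mem_ops = &foo_mem_ops;
	ctlr->mem_caps = &foo_mem_caps;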
@@ -364,6 +374,9 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u8 *tmpbuf;
int ret;
+ /* Make sure the operation frequency is correct before going further */
+ spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
@@ -410,6 +423,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf;
xfers[xferpos].len = op->cmd.nbytes;
xfers[xferpos].tx_nbits = op->cmd.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen++;
@@ -424,6 +438,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
@@ -435,6 +450,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
xfers[xferpos].dummy_data = 1;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -450,6 +466,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
xfers[xferpos].len = op->data.nbytes;
+ xfers[xferpos].speed_hz = op->max_freq;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
@@ -528,6 +545,53 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+/**
+ * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
+ * match controller, PCB and chip limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some chips have per-op frequency limitations, so the maximum speed must be
+ * adapted per operation. This function lets SPI mem drivers clamp
+ * @op->max_freq to the maximum value supported by the device.
+ */
+void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz)
+ op->max_freq = mem->spi->max_speed_hz;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
+
+/**
+ * spi_mem_calc_op_duration() - Derive the theoretical duration (in ns) of an
+ * operation. This helps find the best variant
+ * among a list of possible choices.
+ * @op: the operation to benchmark
+ *
+ * Some chips have per-op frequency limitations, PCBs usually have their own
+ * limitations as well, and controllers can support dual, quad or even octal
+ * modes, sometimes in DTR. All these combinations make it impossible to
+ * statically list the best combination for all situations. If we want something
+ * accurate, all these combinations should be rated (e.g. with a time estimate)
+ * and the best pick should be taken based on these calculations.
+ *
+ * Returns a ns estimate for the time this op would take.
+ */
+u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
+{
+ u64 ncycles = 0;
+ u32 ns_per_cycles;
+
+ ns_per_cycles = 1000000000 / op->max_freq;
+ ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
+ ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+ ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
+
+ return ncycles * ns_per_cycles;
+}
+EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
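A worked illustration of how a caller might rank two candidate ops op_a and op_b for the same read with these helpers (op shapes and numbers are invented for the example, not taken from the patch):

	/*
	 * A: 1-1-4 read at op->max_freq = 50 MHz (20 ns/cycle)
	 *    cmd 1B@x1 = 8 cycles, addr 3B@x1 = 24, dummy 1B@x4 = 2,
	 *    data 64B@x4 = 128  ->  162 cycles  ->  3240 ns
	 * B: 4-4-4 read at op->max_freq = 30 MHz (33 ns/cycle)
	 *    cmd 1B@x4 = 2 cycles, addr 3B@x4 = 6, dummy 1B@x4 = 2,
	 *    data 64B@x4 = 128  ->  138 cycles  ->  4554 ns
	 *
	 * B needs fewer cycles but loses on the slower clock, so A wins.
	 */
	spi_mem_adjust_op_freq(mem, &op_a);
	spi_mem_adjust_op_freq(mem, &op_b);
	best = spi_mem_calc_op_duration(&op_a) <= spi_mem_calc_op_duration(&op_b)
		? &op_a : &op_b;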
+
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index ad2b5ffa6153..fa828fcaaef2 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -265,7 +265,8 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
return ret;
}
-static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
unsigned long clk_hz;
u32 control, baud_rate_val = 0;
@@ -274,11 +275,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
if (!clk_hz)
return -EINVAL;
- baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
dev_err(&spi->dev,
"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
- spi->max_speed_hz, clk_hz);
+ op->max_freq, clk_hz);
return -EINVAL;
}
@@ -399,7 +400,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
if (err)
goto error;
- err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi, op);
if (err)
goto error;
@@ -457,6 +458,10 @@ error:
static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
+ unsigned long clk_hz;
+ u32 baud_rate_val;
+
if (!spi_mem_default_supports_op(mem, op))
return false;
@@ -479,6 +484,14 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_
return false;
}
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return false;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
+ return false;
+
return true;
}
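The new supports_op check mirrors the clock setup: an op is only claimed when a legal divider exists for its frequency, instead of failing later in exec_op. Roughly, with assumed numbers (the divider limits are macros not shown in this hunk):

	/* e.g. clk_hz = 150 MHz, op->max_freq = 10 MHz:
	 *   baud_rate_val = DIV_ROUND_UP(150000000, 2 * 10000000) = 8
	 * The op is rejected if 8 lies outside [MIN_DIVIDER, MAX_DIVIDER].
	 */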
@@ -498,6 +511,10 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
.exec_op = mchp_coreqspi_exec_op,
};
+static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -540,6 +557,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->mem_caps = &mchp_coreqspi_mem_caps;
ctlr->setup = mchp_coreqspi_setup_op;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 5b6af55855ef..62ba0bd9cbb7 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -70,8 +70,7 @@
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
- CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CONTROL, control);
}
-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
{
- while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- u32 data = mchp_corespi_read(spi, REG_RX_DATA);
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+ while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+ data = mchp_corespi_read(spi, REG_RX_DATA);
spi->rx_len -= spi->n_bytes;
@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
mchp_corespi_write(spi, REG_FRAMESUP, len);
}
-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
{
- int fifo_max, i = 0;
+ int i = 0;
- fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
-
- if (intfield & INT_RXRDY) {
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
-
- if (spi->rx_len)
- mchp_corespi_read_fifo(spi);
- }
-
- if (!spi->rx_len && !spi->tx_len)
- finalise = true;
-
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
- while (spi->tx_len)
- mchp_corespi_write_fifo(spi);
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+ mchp_corespi_write_fifo(spi, fifo_max);
+ mchp_corespi_read_fifo(spi, fifo_max);
+ }
+ spi_finalize_current_transfer(host);
return 1;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 85f3bafc975d..197bf2dbe5de 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -961,7 +961,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
mtk_spi_reset(mdata);
mtk_spi_hw_init(mem->spi->controller, mem->spi);
- mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz);
+ mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);
reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
/* opcode byte len */
@@ -1122,6 +1122,10 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
.exec_op = mtk_spi_mem_exec_op,
};
+static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int mtk_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1160,6 +1164,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->ipm_design) {
mdata->dev = dev;
host->mem_ops = &mtk_spi_mem_ops;
+ host->mem_caps = &mtk_spi_mem_caps;
init_completion(&mdata->spimem_done);
}
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 809767d3145c..eeaea6a5e310 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -522,7 +522,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
int i, ret;
u8 addr[8], cmd[2];
- ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
+ ret = mxic_spi_set_freq(mxic, op->max_freq);
if (ret)
return ret;
@@ -582,6 +582,7 @@ static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
.dtr = true,
.ecc = true,
.swap16 = true,
+ .per_op_freq = true,
};
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index e6d955d964f4..43455305fdf4 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -381,6 +381,8 @@ static int mxs_spi_transfer_one(struct spi_controller *host,
if (status)
break;
+ t->effective_speed_hz = ssp->clk_rate;
+
/* De-assert on last transfer, inverted by cs_change flag */
flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
TXRX_DEASSERT_CS : 0;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 1161b9e5a4dc..bad6b30bab0e 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -705,9 +705,10 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
* Value for rest of the CS FLSHxxCR0 register would be zero.
*
*/
-static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
- unsigned long rate = spi->max_speed_hz;
+ unsigned long rate = op->max_freq;
int ret;
uint64_t size_kb;
@@ -931,7 +932,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
WARN_ON(err);
- nxp_fspi_select_mem(f, mem->spi);
+ nxp_fspi_select_mem(f, mem->spi, op);
nxp_fspi_prepare_lut(f, op);
/*
@@ -1149,6 +1150,10 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
.get_name = nxp_fspi_get_name,
};
+static const struct spi_controller_mem_caps nxp_fspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int nxp_fspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -1246,6 +1251,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
+ ctlr->mem_caps = &nxp_fspi_mem_caps;
nxp_fspi_default_setup(f);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index add6247d3481..29c616e2c408 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1561,10 +1561,15 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
- if (IS_ERR(mcspi->ref_clk))
- mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
- else
+ if (IS_ERR(mcspi->ref_clk)) {
+ status = PTR_ERR(mcspi->ref_clk);
+ dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
+ goto free_ctlr;
+ }
+ if (mcspi->ref_clk)
mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
+ else
+ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
ctlr->max_speed_hz = mcspi->ref_clk_hz;
ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 903d76145272..06711a62fa3d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -73,8 +73,9 @@ struct chip_data {
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
#define LPSS_PRIV_CLOCK_GATE 0x38
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
-#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF 0x0
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
@@ -321,6 +322,20 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data,
writel(value, drv_data->lpss_base + offset);
}
+static bool __lpss_ssp_update_priv(struct driver_data *drv_data, unsigned int offset,
+ u32 mask, u32 value)
+{
+ u32 new, curr;
+
+ curr = __lpss_ssp_read_priv(drv_data, offset);
+ new = (curr & ~mask) | (value & mask);
+ if (new == curr)
+ return false;
+
+ __lpss_ssp_write_priv(drv_data, offset, new);
+ return true;
+}
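The helper folds the recurring read-modify-write sequence into one call and reports whether the register actually changed, which lets callers skip side effects when the value was already right, as lpss_ssp_select_cs does below with its post-switch delay. Usage in sketch form (reg, bits, mask, value and delay are placeholders):

	/* set bits: pass the same value as mask and value */
	__lpss_ssp_update_priv(drv_data, reg, bits, bits);
	/* clear bits: same mask, zero value */
	__lpss_ssp_update_priv(drv_data, reg, bits, 0);
	/* act only when the write changed something */
	if (__lpss_ssp_update_priv(drv_data, reg, mask, value))
		ndelay(delay);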
+
/*
* lpss_ssp_setup - perform LPSS SSP specific setup
* @drv_data: pointer to the driver private data
@@ -337,21 +352,16 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
- value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ value = LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, value, value);
/* Enable multiblock DMA transfers */
if (drv_data->controller_info->enable_dma) {
- __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
+ __lpss_ssp_update_priv(drv_data, config->reg_ssp, BIT(0), BIT(0));
if (config->reg_general >= 0) {
- value = __lpss_ssp_read_priv(drv_data,
- config->reg_general);
- value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
- __lpss_ssp_write_priv(drv_data,
- config->reg_general, value);
+ value = LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_update_priv(drv_data, config->reg_general, value, value);
}
}
}
@@ -361,30 +371,19 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
- u32 value, cs;
+ u32 cs;
- if (!config->cs_sel_mask)
+ cs = spi_get_chipselect(spi, 0) << config->cs_sel_shift;
+ if (!__lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, config->cs_sel_mask, cs))
return;
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
-
- cs = spi_get_chipselect(spi, 0);
- cs <<= config->cs_sel_shift;
- if (cs != (value & config->cs_sel_mask)) {
- /*
- * When switching another chip select output active the
- * output must be selected first and wait 2 ssp_clk cycles
- * before changing state to active. Otherwise a short
- * glitch will occur on the previous chip select since
- * output select is latched but state control is not.
- */
- value &= ~config->cs_sel_mask;
- value |= cs;
- __lpss_ssp_write_priv(drv_data,
- config->reg_cs_ctrl, value);
- ndelay(1000000000 /
- (drv_data->controller->max_speed_hz / 2));
- }
+ /*
+ * When switching another chip select output active, the output must be
+ * selected first and 2 ssp_clk cycles must pass before it changes state
+ * to active. Otherwise a short glitch will occur on the previous chip
+ * select since output select is latched but state control is not.
+ */
+ ndelay(1000000000 / (drv_data->controller->max_speed_hz / 2));
}
static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
@@ -392,34 +391,27 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
const struct lpss_config *config;
- u32 value;
+ u32 mask;
config = lpss_get_config(drv_data);
if (enable)
lpss_ssp_select_cs(spi, config);
- value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- if (enable)
- value &= ~LPSS_CS_CONTROL_CS_HIGH;
- else
- value |= LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ mask = LPSS_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? 0 : mask);
if (config->cs_clk_stays_gated) {
- u32 clkgate;
-
/*
* Changing CS alone when dynamic clock gating is on won't
* actually flip CS at that time. This ruins SPI transfers
* that specify delays, or have no data. Toggle the clock mode
* to force on briefly to poke the CS pin to move.
*/
- clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
- value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
- LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
-
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
- __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ mask = LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK;
+ if (__lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON))
+ __lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask,
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF);
}
}
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 70bbb459caa4..f3fe10eddb6a 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -13,12 +13,14 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
+#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/spi/spi-mem.h>
/* System control */
@@ -110,6 +112,7 @@
#define SFC_VER_3 0x3
#define SFC_VER_4 0x4
#define SFC_VER_5 0x5
+#define SFC_VER_8 0x8
/* Delay line controller register */
#define SFC_DLL_CTRL0 0x3C
@@ -150,16 +153,13 @@
/* Data */
#define SFC_DATA 0x108
-/* The controller and documentation reports that it supports up to 4 CS
- * devices (0-3), however I have only been able to test a single CS (CS 0)
- * due to the configuration of my device.
- */
-#define SFC_MAX_CHIPSELECT_NUM 4
+#define SFC_CS1_REG_OFFSET 0x200
+
+#define SFC_MAX_CHIPSELECT_NUM 2
-/* The SFC can transfer max 16KB - 1 at one time
- * we set it to 15.5KB here for alignment.
- */
#define SFC_MAX_IOSIZE_VER3 (512 * 31)
+/* The controller supports up to 4GB, but 64KB is enough and reserves less memory */
+#define SFC_MAX_IOSIZE_VER4 (0x10000U)
/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD (0x40)
@@ -169,12 +169,14 @@
*/
#define SFC_MAX_SPEED (150 * 1000 * 1000)
+#define ROCKCHIP_AUTOSUSPEND_DELAY 2000
+
struct rockchip_sfc {
struct device *dev;
void __iomem *regbase;
struct clk *hclk;
struct clk *clk;
- u32 frequency;
+ u32 speed[SFC_MAX_CHIPSELECT_NUM];
/* virtual mapped addr for dma_buffer */
void *buffer;
dma_addr_t dma_buffer;
@@ -216,6 +218,22 @@ static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
return SFC_MAX_IOSIZE_VER3;
}
+static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed)
+{
+ if (sfc->version >= SFC_VER_8)
+ return clk_set_rate(sfc->clk, speed * 2);
+ else
+ return clk_set_rate(sfc->clk, speed);
+}
+
+static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
+{
+ if (sfc->version >= SFC_VER_8)
+ return clk_get_rate(sfc->clk) / 2;
+ else
+ return clk_get_rate(sfc->clk);
+}
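On SFC v8 and later the controller halves its input clock internally, so the two wrappers ask the clock framework for twice the target rate and halve the readback. A quick sanity check with assumed numbers:

	/* target a 100 MHz bus clock on a v8 controller:
	 *   rockchip_sfc_clk_set_rate(sfc, 100000000) -> clk_set_rate(clk, 200000000)
	 *   rockchip_sfc_clk_get_rate(sfc)            -> clk_get_rate(clk) / 2 = 100 MHz
	 */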
+
static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
{
u32 reg;
@@ -302,6 +320,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
u32 len)
{
u32 ctrl = 0, cmd = 0;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
/* set CMD */
cmd = op->cmd.opcode;
@@ -315,7 +334,8 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
} else {
cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
- writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+ writel(op->addr.nbytes * 8 - 1,
+ sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
}
ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
@@ -347,7 +367,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
/* set the Controller */
ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
- cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT;
+ cmd |= cs << SFC_CMD_CS_SHIFT;
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
op->addr.nbytes, op->addr.buswidth,
@@ -355,7 +375,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
ctrl, cmd, op->addr.val, len);
- writel(ctrl, sfc->regbase + SFC_CTRL);
+ writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
writel(cmd, sfc->regbase + SFC_CMD);
if (op->addr.nbytes)
writel(op->addr.val, sfc->regbase + SFC_ADDR);
@@ -453,8 +473,10 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
- if (op->data.dir == SPI_MEM_DATA_OUT)
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
memcpy(sfc->buffer, op->data.buf.out, len);
+ dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE);
+ }
ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
@@ -462,8 +484,11 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
ret = -ETIMEDOUT;
}
rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
- if (op->data.dir == SPI_MEM_DATA_IN)
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE);
memcpy(op->data.buf.in, sfc->buffer, len);
+ }
return ret;
}
@@ -473,6 +498,16 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
int ret = 0;
u32 status;
+ /*
+ * Often only little data is left in the FIFO and the controller
+ * completes the transmission in a short period of time, so poll
+ * briefly before falling back to the long timeout below.
+ */
+ ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+ !(status & SFC_SR_IS_BUSY),
+ 0, 10);
+ if (!ret)
+ return 0;
+
ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
!(status & SFC_SR_IS_BUSY),
20, timeout_us);
@@ -491,14 +526,22 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
u32 len = op->data.nbytes;
int ret;
+ u8 cs = spi_get_chipselect(mem->spi, 0);
- if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
- ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+ ret = pm_runtime_get_sync(sfc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sfc->dev);
+ return ret;
+ }
+
+ if (unlikely(op->max_freq != sfc->speed[cs]) &&
+ !has_acpi_companion(sfc->dev)) {
+ ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq);
if (ret)
- return ret;
- sfc->frequency = mem->spi->max_speed_hz;
+ goto out;
+ sfc->speed[cs] = op->max_freq;
dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
- sfc->frequency, clk_get_rate(sfc->clk));
+ sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc));
}
rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
@@ -515,11 +558,17 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
if (ret != len) {
dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
}
- return rockchip_sfc_xfer_done(sfc, 100000);
+ ret = rockchip_sfc_xfer_done(sfc, 100000);
+out:
+ pm_runtime_mark_last_busy(sfc->dev);
+ pm_runtime_put_autosuspend(sfc->dev);
+
+ return ret;
}
static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
@@ -536,6 +585,10 @@ static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
.adjust_op_size = rockchip_sfc_adjust_op_size,
};
+static const struct spi_controller_mem_caps rockchip_sfc_mem_caps = {
+ .per_op_freq = true,
+};
+
static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
{
struct rockchip_sfc *sfc = dev_id;
@@ -561,6 +614,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
struct spi_controller *host;
struct rockchip_sfc *sfc;
int ret;
+ u32 i, val;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
if (!host)
@@ -568,6 +622,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->mem_ops = &rockchip_sfc_mem_ops;
+ host->mem_caps = &rockchip_sfc_mem_caps;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
host->max_speed_hz = SFC_MAX_SPEED;
@@ -581,31 +636,29 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
if (IS_ERR(sfc->regbase))
return PTR_ERR(sfc->regbase);
- sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
if (IS_ERR(sfc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk),
"Failed to get sfc interface clk\n");
- sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+ if (!has_acpi_companion(&pdev->dev))
+ sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
if (IS_ERR(sfc->hclk))
return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk),
"Failed to get sfc ahb clk\n");
- sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
-
- if (sfc->use_dma) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_warn(dev, "Unable to set dma mask\n");
- return ret;
- }
-
- sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
- &sfc->dma_buffer, GFP_KERNEL);
- if (!sfc->buffer)
- return -ENOMEM;
+ if (has_acpi_companion(&pdev->dev)) {
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to find clock-frequency in ACPI\n");
+ for (i = 0; i < SFC_MAX_CHIPSELECT_NUM; i++)
+ sfc->speed[i] = val;
}
+ sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
+
ret = clk_prepare_enable(sfc->hclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable ahb clk\n");
@@ -630,19 +683,47 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
goto err_irq;
}
+ platform_set_drvdata(pdev, sfc);
+
ret = rockchip_sfc_init(sfc);
if (ret)
goto err_irq;
- sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
sfc->version = rockchip_sfc_get_version(sfc);
+ sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+
+ pm_runtime_set_autosuspend_delay(dev, ROCKCHIP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+
+ if (sfc->use_dma) {
+ sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32,
+ get_order(sfc->max_iosize));
+ if (!sfc->buffer) {
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+ sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ }
- ret = spi_register_controller(host);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
- goto err_irq;
+ goto err_register;
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+err_register:
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
+err_dma:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
err_irq:
clk_disable_unprepare(sfc->clk);
err_clk:
@@ -657,11 +738,80 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
clk_disable_unprepare(sfc->hclk);
}
+#ifdef CONFIG_PM
+static int rockchip_sfc_runtime_suspend(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sfc->clk);
+ clk_disable_unprepare(sfc->hclk);
+
+ return 0;
+}
+
+static int rockchip_sfc_runtime_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sfc->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(sfc->clk);
+ if (ret < 0)
+ clk_disable_unprepare(sfc->hclk);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_sfc_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int rockchip_sfc_resume(struct device *dev)
+{
+ struct rockchip_sfc *sfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ rockchip_sfc_init(sfc);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops rockchip_sfc_pm_ops = {
+ SET_RUNTIME_PM_OPS(rockchip_sfc_runtime_suspend,
+ rockchip_sfc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_sfc_suspend, rockchip_sfc_resume)
+};
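These ops pair with the pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls added to rockchip_sfc_exec_mem_op() above: every I/O path must hold a runtime-PM reference so the clocks enabled in runtime_resume stay on for the whole transfer. The general shape of such a wrapper (a sketch under that assumption; foo_hw_io is hypothetical):

	static int foo_do_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* resumes the device if suspended */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* rebalance the usage count */
			return ret;
		}

		ret = foo_hw_io(dev);		/* hypothetical hardware access */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* suspend after the delay expires */
		return ret;
	}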
+
static const struct of_device_id rockchip_sfc_dt_ids[] = {
{ .compatible = "rockchip,sfc"},
{ /* sentinel */ }
@@ -672,6 +822,7 @@ static struct platform_driver rockchip_sfc_driver = {
.driver = {
.name = "rockchip-sfc",
.of_match_table = rockchip_sfc_dt_ids,
+ .pm = &rockchip_sfc_pm_ops,
},
.probe = rockchip_sfc_probe,
.remove = rockchip_sfc_remove,
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index eecf9ea95ae3..1627aa66c965 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -7,13 +7,15 @@
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
+#include <linux/property.h>
+
#include <linux/gpio/consumer.h>
enum chips { sc18is602, sc18is602b, sc18is603 };
@@ -236,9 +238,7 @@ static int sc18is602_setup(struct spi_device *spi)
static int sc18is602_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
struct spi_controller *host;
@@ -251,8 +251,9 @@ static int sc18is602_probe(struct i2c_client *client)
if (!host)
return -ENOMEM;
+ device_set_node(&host->dev, dev_fwnode(dev));
+
hw = spi_controller_get_devdata(host);
- i2c_set_clientdata(client, hw);
/* assert reset and then release */
hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
@@ -265,11 +266,7 @@ static int sc18is602_probe(struct i2c_client *client)
hw->dev = dev;
hw->ctrl = 0xff;
- if (client->dev.of_node)
- hw->id = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- hw->id = id->driver_data;
-
+ hw->id = (uintptr_t)i2c_get_match_data(client);
switch (hw->id) {
case sc18is602:
case sc18is602b:
@@ -278,28 +275,21 @@ static int sc18is602_probe(struct i2c_client *client)
break;
case sc18is603:
host->num_chipselect = 2;
- if (pdata) {
+ if (pdata)
hw->freq = pdata->clock_frequency;
- } else {
- const __be32 *val;
- int len;
-
- val = of_get_property(np, "clock-frequency", &len);
- if (val && len >= sizeof(__be32))
- hw->freq = be32_to_cpup(val);
- }
+ else
+ device_property_read_u32(dev, "clock-frequency", &hw->freq);
if (!hw->freq)
hw->freq = SC18IS602_CLOCK;
break;
}
- host->bus_num = np ? -1 : client->adapter->nr;
+ host->bus_num = dev_fwnode(dev) ? -1 : client->adapter->nr;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->setup = sc18is602_setup;
host->transfer_one_message = sc18is602_transfer_one;
host->max_transfer_size = sc18is602_max_transfer_size;
host->max_message_size = sc18is602_max_transfer_size;
- host->dev.of_node = np;
host->min_speed_hz = hw->freq / 128;
host->max_speed_hz = hw->freq / 4;
@@ -314,7 +304,7 @@ static const struct i2c_device_id sc18is602_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sc18is602_id);
-static const struct of_device_id sc18is602_of_match[] __maybe_unused = {
+static const struct of_device_id sc18is602_of_match[] = {
{
.compatible = "nxp,sc18is602",
.data = (void *)sc18is602
@@ -334,7 +324,7 @@ MODULE_DEVICE_TABLE(of, sc18is602_of_match);
static struct i2c_driver sc18is602_driver = {
.driver = {
.name = "sc18is602",
- .of_match_table = of_match_ptr(sc18is602_of_match),
+ .of_match_table = sc18is602_of_match,
},
.probe = sc18is602_probe,
.id_table = sc18is602_id,
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index adac645732fe..c4969f66a0ba 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -116,6 +116,9 @@ struct f_ospi {
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
+ if (!op->dummy.nbytes)
+ return 0;
+
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
@@ -335,7 +338,6 @@ static void f_ospi_config_indir_protocol(struct f_ospi *ospi,
static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct spi_device *spi = mem->spi;
u32 irq_stat_en;
int ret;
@@ -343,7 +345,7 @@ static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
if (ret)
return ret;
- f_ospi_config_clk(ospi, spi->max_speed_hz);
+ f_ospi_config_clk(ospi, op->max_freq);
f_ospi_config_indir_protocol(ospi, mem, op);
@@ -577,6 +579,10 @@ static const struct spi_controller_mem_ops f_ospi_mem_ops = {
.exec_op = f_ospi_exec_op,
};
+static const struct spi_controller_mem_caps f_ospi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int f_ospi_init(struct f_ospi *ospi)
{
int ret;
@@ -614,6 +620,7 @@ static int f_ospi_probe(struct platform_device *pdev)
| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
ctlr->mem_ops = &f_ospi_mem_ops;
+ ctlr->mem_caps = &f_ospi_mem_caps;
ctlr->bus_num = -1;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > OSPI_NUM_CS) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 9122350402b5..a284d2794586 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -623,7 +623,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
- ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
+ ti_qspi_setup_clk(qspi, op->max_freq);
ti_qspi_enable_memory_map(mem->spi);
}
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
@@ -658,6 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.adjust_op_size = ti_qspi_adjust_op_size,
};
+static const struct spi_controller_mem_caps ti_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
static int ti_qspi_start_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
@@ -777,6 +781,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
host->mem_ops = &ti_qspi_mem_ops;
+ host->mem_caps = &ti_qspi_mem_caps;
if (!of_property_read_u32(np, "num-cs", &num_cs))
host->num_chipselect = num_cs;
@@ -826,20 +831,12 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (of_property_present(np, "syscon-chipselects")) {
qspi->ctrl_base =
- syscon_regmap_lookup_by_phandle(np,
- "syscon-chipselects");
+ syscon_regmap_lookup_by_phandle_args(np, "syscon-chipselects",
+ 1, &qspi->ctrl_reg);
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
goto free_host;
}
- ret = of_property_read_u32_index(np,
- "syscon-chipselects",
- 1, &qspi->ctrl_reg);
- if (ret) {
- dev_err(&pdev->dev,
- "couldn't get ctrl_mod reg index\n");
- goto free_host;
- }
}
qspi->fclk = devm_clk_get(&pdev->dev, "fck");
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index dee9c339a35e..2bd25c75f881 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -318,6 +318,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
* @spi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -331,7 +332,8 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* controller the driver will set the highest or lowest frequency supported by
* controller.
*/
-static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi,
+ const struct spi_mem_op *op)
{
u32 config_reg, baud_rate_val = 0;
@@ -346,7 +348,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
- spi->max_speed_hz)
+ op->max_freq)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
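The loop above walks the divider up until the divided reference clock no longer exceeds the per-op limit. With assumed numbers:

	/* refclk = 200 MHz, op->max_freq = 50 MHz:
	 *   baud_rate_val = 0: 200 MHz / (2 << 0) = 100 MHz  > 50 MHz, keep going
	 *   baud_rate_val = 1: 200 MHz / (2 << 1) =  50 MHz <= 50 MHz, stop
	 * CONFIG is then programmed with baud_rate_val = 1.
	 */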
@@ -379,12 +381,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
+ int ret;
if (ctlr->busy)
return -EBUSY;
- clk_enable(qspi->refclk);
- clk_enable(qspi->pclk);
+ ret = clk_enable(qspi->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(qspi->pclk);
+ if (ret) {
+ clk_disable(qspi->refclk);
+ return ret;
+ }
+
zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
@@ -534,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
- zynq_qspi_config_op(xqspi, mem->spi);
+ zynq_qspi_config_op(xqspi, mem->spi, op);
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
@@ -620,6 +631,10 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
.exec_op = zynq_qspi_exec_mem_op,
};
+static const struct spi_controller_mem_caps zynq_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynq_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -706,6 +721,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->mem_caps = &zynq_qspi_mem_caps;
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 549a6e0c9654..d800d79f62a7 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -535,7 +535,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* zynqmp_qspi_config_op - Configure QSPI controller for specified
* transfer
* @xqspi: Pointer to the zynqmp_qspi structure
- * @qspi: Pointer to the spi_device structure
+ * @op: The memory operation to execute
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -553,12 +553,12 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
* frequency supported by controller.
*/
static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
- struct spi_device *qspi)
+ const struct spi_mem_op *op)
{
ulong clk_rate;
u32 config_reg, req_speed_hz, baud_rate_val = 0;
- req_speed_hz = qspi->max_speed_hz;
+ req_speed_hz = op->max_freq;
if (xqspi->speed_hz != req_speed_hz) {
xqspi->speed_hz = req_speed_hz;
@@ -1072,7 +1072,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
op->dummy.buswidth, op->data.buswidth);
mutex_lock(&xqspi->op_lock);
- zynqmp_qspi_config_op(xqspi, mem->spi);
+ zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
@@ -1224,6 +1224,10 @@ static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.exec_op = zynqmp_qspi_exec_op,
};
+static const struct spi_controller_mem_caps zynqmp_qspi_mem_caps = {
+ .per_op_freq = true,
+};
+
/**
* zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
@@ -1333,6 +1337,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+ ctlr->mem_caps = &zynqmp_qspi_mem_caps;
ctlr->setup = zynqmp_qspi_setup_op;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->dev.of_node = np;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff1add2ecb91..ff07c87dbadc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -410,29 +410,21 @@ static int spi_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
struct spi_device *spi = to_spi_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
- if (dev->of_node) {
+ if (is_of_node(fwnode))
spi->irq = of_irq_get(dev->of_node, 0);
- if (spi->irq == -EPROBE_DEFER)
- return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n");
- if (spi->irq < 0)
- spi->irq = 0;
- }
-
- if (has_acpi_companion(dev) && spi->irq < 0) {
- struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
-
- spi->irq = acpi_dev_gpio_irq_get(adev, 0);
- if (spi->irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (spi->irq < 0)
- spi->irq = 0;
- }
+ else if (is_acpi_device_node(fwnode) && spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
+ if (spi->irq < 0)
+ spi->irq = 0;
ret = dev_pm_domain_attach(dev, true);
if (ret)
@@ -874,15 +866,18 @@ EXPORT_SYMBOL_GPL(spi_new_device);
*/
void spi_unregister_device(struct spi_device *spi)
{
+ struct fwnode_handle *fwnode;
+
if (!spi)
return;
- if (spi->dev.of_node) {
- of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
- of_node_put(spi->dev.of_node);
+ fwnode = dev_fwnode(&spi->dev);
+ if (is_of_node(fwnode)) {
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ of_node_put(to_of_node(fwnode));
+ } else if (is_acpi_device_node(fwnode)) {
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
}
- if (ACPI_COMPANION(&spi->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
device_del(&spi->dev);
spi_cleanup(spi);
@@ -1059,7 +1054,7 @@ static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool
* ambiguity. That's why we use enable, that takes SPI_CS_HIGH
* into account.
*/
- if (has_acpi_companion(&spi->dev))
+ if (is_acpi_device_node(dev_fwnode(&spi->dev)))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
else
/* Polarity handled by GPIO library */
@@ -2060,7 +2055,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
ctlr->busy = false;
ctlr->queue_empty = true;
- ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
@@ -2959,9 +2954,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -4841,7 +4840,7 @@ extern struct notifier_block spi_of_notifier;
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
- return ACPI_COMPANION(dev->parent) == data;
+ return device_match_acpi_dev(dev->parent, data);
}
struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 653f82984216..58ae4304fdab 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -698,19 +698,24 @@ static const struct class spidev_class = {
.name = "spidev",
};
+/*
+ * The spi device ids are expected to match the device names of the
+ * spidev_dt_ids array below. Both arrays are kept in the same ordering.
+ */
static const struct spi_device_id spidev_spi_ids[] = {
- { .name = "bh2228fv" },
- { .name = "dh2228fv" },
- { .name = "jg10309-01" },
- { .name = "ltc2488" },
- { .name = "sx1301" },
- { .name = "bk4" },
- { .name = "dhcom-board" },
- { .name = "m53cpld" },
- { .name = "spi-petra" },
- { .name = "spi-authenta" },
- { .name = "em3581" },
- { .name = "si3210" },
+ { .name = /* cisco */ "spi-petra" },
+ { .name = /* dh */ "dhcom-board" },
+ { .name = /* elgin */ "jg10309-01" },
+ { .name = /* lineartechnology */ "ltc2488" },
+ { .name = /* lwn */ "bk4" },
+ { .name = /* lwn */ "bk4-spi" },
+ { .name = /* menlo */ "m53cpld" },
+ { .name = /* micron */ "spi-authenta" },
+ { .name = /* rohm */ "bh2228fv" },
+ { .name = /* rohm */ "dh2228fv" },
+ { .name = /* semtech */ "sx1301" },
+ { .name = /* silabs */ "em3581" },
+ { .name = /* silabs */ "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -734,6 +739,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
index 3cafdf22c909..122140b97579 100644
--- a/drivers/spmi/hisi-spmi-controller.c
+++ b/drivers/spmi/hisi-spmi-controller.c
@@ -300,9 +300,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
spin_lock_init(&spmi_controller->lock);
- ctrl->dev.parent = pdev->dev.parent;
- ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
-
/* Callbacks */
ctrl->read_cmd = spmi_read_cmd;
ctrl->write_cmd = spmi_write_cmd;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index fb0101da1485..3cf8d9bd4566 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -517,7 +517,7 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
if (!sdev)
continue;
- sdev->dev.of_node = node;
+ device_set_node(&sdev->dev, of_fwnode_handle(node));
sdev->usid = (u8)reg[0];
err = spmi_device_add(sdev);
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index f6db2933ebba..6736b09b2f45 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "fbtft.h"
@@ -162,7 +163,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
+ __func__, str_true_false(on));
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig
index 259f3ff33646..81510db3072e 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/staging/gpib/Kconfig
@@ -65,6 +65,8 @@ config GPIB_NI_PCI_ISA
depends on ISA_BUS || PCI || PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
+ depends on HAS_IOPORT_MAP
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -89,6 +91,7 @@ config GPIB_CB7210
depends on HAS_IOPORT
depends on ISA_BUS || PCI || PCMCIA
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -177,6 +180,7 @@ config GPIB_HP82341
config GPIB_INES
tristate "INES"
depends on PCI || ISA_BUS || PCMCIA
+ depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
select GPIB_COMMON
@@ -199,8 +203,8 @@ config GPIB_INES
called cb7210.
config GPIB_PCMCIA
- bool "PCMCIA/Cardbus support for NI MC and Ines boards"
- depends on PCCARD && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
+ def_bool y
+ depends on PCMCIA && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
help
Enable PCMCIA/Cardbus support for National Instruments,
Measurement Computing boards and Ines boards.
diff --git a/drivers/staging/gpib/agilent_82350b/Makefile b/drivers/staging/gpib/agilent_82350b/Makefile
index d9236c92e04b..f24e1e713a63 100644
--- a/drivers/staging/gpib/agilent_82350b/Makefile
+++ b/drivers/staging/gpib/agilent_82350b/Makefile
@@ -1,2 +1,2 @@
-obj-m += agilent_82350b.o
+obj-$(CONFIG_GPIB_AGILENT_82350B) += agilent_82350b.o
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 53006d0cc79c..3f4f95b7fe34 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -30,11 +30,8 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
unsigned short event_status;
int i, num_fifo_bytes;
//hardware doesn't support checking for end-of-string character when using fifo
- if (tms_priv->eos_flags & REOS) {
- //pr_info("ag-rd: using tms9914 read for REOS %x EOS %x\n",tms_priv->eos_flags,
- // tms_priv->eos);
+ if (tms_priv->eos_flags & REOS)
return tms9914_read(board, tms_priv, buffer, length, end, bytes_read);
- }
clear_bit(DEV_CLEAR_BN, &tms_priv->state);
@@ -700,7 +697,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
GPIB_82350A_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device,
SRAM_82350A_REGION),
pci_resource_len(a_priv->pci_device,
@@ -724,7 +721,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
pci_resource_len(a_priv->pci_device, GPIB_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device, SRAM_REGION),
pci_resource_len(a_priv->pci_device, SRAM_REGION));
dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
@@ -811,15 +808,15 @@ void agilent_82350b_detach(gpib_board_t *board)
if (a_priv->gpib_base) {
tms9914_board_reset(tms_priv);
if (a_priv->misc_base)
- iounmap((void *)a_priv->misc_base);
+ iounmap(a_priv->misc_base);
if (a_priv->borg_base)
- iounmap((void *)a_priv->borg_base);
+ iounmap(a_priv->borg_base);
if (a_priv->sram_base)
- iounmap((void *)a_priv->sram_base);
+ iounmap(a_priv->sram_base);
if (a_priv->gpib_base)
- iounmap((void *)a_priv->gpib_base);
+ iounmap(a_priv->gpib_base);
if (a_priv->plx_base)
- iounmap((void *)a_priv->plx_base);
+ iounmap(a_priv->plx_base);
pci_release_regions(a_priv->pci_device);
}
if (a_priv->pci_device)
@@ -828,58 +825,58 @@ void agilent_82350b_detach(gpib_board_t *board)
agilent_82350b_free_private(board);
}
-gpib_interface_t agilent_82350b_unaccel_interface = {
-name: "agilent_82350b_unaccel",
-attach : agilent_82350b_unaccel_attach,
-detach : agilent_82350b_detach,
-read : agilent_82350b_read,
-write : agilent_82350b_write,
-command : agilent_82350b_command,
-request_system_control : agilent_82350b_request_system_control,
-take_control : agilent_82350b_take_control,
-go_to_standby : agilent_82350b_go_to_standby,
-interface_clear : agilent_82350b_interface_clear,
-remote_enable : agilent_82350b_remote_enable,
-enable_eos : agilent_82350b_enable_eos,
-disable_eos : agilent_82350b_disable_eos,
-parallel_poll : agilent_82350b_parallel_poll,
-parallel_poll_configure : agilent_82350b_parallel_poll_configure,
-parallel_poll_response : agilent_82350b_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82350b_line_status,
-update_status : agilent_82350b_update_status,
-primary_address : agilent_82350b_primary_address,
-secondary_address : agilent_82350b_secondary_address,
-serial_poll_response : agilent_82350b_serial_poll_response,
-t1_delay : agilent_82350b_t1_delay,
-return_to_local : agilent_82350b_return_to_local,
+static gpib_interface_t agilent_82350b_unaccel_interface = {
+ .name = "agilent_82350b_unaccel",
+ .attach = agilent_82350b_unaccel_attach,
+ .detach = agilent_82350b_detach,
+ .read = agilent_82350b_read,
+ .write = agilent_82350b_write,
+ .command = agilent_82350b_command,
+ .request_system_control = agilent_82350b_request_system_control,
+ .take_control = agilent_82350b_take_control,
+ .go_to_standby = agilent_82350b_go_to_standby,
+ .interface_clear = agilent_82350b_interface_clear,
+ .remote_enable = agilent_82350b_remote_enable,
+ .enable_eos = agilent_82350b_enable_eos,
+ .disable_eos = agilent_82350b_disable_eos,
+ .parallel_poll = agilent_82350b_parallel_poll,
+ .parallel_poll_configure = agilent_82350b_parallel_poll_configure,
+ .parallel_poll_response = agilent_82350b_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82350b_line_status,
+ .update_status = agilent_82350b_update_status,
+ .primary_address = agilent_82350b_primary_address,
+ .secondary_address = agilent_82350b_secondary_address,
+ .serial_poll_response = agilent_82350b_serial_poll_response,
+ .t1_delay = agilent_82350b_t1_delay,
+ .return_to_local = agilent_82350b_return_to_local,
};
-gpib_interface_t agilent_82350b_interface = {
-name: "agilent_82350b",
-attach : agilent_82350b_accel_attach,
-detach : agilent_82350b_detach,
-read : agilent_82350b_accel_read,
-write : agilent_82350b_accel_write,
-command : agilent_82350b_command,
-request_system_control : agilent_82350b_request_system_control,
-take_control : agilent_82350b_take_control,
-go_to_standby : agilent_82350b_go_to_standby,
-interface_clear : agilent_82350b_interface_clear,
-remote_enable : agilent_82350b_remote_enable,
-enable_eos : agilent_82350b_enable_eos,
-disable_eos : agilent_82350b_disable_eos,
-parallel_poll : agilent_82350b_parallel_poll,
-parallel_poll_configure : agilent_82350b_parallel_poll_configure,
-parallel_poll_response : agilent_82350b_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82350b_line_status,
-update_status : agilent_82350b_update_status,
-primary_address : agilent_82350b_primary_address,
-secondary_address : agilent_82350b_secondary_address,
-serial_poll_response : agilent_82350b_serial_poll_response,
-t1_delay : agilent_82350b_t1_delay,
-return_to_local : agilent_82350b_return_to_local,
+static gpib_interface_t agilent_82350b_interface = {
+ .name = "agilent_82350b",
+ .attach = agilent_82350b_accel_attach,
+ .detach = agilent_82350b_detach,
+ .read = agilent_82350b_accel_read,
+ .write = agilent_82350b_accel_write,
+ .command = agilent_82350b_command,
+ .request_system_control = agilent_82350b_request_system_control,
+ .take_control = agilent_82350b_take_control,
+ .go_to_standby = agilent_82350b_go_to_standby,
+ .interface_clear = agilent_82350b_interface_clear,
+ .remote_enable = agilent_82350b_remote_enable,
+ .enable_eos = agilent_82350b_enable_eos,
+ .disable_eos = agilent_82350b_disable_eos,
+ .parallel_poll = agilent_82350b_parallel_poll,
+ .parallel_poll_configure = agilent_82350b_parallel_poll_configure,
+ .parallel_poll_response = agilent_82350b_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82350b_line_status,
+ .update_status = agilent_82350b_update_status,
+ .primary_address = agilent_82350b_primary_address,
+ .secondary_address = agilent_82350b_secondary_address,
+ .serial_poll_response = agilent_82350b_serial_poll_response,
+ .t1_delay = agilent_82350b_t1_delay,
+ .return_to_local = agilent_82350b_return_to_local,
};
static int agilent_82350b_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -910,13 +907,30 @@ static int __init agilent_82350b_init_module(void)
result = pci_register_driver(&agilent_82350b_pci_driver);
if (result) {
- pr_err("agilent_82350b: pci_driver_register failed!\n");
+ pr_err("agilent_82350b: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
+ result = gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
+ if (result) {
+ pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
+
return 0;
+
+err_interface:
+ gpib_unregister_driver(&agilent_82350b_unaccel_interface);
+err_unaccel:
+ pci_unregister_driver(&agilent_82350b_pci_driver);
+
+ return result;
}
static void __exit agilent_82350b_exit_module(void)
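
The hunks above convert the interface tables from the long-obsolete GNU "label:" initializer syntax to C99 designated initializers, and give them static linkage now that the header no longer exports them. A minimal sketch of the same conversion, using a hypothetical ops structure rather than the real gpib_interface_t:

/* Before: GNU-style labeled fields with external linkage, e.g.
 * gpib_interface_t demo_interface = { name: "demo", attach: demo_attach };
 */

struct demo_ops {
	const char *name;
	int (*attach)(void *ctx);
};

static int demo_attach(void *ctx)
{
	return 0;
}

/* After: C99 designated initializers, static linkage */
static struct demo_ops demo_interface = {
	.name = "demo",
	.attach = demo_attach,
};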
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
index 30683d67d170..32b322113c10 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
@@ -45,11 +45,11 @@ enum board_model {
struct agilent_82350b_priv {
struct tms9914_priv tms9914_priv;
struct pci_dev *pci_device;
- void *plx_base; //82350a only
- void *gpib_base;
- void *sram_base;
- void *misc_base;
- void *borg_base;
+ void __iomem *plx_base; //82350a only
+ void __iomem *gpib_base;
+ void __iomem *sram_base;
+ void __iomem *misc_base;
+ void __iomem *borg_base;
int irq;
unsigned short card_mode_bits;
unsigned short event_status_bits;
@@ -60,8 +60,6 @@ struct agilent_82350b_priv {
// driver name
extern const char *driver_name;
-// interfaces
-extern gpib_interface_t agilent_82350b_interface;
// init functions
int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config);
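
The header hunk above retypes the mapped BAR pointers from plain void * to void __iomem *. With the annotation in place, sparse can flag stray dereferences, and the (void *) casts at the iounmap() calls in the detach path become unnecessary. A minimal sketch of the annotated map/access/unmap pattern, with hypothetical names:

#include <linux/io.h>

struct demo_priv {
	void __iomem *regs;	/* MMIO cookie: use readb/writeb and iounmap only */
};

static int demo_map(struct demo_priv *p, phys_addr_t base, size_t len)
{
	p->regs = ioremap(base, len);
	if (!p->regs)
		return -ENOMEM;
	return 0;
}

static void demo_unmap(struct demo_priv *p)
{
	writeb(0, p->regs);	/* accessors take the __iomem pointer directly */
	iounmap(p->regs);	/* no (void *) cast required */
}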
diff --git a/drivers/staging/gpib/agilent_82357a/Makefile b/drivers/staging/gpib/agilent_82357a/Makefile
index 4a1d940fce2b..81a55c257a6e 100644
--- a/drivers/staging/gpib/agilent_82357a/Makefile
+++ b/drivers/staging/gpib/agilent_82357a/Makefile
@@ -1,4 +1,4 @@
-obj-m += agilent_82357a.o
+obj-$(CONFIG_GPIB_AGILENT_82357A) += agilent_82357a.o
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
index bf05fb4a736b..69f0e490d401 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
@@ -19,7 +19,7 @@ MODULE_DESCRIPTION("GPIB driver for Agilent 82357A/B usb adapters");
#define MAX_NUM_82357A_INTERFACES 128
static struct usb_interface *agilent_82357a_driver_interfaces[MAX_NUM_82357A_INTERFACES];
-DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
+static DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask);
@@ -1146,25 +1146,6 @@ setup_exit:
return retval;
}
-#ifdef RESET_USB_CONFIG
-static int agilent_82357a_reset_usb_configuration(gpib_board_t *board)
-{
- struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev;
- int retval;
-
- if (!a_priv->bus_interface)
- return -ENODEV;
- usb_dev = interface_to_usbdev(a_priv->bus_interface);
- retval = usb_reset_configuration(usb_dev);
- if (retval)
- dev_err(&usb_dev->dev, "%s: usb_reset_configuration() returned %i\n",
- __func__, retval);
- return retval;
-}
-#endif
-
static void agilent_82357a_cleanup_urbs(struct agilent_82357a_priv *a_priv)
{
if (a_priv && a_priv->bus_interface) {
@@ -1175,15 +1156,23 @@ static void agilent_82357a_cleanup_urbs(struct agilent_82357a_priv *a_priv)
}
};
+static void agilent_82357a_release_urbs(struct agilent_82357a_priv *a_priv)
+{
+ if (a_priv) {
+ usb_free_urb(a_priv->interrupt_urb);
+ a_priv->interrupt_urb = NULL;
+ kfree(a_priv->interrupt_buffer);
+ }
+}
+
static int agilent_82357a_allocate_private(gpib_board_t *board)
{
struct agilent_82357a_priv *a_priv;
- board->private_data = kmalloc(sizeof(struct agilent_82357a_priv), GFP_KERNEL);
+ board->private_data = kzalloc(sizeof(struct agilent_82357a_priv), GFP_KERNEL);
if (!board->private_data)
return -ENOMEM;
a_priv = board->private_data;
- memset(a_priv, 0, sizeof(struct agilent_82357a_priv));
mutex_init(&a_priv->bulk_transfer_lock);
mutex_init(&a_priv->bulk_alloc_lock);
mutex_init(&a_priv->control_alloc_lock);
@@ -1191,11 +1180,11 @@ static int agilent_82357a_allocate_private(gpib_board_t *board)
return 0;
}
-static void agilent_82357a_free_private(struct agilent_82357a_priv *a_priv)
+static void agilent_82357a_free_private(gpib_board_t *board)
{
- usb_free_urb(a_priv->interrupt_urb);
- kfree(a_priv->interrupt_buffer);
- kfree(a_priv);
+ kfree(board->private_data);
+ board->private_data = NULL;
}
static int agilent_82357a_init(gpib_board_t *board)
@@ -1342,16 +1331,14 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
a_priv->bus_interface = agilent_82357a_driver_interfaces[i];
usb_set_intfdata(agilent_82357a_driver_interfaces[i], board);
usb_dev = interface_to_usbdev(a_priv->bus_interface);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
break;
}
}
if (i == MAX_NUM_82357A_INTERFACES) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- pr_err("No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
- return -ENODEV;
+ dev_err(board->gpib_dev,
+ "No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
+ retval = -ENODEV;
+ goto attach_fail;
}
product_id = le16_to_cpu(interface_to_usbdev(a_priv->bus_interface)->descriptor.idProduct);
switch (product_id) {
@@ -1365,20 +1352,13 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
break;
default:
dev_err(&usb_dev->dev, "bug, unhandled product_id in switch?\n");
- return -EIO;
- }
-#ifdef RESET_USB_CONFIG
- retval = agilent_82357a_reset_usb_configuration(board);
- if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
+ retval = -EIO;
+ goto attach_fail;
}
-#endif
+
retval = agilent_82357a_setup_urbs(board);
- if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
- }
+ if (retval < 0)
+ goto attach_fail;
timer_setup(&a_priv->bulk_timer, agilent_82357a_timeout_handler, 0);
@@ -1387,11 +1367,19 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
retval = agilent_82357a_init(board);
if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
+ agilent_82357a_cleanup_urbs(a_priv);
+ agilent_82357a_release_urbs(a_priv);
+ goto attach_fail;
}
- dev_info(&usb_dev->dev, "%s: attached\n", __func__);
+ dev_info(&usb_dev->dev,
+ "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ mutex_unlock(&agilent_82357a_hotplug_lock);
+ return retval;
+
+attach_fail:
+ agilent_82357a_free_private(board);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
}
@@ -1441,12 +1429,10 @@ static int agilent_82357a_go_idle(gpib_board_t *board)
static void agilent_82357a_detach(gpib_board_t *board)
{
struct agilent_82357a_priv *a_priv;
- struct usb_device *usb_dev;
mutex_lock(&agilent_82357a_hotplug_lock);
a_priv = board->private_data;
- usb_dev = interface_to_usbdev(a_priv->bus_interface);
if (a_priv) {
if (a_priv->bus_interface) {
agilent_82357a_go_idle(board);
@@ -1456,40 +1442,41 @@ static void agilent_82357a_detach(gpib_board_t *board)
mutex_lock(&a_priv->bulk_alloc_lock);
mutex_lock(&a_priv->interrupt_alloc_lock);
agilent_82357a_cleanup_urbs(a_priv);
- agilent_82357a_free_private(a_priv);
+ agilent_82357a_release_urbs(a_priv);
+ agilent_82357a_free_private(board);
}
- dev_info(&usb_dev->dev, "%s: detached\n", __func__);
+ dev_info(board->gpib_dev, "%s: detached\n", __func__);
mutex_unlock(&agilent_82357a_hotplug_lock);
}
-gpib_interface_t agilent_82357a_gpib_interface = {
-name: "agilent_82357a",
-attach : agilent_82357a_attach,
-detach : agilent_82357a_detach,
-read : agilent_82357a_read,
-write : agilent_82357a_write,
-command : agilent_82357a_command,
-take_control : agilent_82357a_take_control,
-go_to_standby : agilent_82357a_go_to_standby,
-request_system_control : agilent_82357a_request_system_control,
-interface_clear : agilent_82357a_interface_clear,
-remote_enable : agilent_82357a_remote_enable,
-enable_eos : agilent_82357a_enable_eos,
-disable_eos : agilent_82357a_disable_eos,
-parallel_poll : agilent_82357a_parallel_poll,
-parallel_poll_configure : agilent_82357a_parallel_poll_configure,
-parallel_poll_response : agilent_82357a_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82357a_line_status,
-update_status : agilent_82357a_update_status,
-primary_address : agilent_82357a_primary_address,
-secondary_address : agilent_82357a_secondary_address,
-serial_poll_response : agilent_82357a_serial_poll_response,
-serial_poll_status : agilent_82357a_serial_poll_status,
-t1_delay : agilent_82357a_t1_delay,
-return_to_local : agilent_82357a_return_to_local,
-no_7_bit_eos : 1,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t agilent_82357a_gpib_interface = {
+ .name = "agilent_82357a",
+ .attach = agilent_82357a_attach,
+ .detach = agilent_82357a_detach,
+ .read = agilent_82357a_read,
+ .write = agilent_82357a_write,
+ .command = agilent_82357a_command,
+ .take_control = agilent_82357a_take_control,
+ .go_to_standby = agilent_82357a_go_to_standby,
+ .request_system_control = agilent_82357a_request_system_control,
+ .interface_clear = agilent_82357a_interface_clear,
+ .remote_enable = agilent_82357a_remote_enable,
+ .enable_eos = agilent_82357a_enable_eos,
+ .disable_eos = agilent_82357a_disable_eos,
+ .parallel_poll = agilent_82357a_parallel_poll,
+ .parallel_poll_configure = agilent_82357a_parallel_poll_configure,
+ .parallel_poll_response = agilent_82357a_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82357a_line_status,
+ .update_status = agilent_82357a_update_status,
+ .primary_address = agilent_82357a_primary_address,
+ .secondary_address = agilent_82357a_secondary_address,
+ .serial_poll_response = agilent_82357a_serial_poll_response,
+ .serial_poll_status = agilent_82357a_serial_poll_status,
+ .t1_delay = agilent_82357a_t1_delay,
+ .return_to_local = agilent_82357a_return_to_local,
+ .no_7_bit_eos = 1,
+ .skip_check_for_command_acceptors = 1
};
// Table with the USB-devices: just now only testing IDs
@@ -1691,12 +1678,24 @@ static struct usb_driver agilent_82357a_bus_driver = {
static int __init agilent_82357a_init_module(void)
{
int i;
+ int ret;
pr_info("agilent_82357a_gpib driver loading");
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i)
agilent_82357a_driver_interfaces[i] = NULL;
- usb_register(&agilent_82357a_bus_driver);
- gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
+
+ ret = usb_register(&agilent_82357a_bus_driver);
+ if (ret) {
+ pr_err("agilent_82357a: usb_register failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("agilent_82357a: gpib_register_driver failed: error = %d\n", ret);
+ usb_deregister(&agilent_82357a_bus_driver);
+ return ret;
+ }
return 0;
}
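
Two smaller idioms appear in the agilent_82357a changes above: zeroed allocation via kzalloc() instead of kmalloc() plus memset(), and registering the USB driver before the GPIB interface so that a failure of the second can deregister the first. A minimal sketch of the allocation half, with a hypothetical private struct:

#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_priv {
	struct mutex lock;
	int irq;
};

static int demo_allocate_private(struct demo_priv **out)
{
	/* kzalloc() returns zeroed memory, so the follow-up memset() goes away */
	*out = kzalloc(sizeof(**out), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;
	mutex_init(&(*out)->lock);
	return 0;
}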
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/staging/gpib/cb7210/Makefile
index 22e0214fc17d..cda0725d6487 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/staging/gpib/cb7210/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += cb7210.o
+obj-$(CONFIG_GPIB_CB7210) += cb7210.o
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 63df7f3eb3f3..4d22f647a453 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -683,170 +683,170 @@ void cb7210_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-gpib_interface_t cb_pci_unaccel_interface = {
-name: "cbi_pci_unaccel",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_unaccel_interface = {
+ .name = "cbi_pci_unaccel",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pci_accel_interface = {
-name: "cbi_pci_accel",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_accel_interface = {
+ .name = "cbi_pci_accel",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pci_interface = {
-name: "cbi_pci",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_interface = {
+ .name = "cbi_pci",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_unaccel_interface = {
-name: "cbi_isa_unaccel",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_unaccel_interface = {
+ .name = "cbi_isa_unaccel",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_interface = {
-name: "cbi_isa",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_interface = {
+ .name = "cbi_isa",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_accel_interface = {
-name: "cbi_isa_accel",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_accel_interface = {
+ .name = "cbi_isa_accel",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
static int cb7210_allocate_private(gpib_board_t *board)
@@ -971,12 +971,12 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
switch (cb_priv->pci_chip) {
case PCI_CHIP_AMCC_S5933:
cb_priv->amcc_iobase = pci_resource_start(cb_priv->pci_device, 0);
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 1));
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 1);
cb_priv->fifo_iobase = pci_resource_start(cb_priv->pci_device, 2);
break;
case PCI_CHIP_QUANCOM:
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 0));
- cb_priv->fifo_iobase = (unsigned long)nec_priv->iobase;
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 0);
+ cb_priv->fifo_iobase = nec_priv->iobase;
break;
default:
pr_err("cb7210: bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
@@ -1040,8 +1040,8 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region((unsigned long)config->ibbase, cb7210_iosize, "cb7210") == 0) {
- pr_err("gpib: ioports starting at 0x%p are already in use\n", config->ibbase);
+ if (!request_region(config->ibbase, cb7210_iosize, "cb7210")) {
+ pr_err("gpib: ioports starting at 0x%x are already in use\n", config->ibbase);
return -EIO;
}
nec_priv->iobase = config->ibbase;
@@ -1351,100 +1351,94 @@ static struct pcmcia_driver cb_gpib_cs_driver = {
.resume = cb_gpib_resume,
};
-int cb_pcmcia_init_module(void)
-{
- pcmcia_register_driver(&cb_gpib_cs_driver);
- return 0;
-}
-
void cb_pcmcia_cleanup_module(void)
{
DEBUG(0, "cb_gpib_cs: unloading\n");
pcmcia_unregister_driver(&cb_gpib_cs_driver);
}
-gpib_interface_t cb_pcmcia_unaccel_interface = {
-name: "cbi_pcmcia_unaccel",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_unaccel_interface = {
+ .name = "cbi_pcmcia_unaccel",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pcmcia_interface = {
-name: "cbi_pcmcia",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_interface = {
+ .name = "cbi_pcmcia",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pcmcia_accel_interface = {
-name: "cbi_pcmcia_accel",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_accel_interface = {
+ .name = "cbi_pcmcia_accel",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
@@ -1465,13 +1459,13 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "cb7210") == 0) {
+ if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
+ "cb7210")) {
pr_err("gpib: ioports starting at 0x%lx are already in use\n",
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
cb_priv->fifo_iobase = curr_dev->resource[0]->start;
if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED,
@@ -1506,32 +1500,102 @@ void cb_pcmcia_detach(gpib_board_t *board)
static int __init cb7210_init_module(void)
{
- int err = 0;
- int result;
+ int ret;
+
+ ret = pci_register_driver(&cb7210_pci_driver);
+ if (ret) {
+ pr_err("cb7210: pci_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&cb_pci_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci;
+ }
+
+ ret = gpib_register_driver(&cb_isa_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa;
+ }
- result = pci_register_driver(&cb7210_pci_driver);
- if (result) {
- pr_err("cb7210: pci_driver_register failed!\n");
- return result;
+ ret = gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_accel;
}
- gpib_register_driver(&cb_pci_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_interface, THIS_MODULE);
- gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
-
-#ifdef GPIB__PCMCIA
- gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
- err += cb_pcmcia_init_module();
+ ret = gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_unaccel;
+ }
+
+ ret = gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa_accel;
+ }
+
+ ret = gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa_unaccel;
+ }
+
+#ifdef GPIB_PCMCIA
+ ret = gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia;
+ }
+
+ ret = gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_accel;
+ }
+
+ ret = gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_unaccel;
+ }
+
+ ret = pcmcia_register_driver(&cb_gpib_cs_driver);
+ if (ret) {
+ pr_err("cb7210: pcmcia_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_driver;
+ }
#endif
- if (err)
- return -1;
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
+err_pcmcia_unaccel:
+ gpib_unregister_driver(&cb_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&cb_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&cb_isa_unaccel_interface);
+err_isa_unaccel:
+ gpib_unregister_driver(&cb_isa_accel_interface);
+err_isa_accel:
+ gpib_unregister_driver(&cb_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&cb_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&cb_isa_interface);
+err_isa:
+ gpib_unregister_driver(&cb_pci_interface);
+err_pci:
+ pci_unregister_driver(&cb7210_pci_driver);
+
+ return ret;
}
static void __exit cb7210_exit_module(void)
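
cb7210_init_module() above chains up to eleven registrations (one PCI driver, six GPIB interfaces, and under GPIB_PCMCIA three more interfaces plus the PCMCIA driver) and unwinds every earlier one when a later one fails. A condensed sketch of the goto-ladder shape, with hypothetical register/unregister pairs standing in for the real driver calls:

#include <linux/init.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return 0; }
static void unregister_a(void) { }
static void unregister_b(void) { }

static int __init demo_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = register_b();
	if (ret)
		goto err_b;

	ret = register_c();
	if (ret)
		goto err_c;

	return 0;

err_c:
	unregister_b();	/* unwind in reverse order of registration */
err_b:
	unregister_a();
	return ret;
}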
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index 4ad976de2b68..d56cd905cc8c 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -36,11 +36,6 @@ struct cb7210_priv {
unsigned in_fifo_half_full : 1;
};
-// interfaces
-extern gpib_interface_t cb_pcmcia_interface;
-extern gpib_interface_t cb_pcmcia_accel_interface;
-extern gpib_interface_t cb_pcmcia_unaccel_interface;
-
// interrupt service routines
irqreturn_t cb_pci_interrupt(int irq, void *arg);
irqreturn_t cb7210_interrupt(int irq, void *arg);
@@ -113,9 +108,9 @@ enum hs_regs {
HS_STATUS = 0x8, /* HS_STATUS register */
};
-static inline unsigned long nec7210_iobase(const struct cb7210_priv *cb_priv)
+static inline u32 nec7210_iobase(const struct cb7210_priv *cb_priv)
{
- return (unsigned long)(cb_priv->nec7210_priv.iobase);
+ return cb_priv->nec7210_priv.iobase;
}
static inline int cb7210_page_in_bits(unsigned int page)
diff --git a/drivers/staging/gpib/cec/Makefile b/drivers/staging/gpib/cec/Makefile
index f4638628ff29..b7141e23d4e0 100644
--- a/drivers/staging/gpib/cec/Makefile
+++ b/drivers/staging/gpib/cec/Makefile
@@ -1,3 +1,3 @@
-obj-m += cec_gpib.o
+obj-$(CONFIG_GPIB_CEC_PCI) += cec_gpib.o
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/staging/gpib/cec/cec.h
index 352cf83d8328..040ca70ed708 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/staging/gpib/cec/cec.h
@@ -16,10 +16,6 @@ struct cec_priv {
unsigned int irq;
};
-// interfaces
-extern gpib_interface_t cec_pci_interface;
-extern gpib_interface_t cec_pcmcia_interface;
-
// interface functions
int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index 3dc933deb401..d056cd1d6b3e 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -182,32 +182,32 @@ void cec_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t cec_pci_interface = {
-name: "cec_pci",
-attach : cec_pci_attach,
-detach : cec_pci_detach,
-read : cec_read,
-write : cec_write,
-command : cec_command,
-take_control : cec_take_control,
-go_to_standby : cec_go_to_standby,
-request_system_control : cec_request_system_control,
-interface_clear : cec_interface_clear,
-remote_enable : cec_remote_enable,
-enable_eos : cec_enable_eos,
-disable_eos : cec_disable_eos,
-parallel_poll : cec_parallel_poll,
-parallel_poll_configure : cec_parallel_poll_configure,
-parallel_poll_response : cec_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL, //XXX
-update_status : cec_update_status,
-primary_address : cec_primary_address,
-secondary_address : cec_secondary_address,
-serial_poll_response : cec_serial_poll_response,
-serial_poll_status : cec_serial_poll_status,
-t1_delay : cec_t1_delay,
-return_to_local : cec_return_to_local,
+static gpib_interface_t cec_pci_interface = {
+ .name = "cec_pci",
+ .attach = cec_pci_attach,
+ .detach = cec_pci_detach,
+ .read = cec_read,
+ .write = cec_write,
+ .command = cec_command,
+ .take_control = cec_take_control,
+ .go_to_standby = cec_go_to_standby,
+ .request_system_control = cec_request_system_control,
+ .interface_clear = cec_interface_clear,
+ .remote_enable = cec_remote_enable,
+ .enable_eos = cec_enable_eos,
+ .disable_eos = cec_disable_eos,
+ .parallel_poll = cec_parallel_poll,
+ .parallel_poll_configure = cec_parallel_poll_configure,
+ .parallel_poll_response = cec_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = cec_update_status,
+ .primary_address = cec_primary_address,
+ .secondary_address = cec_secondary_address,
+ .serial_poll_response = cec_serial_poll_response,
+ .serial_poll_status = cec_serial_poll_status,
+ .t1_delay = cec_t1_delay,
+ .return_to_local = cec_return_to_local,
};
static int cec_allocate_private(gpib_board_t *board)
@@ -297,8 +297,8 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
pr_info(" plx9050 base address 0x%lx\n", cec_priv->plx_iobase);
- nec_priv->iobase = (void *)(pci_resource_start(cec_priv->pci_device, 3));
- pr_info(" nec7210 base address 0x%p\n", nec_priv->iobase);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
+ pr_info(" nec7210 base address 0x%x\n", nec_priv->iobase);
isr_flags |= IRQF_SHARED;
if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, "pci-gpib", board)) {
@@ -365,11 +365,15 @@ static int __init cec_init_module(void)
result = pci_register_driver(&cec_pci_driver);
if (result) {
- pr_err("cec_gpib: pci_driver_register failed!\n");
+ pr_err("cec_gpib: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&cec_pci_interface, THIS_MODULE);
+ result = gpib_register_driver(&cec_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("cec_gpib: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
return 0;
}
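
The gpib_os.c hunks that follow annotate the ioctl argument casts and user buffer pointers with __user, so sparse can verify that the untrusted addresses only ever reach copy_from_user()/copy_to_user(). A minimal sketch of the round-trip pattern, with a hypothetical payload struct:

#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_cmd {
	u32 count;
};

static long demo_ioctl(unsigned long arg)
{
	struct demo_cmd cmd;
	void __user *uarg = (void __user *)arg;	/* annotate the cast once */

	if (copy_from_user(&cmd, uarg, sizeof(cmd)))
		return -EFAULT;

	cmd.count++;	/* work on the kernel-side copy */

	if (copy_to_user(uarg, &cmd, sizeof(cmd)))
		return -EFAULT;

	return 0;
}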
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 405237d8cb47..4901e660242e 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -116,56 +116,6 @@ int io_timed_out(gpib_board_t *board)
return 0;
}
-void writeb_wrapper(unsigned int value, void *address)
-{
- writeb(value, address);
-};
-EXPORT_SYMBOL(writeb_wrapper);
-
-void writew_wrapper(unsigned int value, void *address)
-{
- writew(value, address);
-};
-EXPORT_SYMBOL(writew_wrapper);
-
-unsigned int readb_wrapper(void *address)
-{
- return readb(address);
-};
-EXPORT_SYMBOL(readb_wrapper);
-
-unsigned int readw_wrapper(void *address)
-{
- return readw(address);
-};
-EXPORT_SYMBOL(readw_wrapper);
-
-#ifdef CONFIG_HAS_IOPORT
-void outb_wrapper(unsigned int value, void *address)
-{
- outb(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outb_wrapper);
-
-void outw_wrapper(unsigned int value, void *address)
-{
- outw(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outw_wrapper);
-
-unsigned int inb_wrapper(void *address)
-{
- return inb((unsigned long)(address));
-};
-EXPORT_SYMBOL(inb_wrapper);
-
-unsigned int inw_wrapper(void *address)
-{
- return inw((unsigned long)(address));
-};
-EXPORT_SYMBOL(inw_wrapper);
-#endif
-
/* this is a function instead of a constant because of Suse
* defining HZ to be a function call to get_hz()
*/
@@ -536,7 +486,7 @@ int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
return -1;
}
- if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS) {
+ if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS || sad < -1) {
pr_err("gpib: bad address for serial poll");
return -1;
}
@@ -885,7 +835,7 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return -EBUSY;
}
- retval = copy_from_user(&cmd, (void *)arg, sizeof(board_type_ioctl_t));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(board_type_ioctl_t));
if (retval)
return retval;
@@ -929,7 +879,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
unsigned long arg)
{
read_write_ioctl_t read_cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int end_flag = 0;
int retval;
@@ -937,7 +887,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
gpib_descriptor_t *desc;
size_t nbytes;
- retval = copy_from_user(&read_cmd, (void *)arg, sizeof(read_cmd));
+ retval = copy_from_user(&read_cmd, (void __user *)arg, sizeof(read_cmd));
if (retval)
return -EFAULT;
@@ -951,7 +901,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (WARN_ON_ONCE(sizeof(userbuf) > sizeof(read_cmd.buffer_ptr)))
return -EFAULT;
- userbuf = (u8 *)(unsigned long)read_cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)read_cmd.buffer_ptr;
userbuf += read_cmd.completed_transfer_count;
remain = read_cmd.requested_transfer_count - read_cmd.completed_transfer_count;
@@ -989,7 +939,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (remain == 0 || end_flag)
read_ret = 0;
if (retval == 0)
- retval = copy_to_user((void *)arg, &read_cmd, sizeof(read_cmd));
+ retval = copy_to_user((void __user *)arg, &read_cmd, sizeof(read_cmd));
atomic_set(&desc->io_in_progress, 0);
@@ -1004,7 +954,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
gpib_board_t *board, unsigned long arg)
{
read_write_ioctl_t cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int retval;
int fault = 0;
@@ -1012,7 +962,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
size_t bytes_written;
int no_clear_io_in_prog;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1023,7 +973,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
if (!desc)
return -EINVAL;
- userbuf = (u8 *)(unsigned long)cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)cmd.buffer_ptr;
userbuf += cmd.completed_transfer_count;
no_clear_io_in_prog = cmd.end;
@@ -1066,7 +1016,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
cmd.completed_transfer_count = cmd.requested_transfer_count - remain;
if (fault == 0)
- fault = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ fault = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
/*
* no_clear_io_in_prog (cmd.end) is true when io_in_progress should
@@ -1088,13 +1038,13 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
unsigned long arg)
{
read_write_ioctl_t write_cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int retval = 0;
int fault;
gpib_descriptor_t *desc;
- fault = copy_from_user(&write_cmd, (void *)arg, sizeof(write_cmd));
+ fault = copy_from_user(&write_cmd, (void __user *)arg, sizeof(write_cmd));
if (fault)
return -EFAULT;
@@ -1105,7 +1055,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (!desc)
return -EINVAL;
- userbuf = (u8 *)(unsigned long)write_cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)write_cmd.buffer_ptr;
userbuf += write_cmd.completed_transfer_count;
remain = write_cmd.requested_transfer_count - write_cmd.completed_transfer_count;
@@ -1144,7 +1094,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (remain == 0)
retval = 0;
if (fault == 0)
- fault = copy_to_user((void *)arg, &write_cmd, sizeof(write_cmd));
+ fault = copy_to_user((void __user *)arg, &write_cmd, sizeof(write_cmd));
atomic_set(&desc->io_in_progress, 0);
@@ -1161,7 +1111,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
spoll_bytes_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1171,7 +1121,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
else
cmd.num_bytes = num_status_bytes(device);
- retval = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ retval = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1281,7 +1231,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
gpib_file_private_t *file_priv = filep->private_data;
int i;
- retval = copy_from_user(&open_dev_cmd, (void *)arg, sizeof(open_dev_cmd));
+ retval = copy_from_user(&open_dev_cmd, (void __user *)arg, sizeof(open_dev_cmd));
if (retval)
return -EFAULT;
@@ -1317,7 +1267,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
atomic_set(&board->stuck_srq, 0);
open_dev_cmd.handle = i;
- retval = copy_to_user((void *)arg, &open_dev_cmd, sizeof(open_dev_cmd));
+ retval = copy_to_user((void __user *)arg, &open_dev_cmd, sizeof(open_dev_cmd));
if (retval)
return -EFAULT;
@@ -1330,7 +1280,7 @@ static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned lon
gpib_file_private_t *file_priv = filep->private_data;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1358,7 +1308,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
dev_dbg(board->gpib_dev, "pid %i, entering %s()\n", current->pid, __func__);
- retval = copy_from_user(&serial_cmd, (void *)arg, sizeof(serial_cmd));
+ retval = copy_from_user(&serial_cmd, (void __user *)arg, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1367,7 +1317,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &serial_cmd, sizeof(serial_cmd));
+ retval = copy_to_user((void __user *)arg, &serial_cmd, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1381,7 +1331,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&wait_cmd, (void *)arg, sizeof(wait_cmd));
+ retval = copy_from_user(&wait_cmd, (void __user *)arg, sizeof(wait_cmd));
if (retval)
return -EFAULT;
@@ -1394,7 +1344,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &wait_cmd, sizeof(wait_cmd));
+ retval = copy_to_user((void __user *)arg, &wait_cmd, sizeof(wait_cmd));
if (retval)
return -EFAULT;
@@ -1410,7 +1360,7 @@ static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &poll_byte, sizeof(poll_byte));
+ retval = copy_to_user((void __user *)arg, &poll_byte, sizeof(poll_byte));
if (retval)
return -EFAULT;
@@ -1421,14 +1371,14 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
{
online_ioctl_t online_cmd;
int retval;
- void *init_data = NULL;
+ void __user *init_data = NULL;
board->config.init_data = NULL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&online_cmd, (void *)arg, sizeof(online_cmd));
+ retval = copy_from_user(&online_cmd, (void __user *)arg, sizeof(online_cmd));
if (retval)
return -EFAULT;
if (online_cmd.init_data_length > 0) {
@@ -1437,7 +1387,7 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
return -ENOMEM;
if (WARN_ON_ONCE(sizeof(init_data) > sizeof(online_cmd.init_data_ptr)))
return -EFAULT;
- init_data = (void *)(unsigned long)(online_cmd.init_data_ptr);
+ init_data = (void __user *)(unsigned long)(online_cmd.init_data_ptr);
retval = copy_from_user(board->config.init_data, init_data,
online_cmd.init_data_length);
if (retval) {
@@ -1466,7 +1416,7 @@ static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
int enable;
int retval;
- retval = copy_from_user(&enable, (void *)arg, sizeof(enable));
+ retval = copy_from_user(&enable, (void __user *)arg, sizeof(enable));
if (retval)
return -EFAULT;
@@ -1478,7 +1428,7 @@ static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
int synchronous;
int retval;
- retval = copy_from_user(&synchronous, (void *)arg, sizeof(synchronous));
+ retval = copy_from_user(&synchronous, (void __user *)arg, sizeof(synchronous));
if (retval)
return -EFAULT;
@@ -1494,7 +1444,7 @@ static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &lines, sizeof(lines));
+ retval = copy_to_user((void __user *)arg, &lines, sizeof(lines));
if (retval)
return -EFAULT;
@@ -1508,7 +1458,7 @@ static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1544,7 +1494,7 @@ static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1577,7 +1527,7 @@ static int eos_ioctl(gpib_board_t *board, unsigned long arg)
eos_ioctl_t eos_cmd;
int retval;
- retval = copy_from_user(&eos_cmd, (void *)arg, sizeof(eos_cmd));
+ retval = copy_from_user(&eos_cmd, (void __user *)arg, sizeof(eos_cmd));
if (retval)
return -EFAULT;
@@ -1589,7 +1539,7 @@ static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
u8 status_byte;
int retval;
- retval = copy_from_user(&status_byte, (void *)arg, sizeof(status_byte));
+ retval = copy_from_user(&status_byte, (void __user *)arg, sizeof(status_byte));
if (retval)
return -EFAULT;
@@ -1601,7 +1551,8 @@ static int request_service2_ioctl(gpib_board_t *board, unsigned long arg)
request_service2_t request_service2_cmd;
int retval;
- retval = copy_from_user(&request_service2_cmd, (void *)arg, sizeof(request_service2_t));
+ retval = copy_from_user(&request_service2_cmd, (void __user *)arg,
+ sizeof(request_service2_t));
if (retval)
return -EFAULT;
@@ -1617,13 +1568,13 @@ static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&base_addr, (void *)arg, sizeof(base_addr));
+ retval = copy_from_user(&base_addr, (void __user *)arg, sizeof(base_addr));
if (retval)
return -EFAULT;
if (WARN_ON_ONCE(sizeof(void *) > sizeof(base_addr)))
return -EFAULT;
- config->ibbase = (void *)(unsigned long)(base_addr);
+ config->ibbase = base_addr;
return 0;
}
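The `__user` annotation marks pointers into the user address space so sparse can flag direct dereferences; it compiles away to nothing at build time. A minimal sketch of the ioctl copy pattern these hunks standardize on (function and variable names hypothetical):

        static int example_ioctl(unsigned long arg)
        {
                u32 value;

                /* arg carries a user-space address: cast once, annotated */
                if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                        return -EFAULT; /* copy_*_user returns bytes left uncopied */

                value |= 0x1;   /* act on the kernel-space copy */

                if (copy_to_user((void __user *)arg, &value, sizeof(value)))
                        return -EFAULT;
                return 0;
        }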
@@ -1636,7 +1587,7 @@ static int irq_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&irq, (void *)arg, sizeof(irq));
+ retval = copy_from_user(&irq, (void __user *)arg, sizeof(irq));
if (retval)
return -EFAULT;
@@ -1653,7 +1604,7 @@ static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&dma_channel, (void *)arg, sizeof(dma_channel));
+ retval = copy_from_user(&dma_channel, (void __user *)arg, sizeof(dma_channel));
if (retval)
return -EFAULT;
@@ -1669,7 +1620,7 @@ static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&enable, (void *)arg, sizeof(enable));
+ retval = copy_from_user(&enable, (void __user *)arg, sizeof(enable));
if (retval)
return -EFAULT;
@@ -1704,7 +1655,7 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
{
int retval, lock_mutex;
- retval = copy_from_user(&lock_mutex, (void *)arg, sizeof(lock_mutex));
+ retval = copy_from_user(&lock_mutex, (void __user *)arg, sizeof(lock_mutex));
if (retval)
return -EFAULT;
@@ -1748,7 +1699,7 @@ static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
unsigned int timeout;
int retval;
- retval = copy_from_user(&timeout, (void *)arg, sizeof(timeout));
+ retval = copy_from_user(&timeout, (void __user *)arg, sizeof(timeout));
if (retval)
return -EFAULT;
@@ -1763,7 +1714,7 @@ static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
ppoll_config_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1789,7 +1740,7 @@ static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
local_ppoll_mode_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1809,7 +1760,7 @@ static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
int retval;
cmd = board->local_ppoll_mode;
- retval = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ retval = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1823,7 +1774,7 @@ static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
status = board->interface->serial_poll_status(board);
- retval = copy_to_user((void *)arg, &status, sizeof(status));
+ retval = copy_to_user((void __user *)arg, &status, sizeof(status));
if (retval)
return -EFAULT;
@@ -1846,7 +1797,7 @@ static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
info.t1_delay = board->t1_nano_sec;
info.ist = board->ist;
info.no_7_bit_eos = board->interface->no_7_bit_eos;
- retval = copy_to_user((void *)arg, &info, sizeof(info));
+ retval = copy_to_user((void __user *)arg, &info, sizeof(info));
if (retval)
return -EFAULT;
@@ -1858,7 +1809,7 @@ static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg)
unsigned int usec_duration;
int retval;
- retval = copy_from_user(&usec_duration, (void *)arg, sizeof(usec_duration));
+ retval = copy_from_user(&usec_duration, (void __user *)arg, sizeof(usec_duration));
if (retval)
return -EFAULT;
@@ -1873,7 +1824,7 @@ static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&selection, (void *)arg, sizeof(selection));
+ retval = copy_from_user(&selection, (void __user *)arg, sizeof(selection));
if (retval)
return -EFAULT;
@@ -1895,7 +1846,7 @@ static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long a
if (!selection)
return -ENOMEM;
- retval = copy_from_user(selection, (void *)arg, sizeof(select_device_path_ioctl_t));
+ retval = copy_from_user(selection, (void __user *)arg, sizeof(select_device_path_ioctl_t));
if (retval) {
vfree(selection);
return -EFAULT;
@@ -2029,7 +1980,7 @@ static int event_ioctl(gpib_board_t *board, unsigned long arg)
user_event = event;
- retval = copy_to_user((void *)arg, &user_event, sizeof(user_event));
+ retval = copy_to_user((void __user *)arg, &user_event, sizeof(user_event));
if (retval)
return -EFAULT;
@@ -2041,7 +1992,7 @@ static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
rsc_ioctl_t request_control;
int retval;
- retval = copy_from_user(&request_control, (void *)arg, sizeof(request_control));
+ retval = copy_from_user(&request_control, (void __user *)arg, sizeof(request_control));
if (retval)
return -EFAULT;
@@ -2061,7 +2012,7 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
return -EIO;
}
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -2072,13 +2023,13 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-const struct file_operations ib_fops = {
-owner: THIS_MODULE,
-llseek : NULL,
-unlocked_ioctl : &ibioctl,
-compat_ioctl : &ibioctl,
-open : &ibopen,
-release : &ibclose,
+static const struct file_operations ib_fops = {
+ .owner = THIS_MODULE,
+ .llseek = NULL,
+ .unlocked_ioctl = &ibioctl,
+ .compat_ioctl = &ibioctl,
+ .open = &ibopen,
+ .release = &ibclose,
};
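Besides adding `static`, this converts the old GNU `label: value` initializer extension to C99 designated initializers, the kernel's required style; members not named are zero-initialized, so the explicit `.llseek = NULL` is belt-and-braces. A minimal sketch of the idiom:

        static const struct file_operations example_fops = {
                .owner          = THIS_MODULE,
                .unlocked_ioctl = ibioctl,      /* &fn and fn are equivalent here */
                .open           = ibopen,
                .release        = ibclose,
                /* unnamed members (.llseek, .mmap, ...) default to NULL */
        };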
gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
@@ -2094,18 +2045,19 @@ void init_gpib_descriptor(gpib_descriptor_t *desc)
atomic_set(&desc->io_in_progress, 0);
}
-void gpib_register_driver(gpib_interface_t *interface, struct module *provider_module)
+int gpib_register_driver(gpib_interface_t *interface, struct module *provider_module)
{
struct gpib_interface_list_struct *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
- return;
+ return -ENOMEM;
entry->interface = interface;
entry->module = provider_module;
list_add(&entry->list, &registered_drivers);
- pr_info("gpib: registered %s interface\n", interface->name);
+
+ return 0;
}
EXPORT_SYMBOL(gpib_register_driver);
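Returning an int lets callers see the kmalloc() failure instead of a silent no-op registration; every module-init site below is updated accordingly. The expected call pattern is a short sketch (`my_interface` is a placeholder):

        ret = gpib_register_driver(&my_interface, THIS_MODULE);
        if (ret)
                return ret;     /* -ENOMEM if the list entry could not be allocated */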
@@ -2245,7 +2197,7 @@ static int __init gpib_common_init_module(void)
return PTR_ERR(gpib_class);
}
for (i = 0; i < GPIB_MAX_NUM_BOARDS; ++i)
- board_array[i].gpib_dev = device_create(gpib_class, 0,
+ board_array[i].gpib_dev = device_create(gpib_class, NULL,
MKDEV(GPIB_CODE, i), NULL, "gpib%i", i);
return 0;
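Passing NULL rather than 0 for the parent `struct device *` argument is a sparse pointer-vs-integer fix; the call is otherwise unchanged. The same cleanup recurs in the gpio driver below, where `0` sentinels and assignments to `struct gpio_desc *` become NULL.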
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/staging/gpib/common/iblib.c
index db1911cc1b26..5f6fa135f505 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/staging/gpib/common/iblib.c
@@ -695,7 +695,7 @@ int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
/* make sure we only clear status bits that we are reporting */
if (*status & clear_mask || set_mask)
- general_ibstatus(board, status_queue, *status & clear_mask, set_mask, 0);
+ general_ibstatus(board, status_queue, *status & clear_mask, set_mask, NULL);
return 0;
}
diff --git a/drivers/staging/gpib/eastwood/Makefile b/drivers/staging/gpib/eastwood/Makefile
index c74056f959d0..384825195f77 100644
--- a/drivers/staging/gpib/eastwood/Makefile
+++ b/drivers/staging/gpib/eastwood/Makefile
@@ -1,3 +1,3 @@
-obj-m += fluke_gpib.o
+obj-$(CONFIG_GPIB_FLUKE) += fluke_gpib.o
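Moving from a hardcoded `obj-m` to `obj-$(CONFIG_GPIB_FLUKE)` hands the build decision to Kconfig; the same change is applied to the other per-board Makefiles below. How the three symbol states expand (illustrative):

        # CONFIG_GPIB_FLUKE=y  ->  obj-y += fluke_gpib.o    built into vmlinux
        # CONFIG_GPIB_FLUKE=m  ->  obj-m += fluke_gpib.o    built as a module
        # symbol unset         ->  obj-  += fluke_gpib.o    ignored by kbuild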
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 3f938ab0c84d..0304c5de4ccd 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -720,31 +720,31 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-gpib_interface_t fluke_unaccel_interface = {
-name: "fluke_unaccel",
-attach : fluke_attach_holdoff_all,
-detach : fluke_detach,
-read : fluke_read,
-write : fluke_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_unaccel_interface = {
+ .name = "fluke_unaccel",
+ .attach = fluke_attach_holdoff_all,
+ .detach = fluke_detach,
+ .read = fluke_read,
+ .write = fluke_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
/* fluke_hybrid uses dma for writes but not for reads. Added
@@ -755,58 +755,58 @@ return_to_local : fluke_return_to_local,
* register just as the dma controller is also doing a read.
*/
-gpib_interface_t fluke_hybrid_interface = {
-name: "fluke_hybrid",
-attach : fluke_attach_holdoff_all,
-detach : fluke_detach,
-read : fluke_read,
-write : fluke_accel_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_hybrid_interface = {
+ .name = "fluke_hybrid",
+ .attach = fluke_attach_holdoff_all,
+ .detach = fluke_detach,
+ .read = fluke_read,
+ .write = fluke_accel_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
-gpib_interface_t fluke_interface = {
-name: "fluke",
-attach : fluke_attach_holdoff_end,
-detach : fluke_detach,
-read : fluke_accel_read,
-write : fluke_accel_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_interface = {
+ .name = "fluke",
+ .attach = fluke_attach_holdoff_end,
+ .detach = fluke_detach,
+ .read = fluke_accel_read,
+ .write = fluke_accel_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
@@ -1011,12 +1011,12 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- pr_info("gpib: iobase %lx remapped to %p, length=%d\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (int)resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ pr_info("gpib: mmiobase %llx remapped to %p, length=%d\n",
+ (u64)e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase, (int)resource_size(e_priv->gpib_iomem_res));
+ if (!nec_priv->mmiobase) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
@@ -1107,7 +1107,7 @@ void fluke_detach(gpib_board_t *board)
gpib_free_pseudo_irq(board);
nec_priv = &e_priv->nec7210_priv;
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
fluke_paged_write_byte(e_priv, 0, ISR0_IMR0, ISR0_IMR0_PAGE);
nec7210_board_reset(nec_priv, board);
}
@@ -1155,16 +1155,38 @@ static int __init fluke_init_module(void)
result = platform_driver_register(&fluke_gpib_platform_driver);
if (result) {
- pr_err("fluke_gpib: platform_driver_register failed!\n");
+ pr_err("fluke_gpib: platform_driver_register failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
- gpib_register_driver(&fluke_interface, THIS_MODULE);
+ result = gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_hybrid;
+ }
+
+ result = gpib_register_driver(&fluke_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
- pr_info("fluke_gpib\n");
return 0;
+
+err_interface:
+ gpib_unregister_driver(&fluke_hybrid_interface);
+err_hybrid:
+ gpib_unregister_driver(&fluke_unaccel_interface);
+err_unaccel:
+ platform_driver_unregister(&fluke_gpib_platform_driver);
+
+ return result;
}
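The unwind ladder follows the usual kernel convention: undo completed steps in reverse order, jumping to the label that skips everything not yet set up. A generic skeleton of the pattern (names hypothetical):

        result = step_a();
        if (result)
                return result;

        result = step_b();
        if (result)
                goto err_b;     /* only step_a to undo */

        result = step_c();
        if (result)
                goto err_c;     /* undo step_b, then step_a */

        return 0;

        err_c:
                undo_b();
        err_b:
                undo_a();
                return result;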
static void __exit fluke_exit_module(void)
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/staging/gpib/eastwood/fluke_gpib.h
index fcbd42f8f9af..3e4348196b42 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.h
@@ -21,7 +21,7 @@ struct fluke_priv {
struct dma_chan *dma_channel;
u8 *dma_buffer;
int dma_buffer_size;
- void *write_transfer_counter;
+ void __iomem *write_transfer_counter;
};
// cb7210 specific registers and bits
@@ -72,7 +72,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
{
u8 retval;
- retval = readl(nec_priv->iobase + register_num * nec_priv->offset);
+ retval = readl(nec_priv->mmiobase + register_num * nec_priv->offset);
return retval;
}
@@ -80,7 +80,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
static inline void fluke_write_byte_nolock(struct nec7210_priv *nec_priv, uint8_t data,
int register_num)
{
- writel(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writel(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint8_t fluke_paged_read_byte(struct fluke_priv *e_priv,
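These accessors show the MMIO side of the split: pointer arithmetic on the `__iomem` base plus readl()/writel(). (They use 32-bit accessors for byte-wide registers, presumably because the registers sit on word-aligned strides; note `nec_priv->offset`.) A minimal sketch of the mapping lifecycle, with hypothetical register offsets:

        void __iomem *regs;

        regs = ioremap(res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        status = readl(regs + 0x04);    /* read a status register */
        writel(0x01, regs + 0x08);      /* write a control register */

        iounmap(regs);                  /* on detach */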
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 62791db1c34a..f950e7cdd8f8 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
+MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
static int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
@@ -1038,116 +1040,116 @@ static int fmh_gpib_fifo_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
return retval;
}
-gpib_interface_t fmh_gpib_unaccel_interface = {
-name: "fmh_gpib_unaccel",
-attach : fmh_gpib_attach_holdoff_all,
-detach : fmh_gpib_detach,
-read : fmh_gpib_read,
-write : fmh_gpib_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_unaccel_interface = {
+ .name = "fmh_gpib_unaccel",
+ .attach = fmh_gpib_attach_holdoff_all,
+ .detach = fmh_gpib_detach,
+ .read = fmh_gpib_read,
+ .write = fmh_gpib_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_interface = {
-name: "fmh_gpib",
-attach : fmh_gpib_attach_holdoff_end,
-detach : fmh_gpib_detach,
-read : fmh_gpib_accel_read,
-write : fmh_gpib_accel_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_interface = {
+ .name = "fmh_gpib",
+ .attach = fmh_gpib_attach_holdoff_end,
+ .detach = fmh_gpib_detach,
+ .read = fmh_gpib_accel_read,
+ .write = fmh_gpib_accel_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_pci_interface = {
-name: "fmh_gpib_pci",
-attach : fmh_gpib_pci_attach_holdoff_end,
-detach : fmh_gpib_pci_detach,
-read : fmh_gpib_fifo_read,
-write : fmh_gpib_fifo_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_pci_interface = {
+ .name = "fmh_gpib_pci",
+ .attach = fmh_gpib_pci_attach_holdoff_end,
+ .detach = fmh_gpib_pci_detach,
+ .read = fmh_gpib_fifo_read,
+ .write = fmh_gpib_fifo_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_pci_unaccel_interface = {
-name: "fmh_gpib_pci_unaccel",
-attach : fmh_gpib_pci_attach_holdoff_all,
-detach : fmh_gpib_pci_detach,
-read : fmh_gpib_read,
-write : fmh_gpib_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_pci_unaccel_interface = {
+ .name = "fmh_gpib_pci_unaccel",
+ .attach = fmh_gpib_pci_attach_holdoff_all,
+ .detach = fmh_gpib_pci_detach,
+ .read = fmh_gpib_read,
+ .write = fmh_gpib_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
@@ -1419,15 +1421,14 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ if (!nec_priv->mmiobase) {
dev_err(board->dev, "Could not map I/O memory for gpib\n");
return -ENOMEM;
}
- dev_info(board->dev, "iobase 0x%lx remapped to %p, length=%ld\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (unsigned long)resource_size(e_priv->gpib_iomem_res));
+ dev_info(board->dev, "iobase %pr remapped to %p\n",
+ e_priv->gpib_iomem_res, nec_priv->mmiobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma_fifos");
if (!res) {
@@ -1507,14 +1508,14 @@ void fmh_gpib_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res) {
release_mem_region(e_priv->dma_port_res->start,
resource_size(e_priv->dma_port_res));
@@ -1564,12 +1565,12 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
e_priv->gpib_iomem_res = &pci_device->resource[gpib_control_status_pci_resource_index];
e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
- nec_priv->iobase = ioremap(pci_resource_start(pci_device,
+ nec_priv->mmiobase = ioremap(pci_resource_start(pci_device,
gpib_control_status_pci_resource_index),
pci_resource_len(pci_device,
gpib_control_status_pci_resource_index));
dev_info(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
- nec_priv->iobase);
+ nec_priv->mmiobase);
if (e_priv->dma_port_res->flags & IORESOURCE_MEM) {
e_priv->fifo_base = ioremap(pci_resource_start(pci_device,
@@ -1632,14 +1633,14 @@ void fmh_gpib_pci_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res || e_priv->gpib_iomem_res)
pci_release_regions(to_pci_dev(board->dev));
if (board->dev)
@@ -1691,23 +1692,54 @@ static int __init fmh_gpib_init_module(void)
result = platform_driver_register(&fmh_gpib_platform_driver);
if (result) {
- pr_err("fmh_gpib: platform_driver_register failed!\n");
+ pr_err("fmh_gpib: platform_driver_register failed: error = %d\n", result);
return result;
}
result = pci_register_driver(&fmh_gpib_pci_driver);
if (result) {
- pr_err("fmh_gpib: pci_driver_register failed!\n");
- return result;
+ pr_err("fmh_gpib: pci_register_driver failed: error = %d\n", result);
+ goto err_pci_driver;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci_unaccel;
}
- gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
+ result = gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci;
+ }
- pr_info("fmh_gpib\n");
return 0;
+
+err_pci:
+ gpib_unregister_driver(&fmh_gpib_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&fmh_gpib_interface);
+err_interface:
+ gpib_unregister_driver(&fmh_gpib_unaccel_interface);
+err_unaccel:
+ pci_unregister_driver(&fmh_gpib_pci_driver);
+err_pci_driver:
+ platform_driver_unregister(&fmh_gpib_platform_driver);
+
+ return result;
}
static void __exit fmh_gpib_exit_module(void)
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
index 43bfc89d2a6f..de6fd2164414 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
@@ -33,7 +33,7 @@ struct fmh_priv {
u8 *dma_buffer;
int dma_buffer_size;
int dma_burst_length;
- void *fifo_base;
+ void __iomem *fifo_base;
unsigned supports_fifo_interrupts : 1;
};
@@ -127,13 +127,13 @@ static const unsigned int fifo_max_burst_length_mask = 0x00ff;
static inline uint8_t gpib_cs_read_byte(struct nec7210_priv *nec_priv,
unsigned int register_num)
{
- return readb(nec_priv->iobase + register_num * nec_priv->offset);
+ return readb(nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline void gpib_cs_write_byte(struct nec7210_priv *nec_priv, uint8_t data,
unsigned int register_num)
{
- writeb(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writeb(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint16_t fifos_read(struct fmh_priv *fmh_priv, int register_num)
diff --git a/drivers/staging/gpib/gpio/Makefile b/drivers/staging/gpib/gpio/Makefile
index a31ded6e5924..00ea52abdda7 100644
--- a/drivers/staging/gpib/gpio/Makefile
+++ b/drivers/staging/gpib/gpio/Makefile
@@ -1,4 +1,4 @@
-obj-m += gpib_bitbang.o
+obj-$(CONFIG_GPIB_GPIO) += gpib_bitbang.o
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index a2d562cbd65b..828c99ea613f 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -147,7 +147,7 @@ DEFINE_LED_TRIGGER(ledtrig_gpib);
led_trigger_event(ledtrig_gpib, LED_OFF); } \
while (0)
-struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
+static struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
#define D01 all_descriptors[0]
#define D02 all_descriptors[1]
@@ -175,7 +175,7 @@ struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
/* YOGA adapter uses a global enable for the buffer chips, re-using the TE pin */

#define YOGA_ENABLE TE
-int gpios_vector[] = {
+static int gpios_vector[] = {
D01_pin_nr,
D02_pin_nr,
D03_pin_nr,
@@ -265,7 +265,7 @@ static struct gpiod_lookup_table gpib_gpio_table_0 = {
static struct gpiod_lookup_table *lookup_tables[] = {
&gpib_gpio_table_0,
&gpib_gpio_table_1,
- 0
+ NULL
};
/* struct which defines private_data for gpio driver */
@@ -315,7 +315,7 @@ struct bb_priv {
enum listener_function_state listener_state;
};
-inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
+static inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi);
static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
@@ -1119,7 +1119,7 @@ static void release_gpios(void)
for (j = 0 ; j < NUM_PINS ; j++) {
if (all_descriptors[j]) {
gpiod_put(all_descriptors[j]);
- all_descriptors[j] = 0;
+ all_descriptors[j] = NULL;
}
}
}
@@ -1312,36 +1312,41 @@ bb_attach_out:
return retval;
}
-gpib_interface_t bb_interface = {
-name: NAME,
-attach : bb_attach,
-detach : bb_detach,
-read : bb_read,
-write : bb_write,
-command : bb_command,
-take_control : bb_take_control,
-go_to_standby : bb_go_to_standby,
-request_system_control : bb_request_system_control,
-interface_clear : bb_interface_clear,
-remote_enable : bb_remote_enable,
-enable_eos : bb_enable_eos,
-disable_eos : bb_disable_eos,
-parallel_poll : bb_parallel_poll,
-parallel_poll_configure : bb_parallel_poll_configure,
-parallel_poll_response : bb_parallel_poll_response,
-line_status : bb_line_status,
-update_status : bb_update_status,
-primary_address : bb_primary_address,
-secondary_address : bb_secondary_address,
-serial_poll_response : bb_serial_poll_response,
-serial_poll_status : bb_serial_poll_status,
-t1_delay : bb_t1_delay,
-return_to_local : bb_return_to_local,
+static gpib_interface_t bb_interface = {
+ .name = NAME,
+ .attach = bb_attach,
+ .detach = bb_detach,
+ .read = bb_read,
+ .write = bb_write,
+ .command = bb_command,
+ .take_control = bb_take_control,
+ .go_to_standby = bb_go_to_standby,
+ .request_system_control = bb_request_system_control,
+ .interface_clear = bb_interface_clear,
+ .remote_enable = bb_remote_enable,
+ .enable_eos = bb_enable_eos,
+ .disable_eos = bb_disable_eos,
+ .parallel_poll = bb_parallel_poll,
+ .parallel_poll_configure = bb_parallel_poll_configure,
+ .parallel_poll_response = bb_parallel_poll_response,
+ .line_status = bb_line_status,
+ .update_status = bb_update_status,
+ .primary_address = bb_primary_address,
+ .secondary_address = bb_secondary_address,
+ .serial_poll_response = bb_serial_poll_response,
+ .serial_poll_status = bb_serial_poll_status,
+ .t1_delay = bb_t1_delay,
+ .return_to_local = bb_return_to_local,
};
static int __init bb_init_module(void)
{
- gpib_register_driver(&bb_interface, THIS_MODULE);
+ int result = gpib_register_driver(&bb_interface, THIS_MODULE);
+
+ if (result) {
+ pr_err("gpib_bitbang: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
dbg_printk(0, "module loaded with pin map \"%s\"%s\n",
pin_map, (sn7516x_used) ? " and SN7516x driver support" : "");
diff --git a/drivers/staging/gpib/hp_82335/Makefile b/drivers/staging/gpib/hp_82335/Makefile
index 8b7a552e9355..305ce44ee48a 100644
--- a/drivers/staging/gpib/hp_82335/Makefile
+++ b/drivers/staging/gpib/hp_82335/Makefile
@@ -1,4 +1,4 @@
-obj-m += hp82335.o
+obj-$(CONFIG_GPIB_HP82335) += hp82335.o
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index 40afe42aea47..700d1ba029d2 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -9,6 +9,7 @@
*/
#include "hp82335.h"
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -172,32 +173,32 @@ void hp82335_return_to_local(gpib_board_t *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-gpib_interface_t hp82335_interface = {
-name: "hp82335",
-attach : hp82335_attach,
-detach : hp82335_detach,
-read : hp82335_read,
-write : hp82335_write,
-command : hp82335_command,
-request_system_control : hp82335_request_system_control,
-take_control : hp82335_take_control,
-go_to_standby : hp82335_go_to_standby,
-interface_clear : hp82335_interface_clear,
-remote_enable : hp82335_remote_enable,
-enable_eos : hp82335_enable_eos,
-disable_eos : hp82335_disable_eos,
-parallel_poll : hp82335_parallel_poll,
-parallel_poll_configure : hp82335_parallel_poll_configure,
-parallel_poll_response : hp82335_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp82335_line_status,
-update_status : hp82335_update_status,
-primary_address : hp82335_primary_address,
-secondary_address : hp82335_secondary_address,
-serial_poll_response : hp82335_serial_poll_response,
-serial_poll_status : hp82335_serial_poll_status,
-t1_delay : hp82335_t1_delay,
-return_to_local : hp82335_return_to_local,
+static gpib_interface_t hp82335_interface = {
+ .name = "hp82335",
+ .attach = hp82335_attach,
+ .detach = hp82335_detach,
+ .read = hp82335_read,
+ .write = hp82335_write,
+ .command = hp82335_command,
+ .request_system_control = hp82335_request_system_control,
+ .take_control = hp82335_take_control,
+ .go_to_standby = hp82335_go_to_standby,
+ .interface_clear = hp82335_interface_clear,
+ .remote_enable = hp82335_remote_enable,
+ .enable_eos = hp82335_enable_eos,
+ .disable_eos = hp82335_disable_eos,
+ .parallel_poll = hp82335_parallel_poll,
+ .parallel_poll_configure = hp82335_parallel_poll_configure,
+ .parallel_poll_response = hp82335_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp82335_line_status,
+ .update_status = hp82335_update_status,
+ .primary_address = hp82335_primary_address,
+ .secondary_address = hp82335_secondary_address,
+ .serial_poll_response = hp82335_serial_poll_response,
+ .serial_poll_status = hp82335_serial_poll_status,
+ .t1_delay = hp82335_t1_delay,
+ .return_to_local = hp82335_return_to_local,
};
int hp82335_allocate_private(gpib_board_t *board)
@@ -233,7 +234,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
{
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
- writeb(0, tms_priv->iobase + HPREG_INTR_CLEAR);
+ writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
@@ -241,7 +242,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
int retval;
- const unsigned long upper_iomem_base = (unsigned long)config->ibbase + hp82335_rom_size;
+ const unsigned long upper_iomem_base = config->ibbase + hp82335_rom_size;
board->status = 0;
@@ -253,7 +254,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
tms_priv->write_byte = hp82335_write_byte;
tms_priv->offset = 1;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0xc4000:
case 0xc8000:
case 0xcc000:
@@ -271,7 +272,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
case 0xfc000:
break;
default:
- pr_err("hp82335: invalid base io address 0x%p\n", config->ibbase);
+ pr_err("hp82335: invalid base io address 0x%u\n", config->ibbase);
return -EINVAL;
}
if (!request_mem_region(upper_iomem_base, hp82335_upper_iomem_size, "hp82335")) {
@@ -280,9 +281,9 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
return -EBUSY;
}
hp_priv->raw_iobase = upper_iomem_base;
- tms_priv->iobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
+ tms_priv->mmiobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
pr_info("hp82335: upper half of 82335 iomem region 0x%lx remapped to 0x%p\n",
- hp_priv->raw_iobase, tms_priv->iobase);
+ hp_priv->raw_iobase, tms_priv->mmiobase);
retval = request_irq(config->ibirq, hp82335_interrupt, 0, "hp82335", board);
if (retval) {
@@ -296,7 +297,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp82335_clear_interrupt(hp_priv);
- writeb(INTR_ENABLE, tms_priv->iobase + HPREG_CCR);
+ writeb(INTR_ENABLE, tms_priv->mmiobase + HPREG_CCR);
tms9914_online(board, tms_priv);
@@ -312,10 +313,10 @@ void hp82335_detach(gpib_board_t *board)
tms_priv = &hp_priv->tms9914_priv;
if (hp_priv->irq)
free_irq(hp_priv->irq, board);
- if (tms_priv->iobase) {
- writeb(0, tms_priv->iobase + HPREG_CCR);
+ if (tms_priv->mmiobase) {
+ writeb(0, tms_priv->mmiobase + HPREG_CCR);
tms9914_board_reset(tms_priv);
- iounmap((void *)tms_priv->iobase);
+ iounmap(tms_priv->mmiobase);
}
if (hp_priv->raw_iobase)
release_mem_region(hp_priv->raw_iobase, hp82335_upper_iomem_size);
@@ -325,7 +326,13 @@ void hp82335_detach(gpib_board_t *board)
static int __init hp82335_init_module(void)
{
- gpib_register_driver(&hp82335_interface, THIS_MODULE);
+ int result = gpib_register_driver(&hp82335_interface, THIS_MODULE);
+
+ if (result) {
+ pr_err("hp82335: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
+
return 0;
}
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/staging/gpib/hp_82335/hp82335.h
index 5e5297af731a..4b185d7c5188 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/staging/gpib/hp_82335/hp82335.h
@@ -17,9 +17,6 @@ struct hp82335_priv {
unsigned long raw_iobase;
};
-// interfaces
-extern gpib_interface_t hp82335_interface;
-
// interface functions
int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length,
diff --git a/drivers/staging/gpib/hp_82341/Makefile b/drivers/staging/gpib/hp_82341/Makefile
index 1fe7db4f8ca4..21367310a17e 100644
--- a/drivers/staging/gpib/hp_82341/Makefile
+++ b/drivers/staging/gpib/hp_82341/Makefile
@@ -1,2 +1,2 @@
-obj-m += hp_82341.o
+obj-$(CONFIG_GPIB_HP82341) += hp_82341.o
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 8ad1c885a9fb..0ddae295912f 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -402,59 +402,59 @@ void hp_82341_return_to_local(gpib_board_t *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-gpib_interface_t hp_82341_unaccel_interface = {
-name: "hp_82341_unaccel",
-attach : hp_82341_attach,
-detach : hp_82341_detach,
-read : hp_82341_read,
-write : hp_82341_write,
-command : hp_82341_command,
-request_system_control : hp_82341_request_system_control,
-take_control : hp_82341_take_control,
-go_to_standby : hp_82341_go_to_standby,
-interface_clear : hp_82341_interface_clear,
-remote_enable : hp_82341_remote_enable,
-enable_eos : hp_82341_enable_eos,
-disable_eos : hp_82341_disable_eos,
-parallel_poll : hp_82341_parallel_poll,
-parallel_poll_configure : hp_82341_parallel_poll_configure,
-parallel_poll_response : hp_82341_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp_82341_line_status,
-update_status : hp_82341_update_status,
-primary_address : hp_82341_primary_address,
-secondary_address : hp_82341_secondary_address,
-serial_poll_response : hp_82341_serial_poll_response,
-serial_poll_status : hp_82341_serial_poll_status,
-t1_delay : hp_82341_t1_delay,
-return_to_local : hp_82341_return_to_local,
+static gpib_interface_t hp_82341_unaccel_interface = {
+ .name = "hp_82341_unaccel",
+ .attach = hp_82341_attach,
+ .detach = hp_82341_detach,
+ .read = hp_82341_read,
+ .write = hp_82341_write,
+ .command = hp_82341_command,
+ .request_system_control = hp_82341_request_system_control,
+ .take_control = hp_82341_take_control,
+ .go_to_standby = hp_82341_go_to_standby,
+ .interface_clear = hp_82341_interface_clear,
+ .remote_enable = hp_82341_remote_enable,
+ .enable_eos = hp_82341_enable_eos,
+ .disable_eos = hp_82341_disable_eos,
+ .parallel_poll = hp_82341_parallel_poll,
+ .parallel_poll_configure = hp_82341_parallel_poll_configure,
+ .parallel_poll_response = hp_82341_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp_82341_line_status,
+ .update_status = hp_82341_update_status,
+ .primary_address = hp_82341_primary_address,
+ .secondary_address = hp_82341_secondary_address,
+ .serial_poll_response = hp_82341_serial_poll_response,
+ .serial_poll_status = hp_82341_serial_poll_status,
+ .t1_delay = hp_82341_t1_delay,
+ .return_to_local = hp_82341_return_to_local,
};
-gpib_interface_t hp_82341_interface = {
-name: "hp_82341",
-attach : hp_82341_attach,
-detach : hp_82341_detach,
-read : hp_82341_accel_read,
-write : hp_82341_accel_write,
-command : hp_82341_command,
-request_system_control : hp_82341_request_system_control,
-take_control : hp_82341_take_control,
-go_to_standby : hp_82341_go_to_standby,
-interface_clear : hp_82341_interface_clear,
-remote_enable : hp_82341_remote_enable,
-enable_eos : hp_82341_enable_eos,
-disable_eos : hp_82341_disable_eos,
-parallel_poll : hp_82341_parallel_poll,
-parallel_poll_configure : hp_82341_parallel_poll_configure,
-parallel_poll_response : hp_82341_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp_82341_line_status,
-update_status : hp_82341_update_status,
-primary_address : hp_82341_primary_address,
-secondary_address : hp_82341_secondary_address,
-serial_poll_response : hp_82341_serial_poll_response,
-t1_delay : hp_82341_t1_delay,
-return_to_local : hp_82341_return_to_local,
+static gpib_interface_t hp_82341_interface = {
+ .name = "hp_82341",
+ .attach = hp_82341_attach,
+ .detach = hp_82341_detach,
+ .read = hp_82341_accel_read,
+ .write = hp_82341_accel_write,
+ .command = hp_82341_command,
+ .request_system_control = hp_82341_request_system_control,
+ .take_control = hp_82341_take_control,
+ .go_to_standby = hp_82341_go_to_standby,
+ .interface_clear = hp_82341_interface_clear,
+ .remote_enable = hp_82341_remote_enable,
+ .enable_eos = hp_82341_enable_eos,
+ .disable_eos = hp_82341_disable_eos,
+ .parallel_poll = hp_82341_parallel_poll,
+ .parallel_poll_configure = hp_82341_parallel_poll_configure,
+ .parallel_poll_response = hp_82341_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp_82341_line_status,
+ .update_status = hp_82341_update_status,
+ .primary_address = hp_82341_primary_address,
+ .secondary_address = hp_82341_secondary_address,
+ .serial_poll_response = hp_82341_serial_poll_response,
+ .t1_delay = hp_82341_t1_delay,
+ .return_to_local = hp_82341_return_to_local,
};
int hp_82341_allocate_private(gpib_board_t *board)
@@ -473,12 +473,12 @@ void hp_82341_free_private(gpib_board_t *board)
static uint8_t hp_82341_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num);
+ return inb(priv->iobase + register_num);
}
static void hp_82341_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num);
+ outb(data, priv->iobase + register_num);
}
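The port-I/O counterpart: with `iobase` now an integer, the `(unsigned long)` pointer casts disappear and inb()/outb() receive what they expect, a port number. Sketch (register names are placeholder offsets):

        u32 base = priv->iobase;                /* I/O port base, not a pointer */
        u8 status;

        status = inb(base + STATUS_REG);        /* STATUS_REG: hypothetical */
        outb(0x40, base + CONTROL_REG);         /* CONTROL_REG: hypothetical */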
static int hp_82341_find_isapnp_board(struct pnp_dev **dev)
@@ -682,8 +682,8 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
- unsigned long start_addr;
- void *iobase;
+ u32 start_addr;
+ u32 iobase;
int irq;
int i;
int retval;
@@ -704,7 +704,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (retval < 0)
return retval;
hp_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
hp_priv->hw_version = HW_VERSION_82341D;
hp_priv->io_region_offset = 0x8;
@@ -714,9 +714,9 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->hw_version = HW_VERSION_82341C;
hp_priv->io_region_offset = 0x400;
}
- pr_info("hp_82341: base io 0x%p\n", iobase);
+ pr_info("hp_82341: base io 0x%u\n", iobase);
for (i = 0; i < hp_82341_num_io_regions; ++i) {
- start_addr = (unsigned long)(iobase) + i * hp_priv->io_region_offset;
+ start_addr = iobase + i * hp_priv->io_region_offset;
if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) {
pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n",
start_addr,
@@ -725,7 +725,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
hp_priv->iobase[i] = start_addr;
}
- tms_priv->iobase = (void *)(hp_priv->iobase[2]);
+ tms_priv->iobase = hp_priv->iobase[2];
if (hp_priv->hw_version == HW_VERSION_82341D) {
retval = isapnp_cfg_begin(hp_priv->pnp_dev->card->number,
hp_priv->pnp_dev->number);
@@ -807,8 +807,21 @@ MODULE_DEVICE_TABLE(pnp, hp_82341_pnp_table);
static int __init hp_82341_init_module(void)
{
- gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&hp_82341_interface, THIS_MODULE);
+ int ret;
+
+ ret = gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&hp_82341_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ gpib_unregister_driver(&hp_82341_unaccel_interface);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/staging/gpib/hp_82341/hp_82341.h
index 7c391860b399..0065ebd9747c 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/staging/gpib/hp_82341/hp_82341.h
@@ -26,8 +26,6 @@ struct hp_82341_priv {
enum hp_82341_hardware_version hw_version;
};
-// interfaces
-extern gpib_interface_t hp_82341_interface;
// interface functions
int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
diff --git a/drivers/staging/gpib/include/amcc5920.h b/drivers/staging/gpib/include/amcc5920.h
index 766b3799223f..7a88bd282feb 100644
--- a/drivers/staging/gpib/include/amcc5920.h
+++ b/drivers/staging/gpib/include/amcc5920.h
@@ -22,7 +22,7 @@ static const int bits_per_region = 8;
static inline uint32_t amcc_wait_state_bits(unsigned int region, unsigned int num_wait_states)
{
- return (num_wait_states & 0x7) << (-region * bits_per_region);
+ return (num_wait_states & 0x7) << (--region * bits_per_region);
};
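The old unary minus produced a negative shift count for every region but 0, which is undefined behavior in C. The pre-decrement makes region 1 shift by 0 bits, region 2 by 8, and so on (regions are evidently 1-based); `(region - 1) * bits_per_region` would express the same thing without mutating the parameter. Worked values, assuming bits_per_region == 8:

        amcc_wait_state_bits(1, 3);     /* 3 << 0  == 0x00000003 */
        amcc_wait_state_bits(2, 3);     /* 3 << 8  == 0x00000300 */
        amcc_wait_state_bits(3, 3);     /* 3 << 16 == 0x00030000 */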
enum amcc_prefetch_bits {
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index 5fc42b645ab7..d35fdd391f7e 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -16,8 +16,9 @@
#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
-void gpib_register_driver(gpib_interface_t *interface, struct module *mod);
+int gpib_register_driver(gpib_interface_t *interface, struct module *mod);
void gpib_unregister_driver(gpib_interface_t *interface);
struct pci_dev *gpib_pci_get_device(const gpib_board_config_t *config, unsigned int vendor_id,
unsigned int device_id, struct pci_dev *from);
@@ -35,16 +36,5 @@ extern gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
extern struct list_head registered_drivers;
-#include <linux/io.h>
-
-void writeb_wrapper(unsigned int value, void *address);
-unsigned int readb_wrapper(void *address);
-void outb_wrapper(unsigned int value, void *address);
-unsigned int inb_wrapper(void *address);
-void writew_wrapper(unsigned int value, void *address);
-unsigned int readw_wrapper(void *address);
-void outw_wrapper(unsigned int value, void *address);
-unsigned int inw_wrapper(void *address);
-
#endif // _GPIB_P_H
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index ee2643da6d71..b41781a55a60 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -31,7 +31,8 @@ typedef struct {
void *init_data;
int init_data_length;
/* IO base address to use for non-pnp cards (set by core, driver should make local copy) */
- void *ibbase;
+ u32 ibbase;
+ void __iomem *mmibbase;
/* IRQ to use for non-pnp cards (set by core, driver should make local copy) */
unsigned int ibirq;
/* dma channel to use for non-pnp cards (set by core, driver should make local copy) */
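This mirrors the chip-private structs: `ibbase` becomes the integer base address user space supplies through iobase_ioctl() above (hence `config->ibbase = base_addr;` with no cast), and the new `mmibbase` reserves an `__iomem` slot for memory-mapped boards. The hp82335 `switch` on `config->ibbase` now operates on the integer directly.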
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index c00aba4ce846..ca998c4a84bf 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -18,7 +18,10 @@
/* struct used to provide variables local to a nec7210 chip */
struct nec7210_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive nec7210 io addresses
unsigned int dma_channel;
u8 *dma_buffer;
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index 456b488212d2..d8c8d1c9b131 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -20,7 +20,10 @@ enum tms9914_holdoff_mode {
/* struct used to provide variables local to a tms9914 chip */
struct tms9914_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive tms9914 io addresses
unsigned int dma_channel;
// software copy of bits written to interrupt mask registers
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/staging/gpib/ines/Makefile
index cdcaa59a4e39..6b6e480fd811 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/staging/gpib/ines/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += ines_gpib.o
+obj-$(CONFIG_GPIB_INES) += ines_gpib.o
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index 7e8302619998..3918737fa21a 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -35,13 +35,6 @@ struct ines_priv {
u8 extend_mode_bits;
};
-// interfaces
-extern gpib_interface_t ines_pci_interface;
-extern gpib_interface_t ines_pci_accel_interface;
-extern gpib_interface_t ines_pcmcia_interface;
-extern gpib_interface_t ines_pcmcia_accel_interface;
-extern gpib_interface_t ines_pcmcia_unaccel_interface;
-
// interface functions
int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length,
@@ -83,14 +76,14 @@ void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
/* inb/outb wrappers */
static inline unsigned int ines_inb(struct ines_priv *priv, unsigned int register_number)
{
- return inb((unsigned long)(priv->nec7210_priv.iobase) +
+ return inb(priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
static inline void ines_outb(struct ines_priv *priv, unsigned int value,
unsigned int register_number)
{
- outb(value, (unsigned long)(priv->nec7210_priv.iobase) +
+ outb(value, priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index 9d8387c3bf01..22a05a287bce 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -273,10 +273,10 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
if (priv->pci_chip_type == PCI_CHIP_QUANCOM) {
- if ((inb((unsigned long)nec_priv->iobase +
+ if ((inb(nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG) &
QUANCOM_IRQ_ASSERTED_BIT))
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
}
@@ -357,38 +357,38 @@ struct ines_pci_id {
enum ines_pci_chip pci_chip_type;
};
-struct ines_pci_id pci_ids[] = {
- {vendor_id: PCI_VENDOR_ID_PLX,
- device_id : PCI_DEVICE_ID_PLX_9050,
- subsystem_vendor_id : PCI_VENDOR_ID_PLX,
- subsystem_device_id : PCI_SUBDEVICE_ID_INES_GPIB,
- gpib_region : 2,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_PLX9050,
+static struct ines_pci_id pci_ids[] = {
+ {.vendor_id = PCI_VENDOR_ID_PLX,
+ .device_id = PCI_DEVICE_ID_PLX_9050,
+ .subsystem_vendor_id = PCI_VENDOR_ID_PLX,
+ .subsystem_device_id = PCI_SUBDEVICE_ID_INES_GPIB,
+ .gpib_region = 2,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_PLX9050,
},
- {vendor_id: PCI_VENDOR_ID_AMCC,
- device_id : PCI_DEVICE_ID_INES_GPIB_AMCC,
- subsystem_vendor_id : PCI_VENDOR_ID_AMCC,
- subsystem_device_id : PCI_SUBDEVICE_ID_INES_GPIB,
- gpib_region : 1,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_AMCC5920,
+ {.vendor_id = PCI_VENDOR_ID_AMCC,
+ .device_id = PCI_DEVICE_ID_INES_GPIB_AMCC,
+ .subsystem_vendor_id = PCI_VENDOR_ID_AMCC,
+ .subsystem_device_id = PCI_SUBDEVICE_ID_INES_GPIB,
+ .gpib_region = 1,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_AMCC5920,
},
- {vendor_id: PCI_VENDOR_ID_INES_QUICKLOGIC,
- device_id : PCI_DEVICE_ID_INES_GPIB_QL5030,
- subsystem_vendor_id : PCI_VENDOR_ID_INES_QUICKLOGIC,
- subsystem_device_id : PCI_DEVICE_ID_INES_GPIB_QL5030,
- gpib_region : 1,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_QUICKLOGIC5030,
+ {.vendor_id = PCI_VENDOR_ID_INES_QUICKLOGIC,
+ .device_id = PCI_DEVICE_ID_INES_GPIB_QL5030,
+ .subsystem_vendor_id = PCI_VENDOR_ID_INES_QUICKLOGIC,
+ .subsystem_device_id = PCI_DEVICE_ID_INES_GPIB_QL5030,
+ .gpib_region = 1,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_QUICKLOGIC5030,
},
- {vendor_id: PCI_VENDOR_ID_QUANCOM,
- device_id : PCI_DEVICE_ID_QUANCOM_GPIB,
- subsystem_vendor_id : -1,
- subsystem_device_id : -1,
- gpib_region : 0,
- io_offset : 4,
- pci_chip_type : PCI_CHIP_QUANCOM,
+ {.vendor_id = PCI_VENDOR_ID_QUANCOM,
+ .device_id = PCI_DEVICE_ID_QUANCOM_GPIB,
+ .subsystem_vendor_id = -1,
+ .subsystem_device_id = -1,
+ .gpib_region = 0,
+ .io_offset = 4,
+ .pci_chip_type = PCI_CHIP_QUANCOM,
},
};
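
Aside: the table above trades the old GNU "field: value" initializer labels, a GCC extension that checkpatch warns about, for standard C99 designated initializers. A standalone illustration with a made-up struct (the hex values are the PLX IDs from the first table entry):

	struct id { unsigned int vendor; unsigned int device; };

	/* GNU extension, non-portable:
	 *	struct id legacy = { vendor: 0x10b5, device: 0x9050 };
	 */

	/* C99 designated initializers: order-independent, self-documenting */
	static const struct id modern = {
		.vendor = 0x10b5,	/* PCI_VENDOR_ID_PLX */
		.device = 0x9050,	/* PCI_DEVICE_ID_PLX_9050 */
	};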
@@ -540,116 +540,116 @@ void ines_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t ines_pci_unaccel_interface = {
-name: "ines_pci_unaccel",
-attach : ines_pci_attach,
-detach : ines_pci_detach,
-read : ines_read,
-write : ines_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_unaccel_interface = {
+ .name = "ines_pci_unaccel",
+ .attach = ines_pci_attach,
+ .detach = ines_pci_detach,
+ .read = ines_read,
+ .write = ines_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pci_interface = {
-name: "ines_pci",
-attach : ines_pci_accel_attach,
-detach : ines_pci_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_interface = {
+ .name = "ines_pci",
+ .attach = ines_pci_accel_attach,
+ .detach = ines_pci_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pci_accel_interface = {
-name: "ines_pci_accel",
-attach : ines_pci_accel_attach,
-detach : ines_pci_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_accel_interface = {
+ .name = "ines_pci_accel",
+ .attach = ines_pci_accel_attach,
+ .detach = ines_pci_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_isa_interface = {
-name: "ines_isa",
-attach : ines_isa_attach,
-detach : ines_isa_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_isa_interface = {
+ .name = "ines_isa",
+ .attach = ines_isa_attach,
+ .detach = ines_isa_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
static int ines_allocate_private(gpib_board_t *board)
@@ -780,8 +780,8 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
if (pci_request_regions(ines_priv->pci_device, "ines-gpib"))
return -1;
- nec_priv->iobase = (void *)(pci_resource_start(ines_priv->pci_device,
- found_id.gpib_region));
+ nec_priv->iobase = pci_resource_start(ines_priv->pci_device,
+ found_id.gpib_region);
ines_priv->pci_chip_type = found_id.pci_chip_type;
nec_priv->offset = found_id.io_offset;
@@ -840,7 +840,7 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
}
break;
case PCI_CHIP_QUANCOM:
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
case PCI_CHIP_QUICKLOGIC5030:
@@ -899,8 +899,8 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (!request_region((unsigned long)config->ibbase, ines_isa_iosize, "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%p already in use\n", config->ibbase);
+ if (!request_region(config->ibbase, ines_isa_iosize, "ines_gpib")) {
+ pr_err("ines_gpib: ioports at 0x%x already in use\n", config->ibbase);
return -1;
}
nec_priv->iobase = config->ibbase;
@@ -931,7 +931,7 @@ void ines_pci_detach(gpib_board_t *board)
break;
case PCI_CHIP_QUANCOM:
if (nec_priv->iobase)
- outb(0, (unsigned long)(nec_priv->iobase) +
+ outb(0, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
default:
@@ -960,7 +960,7 @@ void ines_isa_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_isa_iosize);
+ release_region(nec_priv->iobase, ines_isa_iosize);
}
}
ines_free_private(board);
@@ -1122,7 +1122,7 @@ static int ines_gpib_config(struct pcmcia_device *link)
{
struct local_info *dev;
int retval;
- void *virt;
+ void __iomem *virt;
dev = link->priv;
DEBUG(0, "%s(0x%p)\n", __func__, link);
@@ -1156,7 +1156,7 @@ static int ines_gpib_config(struct pcmcia_device *link)
}
virt = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
writeb((link->resource[2]->start >> 2) & 0xff, virt + 0xf0); // IOWindow base
- iounmap((void *)virt);
+ iounmap(virt);
/*
* This actually configures the PCMCIA socket -- setting up
@@ -1227,100 +1227,94 @@ static struct pcmcia_driver ines_gpib_cs_driver = {
.resume = ines_gpib_resume,
};
-int ines_pcmcia_init_module(void)
-{
- pcmcia_register_driver(&ines_gpib_cs_driver);
- return 0;
-}
-
void ines_pcmcia_cleanup_module(void)
{
DEBUG(0, "ines_cs: unloading\n");
pcmcia_unregister_driver(&ines_gpib_cs_driver);
}
-gpib_interface_t ines_pcmcia_unaccel_interface = {
-name: "ines_pcmcia_unaccel",
-attach : ines_pcmcia_attach,
-detach : ines_pcmcia_detach,
-read : ines_read,
-write : ines_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_unaccel_interface = {
+ .name = "ines_pcmcia_unaccel",
+ .attach = ines_pcmcia_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_read,
+ .write = ines_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pcmcia_accel_interface = {
-name: "ines_pcmcia_accel",
-attach : ines_pcmcia_accel_attach,
-detach : ines_pcmcia_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_accel_interface = {
+ .name = "ines_pcmcia_accel",
+ .attach = ines_pcmcia_accel_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pcmcia_interface = {
-name: "ines_pcmcia",
-attach : ines_pcmcia_accel_attach,
-detach : ines_pcmcia_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_interface = {
+ .name = "ines_pcmcia",
+ .attach = ines_pcmcia_accel_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
@@ -1348,14 +1342,14 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (request_region(curr_dev->resource[0]->start,
- resource_size(curr_dev->resource[0]), "ines_gpib") == 0) {
+ if (!request_region(curr_dev->resource[0]->start,
+ resource_size(curr_dev->resource[0]), "ines_gpib")) {
pr_err("ines_gpib: ioports at 0x%lx already in use\n",
(unsigned long)(curr_dev->resource[0]->start));
return -1;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
nec7210_board_reset(nec_priv, board);
@@ -1410,7 +1404,7 @@ void ines_pcmcia_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_pcmcia_iosize);
+ release_region(nec_priv->iobase, ines_pcmcia_iosize);
}
}
ines_free_private(board);
@@ -1420,28 +1414,86 @@ void ines_pcmcia_detach(gpib_board_t *board)
static int __init ines_init_module(void)
{
- int err = 0;
+ int ret;
+
+ ret = pci_register_driver(&ines_pci_driver);
+ if (ret) {
+ pr_err("ines_gpib: pci_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&ines_pci_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci;
+ }
- err = pci_register_driver(&ines_pci_driver);
- if (err) {
- pr_err("ines_gpib: pci_driver_register failed!\n");
- return err;
+ ret = gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_unaccel;
+ }
+
+ ret = gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_accel;
+ }
+
+ ret = gpib_register_driver(&ines_isa_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa;
}
- gpib_register_driver(&ines_pci_interface, THIS_MODULE);
- gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
- gpib_register_driver(&ines_isa_interface, THIS_MODULE);
#ifdef GPIB_PCMCIA
- gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
- err += ines_pcmcia_init_module();
+ ret = gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia;
+ }
+
+ ret = gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_unaccel;
+ }
+
+ ret = gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_accel;
+ }
+
+ ret = pcmcia_register_driver(&ines_gpib_cs_driver);
+ if (ret) {
+ pr_err("ines_gpib: pcmcia_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_driver;
+ }
#endif
- if (err)
- return -1;
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&ines_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&ines_pcmcia_unaccel_interface);
+err_pcmcia_unaccel:
+ gpib_unregister_driver(&ines_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&ines_isa_interface);
+err_isa:
+ gpib_unregister_driver(&ines_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&ines_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&ines_pci_interface);
+err_pci:
+ pci_unregister_driver(&ines_pci_driver);
+
+ return ret;
}
static void __exit ines_exit_module(void)
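
Aside: the init path above uses the standard kernel unwind ladder, where each goto label undoes everything registered before the failing step. Condensed to two stages with hypothetical names:

	static int __init demo_init(void)
	{
		int ret;

		ret = register_a();
		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = register_b();
		if (ret)
			goto err_b;	/* unwind stage 1 only */

		return 0;

	err_b:
		unregister_a();
		return ret;
	}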
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/Makefile b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
index 137511acce63..360553488e6d 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/Makefile
+++ b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
@@ -1,3 +1,3 @@
-obj-m += lpvo_usb_gpib.o
+obj-$(CONFIG_GPIB_LPVO) += lpvo_usb_gpib.o
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 267651a15fa0..85322af62c23 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -10,7 +10,6 @@
/* base module includes */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -25,7 +24,6 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
-#include <linux/uaccess.h>
#include <linux/usb.h>
#include "gpibP.h"
@@ -99,8 +97,8 @@ module_param(debug, int, 0644);
#define USB_GPIB_DEBUG_ON "\nIBDE\xAA\n"
#define USB_GPIB_SET_LISTEN "\nIBDT0\n"
#define USB_GPIB_SET_TALK "\nIBDT1\n"
-#define USB_GPIB_SET_LINES "\nIBDC\n"
-#define USB_GPIB_SET_DATA "\nIBDM\n"
+#define USB_GPIB_SET_LINES "\nIBDC.\n"
+#define USB_GPIB_SET_DATA "\nIBDM.\n"
#define USB_GPIB_READ_LINES "\nIBD?C\n"
#define USB_GPIB_READ_DATA "\nIBD?M\n"
#define USB_GPIB_READ_BUS "\nIBD??\n"
@@ -210,7 +208,7 @@ static int skel_do_release(gpib_board_t *);
* (unix time in sec and NANOsec)
*/
-inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
+static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
{
return ((a->tv_sec - b->tv_sec) * 1000000 +
(a->tv_nsec - b->tv_nsec) / 1000);
@@ -436,7 +434,7 @@ static void set_timeout(gpib_board_t *board)
static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
int retval, j;
- int base = (long)config->ibbase;
+ u32 base = config->ibbase;
char *device_path;
int match;
struct usb_device *udev;
@@ -589,7 +587,7 @@ static int usb_gpib_command(gpib_board_t *board,
size_t *bytes_written)
{
int i, retval;
- char command[6] = "IBc\n";
+ char command[6] = "IBc.\n";
DIA_LOG(1, "enter %p\n", board);
@@ -608,7 +606,7 @@ static int usb_gpib_command(gpib_board_t *board,
}
/**
- * disable_eos() - Disable END on eos byte (END on EOI only)
+ * usb_gpib_disable_eos() - Disable END on eos byte (END on EOI only)
*
* @board: the gpib_board data area for this gpib interface
*
@@ -624,7 +622,7 @@ static void usb_gpib_disable_eos(gpib_board_t *board)
}
/**
- * enable_eos() - Enable END for reads when eos byte is received.
+ * usb_gpib_enable_eos() - Enable END for reads when eos byte is received.
*
* @board: the gpib_board data area for this gpib interface
* @eos_byte: the 'eos' byte
@@ -647,7 +645,7 @@ static int usb_gpib_enable_eos(gpib_board_t *board,
}
/**
- * go_to_standby() - De-assert ATN
+ * usb_gpib_go_to_standby() - De-assert ATN
*
* @board: the gpib_board data area for this gpib interface
*/
@@ -664,7 +662,7 @@ static int usb_gpib_go_to_standby(gpib_board_t *board)
}
/**
- * interface_clear() - Assert or de-assert IFC
+ * usb_gpib_interface_clear() - Assert or de-assert IFC
*
* @board: the gpib_board data area for this gpib interface
* assert: 1: assert IFC; 0: de-assert IFC
@@ -1133,33 +1131,33 @@ static unsigned int usb_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec
* *** module dispatch table and init/exit functions ***
*/
-gpib_interface_t usb_gpib_interface = {
-name: NAME,
-attach : usb_gpib_attach,
-detach : usb_gpib_detach,
-read : usb_gpib_read,
-write : usb_gpib_write,
-command : usb_gpib_command,
-take_control : usb_gpib_take_control,
-go_to_standby : usb_gpib_go_to_standby,
-request_system_control : usb_gpib_request_system_control,
-interface_clear : usb_gpib_interface_clear,
-remote_enable : usb_gpib_remote_enable,
-enable_eos : usb_gpib_enable_eos,
-disable_eos : usb_gpib_disable_eos,
-parallel_poll : usb_gpib_parallel_poll,
-parallel_poll_configure : usb_gpib_parallel_poll_configure,
-parallel_poll_response : usb_gpib_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : usb_gpib_line_status,
-update_status : usb_gpib_update_status,
-primary_address : usb_gpib_primary_address,
-secondary_address : usb_gpib_secondary_address,
-serial_poll_response : usb_gpib_serial_poll_response,
-serial_poll_status : usb_gpib_serial_poll_status,
-t1_delay : usb_gpib_t1_delay,
-return_to_local : usb_gpib_return_to_local,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t usb_gpib_interface = {
+ .name = NAME,
+ .attach = usb_gpib_attach,
+ .detach = usb_gpib_detach,
+ .read = usb_gpib_read,
+ .write = usb_gpib_write,
+ .command = usb_gpib_command,
+ .take_control = usb_gpib_take_control,
+ .go_to_standby = usb_gpib_go_to_standby,
+ .request_system_control = usb_gpib_request_system_control,
+ .interface_clear = usb_gpib_interface_clear,
+ .remote_enable = usb_gpib_remote_enable,
+ .enable_eos = usb_gpib_enable_eos,
+ .disable_eos = usb_gpib_disable_eos,
+ .parallel_poll = usb_gpib_parallel_poll,
+ .parallel_poll_configure = usb_gpib_parallel_poll_configure,
+ .parallel_poll_response = usb_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = usb_gpib_line_status,
+ .update_status = usb_gpib_update_status,
+ .primary_address = usb_gpib_primary_address,
+ .secondary_address = usb_gpib_secondary_address,
+ .serial_poll_response = usb_gpib_serial_poll_response,
+ .serial_poll_status = usb_gpib_serial_poll_status,
+ .t1_delay = usb_gpib_t1_delay,
+ .return_to_local = usb_gpib_return_to_local,
+ .skip_check_for_command_acceptors = 1
};
/*
@@ -1181,7 +1179,11 @@ static int usb_gpib_init_module(struct usb_interface *interface)
return rv;
if (!assigned_usb_minors) {
- gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
+ rv = gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
+ if (rv) {
+ pr_err("lpvo_usb_gpib: gpib_register_driver failed: error = %d\n", rv);
+ goto exit;
+ }
} else {
/* check if minor is already registered - maybe useless, but if
* it happens the code is inconsistent somewhere
@@ -1878,7 +1880,7 @@ static int skel_release(struct inode *inode, struct file *file)
* user space access to read function
*/
-static ssize_t skel_read(struct file *file, char *buffer, size_t count,
+static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_skel *dev;
@@ -1909,7 +1911,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
* user space access to write function
*/
-static ssize_t skel_write(struct file *file, const char *user_buffer,
+static ssize_t skel_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos)
{
struct usb_skel *dev;
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index 1d9951035497..c9a837fad96e 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(nec7210_board_online);
/* wrappers for io */
uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_read_byte);
@@ -1047,7 +1047,7 @@ void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_ioport_write_byte(priv, data, register_num);
else
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_write_byte);
@@ -1058,7 +1058,7 @@ uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ retval = inb(priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1072,7 +1072,7 @@ void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
@@ -1080,7 +1080,7 @@ EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
uint8_t nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_read_byte);
@@ -1092,7 +1092,7 @@ void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_iomem_write_byte(priv, data, register_num);
else
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_write_byte);
@@ -1102,7 +1102,7 @@ uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = readb(priv->iobase + register_num * priv->offset);
+ retval = readb(priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1116,7 +1116,7 @@ void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_iomem_write_byte);
diff --git a/drivers/staging/gpib/ni_usb/Makefile b/drivers/staging/gpib/ni_usb/Makefile
index e22b3b21a62c..469c5d16add3 100644
--- a/drivers/staging/gpib/ni_usb/Makefile
+++ b/drivers/staging/gpib/ni_usb/Makefile
@@ -1,4 +1,4 @@
-obj-m += ni_usb_gpib.o
+obj-$(CONFIG_GPIB_NI_USB) += ni_usb_gpib.o
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index b7b6fb1be379..d0656dc520f5 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -85,7 +85,7 @@ static void ni_usb_bulk_complete(struct urb *urb)
// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
// urb->status, urb->error_count, urb->actual_length);
- up(&context->complete);
+ complete(&context->complete);
}
static void ni_usb_timeout_handler(struct timer_list *t)
@@ -94,7 +94,7 @@ static void ni_usb_timeout_handler(struct timer_list *t)
struct ni_usb_urb_ctx *context = &ni_priv->context;
context->timed_out = 1;
- up(&context->complete);
+ complete(&context->complete);
};
// I'm using nonblocking loosely here, it only means -EAGAIN can be returned in certain cases
@@ -124,7 +124,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
}
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_pipe = usb_sndbulkpipe(usb_dev, ni_priv->bulk_out_endpoint);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(ni_priv->bulk_urb, usb_dev, out_pipe, data, data_length,
&ni_usb_bulk_complete, context);
@@ -143,7 +143,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
return retval;
}
mutex_unlock(&ni_priv->bulk_transfer_lock);
- down(&context->complete); // wait for ni_usb_bulk_complete
+ wait_for_completion(&context->complete); // wait for ni_usb_bulk_complete
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
@@ -210,7 +210,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
in_pipe = usb_rcvbulkpipe(usb_dev, ni_priv->bulk_in_endpoint);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(ni_priv->bulk_urb, usb_dev, in_pipe, data, data_length,
&ni_usb_bulk_complete, context);
@@ -231,7 +231,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
mutex_unlock(&ni_priv->bulk_transfer_lock);
if (interruptible) {
- if (down_interruptible(&context->complete)) {
+ if (wait_for_completion_interruptible(&context->complete)) {
/* If we got interrupted by a signal while
* waiting for the usb gpib to respond, we
* should send a stop command so it will
@@ -243,10 +243,10 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
/* now do an uninterruptible wait, it shouldn't take long
* for the board to respond now.
*/
- down(&context->complete);
+ wait_for_completion(&context->complete);
}
} else {
- down(&context->complete);
+ wait_for_completion(&context->complete);
}
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
@@ -783,8 +783,10 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
- if (!in_data)
+ if (!in_data) {
+ mutex_unlock(&ni_priv->addressed_transfer_lock);
return -ENOMEM;
+ }
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &usb_bytes_read,
ni_usb_timeout_msecs(board->usec_timeout), 1);
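
Aside: the hunk above closes a lock leak; the early -ENOMEM return previously skipped the mutex_unlock(). The corrected shape, with names as in the hunk:

	mutex_lock(&ni_priv->addressed_transfer_lock);

	in_data = kmalloc(in_data_length, GFP_KERNEL);
	if (!in_data) {
		mutex_unlock(&ni_priv->addressed_transfer_lock);	/* never return holding the lock */
		return -ENOMEM;
	}
	/* ... transfer under the lock, unlock on the normal path ... */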
@@ -2351,33 +2353,33 @@ static void ni_usb_detach(gpib_board_t *board)
mutex_unlock(&ni_usb_hotplug_lock);
}
-gpib_interface_t ni_usb_gpib_interface = {
-name: "ni_usb_b",
-attach : ni_usb_attach,
-detach : ni_usb_detach,
-read : ni_usb_read,
-write : ni_usb_write,
-command : ni_usb_command,
-take_control : ni_usb_take_control,
-go_to_standby : ni_usb_go_to_standby,
-request_system_control : ni_usb_request_system_control,
-interface_clear : ni_usb_interface_clear,
-remote_enable : ni_usb_remote_enable,
-enable_eos : ni_usb_enable_eos,
-disable_eos : ni_usb_disable_eos,
-parallel_poll : ni_usb_parallel_poll,
-parallel_poll_configure : ni_usb_parallel_poll_configure,
-parallel_poll_response : ni_usb_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ni_usb_line_status,
-update_status : ni_usb_update_status,
-primary_address : ni_usb_primary_address,
-secondary_address : ni_usb_secondary_address,
-serial_poll_response : ni_usb_serial_poll_response,
-serial_poll_status : ni_usb_serial_poll_status,
-t1_delay : ni_usb_t1_delay,
-return_to_local : ni_usb_return_to_local,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t ni_usb_gpib_interface = {
+ .name = "ni_usb_b",
+ .attach = ni_usb_attach,
+ .detach = ni_usb_detach,
+ .read = ni_usb_read,
+ .write = ni_usb_write,
+ .command = ni_usb_command,
+ .take_control = ni_usb_take_control,
+ .go_to_standby = ni_usb_go_to_standby,
+ .request_system_control = ni_usb_request_system_control,
+ .interface_clear = ni_usb_interface_clear,
+ .remote_enable = ni_usb_remote_enable,
+ .enable_eos = ni_usb_enable_eos,
+ .disable_eos = ni_usb_disable_eos,
+ .parallel_poll = ni_usb_parallel_poll,
+ .parallel_poll_configure = ni_usb_parallel_poll_configure,
+ .parallel_poll_response = ni_usb_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ni_usb_line_status,
+ .update_status = ni_usb_update_status,
+ .primary_address = ni_usb_primary_address,
+ .secondary_address = ni_usb_secondary_address,
+ .serial_poll_response = ni_usb_serial_poll_response,
+ .serial_poll_status = ni_usb_serial_poll_status,
+ .t1_delay = ni_usb_t1_delay,
+ .return_to_local = ni_usb_return_to_local,
+ .skip_check_for_command_acceptors = 1
};
// Table with the USB-devices: just now only testing IDs
@@ -2619,12 +2621,23 @@ static struct usb_driver ni_usb_bus_driver = {
static int __init ni_usb_init_module(void)
{
int i;
+ int ret;
pr_info("ni_usb_gpib driver loading\n");
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++)
ni_usb_driver_interfaces[i] = NULL;
- usb_register(&ni_usb_bus_driver);
- gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
+
+ ret = usb_register(&ni_usb_bus_driver);
+ if (ret) {
+ pr_err("ni_usb_gpib: usb_register failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ni_usb_gpib: gpib_register_driver failed: error = %d\n", ret);
+ usb_deregister(&ni_usb_bus_driver);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
index 9b21dfa0f3f6..4b297db09a9b 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
@@ -56,7 +56,7 @@ enum hs_plus_endpoint_addresses {
};
struct ni_usb_urb_ctx {
- struct semaphore complete;
+ struct completion complete;
unsigned timed_out : 1;
};
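
Aside: this struct change completes the semaphore-to-completion conversion in the hunks above; a completion is the preferred primitive for one-shot wait-for-event signalling from URB callbacks. A minimal sketch of the pairing, with hypothetical names:

	#include <linux/completion.h>

	struct demo_urb_ctx {
		struct completion complete;
		unsigned timed_out : 1;
	};

	static void demo_urb_done(struct demo_urb_ctx *ctx)	/* URB callback or timeout */
	{
		complete(&ctx->complete);
	}

	static void demo_submit_and_wait(struct demo_urb_ctx *ctx)
	{
		init_completion(&ctx->complete);	/* re-arm before each submission */
		/* ... usb_submit_urb(...) ... */
		wait_for_completion(&ctx->complete);	/* was: down(&sem) */
	}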
diff --git a/drivers/staging/gpib/pc2/Makefile b/drivers/staging/gpib/pc2/Makefile
index 8148425e0f87..481ee4296e1b 100644
--- a/drivers/staging/gpib/pc2/Makefile
+++ b/drivers/staging/gpib/pc2/Makefile
@@ -1,5 +1,5 @@
-obj-m += pc2_gpib.o
+obj-$(CONFIG_GPIB_PC2) += pc2_gpib.o
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index 7b3b34f47341..c0b07cb63d9a 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -238,116 +238,116 @@ static void pc2_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t pc2_interface = {
-name: "pcII",
-attach : pc2_attach,
-detach : pc2_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2_interface = {
+ .name = "pcII",
+ .attach = pc2_attach,
+ .detach = pc2_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2a_interface = {
-name: "pcIIa",
-attach : pc2a_attach,
-detach : pc2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2a_interface = {
+ .name = "pcIIa",
+ .attach = pc2a_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2a_cb7210_interface = {
-name: "pcIIa_cb7210",
-attach : pc2a_cb7210_attach,
-detach : pc2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL, //XXX
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2a_cb7210_interface = {
+ .name = "pcIIa_cb7210",
+ .attach = pc2a_cb7210_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2_2a_interface = {
-name: "pcII_IIa",
-attach : pc2_2a_attach,
-detach : pc2_2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2_2a_interface = {
+ .name = "pcII_IIa",
+ .attach = pc2_2a_attach,
+ .detach = pc2_2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
static int allocate_private(gpib_board_t *board)
@@ -426,7 +426,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2_reg_offset;
- if (request_region((unsigned long)config->ibbase, pc2_iosize, "pc2") == 0) {
+ if (!request_region(config->ibbase, pc2_iosize, "pc2")) {
pr_err("gpib: ioports are already in use\n");
return -1;
}
@@ -471,7 +471,7 @@ void pc2_detach(gpib_board_t *board)
free_irq(pc2_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), pc2_iosize);
+ release_region(nec_priv->iobase, pc2_iosize);
}
if (nec_priv->dma_buffer) {
dma_free_coherent(board->dev, nec_priv->dma_buffer_length,
@@ -498,14 +498,14 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2a_reg_offset;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0x02e1:
case 0x22e1:
case 0x42e1:
case 0x62e1:
break;
default:
- pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%p\n",
+ pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%d\n",
config->ibbase);
return -1;
}
@@ -522,7 +522,7 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
unsigned int err = 0;
for (i = 0; i < num_registers; i++) {
- if (check_region((unsigned long)config->ibbase + i * pc2a_reg_offset, 1))
+ if (check_region(config->ibbase + i * pc2a_reg_offset, 1))
err++;
}
if (config->ibirq && check_region(pc2a_clear_intr_iobase + config->ibirq, 1))
@@ -533,11 +533,11 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
}
#endif
for (i = 0; i < num_registers; i++) {
- if (!request_region((unsigned long)config->ibbase +
+ if (!request_region(config->ibbase +
i * pc2a_reg_offset, 1, "pc2a")) {
pr_err("gpib: ioports are already in use");
for (j = 0; j < i; j++)
- release_region((unsigned long)(config->ibbase) +
+ release_region(config->ibbase +
j * pc2a_reg_offset, 1);
return -1;
}
@@ -608,7 +608,7 @@ static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
for (i = 0; i < num_registers; i++)
- release_region((unsigned long)nec_priv->iobase +
+ release_region(nec_priv->iobase +
i * pc2a_reg_offset, 1);
}
if (pc2_priv->clear_intr_addr)
@@ -635,12 +635,42 @@ void pc2_2a_detach(gpib_board_t *board)
static int __init pc2_init_module(void)
{
- gpib_register_driver(&pc2_interface, THIS_MODULE);
- gpib_register_driver(&pc2a_interface, THIS_MODULE);
- gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
- gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
+ int ret;
+
+ ret = gpib_register_driver(&pc2_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&pc2a_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pc2a;
+ }
+
+ ret = gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_cb7210;
+ }
+
+ ret = gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pc2_2a;
+ }
return 0;
+
+err_pc2_2a:
+ gpib_unregister_driver(&pc2a_cb7210_interface);
+err_cb7210:
+ gpib_unregister_driver(&pc2a_interface);
+err_pc2a:
+ gpib_unregister_driver(&pc2_interface);
+
+ return ret;
}
static void __exit pc2_exit_module(void)
diff --git a/drivers/staging/gpib/tms9914/Makefile b/drivers/staging/gpib/tms9914/Makefile
index 81b7e3cf104c..4705ab07f413 100644
--- a/drivers/staging/gpib/tms9914/Makefile
+++ b/drivers/staging/gpib/tms9914/Makefile
@@ -1,5 +1,5 @@
-obj-m += tms9914.o
+obj-$(CONFIG_GPIB_TMS9914) += tms9914.o
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index 152b243b845b..ec8e1d4d762f 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -866,14 +866,14 @@ EXPORT_SYMBOL_GPL(tms9914_online);
// wrapper for inb
uint8_t tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_ioport_read_byte);
// wrapper for outb
void tms9914_ioport_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
@@ -883,14 +883,14 @@ EXPORT_SYMBOL_GPL(tms9914_ioport_write_byte);
// wrapper for readb
uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_iomem_read_byte);
// wrapper for writeb
void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/staging/gpib/tnt4882/Makefile
index f767c990db7a..a3c3fb96d5ed 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/staging/gpib/tnt4882/Makefile
@@ -1,5 +1,5 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += tnt4882.o
+obj-$(CONFIG_GPIB_NI_PCI_ISA) += tnt4882.o
tnt4882-objs := tnt4882_gpib.o mite.o
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index 0edf34d243e9..ea64dde46bcb 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Hardware driver for NI Mite PCI interface chip,
@@ -148,72 +148,3 @@ void mite_list_devices(void)
}
pr_info("\n");
}
-
-int mite_bytes_transferred(struct mite_struct *mite, int chan)
-{
- int dar, fcr;
-
- dar = readl(mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(chan));
- fcr = readl(mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(chan)) & 0x000000FF;
- return dar - fcr;
-}
-
-int mite_dma_tcr(struct mite_struct *mite)
-{
- int tcr;
- int lkar;
-
- lkar = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_LKAR);
- tcr = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_TCR);
- MDPRINTK("lkar=0x%08x tcr=%d\n", lkar, tcr);
-
- return tcr;
-}
-
-void mite_dma_disarm(struct mite_struct *mite)
-{
- int chor;
-
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + CHAN_OFFSET(0) + MITE_CHOR);
-}
-
-void mite_dump_regs(struct mite_struct *mite)
-{
- void *addr = 0;
- unsigned long temp = 0;
-
- pr_info("mite address is =0x%p\n", mite->mite_io_addr);
-
- addr = mite->mite_io_addr + MITE_CHOR + CHAN_OFFSET(0);
- pr_info("mite status[CHOR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHOR_strings,temp);
- addr = mite->mite_io_addr + MITE_CHCR + CHAN_OFFSET(0);
- pr_info("mite status[CHCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHCR_strings,temp);
- addr = mite->mite_io_addr + MITE_TCR + CHAN_OFFSET(0);
- pr_info("mite status[TCR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_MCR + CHAN_OFFSET(0);
- pr_info("mite status[MCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_MCR_strings,temp);
- addr = mite->mite_io_addr + MITE_MAR + CHAN_OFFSET(0);
- pr_info("mite status[MAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_DCR + CHAN_OFFSET(0);
- pr_info("mite status[DCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(0);
- pr_info("mite status[DAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_LKCR + CHAN_OFFSET(0);
- pr_info("mite status[LKCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_LKAR + CHAN_OFFSET(0);
- pr_info("mite status[LKAR]at 0x%p =0x%08x\n", addr, readl(addr));
-
- addr = mite->mite_io_addr + MITE_CHSR + CHAN_OFFSET(0);
- pr_info("mite status[CHSR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHSR_strings,temp);
- addr = mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(0);
- pr_info("mite status[FCR] at 0x%p =0x%08x\n\n", addr, readl(addr));
-}
-
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/staging/gpib/tnt4882/mite.h
index 6454d069b8cc..522d6b56cb7d 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/staging/gpib/tnt4882/mite.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hardware driver for NI Mite PCI interface chip
@@ -34,9 +34,9 @@ struct mite_struct {
struct pci_dev *pcidev;
unsigned long mite_phys_addr;
- void *mite_io_addr;
+ void __iomem *mite_io_addr;
unsigned long daq_phys_addr;
- void *daq_io_addr;
+ void __iomem *daq_io_addr;
int DMA_CheckNearEnd;
@@ -61,15 +61,6 @@ int mite_setup(struct mite_struct *mite);
void mite_unsetup(struct mite_struct *mite);
void mite_list_devices(void);
-int mite_dma_tcr(struct mite_struct *mite);
-
-void mite_dma_arm(struct mite_struct *mite);
-void mite_dma_disarm(struct mite_struct *mite);
-
-void mite_dump_regs(struct mite_struct *mite);
-void mite_setregs(struct mite_struct *mite, unsigned long ll_start, int chan, int dir);
-int mite_bytes_transferred(struct mite_struct *mite, int chan);
-
#define CHAN_OFFSET(x) (0x100 * (x))
/* DMA base for chan 0 is 0x500, chan 1 is 0x600 */
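[note] Annotating the mite pointers as void __iomem * lets sparse enforce that they are only touched through the MMIO accessors. What the annotation buys, in one hypothetical read:

        #include <linux/io.h>

        static u32 read_status(void __iomem *regs)
        {
                return readl(regs + 0x10);              /* fine: goes through the accessor */
                /* return *(u32 *)(regs + 0x10); */     /* sparse would warn: direct dereference */
        }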
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index e49a952fa0d8..b39ab2abe495 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -45,10 +45,6 @@ struct tnt4882_priv {
unsigned short imr0_bits;
unsigned short imr3_bits;
unsigned short auxg_bits; // bits written to auxiliary register G
- void (*io_writeb)(unsigned int value, void *address);
- void (*io_writew)(unsigned int value, void *address);
- unsigned int (*io_readb)(void *address);
- unsigned int (*io_readw)(void *address);
};
// interface functions
@@ -104,23 +100,23 @@ static const int atgpib_iosize = 32;
/* paged io */
static inline unsigned int tnt_paged_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- return priv->io_readb(priv->nec7210_priv.iobase + offset);
+ return ioread8(priv->nec7210_priv.mmiobase + offset);
}
static inline void tnt_paged_writeb(struct tnt4882_priv *priv, unsigned int value,
unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- priv->io_writeb(value, priv->nec7210_priv.iobase + offset);
+ iowrite8(value, priv->nec7210_priv.mmiobase + offset);
}
/* readb/writeb wrappers */
static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
unsigned short retval;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -134,7 +130,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
case NAT4882:
retval = tnt_paged_readb(priv, offset - tnt_pagein_offset);
@@ -149,7 +145,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
}
break;
default:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -158,7 +154,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -170,7 +166,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
case NAT4882:
tnt_paged_writeb(priv, value, offset - tnt_pagein_offset);
@@ -183,7 +179,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
}
break;
default:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -288,7 +284,7 @@ static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int
while (fifo_word_available(tnt_priv) && count + 2 <= num_bytes) {
short word;
- word = tnt_priv->io_readw(nec_priv->iobase + FIFOB);
+ word = ioread16(nec_priv->mmiobase + FIFOB);
buffer[count++] = word & 0xff;
buffer[count++] = (word >> 8) & 0xff;
}
@@ -573,7 +569,7 @@ static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
word = buffer[count++] & 0xff;
if (count < length)
word |= (buffer[count++] << 8) & 0xff00;
- tnt_priv->io_writew(word, nec_priv->iobase + FIFOB);
+ iowrite16(word, nec_priv->mmiobase + FIFOB);
}
// avoid unnecessary HR_NFF interrupts
// tnt_priv->imr3_bits |= HR_NFF;
@@ -894,285 +890,285 @@ void tnt4882_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t ni_pci_interface = {
-name: "ni_pci",
-attach : ni_pci_attach,
-detach : ni_pci_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pci_interface = {
+ .name = "ni_pci",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_pci_accel_interface = {
-name: "ni_pci_accel",
-attach : ni_pci_attach,
-detach : ni_pci_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pci_accel_interface = {
+ .name = "ni_pci_accel",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_isa_interface = {
-name: "ni_isa",
-attach : ni_tnt_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_isa_interface = {
+ .name = "ni_isa",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nat4882_isa_interface = {
-name: "ni_nat4882_isa",
-attach : ni_nat4882_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_read,
-write : tnt4882_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nat4882_isa_interface = {
+ .name = "ni_nat4882_isa",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nec_isa_interface = {
-name: "ni_nec_isa",
-attach : ni_nec_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_read,
-write : tnt4882_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nec_isa_interface = {
+ .name = "ni_nec_isa",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_isa_accel_interface = {
-name: "ni_isa_accel",
-attach : ni_tnt_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_isa_accel_interface = {
+ .name = "ni_isa_accel",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nat4882_isa_accel_interface = {
-name: "ni_nat4882_isa_accel",
-attach : ni_nat4882_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nat4882_isa_accel_interface = {
+ .name = "ni_nat4882_isa_accel",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nec_isa_accel_interface = {
-name: "ni_nec_isa_accel",
-attach : ni_nec_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nec_isa_accel_interface = {
+ .name = "ni_nec_isa_accel",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
#ifdef GPIB_PCMCIA
-gpib_interface_t ni_pcmcia_interface = {
-name: "ni_pcmcia",
-attach : ni_pcmcia_attach,
-detach : ni_pcmcia_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pcmcia_interface = {
+ .name = "ni_pcmcia",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_pcmcia_accel_interface = {
-name: "ni_pcmcia_accel",
-attach : ni_pcmcia_attach,
-detach : ni_pcmcia_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pcmcia_accel_interface = {
+ .name = "ni_pcmcia_accel",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
#endif
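[note] The tables above drop the GCC-only `label:` initializer spelling for C99 designated initializers and become static, keeping the symbols out of the global namespace. The change in miniature, on a hypothetical two-field struct:

        struct ops {
                const char *name;
                int (*read)(void);
        };

        /* old, GCC-specific:  struct ops o = { name: "x", read: my_read }; */
        /* C99 designated initializers, now also static: */
        static struct ops o = {
                .name = "x",
                .read = NULL,   /* omitted fields are zero-initialized */
        };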
@@ -1269,10 +1265,6 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = writeb_wrapper;
- tnt_priv->io_readb = readb_wrapper;
- tnt_priv->io_writew = writew_wrapper;
- tnt_priv->io_readw = readw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_iomem_read_byte;
@@ -1324,7 +1316,7 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
}
- nec_priv->iobase = tnt_priv->mite->daq_io_addr;
+ nec_priv->mmiobase = tnt_priv->mite->daq_io_addr;
// get irq
if (request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags,
@@ -1359,7 +1351,7 @@ void ni_pci_detach(gpib_board_t *board)
if (tnt_priv) {
nec_priv = &tnt_priv->nec7210_priv;
- if (nec_priv->iobase)
+ if (nec_priv->mmiobase)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
@@ -1400,7 +1392,7 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
int isr_flags = 0;
- void *iobase;
+ u32 iobase;
int irq;
board->status = 0;
@@ -1408,10 +1400,6 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = chipset;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1427,18 +1415,20 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (retval < 0)
return retval;
tnt_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
} else {
iobase = config->ibbase;
irq = config->ibirq;
}
// allocate ioports
- if (!request_region((unsigned long)(iobase), atgpib_iosize, "atgpib")) {
+ if (!request_region(iobase, atgpib_iosize, "atgpib")) {
pr_err("tnt4882: failed to allocate ioports\n");
return -1;
}
- nec_priv->iobase = iobase;
+ nec_priv->mmiobase = ioport_map(iobase, atgpib_iosize);
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board)) {
@@ -1478,8 +1468,10 @@ void ni_isa_detach(gpib_board_t *board)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase)
- release_region((unsigned long)(nec_priv->iobase), atgpib_iosize);
+ release_region(nec_priv->iobase, atgpib_iosize);
if (tnt_priv->pnp_dev)
pnp_device_detach(tnt_priv->pnp_dev);
}
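[note] With ioport_map() the ISA/PCMCIA attach paths no longer need per-bus read/write function pointers: the port range is wrapped in an __iomem cookie up front, and every register access then uses the same ioread8()/iowrite8() family the PCI path uses. A sketch of the pairing, with illustrative names (unlike the early return in the attach hunk above, the sketch releases the region if mapping fails):

        #include <linux/io.h>
        #include <linux/ioport.h>

        static void __iomem *example_map_ports(unsigned long base, unsigned int len)
        {
                void __iomem *regs;

                if (!request_region(base, len, "example"))
                        return NULL;

                regs = ioport_map(base, len);   /* port range as an __iomem cookie */
                if (!regs)
                        release_region(base, len);
                return regs;
        }

        static void example_unmap_ports(void __iomem *regs, unsigned long base,
                                        unsigned int len)
        {
                ioport_unmap(regs);
                release_region(base, len);
        }

After this, ioread8(regs + off) works the same whether regs came from ioport_map() or from pci_iomap() — which is what lets tnt_readb()/tnt_writeb() lose their indirection.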
@@ -1524,29 +1516,109 @@ static int __init tnt4882_init_module(void)
result = pci_register_driver(&tnt4882_pci_driver);
if (result) {
- pr_err("tnt4882: pci_driver_register failed!\n");
+ pr_err("tnt4882_gpib: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&ni_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_pci_interface, THIS_MODULE);
- gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
+ result = gpib_register_driver(&ni_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_isa;
+ }
+
+ result = gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nat4882_isa;
+ }
+
+ result = gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nat4882_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nec_isa;
+ }
+
+ result = gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nec_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci;
+ }
+
+ result = gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci_accel;
+ }
+
#ifdef GPIB_PCMCIA
- gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
- if (init_ni_gpib_cs() < 0)
- return -1;
+ result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pcmcia;
+ }
+
+ result = gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pcmcia_accel;
+ }
+
+ result = init_ni_gpib_cs();
+ if (result) {
+ pr_err("tnt4882_gpib: pcmcia_register_driver failed: error = %d\n", result);
+ goto err_pcmcia_driver;
+ }
#endif
mite_init();
mite_list_devices();
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&ni_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&ni_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&ni_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&ni_pci_interface);
+err_pci:
+ gpib_unregister_driver(&ni_nec_isa_accel_interface);
+err_nec_isa_accel:
+ gpib_unregister_driver(&ni_nec_isa_interface);
+err_nec_isa:
+ gpib_unregister_driver(&ni_nat4882_isa_accel_interface);
+err_nat4882_isa_accel:
+ gpib_unregister_driver(&ni_nat4882_isa_interface);
+err_nat4882_isa:
+ gpib_unregister_driver(&ni_isa_accel_interface);
+err_isa_accel:
+ gpib_unregister_driver(&ni_isa_interface);
+err_isa:
+ pci_unregister_driver(&tnt4882_pci_driver);
+
+ return result;
}
static void __exit tnt4882_exit_module(void)
@@ -1817,10 +1889,6 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1828,14 +1896,17 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->offset = atgpib_reg_offset;
DEBUG(0, "ioport1 window attributes: 0x%lx\n", curr_dev->resource[0]->flags);
- if (request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "tnt4882") == 0) {
+ if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
+ "tnt4882")) {
pr_err("gpib: ioports starting at 0x%lx are already in use\n",
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->mmiobase = ioport_map(curr_dev->resource[0]->start,
+ resource_size(curr_dev->resource[0]));
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, "tnt4882", board)) {
@@ -1860,9 +1931,11 @@ void ni_pcmcia_detach(gpib_board_t *board)
nec_priv = &tnt_priv->nec7210_priv;
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase) {
tnt4882_board_reset(tnt_priv, board);
- release_region((unsigned long)nec_priv->iobase, pcmcia_gpib_iosize);
+ release_region(nec_priv->iobase, pcmcia_gpib_iosize);
}
}
tnt4882_free_private(board);
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index ca71023df447..5d80ace41d8e 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1128,18 +1128,7 @@ done:
static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
- const struct gb_camera_debugfs_entry *entry =
- &gb_camera_debugfs_entries[i];
-
- if (!strcmp(file->f_path.dentry->d_iname, entry->name)) {
- file->private_data = (void *)entry;
- break;
- }
- }
-
+ file->private_data = (void *)debugfs_get_aux(file);
return 0;
}
@@ -1175,8 +1164,8 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
gcam->debugfs.buffers[i].length = 0;
- debugfs_create_file(entry->name, entry->mask,
- gcam->debugfs.root, gcam,
+ debugfs_create_file_aux(entry->name, entry->mask,
+ gcam->debugfs.root, gcam, entry,
&gb_camera_debugfs_ops);
}
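[note] gb_camera_debugfs_open() no longer string-matches the dentry name against its entry table; the entry pointer is attached when the file is created (debugfs_create_file_aux()) and read back at open time (debugfs_get_aux()). The pairing reduced to its two calls, mirroring the hunk above (entry/table types are illustrative):

        #include <linux/debugfs.h>

        static int example_open(struct inode *inode, struct file *file)
        {
                /* recover the aux pointer stashed at creation time */
                file->private_data = (void *)debugfs_get_aux(file);
                return 0;
        }

        /* creation side, per table entry:
         * debugfs_create_file_aux(entry->name, entry->mask, root, dev, entry, &fops);
         */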
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 492612e8f8ba..140ee4f9c137 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -158,7 +158,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,
static int ad9832_write_phase(struct ad9832_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9832_PHASE_BITS))
+ if (phase >= BIT(AD9832_PHASE_BITS))
return -EINVAL;
st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 47e7d7e6d920..6e99e008c5f4 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -131,7 +131,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
static int ad9834_write_phase(struct ad9834_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9834_PHASE_BITS))
+ if (phase >= BIT(AD9834_PHASE_BITS))
return -EINVAL;
st->data = cpu_to_be16(addr | phase);
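[note] Both DDS drivers shared the same off-by-one: an N-bit phase field holds 0 through BIT(N)-1, so a value equal to BIT(N) must be rejected as well. For a 12-bit field:

        /* BIT(12) == 4096; valid phase words are 0..4095 */
        if (phase >= BIT(PHASE_BITS))   /* PHASE_BITS stands in for the chip macro */
                return -EINVAL;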
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 049246774ced..6146555fe9cf 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -172,10 +172,10 @@ void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
#define IS_BYT __IS_SOC(INTEL_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_ATOM_AIRMONT)
#define IS_MRFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID)
-#define IS_MOFD __IS_SOC(INTEL_ATOM_AIRMONT_MID)
+#define IS_MOFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID2)
/* Both CHT and MOFD come with ISP2401 */
#define IS_ISP2401 __IS_SOCS(INTEL_ATOM_AIRMONT, \
- INTEL_ATOM_AIRMONT_MID)
+ INTEL_ATOM_SILVERMONT_MID2)
#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 07ed33464d71..224ca8d42721 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -624,10 +624,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
int ret;
- ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+ ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
if (ret != bo->pgnr) {
free_pages_bulk_array(ret, bo->pages);
- dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
+ dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
return -ENOMEM;
}
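[note] alloc_pages_bulk_array() is now spelled alloc_pages_bulk(); the contract is unchanged — it attempts to fill the caller's page array and returns the number of pages actually allocated, so a short count still has to be unwound. The call pattern from the hunk, wrapped for illustration (free_pages_bulk_array() is used exactly as the driver does above):

        /* sketch only; gfp/pgnr/pages mirror the driver's bo fields */
        static int example_fill(gfp_t gfp, unsigned long pgnr, struct page **pages)
        {
                int ret = alloc_pages_bulk(gfp, pgnr, pages);

                if (ret != pgnr) {      /* partial allocation counts as failure here */
                        free_pages_bulk_array(ret, pages);
                        return -ENOMEM;
                }
                return 0;
        }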
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 118bff988bc7..bb28daa4d713 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -54,22 +54,18 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
break;
ret = imx_media_of_add_csi(imxmd, csi_np);
+ of_node_put(csi_np);
if (ret) {
/* unavailable or already added is not an error */
if (ret == -ENODEV || ret == -EEXIST) {
- of_node_put(csi_np);
continue;
}
/* other error, can't continue */
- goto err_out;
+ return ret;
}
}
return 0;
-
-err_out:
- of_node_put(csi_np);
- return ret;
}
EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
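[note] Calling of_node_put() immediately after the node has been consumed means every exit — the -ENODEV/-EEXIST continue, the error return, and the normal loop advance — drops the reference exactly once, which is what made the err_out label dead. The generic shape (add_port() is a hypothetical consumer; only of_parse_phandle()/of_node_put() are real API):

        #include <linux/of.h>

        static int add_port(struct device_node *np);    /* hypothetical */

        static int add_all_ports(struct device_node *dev_np)
        {
                int i, ret;

                for (i = 0; ; i++) {
                        struct device_node *np = of_parse_phandle(dev_np, "ports", i);

                        if (!np)
                                break;
                        ret = add_port(np);     /* uses np, keeps no reference */
                        of_node_put(np);        /* one put covers every path below */
                        if (ret && ret != -ENODEV)
                                return ret;     /* -ENODEV: skip this one, not fatal */
                }
                return 0;
        }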
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 4aa2797f5e3c..8b85524beb59 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config {
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
- * Rule:
+ *
+ * Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index ede02e8c891c..0751b2e04895 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -418,7 +418,6 @@ static int max96712_probe(struct i2c_client *client)
priv->info = of_device_get_match_data(&client->dev);
priv->client = client;
- i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
if (IS_ERR(priv->regmap))
@@ -448,7 +447,8 @@ static int max96712_probe(struct i2c_client *client)
static void max96712_remove(struct i2c_client *client)
{
- struct max96712_priv *priv = i2c_get_clientdata(client);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd);
v4l2_async_unregister_subdev(&priv->sd);
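[note] On this probe path the I2C client's drvdata ends up holding the struct v4l2_subdev itself, so the extra i2c_set_clientdata(client, priv) was redundant; remove() now walks from the stored subdev back to the containing private struct with container_of(). The recovery step in general form (struct shape is illustrative, mirroring max96712_priv's embedded sd):

        #include <linux/container_of.h>
        #include <media/v4l2-subdev.h>

        struct my_priv {
                int state;
                struct v4l2_subdev sd;  /* embedded member */
        };

        static struct my_priv *to_my_priv(struct v4l2_subdev *sd)
        {
                /* walk from the embedded member back to its container */
                return container_of(sd, struct my_priv, sd);
        }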
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index ba200ee669f3..8560b84a3146 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -48,7 +48,6 @@ r8723bs-y = \
hal/HalHWImg8723B_RF.o \
hal/HalPhyRf_8723B.o \
os_dep/ioctl_cfg80211.o \
- os_dep/ioctl_linux.o \
os_dep/mlme_linux.o \
os_dep/osdep_service.o \
os_dep/os_intfs.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index fcda9db6ebb5..79d543d88278 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -141,12 +141,12 @@ int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapt
*/
int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj)
{
- int ret = false;
- int value = atomic_inc_return(&dvobj->continual_io_error);
- if (value > MAX_CONTINUAL_IO_ERR)
- ret = true;
+ int error_count = atomic_inc_return(&dvobj->continual_io_error);
- return ret;
+ if (error_count > MAX_CONTINUAL_IO_ERR)
+ return true;
+
+ return false;
}
/*
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 317f3db19397..952ce6dd5af9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -4959,7 +4959,6 @@ void _linked_info_dump(struct adapter *padapter)
rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
}
}
- rtw_hal_set_def_var(padapter, HAL_DEF_DBG_RX_INFO_DUMP, NULL);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 699cff7b0ac9..297c93d65315 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1467,7 +1467,8 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
- pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->pdata = pxmitbuf->phead;
+ pxmitbuf->ptail = pxmitbuf->phead;
pxmitbuf->agg_num = 1;
if (pxmitbuf->sctx)
@@ -1526,7 +1527,8 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
- pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->pdata = pxmitbuf->phead;
+ pxmitbuf->ptail = pxmitbuf->phead;
pxmitbuf->agg_num = 0;
pxmitbuf->pg_num = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 95fb38283c58..b41ec89932af 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -682,14 +682,6 @@ u8 SetHalDefVar(
u8 bResult = _SUCCESS;
switch (variable) {
- case HAL_DEF_DBG_RX_INFO_DUMP:
-
- if (odm->bLinked) {
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- rtw_dump_raw_rssi_info(adapter);
- #endif
- }
- break;
case HW_DEF_ODM_DBG_FLAG:
ODM_CmnInfoUpdate(odm, ODM_CMNINFO_DBG_COMP, *((u64 *)value));
break;
@@ -879,53 +871,6 @@ void rtw_hal_check_rxfifo_full(struct adapter *adapter)
}
}
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-void rtw_dump_raw_rssi_info(struct adapter *padapter)
-{
- u8 isCCKrate, rf_path;
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
-
- isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
-
- if (isCCKrate)
- psample_pkt_rssi->mimo_signal_strength[0] = psample_pkt_rssi->pwdball;
-
- for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- if (!isCCKrate) {
- netdev_dbg(padapter->pnetdev, ", rx_ofdm_pwr:%d(dBm), rx_ofdm_snr:%d(dB)\n",
- psample_pkt_rssi->ofdm_pwr[rf_path],
- psample_pkt_rssi->ofdm_snr[rf_path]);
- }
- }
-}
-
-void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
-{
- u8 isCCKrate, rf_path;
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
-
- struct odm_phy_info *pPhyInfo = (PODM_PHY_INFO_T)(&pattrib->phy_info);
- struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
-
- psample_pkt_rssi->data_rate = pattrib->data_rate;
- isCCKrate = pattrib->data_rate <= DESC_RATE11M;
-
- psample_pkt_rssi->pwdball = pPhyInfo->rx_pwd_ba11;
- psample_pkt_rssi->pwr_all = pPhyInfo->recv_signal_power;
-
- for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- psample_pkt_rssi->mimo_signal_strength[rf_path] = pPhyInfo->rx_mimo_signal_strength[rf_path];
- psample_pkt_rssi->mimo_signal_quality[rf_path] = pPhyInfo->rx_mimo_signal_quality[rf_path];
- if (!isCCKrate) {
- psample_pkt_rssi->ofdm_pwr[rf_path] = pPhyInfo->RxPwr[rf_path];
- psample_pkt_rssi->ofdm_snr[rf_path] = pPhyInfo->RxSNR[rf_path];
- }
- }
-}
-#endif
-
static u32 Array_kfreemap[] = {
0xf8, 0xe,
0xf6, 0xc,
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
index 717faebf8aca..db3d7d72bffa 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
@@ -67,8 +67,4 @@ void rtl8723b_process_phy_info(struct adapter *padapter, void *prframe)
/* Check EVM */
/* */
process_link_qual(padapter, precvframe);
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- rtw_store_phy_info(padapter, prframe);
- #endif
-
}
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 4db93484725f..258a74076dd9 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -149,11 +149,6 @@ bool eqNByte(u8 *str1, u8 *str2, u32 num);
bool GetU1ByteIntegerFromStringInDecimal(char *str, u8 *in);
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe);
-void rtw_dump_raw_rssi_info(struct adapter *padapter);
-#endif
-
#define HWSET_MAX_SIZE 512
void rtw_bb_rf_gain_offset(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 282e141616b0..85de862823c2 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -144,7 +144,6 @@ enum hal_def_variable {
HAL_DEF_PCI_AMD_L1_SUPPORT,
HAL_DEF_PCI_ASPM_OSC, /* Support for ASPM OSC, added by Roger, 2013.03.27. */
HAL_DEF_MACID_SLEEP, /* Support for MACID sleep */
- HAL_DEF_DBG_RX_INFO_DUMP,
};
enum hal_odm_variable {
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 215ece612f71..73199be78139 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -47,8 +47,6 @@ u32 rtw_start_drv_threads(struct adapter *padapter);
void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
-int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
void rtw_unregister_netdevs(struct dvobj_priv *dvobj);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 18dd1464e0c2..aa9f9d5ecd01 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -89,21 +89,6 @@ struct phy_info {
u8 btCoexPwrAdjust;
};
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-struct rx_raw_rssi {
- u8 data_rate;
- u8 pwdball;
- s8 pwr_all;
-
- u8 mimo_signal_strength[4];/* in 0~100 index */
- u8 mimo_signal_quality[4];
-
- s8 ofdm_pwr[4];
- u8 ofdm_snr[4];
-
-};
-#endif
-
struct rx_pkt_attrib {
u16 pkt_len;
u8 physt;
@@ -221,9 +206,6 @@ struct recv_priv {
u8 signal_strength;
u8 signal_qual;
s8 rssi; /* translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength); */
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- struct rx_raw_rssi raw_rssi_info;
- #endif
/* s8 rxpwdb; */
s16 noise;
/* int RxSNRdB[2]; */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index c053ee9c1361..7fcc46a0bb48 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1802,7 +1802,8 @@ static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
}
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev, int *dbm)
+ struct wireless_dev *wdev,
+ unsigned int link_id, int *dbm)
{
*dbm = (12);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
deleted file mode 100644
index 793b051536f3..000000000000
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <drv_types.h>
-#include <rtw_mp.h>
-#include <hal_btcoex.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
-
-static int wpa_set_auth_algs(struct net_device *dev, u32 value)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- int ret = 0;
-
- if ((value & IW_AUTH_ALG_SHARED_KEY) && (value & IW_AUTH_ALG_OPEN_SYSTEM)) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
- } else if (value & IW_AUTH_ALG_SHARED_KEY) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
-
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
- } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
- /* padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; */
- if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- }
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
-{
- int ret = 0;
- u8 max_idx;
- u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
- param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
- param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0)
- max_idx = WEP_KEYS - 1;
- else
- max_idx = BIP_MAX_KEYID;
-
- if (param->u.crypt.idx > max_idx) {
- netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
- ret = -EINVAL;
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
-
- wep_key_idx = param->u.crypt.idx;
- wep_key_len = param->u.crypt.key_len;
-
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- /* Allocate a full structure to avoid potentially running off the end. */
- pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
- if (!pwep) {
- ret = -ENOMEM;
- goto exit;
- }
-
- pwep->key_length = wep_key_len;
- pwep->length = wep_total_len;
-
- if (wep_key_len == 13) {
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- }
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- pwep->key_index = wep_key_idx;
- pwep->key_index |= 0x80000000;
-
- memcpy(pwep->key_material, param->u.crypt.key, pwep->key_length);
-
- if (param->u.crypt.set_tx) {
- if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
- ret = -EOPNOTSUPP;
- } else {
- /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
- /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to fw/cam */
-
- if (wep_key_idx >= WEP_KEYS) {
- ret = -EOPNOTSUPP;
- goto exit;
- }
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
- rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
- }
-
- goto exit;
- }
-
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
- struct sta_info *psta, *pbcmc_sta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) { /* sta mode */
- psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta) {
- /* Jeff: don't disable ieee8021x_blocked while clearing key */
- if (strcmp(param->u.crypt.alg, "none") != 0)
- psta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
- psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- }
-
- if (param->u.crypt.set_tx == 1) { /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
-
- padapter->securitypriv.busetkipkey = false;
- /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
- }
-
- rtw_setstakey_cmd(padapter, psta, true, true);
- } else { /* group key */
- if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) {
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- /* only a TKIP group key needs the MIC keys installed */
- if (param->u.crypt.key_len > 16) {
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
- }
- padapter->securitypriv.binstallGrpkey = true;
-
- padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
-
- rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
- } else if (strcmp(param->u.crypt.alg, "BIP") == 0) {
- /* save the IGTK key, length 16 bytes */
- memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- padapter->securitypriv.dot11wBIPKeyid = param->u.crypt.idx;
- padapter->securitypriv.binstallBIPkey = true;
- }
- }
- }
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- /* Jeff: don't disable ieee8021x_blocked while clearing key */
- if (strcmp(param->u.crypt.alg, "none") != 0)
- pbcmc_sta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
- pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- }
- }
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- /* adhoc mode */
- }
- }
-
-exit:
-
- kfree(pwep);
- return ret;
-}
-
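The WEP branch above forces every key to one of the two lengths the hardware supports (5 bytes for WEP-40, 13 for WEP-104) and marks the default transmit key by setting bit 31 of the key index. A minimal, self-contained sketch of that normalization, with hypothetical names (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define WEP_TX_KEY_FLAG 0x80000000u	/* bit 31: this index is the transmit key */

/* Hypothetical helper mirroring the clamp in wpa_set_encryption():
 * anything up to 5 bytes is WEP-40, everything longer is WEP-104. */
static uint32_t wep_normalize_len(uint32_t key_len)
{
	return key_len <= 5 ? 5 : 13;
}

int main(void)
{
	uint32_t key_index = 2 | WEP_TX_KEY_FLAG;

	printf("len=%u key_index=0x%08x\n", wep_normalize_len(10), key_index);
	return 0;
}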
-static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
-{
- u8 *buf = NULL;
- int group_cipher = 0, pairwise_cipher = 0;
- int ret = 0;
- u8 null_addr[] = {0, 0, 0, 0, 0, 0};
-
- if (ielen > MAX_WPA_IE_LEN || !pie) {
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (!pie)
- return ret;
- else
- return -EINVAL;
- }
-
- if (ielen) {
- buf = rtw_zmalloc(ielen);
- if (!buf) {
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(buf, pie, ielen);
-
- if (ielen < RSN_HEADER_LEN) {
- ret = -1;
- goto exit;
- }
-
- if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- if (group_cipher == 0)
- group_cipher = WPA_CIPHER_NONE;
- if (pairwise_cipher == 0)
- pairwise_cipher = WPA_CIPHER_NONE;
-
- switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- {/* set wps_ie */
- u16 cnt = 0;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- while (cnt < ielen) {
- eid = buf[cnt];
-
- if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
- padapter->securitypriv.wps_ie_len = ((buf[cnt + 1] + 2) < MAX_WPS_IE_LEN) ? (buf[cnt + 1] + 2) : MAX_WPS_IE_LEN;
-
- memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
-
- set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
-
- cnt += buf[cnt + 1] + 2;
-
- break;
- } else {
- cnt += buf[cnt + 1] + 2; /* goto next */
- }
- }
- }
- }
-
- /* TKIP and AES disallow multicast packets until the group key is installed */
- if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
- /* WPS open needs multicast enabled */
- /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true) */
- rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);
-
-exit:
-
- kfree(buf);
-
- return ret;
-}
-
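The wps_ie scan in rtw_set_wpa_ie() above walks the information elements as type/length/value triples: buf[cnt] is the element ID, buf[cnt + 1] its length, and the payload follows. Note the loop trusts the embedded length byte; a hardened sketch (hypothetical names, not the driver's code) bounds each step against the remaining buffer:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EID_VENDOR_SPECIFIC 0xdd

/* Find a vendor-specific IE carrying the given 4-byte OUI+type
 * (WPS is 00:50:f2:04). Returns the whole element or NULL. */
static const uint8_t *find_vendor_ie(const uint8_t *buf, size_t len,
				     const uint8_t oui[4])
{
	size_t cnt = 0;

	while (cnt + 2 <= len) {
		uint8_t eid = buf[cnt];
		uint8_t elen = buf[cnt + 1];

		if (cnt + 2 + elen > len)
			break;	/* truncated element: stop instead of overreading */
		if (eid == EID_VENDOR_SPECIFIC && elen >= 4 &&
		    !memcmp(&buf[cnt + 2], oui, 4))
			return &buf[cnt];
		cnt += 2 + elen;
	}
	return NULL;
}

int main(void)
{
	const uint8_t wps_oui[4] = {0x00, 0x50, 0xf2, 0x04};
	const uint8_t ies[] = {0x00, 0x04, 't', 'e', 's', 't',	/* SSID IE */
			       0xdd, 0x06, 0x00, 0x50, 0xf2, 0x04, 0x10, 0x4a};

	printf("WPS IE %sfound\n",
	       find_vendor_ie(ies, sizeof(ies), wps_oui) ? "" : "not ");
	return 0;
}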
-static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
- uint ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- switch (name) {
- case IEEE_PARAM_WPA_ENABLED:
-
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; /* 802.1x */
-
- /* ret = ieee80211_wpa_enable(ieee, value); */
-
- switch ((value) & 0xff) {
- case 1: /* WPA */
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case 2: /* WPA2 */
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- }
-
- break;
-
- case IEEE_PARAM_TKIP_COUNTERMEASURES:
- /* ieee->tkip_countermeasures =value; */
- break;
-
- case IEEE_PARAM_DROP_UNENCRYPTED:
- {
- /* HACK:
- *
- * wpa_supplicant calls set_wpa_enabled when the driver
- * is loaded and unloaded, regardless of if WPA is being
- * used. No other calls are made which can be used to
- * determine if encryption will be used or not prior to
- * association being expected. If encryption is not being
- * used, drop_unencrypted is set to false, else true -- we
- * can use this to determine if the CAP_PRIVACY_ON bit should
- * be set.
- */
- break;
- }
- case IEEE_PARAM_PRIVACY_INVOKED:
-
- /* ieee->privacy_invoked =value; */
-
- break;
-
- case IEEE_PARAM_AUTH_ALGS:
-
- ret = wpa_set_auth_algs(dev, value);
-
- break;
-
- case IEEE_PARAM_IEEE_802_1X:
-
- /* ieee->ieee802_1x =value; */
-
- break;
-
- case IEEE_PARAM_WPAX_SELECT:
-
- /* added for WPA2 mixed mode */
- /*
- spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
- ieee->wpax_type_set = 1;
- ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
- */
-
- break;
-
- default:
-
- ret = -EOPNOTSUPP;
-
- break;
- }
-
- return ret;
-}
-
-static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- switch (command) {
- case IEEE_MLME_STA_DEAUTH:
-
- if (!rtw_set_802_11_disassociate(padapter))
- ret = -1;
-
- break;
-
- case IEEE_MLME_STA_DISASSOC:
-
- if (!rtw_set_802_11_disassociate(padapter))
- ret = -1;
-
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
-{
- struct ieee_param *param;
- uint ret = 0;
-
- /* down(&ieee->wx_sem); */
-
- if (!p->pointer || p->length != sizeof(struct ieee_param))
- return -EINVAL;
-
- param = rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
-
- switch (param->cmd) {
- case IEEE_CMD_SET_WPA_PARAM:
- ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
- break;
-
- case IEEE_CMD_SET_WPA_IE:
- /* ret = wpa_set_wpa_ie(dev, param, p->length); */
- ret = rtw_set_wpa_ie(rtw_netdev_priv(dev), (char *)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
- break;
-
- case IEEE_CMD_SET_ENCRYPTION:
- ret = wpa_set_encryption(dev, param, p->length);
- break;
-
- case IEEE_CMD_MLME:
- ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- /* up(&ieee->wx_sem); */
- return ret;
-}
-
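Both ioctl entry points follow the same user-pointer lifecycle: validate the length, allocate, copy_from_user(), dispatch on the command, optionally copy the result back, free. The allocate-and-copy pair is what the kernel's memdup_user() helper wraps; a sketch of the equivalent shape (a possible refactor, not what this driver does):

	param = memdup_user(p->pointer, p->length);	/* kmalloc + copy_from_user */
	if (IS_ERR(param))
		return PTR_ERR(param);
	/* ... dispatch on param->cmd ... */
	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
		ret = -EFAULT;
	kfree(param);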
-static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
-{
- int ret = 0;
- u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct sta_info *psta = NULL, *pbcmc_sta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
- char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
- char *grpkey = psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- /* sizeof(struct ieee_param) = 64 bytes; */
- /* if (param_len != (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) */
- if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (!psta)
- /* ret = -EINVAL; */
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "none") == 0 && !psta) {
- /* TODO: clear default encryption keys */
-
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
- psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
-
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0 && !psta) {
- wep_key_idx = param->u.crypt.idx;
- wep_key_len = param->u.crypt.key_len;
-
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- /* Allocate a full structure to avoid potentially running off the end. */
- pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
- if (!pwep) {
- ret = -ENOMEM;
- goto exit;
- }
-
- pwep->key_length = wep_key_len;
- pwep->length = wep_total_len;
- }
-
- pwep->key_index = wep_key_idx;
-
- memcpy(pwep->key_material, param->u.crypt.key, pwep->key_length);
-
- if (param->u.crypt.set_tx) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
- psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
- psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
-
- if (pwep->key_length == 13) {
- psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- }
-
- psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
-
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
-
- rtw_ap_set_wep_key(padapter, pwep->key_material, pwep->key_length, wep_key_idx, 1);
- } else {
- /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
- /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to cam */
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
-
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
-
- rtw_ap_set_wep_key(padapter, pwep->key_material, pwep->key_length, wep_key_idx, 0);
- }
-
- goto exit;
- }
-
- if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
- if (param->u.crypt.set_tx == 1) {
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
-
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- /* set mic key */
- memcpy(txkey, &param->u.crypt.key[16], 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _AES_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- } else {
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- }
-
- psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
-
- psecuritypriv->binstallGrpkey = true;
-
- psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;
-
- rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- pbcmc_sta->ieee8021x_blocked = false;
- pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
- }
- }
-
- goto exit;
- }
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- if (param->u.crypt.set_tx == 1) {
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- psta->dot118021XPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psta->dot118021XPrivacy = _WEP104_;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psta->dot118021XPrivacy = _TKIP_;
-
- /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psta->dot118021XPrivacy = _AES_;
- } else {
- psta->dot118021XPrivacy = _NO_PRIVACY_;
- }
-
- rtw_ap_set_pairwise_key(padapter, psta);
-
- psta->ieee8021x_blocked = false;
-
- } else { /* group key??? */
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- /* set mic key */
- memcpy(txkey, &param->u.crypt.key[16], 8);
- memcpy(rxkey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _AES_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- } else {
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- }
-
- psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
-
- psecuritypriv->binstallGrpkey = true;
-
- psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;
-
- rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- pbcmc_sta->ieee8021x_blocked = false;
- pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
- }
- }
- }
- }
-
-exit:
- kfree(pwep);
-
- return ret;
-}
-
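The memcpy offsets used throughout these handlers (0, 16, 24) encode the standard 32-byte TKIP key block: bytes 0-15 are the temporal key, bytes 16-23 the TX MIC ("Michael") key, bytes 24-31 the RX MIC key. A small sketch making that layout explicit (hypothetical struct, for illustration only):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Layout of the 32-byte TKIP key material as sliced by the handlers above. */
struct tkip_key_block {
	uint8_t tk[16];		/* temporal key:  key[0..15]  */
	uint8_t tx_mic[8];	/* TX MIC key:    key[16..23] */
	uint8_t rx_mic[8];	/* RX MIC key:    key[24..31] */
};

int main(void)
{
	uint8_t raw[32];
	struct tkip_key_block kb;

	for (int i = 0; i < 32; i++)
		raw[i] = (uint8_t)i;

	/* Equivalent to the three memcpy() calls in the driver. */
	memcpy(kb.tk, raw, 16);
	memcpy(kb.tx_mic, raw + 16, 8);
	memcpy(kb.rx_mic, raw + 24, 8);

	printf("tx_mic[0]=%u rx_mic[0]=%u\n", kb.tx_mic[0], kb.rx_mic[0]);
	return 0;
}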
-static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- unsigned char *pbuf = param->u.bcn_ie.buf;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
-
- if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
- pstapriv->max_num_sta = NUM_STA;
-
- if (rtw_check_beacon_data(padapter, pbuf, (len - 12 - 2)) == _SUCCESS)/* 12 = param header, 2 = padding (struct not packed) */
- ret = 0;
- else
- ret = -EINVAL;
-
- return ret;
-}
-
-static void rtw_hostapd_sta_flush(struct net_device *dev)
-{
- /* _irqL irqL; */
- /* struct list_head *phead, *plist; */
- /* struct sta_info *psta = NULL; */
- struct adapter *padapter = rtw_netdev_priv(dev);
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
-
- flush_all_cam_entry(padapter); /* clear CAM */
-
- rtw_sta_flush(padapter);
-}
-
-static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
-/*
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta)
- {
- rtw_free_stainfo(padapter, psta);
-
- psta = NULL;
- }
-*/
- /* psta = rtw_alloc_stainfo(pstapriv, param->sta_addr); */
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- int flags = param->u.add_sta.flags;
-
- psta->aid = param->u.add_sta.aid;/* aid = 1~2007 */
-
- memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
-
- /* check wmm cap. */
- if (WLAN_STA_WME & flags)
- psta->qos_option = 1;
- else
- psta->qos_option = 0;
-
- if (pmlmepriv->qospriv.qos_option == 0)
- psta->qos_option = 0;
-
- /* check 802.11n HT cap. */
- if (WLAN_STA_HT & flags) {
- psta->htpriv.ht_option = true;
- psta->qos_option = 1;
- memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct ieee80211_ht_cap));
- } else {
- psta->htpriv.ht_option = false;
- }
-
- if (!pmlmepriv->htpriv.ht_option)
- psta->htpriv.ht_option = false;
-
- update_sta_info_apmode(padapter, psta);
-
- } else {
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- u8 updated = false;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (list_empty(&psta->asoc_list) == false) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update(padapter, updated);
-
- psta = NULL;
- }
-
- return ret;
-}
-
-static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
- struct sta_data *psta_data = (struct sta_data *)param_ex->data;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
- param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
- param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
- if (psta) {
- psta_data->aid = (u16)psta->aid;
- psta_data->capability = psta->capability;
- psta_data->flags = psta->flags;
-
-/*
- nonerp_set : BIT(0)
- no_short_slot_time_set : BIT(1)
- no_short_preamble_set : BIT(2)
- no_ht_gf_set : BIT(3)
- no_ht_set : BIT(4)
- ht_20mhz_set : BIT(5)
-*/
-
- psta_data->sta_set = ((psta->nonerp_set) |
- (psta->no_short_slot_time_set << 1) |
- (psta->no_short_preamble_set << 2) |
- (psta->no_ht_gf_set << 3) |
- (psta->no_ht_set << 4) |
- (psta->ht_20mhz_set << 5));
-
- psta_data->tx_supp_rates_len = psta->bssratelen;
- memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
- memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct ieee80211_ht_cap));
- psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
- psta_data->rx_bytes = psta->sta_stats.rx_bytes;
- psta_data->rx_drops = psta->sta_stats.rx_drops;
-
- psta_data->tx_pkts = psta->sta_stats.tx_pkts;
- psta_data->tx_bytes = psta->sta_stats.tx_bytes;
- psta_data->tx_drops = psta->sta_stats.tx_drops;
-
- } else {
- ret = -1;
- }
-
- return ret;
-}
-
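The sta_set word built in rtw_ioctl_get_sta_data() packs six per-station capability bits in exactly the order given by the block comment above it. A compact sketch of the same packing (hypothetical names, illustration only):

#include <stdint.h>
#include <stdio.h>

enum {
	STA_NONERP		= 1u << 0,
	STA_NO_SHORT_SLOT	= 1u << 1,
	STA_NO_SHORT_PREAMBLE	= 1u << 2,
	STA_NO_HT_GF		= 1u << 3,
	STA_NO_HT		= 1u << 4,
	STA_HT_20MHZ		= 1u << 5,
};

static uint32_t pack_sta_set(int nonerp, int no_slot, int no_pre,
			     int no_gf, int no_ht, int ht20)
{
	return (nonerp  ? STA_NONERP : 0) |
	       (no_slot ? STA_NO_SHORT_SLOT : 0) |
	       (no_pre  ? STA_NO_SHORT_PREAMBLE : 0) |
	       (no_gf   ? STA_NO_HT_GF : 0) |
	       (no_ht   ? STA_NO_HT : 0) |
	       (ht20    ? STA_HT_20MHZ : 0);
}

int main(void)
{
	uint32_t set = pack_sta_set(1, 0, 0, 0, 0, 1);

	printf("sta_set=0x%02x ht20=%d\n", set, !!(set & STA_HT_20MHZ));
	return 0;
}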
-static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC)) {
- int wpa_ie_len;
- int copy_len;
-
- wpa_ie_len = psta->wpa_ie[1];
-
- copy_len = ((wpa_ie_len + 2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)) : (wpa_ie_len + 2);
-
- param->u.wpa_ie.len = copy_len;
-
- memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
- }
- } else {
- ret = -1;
- }
-
- return ret;
-}
-
-static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2 = padding (struct not packed) */
-
- kfree(pmlmepriv->wps_beacon_ie);
- pmlmepriv->wps_beacon_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_beacon_ie_len = ie_len;
- if (!pmlmepriv->wps_beacon_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
-
- update_beacon(padapter, WLAN_EID_VENDOR_SPECIFIC, wps_oui, true);
-
- pmlmeext->bstart_bss = true;
- }
-
- return ret;
-}
-
-static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2 = padding (struct not packed) */
-
- kfree(pmlmepriv->wps_probe_resp_ie);
- pmlmepriv->wps_probe_resp_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_probe_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_probe_resp_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
- }
-
- return ret;
-}
-
-static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2 = padding (struct not packed) */
-
- kfree(pmlmepriv->wps_assoc_resp_ie);
- pmlmepriv->wps_assoc_resp_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_assoc_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_assoc_resp_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
- }
-
- return ret;
-}
-
-static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *adapter = rtw_netdev_priv(dev);
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
- struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *mlmeinfo = &mlmeext->mlmext_info;
- int ie_len;
- u8 *ssid_ie;
- char ssid[NDIS_802_11_LENGTH_SSID + 1];
- signed int ssid_len;
- u8 ignore_broadcast_ssid;
-
- if (check_fwstate(mlmepriv, WIFI_AP_STATE) != true)
- return -EPERM;
-
- if (param->u.bcn_ie.reserved[0] != 0xea)
- return -EINVAL;
-
- mlmeinfo->hidden_ssid_mode = ignore_broadcast_ssid = param->u.bcn_ie.reserved[1];
-
- ie_len = len - 12 - 2;/* 12 = param header, 2 = padding (struct not packed) */
- ssid_ie = rtw_get_ie(param->u.bcn_ie.buf, WLAN_EID_SSID, &ssid_len, ie_len);
-
- if (ssid_ie && ssid_len > 0 && ssid_len <= NDIS_802_11_LENGTH_SSID) {
- struct wlan_bssid_ex *pbss_network = &mlmepriv->cur_network.network;
- struct wlan_bssid_ex *pbss_network_ext = &mlmeinfo->network;
-
- memcpy(ssid, ssid_ie + 2, ssid_len);
- ssid[ssid_len] = 0x0;
-
- memcpy(pbss_network->ssid.ssid, (void *)ssid, ssid_len);
- pbss_network->ssid.ssid_length = ssid_len;
- memcpy(pbss_network_ext->ssid.ssid, (void *)ssid, ssid_len);
- pbss_network_ext->ssid.ssid_length = ssid_len;
- }
-
- return ret;
-}
-
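rtw_set_hidden_ssid() copies the SSID out of the parsed IE and NUL-terminates it; the length guard (0 < ssid_len <= 32) is what keeps the fixed buffer safe. A minimal sketch of that copy, with the 802.11 SSID limit spelled out (hypothetical helper, not driver code):

#include <stdio.h>
#include <string.h>

#define SSID_MAX_LEN 32	/* NDIS_802_11_LENGTH_SSID */

/* Copy an SSID from an IE payload into a NUL-terminated buffer.
 * Returns the copied length, or -1 if the IE length is out of range. */
static int copy_ssid(char dst[SSID_MAX_LEN + 1],
		     const unsigned char *ie_payload, int ssid_len)
{
	if (ssid_len <= 0 || ssid_len > SSID_MAX_LEN)
		return -1;
	memcpy(dst, ie_payload, ssid_len);
	dst[ssid_len] = '\0';
	return ssid_len;
}

int main(void)
{
	char ssid[SSID_MAX_LEN + 1];
	const unsigned char payload[] = {'l', 'a', 'b'};

	if (copy_ssid(ssid, payload, sizeof(payload)) > 0)
		printf("ssid=%s\n", ssid);
	return 0;
}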
-static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- rtw_acl_remove_sta(padapter, param->sta_addr);
- return 0;
-}
-
-static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- return rtw_acl_add_sta(padapter, param->sta_addr);
-}
-
-static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- rtw_set_macaddr_acl(padapter, param->u.mlme.command);
-
- return ret;
-}
-
-static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
-{
- struct ieee_param *param;
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- /*
- * This function is expected to be called in master mode, where power
- * saving is not allowed, so we just check hw_init_completed.
- */
-
- if (!padapter->hw_init_completed)
- return -EPERM;
-
- if (!p->pointer || p->length != sizeof(*param))
- return -EINVAL;
-
- param = rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
-
- switch (param->cmd) {
- case RTL871X_HOSTAPD_FLUSH:
-
- rtw_hostapd_sta_flush(dev);
-
- break;
-
- case RTL871X_HOSTAPD_ADD_STA:
-
- ret = rtw_add_sta(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_REMOVE_STA:
-
- ret = rtw_del_sta(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_SET_BEACON:
-
- ret = rtw_set_beacon(dev, param, p->length);
-
- break;
-
- case RTL871X_SET_ENCRYPTION:
-
- ret = rtw_set_encryption(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_GET_WPAIE_STA:
-
- ret = rtw_get_sta_wpaie(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_BEACON:
-
- ret = rtw_set_wps_beacon(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP:
-
- ret = rtw_set_wps_probe_resp(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP:
-
- ret = rtw_set_wps_assoc_resp(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_HIDDEN_SSID:
-
- ret = rtw_set_hidden_ssid(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_GET_INFO_STA:
-
- ret = rtw_ioctl_get_sta_data(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_MACADDR_ACL:
-
- ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_ACL_ADD_STA:
-
- ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_ACL_REMOVE_STA:
-
- ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
-
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
- return ret;
-}
-
-/* end of code copied from net/wireless/wext.c */
-
-int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *)rq;
- int ret = 0;
-
- switch (cmd) {
- case RTL_IOCTL_WPA_SUPPLICANT:
- ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
- break;
- case RTL_IOCTL_HOSTAPD:
- ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 4e1917c05402..738a601c55bb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -407,7 +407,6 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_select_queue = rtw_select_queue,
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
- .ndo_do_ioctl = rtw_ioctl,
};
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index dc0d715ed970..0dbe76ee5570 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance
if (wait) {
if (!wait_for_completion_timeout(&instance->msg_avail_comp,
- msecs_to_jiffies(10 * 1000))) {
+ secs_to_jiffies(10))) {
dev_err(instance->dev,
"vchi message timeout, msg=%d\n", m->type);
return -ETIMEDOUT;
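The bcm2835-audio change is part of a tree-wide cleanup: secs_to_jiffies(10) expresses the same timeout as msecs_to_jiffies(10 * 1000) without the hand-written multiplication. Conceptually (a simplification with an assumed HZ; the real kernel macros also handle rounding and type width):

#include <stdio.h>

#define HZ 250	/* hypothetical tick rate, for illustration */
#define msecs_to_jiffies(ms)	((unsigned long)(ms) * HZ / 1000)
#define secs_to_jiffies(s)	((unsigned long)(s) * HZ)

int main(void)
{
	/* Both spellings yield the same jiffy count for a whole-second timeout. */
	printf("%lu == %lu\n", msecs_to_jiffies(10 * 1000), secs_to_jiffies(10));
	return 0;
}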
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 31a44025e08f..733594dde9ae 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -761,8 +761,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap(
- image->bus_resource.start, size);
+ image->kern_base = ioremap(image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 922b207bc69d..70d76f3dd693 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -2,9 +2,9 @@
config ISCSI_TARGET
tristate "SCSI Target Mode Stack"
depends on INET
+ select CRC32
select CRYPTO
- select CRYPTO_CRC32C
- select CRYPTO_CRC32C_INTEL if X86
+ select CRYPTO_HASH
help
Say M to enable the SCSI target mode stack. A SCSI target mode stack
is software that makes local storage available over a storage network
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6002283cbeba..1244ef3aa86c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -8,7 +8,7 @@
*
******************************************************************************/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -490,8 +490,8 @@ void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
}
EXPORT_SYMBOL(iscsit_aborted_task);
-static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, const void *, void *);
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
static int
@@ -510,9 +510,7 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
+ *header_digest = iscsit_crc_buf(hdr, ISCSI_HDR_LEN, 0, NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -537,11 +535,9 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- data_buf, data_buf_len,
- padding, &cmd->pad_bytes,
- &cmd->data_crc);
-
+ cmd->data_crc = iscsit_crc_buf(data_buf, data_buf_len,
+ padding,
+ &cmd->pad_bytes);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -566,8 +562,8 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
-static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
- u32, u32, u32, u8 *);
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes);
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const struct iscsi_datain *datain)
@@ -584,10 +580,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
-
+ *header_digest = iscsit_crc_buf(cmd->pdu, ISCSI_HDR_LEN, 0,
+ NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -614,12 +608,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
- cmd, datain->offset,
- datain->length,
- cmd->padding,
- cmd->pad_bytes);
-
+ cmd->data_crc = iscsit_crc_sglist(cmd, datain->length,
+ cmd->padding, cmd->pad_bytes);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -1404,77 +1394,45 @@ iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}
-static u32 iscsit_do_crypto_hash_sg(
- struct ahash_request *hash,
- struct iscsit_cmd *cmd,
- u32 data_offset,
- u32 data_length,
- u32 padding,
- u8 *pad_bytes)
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes)
{
- u32 data_crc;
- struct scatterlist *sg;
- unsigned int page_off;
-
- crypto_ahash_init(hash);
-
- sg = cmd->first_data_sg;
- page_off = cmd->first_data_sg_off;
-
- if (data_length && page_off) {
- struct scatterlist first_sg;
- u32 len = min_t(u32, data_length, sg->length - page_off);
-
- sg_init_table(&first_sg, 1);
- sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
-
- ahash_request_set_crypt(hash, &first_sg, NULL, len);
- crypto_ahash_update(hash);
-
- data_length -= len;
- sg = sg_next(sg);
- }
+ struct scatterlist *sg = cmd->first_data_sg;
+ unsigned int page_off = cmd->first_data_sg_off;
+ u32 crc = ~0;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, sg->length);
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ const void *virt;
- ahash_request_set_crypt(hash, sg, NULL, cur_len);
- crypto_ahash_update(hash);
+ virt = kmap_local_page(sg_page(sg)) + sg->offset + page_off;
+ crc = crc32c(crc, virt, cur_len);
+ kunmap_local(virt);
- data_length -= cur_len;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
- }
-
- if (padding) {
- struct scatterlist pad_sg;
- sg_init_one(&pad_sg, pad_bytes, padding);
- ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
- padding);
- crypto_ahash_finup(hash);
- } else {
- ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
- crypto_ahash_final(hash);
+ page_off = 0;
+ data_length -= cur_len;
}
- return data_crc;
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
+
+ return ~crc;
}
-static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
- const void *buf, u32 payload_length, u32 padding,
- const void *pad_bytes, void *data_crc)
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes)
{
- struct scatterlist sg[2];
+ u32 crc = ~0;
- sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(sg, buf, payload_length);
- if (padding)
- sg_set_buf(sg + 1, pad_bytes, padding);
+ crc = crc32c(crc, buf, payload_length);
- ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
- crypto_ahash_digest(hash);
+ return ~crc;
}
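The two replacement helpers expose a property of CRC32C that the ahash plumbing hid: seed with ~0, fold each fragment in with crc32c(), and invert once at the end, so digesting a scattered buffer is just a chained update. A self-contained sketch (bitwise reference implementation, not the kernel's optimized crc32c(); the segment struct stands in for a scatterlist entry):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. Matches the
 * kernel's crc32c(crc, buf, len) update semantics: no inversion inside. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
	}
	return crc;
}

struct seg { const uint8_t *base; size_t len; };

/* Chained digest over segments, honoring an offset into the first one --
 * the same shape as iscsit_crc_sglist() above. */
static uint32_t crc_over_segments(const struct seg *sg, size_t nseg,
				  size_t first_off, size_t total)
{
	uint32_t crc = ~0u;
	size_t off = first_off;

	for (size_t i = 0; i < nseg && total; i++) {
		size_t n = sg[i].len - off;

		if (n > total)
			n = total;
		crc = crc32c_update(crc, sg[i].base + off, n);
		total -= n;
		off = 0;	/* only the first segment starts mid-buffer */
	}
	return ~crc;
}

int main(void)
{
	const uint8_t data[] = "iscsi data digest";
	const struct seg sg[] = { { data, 5 }, { data + 5, sizeof(data) - 1 - 5 } };
	uint32_t whole = ~crc32c_update(~0u, data, sizeof(data) - 1);
	uint32_t split = crc_over_segments(sg, 2, 0, sizeof(data) - 1);

	printf("whole=0x%08x split=0x%08x %s\n", whole, split,
	       whole == split ? "(match)" : "(MISMATCH)");
	return 0;
}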
int
@@ -1662,11 +1620,8 @@ iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- be32_to_cpu(hdr->offset),
- payload_length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, payload_length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
@@ -1925,10 +1880,8 @@ static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cm
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
- payload_length, padding,
- cmd->pad_bytes, &data_crc);
-
+ data_crc = iscsit_crc_buf(ping_data, payload_length,
+ padding, cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
" 0x%08x does not match computed 0x%08x\n",
@@ -2328,10 +2281,7 @@ iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, rx_size, 0, NULL,
- &data_crc);
-
+ data_crc = iscsit_crc_buf(text_in, rx_size, 0, NULL);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
" 0x%08x does not match computed"
@@ -2688,10 +2638,8 @@ static int iscsit_handle_immediate_data(
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- cmd->write_data_done, length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ImmediateData CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n", checksum,
@@ -4116,10 +4064,8 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
- ISCSI_HDR_LEN, 0, NULL,
- &checksum);
-
+ checksum = iscsit_crc_buf(buffer, ISCSI_HDR_LEN, 0,
+ NULL);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
" received 0x%08x, computed 0x%08x\n",
@@ -4406,15 +4352,6 @@ int iscsit_close_connection(
*/
iscsit_check_conn_usage_count(conn);
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->sock)
sock_release(conn->sock);
@@ -4727,21 +4664,6 @@ int iscsit_logout_post_handler(
}
EXPORT_SYMBOL(iscsit_logout_post_handler);
-void iscsit_fail_session(struct iscsit_session *sess)
-{
- struct iscsit_conn *conn;
-
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
- conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
- }
- spin_unlock_bh(&sess->conn_lock);
-
- pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
- sess->session_state = TARG_SESS_STATE_FAILED;
-}
-
void iscsit_stop_session(
struct iscsit_session *sess,
int session_sleep,
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 873411e95ed2..f4addae2aae4 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -40,7 +40,6 @@ extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsit_conn *);
extern int iscsit_close_session(struct iscsit_session *, bool can_sleep);
-extern void iscsit_fail_session(struct iscsit_session *);
extern void iscsit_stop_session(struct iscsit_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 18e88d2ea5fd..56d78af7cec7 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -25,54 +25,6 @@
/*
* FIXME: Does RData SNACK apply here as well?
*/
-void iscsit_create_conn_recovery_datain_values(
- struct iscsit_cmd *cmd,
- __be32 exp_data_sn)
-{
- u32 data_sn = 0;
- struct iscsit_conn *conn = cmd->conn;
-
- cmd->next_burst_len = 0;
- cmd->read_data_done = 0;
-
- while (be32_to_cpu(exp_data_sn) > data_sn) {
- if ((cmd->next_burst_len +
- conn->conn_ops->MaxRecvDataSegmentLength) <
- conn->sess->sess_ops->MaxBurstLength) {
- cmd->read_data_done +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- cmd->next_burst_len +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- } else {
- cmd->read_data_done +=
- (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len);
- cmd->next_burst_len = 0;
- }
- data_sn++;
- }
-}
-
-void iscsit_create_conn_recovery_dataout_values(
- struct iscsit_cmd *cmd)
-{
- u32 write_data_done = 0;
- struct iscsit_conn *conn = cmd->conn;
-
- cmd->data_sn = 0;
- cmd->next_burst_len = 0;
-
- while (cmd->write_data_done > write_data_done) {
- if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
- cmd->write_data_done)
- write_data_done += conn->sess->sess_ops->MaxBurstLength;
- else
- break;
- }
-
- cmd->write_data_done = write_data_done;
-}
-
static int iscsit_attach_active_connection_recovery_entry(
struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 6655e4bcf893..9064c74eef7a 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -9,8 +9,6 @@ struct iscsit_conn;
struct iscsi_conn_recovery;
struct iscsit_session;
-extern void iscsit_create_conn_recovery_datain_values(struct iscsit_cmd *, __be32);
-extern void iscsit_create_conn_recovery_dataout_values(struct iscsit_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsit_session *, u16);
extern void iscsit_free_connection_recovery_entries(struct iscsit_session *);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 90b870f234f0..c2ac9a99ebbb 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -8,7 +8,6 @@
*
******************************************************************************/
-#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
@@ -71,46 +70,6 @@ out_login:
return NULL;
}
-/*
- * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
- * per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
- */
-int iscsi_login_setup_crypto(struct iscsit_conn *conn)
-{
- struct crypto_ahash *tfm;
-
- /*
- * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
- * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
- * to software 1x8 byte slicing from crc32c.ko
- */
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- pr_err("crypto_alloc_ahash() failed\n");
- return -ENOMEM;
- }
-
- conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_rx_hash) {
- pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
-
- conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_tx_hash) {
- pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
- ahash_request_free(conn->conn_rx_hash);
- conn->conn_rx_hash = NULL;
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
-
- return 0;
-}
-
static int iscsi_login_check_initiator_version(
struct iscsit_conn *conn,
u8 version_max,
@@ -1165,15 +1124,6 @@ old_sess_out:
iscsit_dec_session_usage_count(conn->sess);
}
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index e8760735486b..03c7d695d58f 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -9,7 +9,6 @@ struct iscsi_login;
struct iscsi_np;
struct sockaddr_storage;
-extern int iscsi_login_setup_crypto(struct iscsit_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index fa3fb5f4e6bc..16e3ded98c32 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1194,14 +1194,7 @@ int iscsi_target_locate_portal(
goto get_target;
sess->sess_ops->SessionType = 1;
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
@@ -1258,17 +1251,7 @@ get_target:
}
conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
- iscsit_put_tiqn_for_login(tiqn);
- conn->tpg = NULL;
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the struct iscsi_portal_group to
* process login attempt.
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 5b90c22ee3dc..1d4e1788e073 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -67,54 +67,6 @@ int iscsi_login_tx_data(
return 0;
}
-void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
-{
- pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
- "CRC32C" : "None");
- pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
- "CRC32C" : "None");
- pr_debug("MaxRecvDataSegmentLength: %u\n",
- conn_ops->MaxRecvDataSegmentLength);
-}
-
-void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
-{
- pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
- pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
- pr_debug("TargetName: %s\n", sess_ops->TargetName);
- pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
- pr_debug("TargetPortalGroupTag: %hu\n",
- sess_ops->TargetPortalGroupTag);
- pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
- pr_debug("InitialR2T: %s\n",
- (sess_ops->InitialR2T) ? "Yes" : "No");
- pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
- "Yes" : "No");
- pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
- pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
- pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
- pr_debug("DefaultTime2Retain: %hu\n",
- sess_ops->DefaultTime2Retain);
- pr_debug("MaxOutstandingR2T: %hu\n",
- sess_ops->MaxOutstandingR2T);
- pr_debug("DataPDUInOrder: %s\n",
- (sess_ops->DataPDUInOrder) ? "Yes" : "No");
- pr_debug("DataSequenceInOrder: %s\n",
- (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
- pr_debug("ErrorRecoveryLevel: %hu\n",
- sess_ops->ErrorRecoveryLevel);
- pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
- "Discovery" : "Normal");
-}
-
-void iscsi_print_params(struct iscsi_param_list *param_list)
-{
- struct iscsi_param *param;
-
- list_for_each_entry(param, &param_list->param_list, p_list)
- pr_debug("%s: %s\n", param->name, param->value);
-}
-
static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
char *name, char *value, u8 phase, u8 scope, u8 sender,
u16 type_range, u8 use)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 00fbbebb8c75..c672a971fcb7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -32,9 +32,6 @@ struct iscsi_sess_ops;
extern int iscsi_login_rx_data(struct iscsit_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsit_conn *, char *, char *, int);
-extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
-extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
-extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index f7bac98fd4fe..bf06cfdfb012 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -200,11 +200,6 @@ static void iscsit_clear_tpg_np_login_threads(
spin_unlock(&tpg->tpg_np_lock);
}
-void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
-{
- iscsi_print_params(tpg->param_list);
-}
-
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index d44d09f2dde9..1155b7b3164a 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -18,7 +18,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
int);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 91a75a4a7cc1..ed2dadb21f75 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -333,50 +333,6 @@ int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
-int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
-{
- struct iscsit_conn *conn = cmd->conn;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct iscsi_data *hdr = (struct iscsi_data *) buf;
- u32 payload_length = ntoh24(hdr->dlength);
-
- if (conn->sess->sess_ops->InitialR2T) {
- pr_err("Received unexpected unsolicited data"
- " while InitialR2T=Yes, protocol error.\n");
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
- return -1;
- }
-
- if ((cmd->first_burst_len + payload_length) >
- conn->sess->sess_ops->FirstBurstLength) {
- pr_err("Total %u bytes exceeds FirstBurstLength: %u"
- " for this Unsolicited DataOut Burst.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
-
- if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
- return 0;
-
- if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
- ((cmd->first_burst_len + payload_length) !=
- conn->sess->sess_ops->FirstBurstLength)) {
- pr_err("Unsolicited non-immediate data received %u"
- " does not equal FirstBurstLength: %u, and does"
- " not equal ExpXferLen %u.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
- return 0;
-}
-
struct iscsit_cmd *iscsit_find_cmd_from_itt(
struct iscsit_conn *conn,
itt_t init_task_tag)
@@ -1252,20 +1208,6 @@ int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_det
return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}
-void iscsit_print_session_params(struct iscsit_session *sess)
-{
- struct iscsit_conn *conn;
-
- pr_debug("-----------------------------[Session Params for"
- " SID: %u]-----------------------------\n", sess->sid);
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
- iscsi_dump_conn_ops(conn->conn_ops);
- spin_unlock_bh(&sess->conn_lock);
-
- iscsi_dump_sess_ops(sess->sess_ops);
-}
-
int rx_data(
struct iscsit_conn *conn,
struct kvec *iov,
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 336da4fb0a77..7ae48a8a5cbf 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -23,7 +23,6 @@ extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsit_cmd *, u32);
extern int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char * ,__be32 cmdsn);
-extern int iscsit_check_unsolicited_dataout(struct iscsit_cmd *, unsigned char *);
extern struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *,
itt_t, u32);
extern struct iscsit_cmd *iscsit_find_cmd_from_ttt(struct iscsit_conn *, u32);
@@ -61,7 +60,6 @@ extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_stru
extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
-extern void iscsit_print_session_params(struct iscsit_session *);
extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 287ac5b0495f..f991cf759836 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -823,7 +823,6 @@ static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct request *req)
{
- struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
@@ -871,12 +870,11 @@ new_bio:
(rw) ? "rw" : "r", nr_vecs);
}
- pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
- rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
- bio, page, bytes, off);
+ rc = bio_add_page(bio, page, bytes, off);
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio_segments(bio), nr_vecs);
if (rc != bytes) {
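On the hunk above: bio_add_pc_page() needed the request queue to enforce its limits, while plain bio_add_page() works on the bio alone and returns the number of bytes it actually accepted. A hedged sketch of the caller-side check (demo_bio_add is a made-up name, not the driver's):

#include <linux/bio.h>

/* Minimal sketch: a short return from bio_add_page() means the bio is
 * full and the caller has to submit it and start a new one. */
static int demo_bio_add(struct bio *bio, struct page *page,
			unsigned int len, unsigned int off)
{
	if (bio_add_page(bio, page, len, off) != len)
		return -ENOMEM;	/* bio full; allocate a new bio */
	return 0;
}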
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index c42cbde8a31b..210648a0092e 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -117,9 +117,9 @@ static ssize_t target_stat_tgt_status_show(struct config_item *item,
char *page)
{
if (to_stat_tgt_dev(item)->export_count)
- return snprintf(page, PAGE_SIZE, "activated");
+ return snprintf(page, PAGE_SIZE, "activated\n");
else
- return snprintf(page, PAGE_SIZE, "deactivated");
+ return snprintf(page, PAGE_SIZE, "deactivated\n");
}
static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
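A note on the newline fix above: attribute reads are conventionally newline-terminated so that `cat` output ends cleanly. For sysfs attributes the preferred modern helper is sysfs_emit(), which also enforces the PAGE_SIZE bound; this file uses configfs, so the sketch below is an analogy with made-up names, not a drop-in replacement:

#include <linux/device.h>
#include <linux/sysfs.h>

static bool demo_active;	/* hypothetical state flag */

static ssize_t demo_status_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	/* sysfs_emit() caps the output at PAGE_SIZE and keeps the newline. */
	return sysfs_emit(page, "%s\n",
			  demo_active ? "activated" : "deactivated");
}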
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 3e33cf2af73b..f0c3ac1103bb 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1272,8 +1272,9 @@ static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
&res.smccc);
if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
+ pr_info("revision %lu.%lu (%0*lx)", res.result.major,
+ res.result.minor, (int)sizeof(res.result.build_id) * 2,
+ res.result.build_id);
else
pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 322a543b8c27..d0f397c90242 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
- bool interruptable;
u32 ret;
/*
@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/*
* Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(&req->c) successfully we have
- * exclusive access again.
+ * exclusive access again. Allow the wait to be killable so that it
+ * does not block indefinitely if the supplicant hangs for some
+ * reason.
*/
- while (wait_for_completion_interruptible(&req->c)) {
+ if (wait_for_completion_killable(&req->c)) {
mutex_lock(&supp->mutex);
- interruptable = !supp->ctx;
- if (interruptable) {
- /*
- * There's no supplicant available and since the
- * supp->mutex currently is held none can
- * become available until the mutex released
- * again.
- *
- * Interrupting an RPC to supplicant is only
- * allowed as a way of slightly improving the user
- * experience in case the supplicant hasn't been
- * started yet. During normal operation the supplicant
- * will serve all requests in a timely manner and
- * interrupting then wouldn't make sense.
- */
- if (req->in_queue) {
- list_del(&req->link);
- req->in_queue = false;
- }
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
}
mutex_unlock(&supp->mutex);
-
- if (interruptable) {
- req->ret = TEEC_ERROR_COMMUNICATION;
- break;
- }
+ req->ret = TEEC_ERROR_COMMUNICATION;
}
ret = req->ret;
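To make the simplified control flow explicit, here is a self-contained sketch of the killable-wait pattern; demo_req and demo_wait are illustrative names, not optee's:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_req {
	struct list_head link;
	struct completion c;
	bool in_queue;
	u32 ret;
};

static u32 demo_wait(struct mutex *lock, struct demo_req *req)
{
	/*
	 * A killable wait ignores ordinary signals but still lets
	 * SIGKILL terminate the task, so a hung supplicant cannot
	 * leave the caller sleeping with no way out.
	 */
	if (wait_for_completion_killable(&req->c)) {
		mutex_lock(lock);
		if (req->in_queue) {	/* never picked up; dequeue it */
			list_del(&req->link);
			req->in_queue = false;
		}
		mutex_unlock(lock);
		req->ret = 0xFFFF000E;	/* TEEC_ERROR_COMMUNICATION */
	}
	return req->ret;
}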
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 280071be30b1..6b7ab1814c12 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -57,8 +57,6 @@ struct time_in_idle {
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @em: Reference on the Energy Model of the device
- * @cdev: thermal_cooling_device pointer to keep track of the
- * registered cooling device.
* @policy: cpufreq policy.
* @cooling_ops: cpufreq callbacks to thermal cooling device ops
* @idle_time: idle time stats
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 97f3d819852b..51951967d67f 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -7,6 +7,27 @@
* Based on step_wise.c with following Copyrights:
* Copyright (C) 2012 Intel Corp
* Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
+ *
+ * Regulation Logic: a two-point regulation that delivers a cooling state
+ * depending on the previous state, as shown in this diagram:
+ *
+ * Fan: OFF ON
+ *
+ * |
+ * |
+ * trip_temp: +---->+
+ * | | ^
+ * | | |
+ * | | Temperature
+ * (trip_temp - hyst): +<----+
+ * |
+ * |
+ * |
+ *
+ * * If the fan is not running and temperature exceeds trip_temp, the fan
+ * gets turned on.
+ * * In case the fan is running, temperature must fall below
+ * (trip_temp - hyst) so that the fan gets turned off again.
*/
#include <linux/thermal.h>
@@ -34,36 +55,14 @@ static void bang_bang_set_instance_target(struct thermal_instance *instance,
}
/**
- * bang_bang_control - controls devices associated with the given zone
+ * bang_bang_trip_crossed - controls devices associated with the given zone
* @tz: thermal_zone_device
* @trip: the trip point
- * @crossed_up: whether or not the trip has been crossed on the way up
- *
- * Regulation Logic: a two point regulation, deliver cooling state depending
- * on the previous state shown in this diagram:
- *
- * Fan: OFF ON
- *
- * |
- * |
- * trip_temp: +---->+
- * | | ^
- * | | |
- * | | Temperature
- * (trip_temp - hyst): +<----+
- * |
- * |
- * |
- *
- * * If the fan is not running and temperature exceeds trip_temp, the fan
- * gets turned on.
- * * In case the fan is running, temperature must fall below
- * (trip_temp - hyst) so that the fan gets turned off again.
- *
+ * @upward: whether or not the trip has been crossed on the way up
*/
-static void bang_bang_control(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- bool crossed_up)
+static void bang_bang_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool upward)
{
const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
struct thermal_instance *instance;
@@ -75,7 +74,7 @@ static void bang_bang_control(struct thermal_zone_device *tz,
tz->temperature, trip->hysteresis);
list_for_each_entry(instance, &td->thermal_instances, trip_node)
- bang_bang_set_instance_target(instance, crossed_up);
+ bang_bang_set_instance_target(instance, upward);
}
static void bang_bang_manage(struct thermal_zone_device *tz)
@@ -123,7 +122,7 @@ static void bang_bang_update_tz(struct thermal_zone_device *tz,
static struct thermal_governor thermal_gov_bang_bang = {
.name = "bang_bang",
- .trip_crossed = bang_bang_control,
+ .trip_crossed = bang_bang_trip_crossed,
.manage = bang_bang_manage,
.update_tz = bang_bang_update_tz,
};
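The diagram moved into the header comment boils down to a small hysteresis predicate. A minimal sketch of the two-point decision (not the governor's actual code):

#include <linux/types.h>

/* Returns true when the fan should run (or keep running). */
static bool demo_fan_should_run(bool running, int temp, int trip_temp,
				int hyst)
{
	if (!running)
		return temp >= trip_temp;	/* turn on above the trip */
	return temp > trip_temp - hyst;		/* stay on until below trip - hyst */
}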
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 3b644de3292e..0d9f636c80f4 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
for (i = 0; i < num_actors; i++) {
struct power_actor *pa = &power[i];
- u64 req_range = (u64)pa->req_power * power_range;
+ u64 req_range = (u64)pa->weighted_req_power * power_range;
pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
@@ -641,6 +641,22 @@ clean_state:
return ret;
}
+static void power_allocator_update_weight(struct power_allocator_params *params)
+{
+ const struct thermal_trip_desc *td;
+ struct thermal_instance *instance;
+
+ if (!params->trip_max)
+ return;
+
+ td = trip_to_trip_desc(params->trip_max);
+
+ params->total_weight = 0;
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
+ params->total_weight += instance->weight;
+}
+
static void power_allocator_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason)
{
@@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
if (power_actor_is_valid(instance))
num_actors++;
- if (num_actors == params->num_actors)
- return;
+ if (num_actors != params->num_actors)
+ allocate_actors_buffer(params, num_actors);
- allocate_actors_buffer(params, num_actors);
- break;
+ fallthrough;
case THERMAL_INSTANCE_WEIGHT_CHANGED:
- params->total_weight = 0;
- list_for_each_entry(instance, &td->thermal_instances, trip_node)
- if (power_actor_is_valid(instance))
- params->total_weight += instance->weight;
+ power_allocator_update_weight(params);
break;
default:
break;
@@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
tz->governor_data = params;
+ power_allocator_update_weight(params);
+
return 0;
free_params:
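On the divvy_up_power() fix above: the divisor, total_req_power, is a sum of weighted requests, so the numerator must use the weighted request too; otherwise actors with non-default weights receive a skewed share. A hedged sketch of the proportional split, assuming total_weighted_req sums the same weighted values used per actor:

#include <linux/math.h>
#include <linux/types.h>

static u32 demo_granted_power(u64 weighted_req, u64 total_weighted_req,
			      u32 power_range)
{
	if (!total_weighted_req)
		return 0;
	/* grant = (weighted_req / total_weighted_req) of the range */
	return DIV_ROUND_CLOSEST_ULL(weighted_req * power_range,
				     total_weighted_req);
}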
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 75137b419eb2..ef95cf7d65ef 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -23,16 +23,16 @@ static int user_space_bind(struct thermal_zone_device *tz)
}
/**
- * notify_user_space - Notifies user space about thermal events
+ * user_space_trip_crossed - Notify user space about trip crossing events
* @tz: thermal_zone_device
* @trip: trip point
- * @crossed_up: whether or not the trip has been crossed on the way up
+ * @upward: whether or not the trip has been crossed on the way up
*
* This function notifies the user space through UEvents.
*/
-static void notify_user_space(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- bool crossed_up)
+static void user_space_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool upward)
{
char *thermal_prop[5];
int i;
@@ -52,7 +52,7 @@ static void notify_user_space(struct thermal_zone_device *tz,
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
- .trip_crossed = notify_user_space,
+ .trip_crossed = user_space_trip_crossed,
.bind_to_tz = user_space_bind,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index a31f2f32996a..e0268fac7093 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -21,8 +21,8 @@ config INTEL_TCC
config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
- depends on X86_THERMAL_VECTOR
- select THERMAL_GOV_USER_SPACE
+ depends on X86_THERMAL_VECTOR && NET
+ select THERMAL_NETLINK
select INTEL_TCC
default m
help
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index e76b13e44d03..4c699f0896b5 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -5,8 +5,8 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86_64 && ACPI && PCI
- select THERMAL_GOV_USER_SPACE
+ depends on X86_64 && ACPI && PCI && NET
+ select THERMAL_NETLINK
select ACPI_THERMAL_REL
select ACPI_FAN
select ACPI_THERMAL_LIB
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 8660ef2175be..0e07693ecf59 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -521,7 +521,6 @@ static struct thermal_zone_device_ops int3400_thermal_ops = {
};
static struct thermal_zone_params int3400_thermal_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
@@ -690,6 +689,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INTC1042", 0},
{"INTC1068", 0},
{"INTC10A0", 0},
+ {"INTC10D4", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 04aa0afb3b1d..5a925a8df7b3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -275,6 +275,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INTC1062", 0},
{"INTC1069", 0},
{"INTC10A1", 0},
+ {"INTC10D5", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 31ed338eb83c..8dca6a6aceca 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -105,7 +105,6 @@ static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
}
static struct thermal_zone_params int340x_thermal_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index d5eca6db2c00..ba2d89d3024c 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -30,6 +30,7 @@
#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
+#define PCI_DEVICE_ID_INTEL_PTL_THERMAL 0xB01D
struct power_config {
u32 index;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 145d471546d5..a55aaa8cef42 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -272,7 +272,6 @@ static const struct thermal_zone_device_ops tzone_ops = {
};
static struct thermal_zone_params tzone_params = {
- .governor_name = "user_space",
.no_hwmon = true,
};
@@ -495,6 +494,9 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT) },
{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
+ { PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
+ PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_MSI_SUPPORT |
+ PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ },
};
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 817421508d5c..b2a615aea7c1 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -106,7 +106,7 @@ static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 19a3894ad752..2328ac0d8561 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -453,23 +453,23 @@ static void move_to_trips_invalid(struct thermal_zone_device *tz,
static void thermal_governor_trip_crossed(struct thermal_governor *governor,
struct thermal_zone_device *tz,
const struct thermal_trip *trip,
- bool crossed_up)
+ bool upward)
{
if (trip->type == THERMAL_TRIP_HOT || trip->type == THERMAL_TRIP_CRITICAL)
return;
if (governor->trip_crossed)
- governor->trip_crossed(tz, trip, crossed_up);
+ governor->trip_crossed(tz, trip, upward);
}
static void thermal_trip_crossed(struct thermal_zone_device *tz,
struct thermal_trip_desc *td,
struct thermal_governor *governor,
- bool crossed_up)
+ bool upward)
{
const struct thermal_trip *trip = &td->trip;
- if (crossed_up) {
+ if (upward) {
if (trip->type == THERMAL_TRIP_PASSIVE)
tz->passive++;
else if (trip->type == THERMAL_TRIP_CRITICAL ||
@@ -486,7 +486,7 @@ static void thermal_trip_crossed(struct thermal_zone_device *tz,
thermal_notify_tz_trip_down(tz, trip);
thermal_debug_tz_trip_down(tz, trip);
}
- thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
+ thermal_governor_trip_crossed(governor, tz, trip, upward);
}
void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index be271e7c8f41..09866f0ce765 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -56,7 +56,7 @@ struct thermal_governor {
void (*unbind_from_tz)(struct thermal_zone_device *tz);
void (*trip_crossed)(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
- bool crossed_up);
+ bool upward);
void (*manage)(struct thermal_zone_device *tz);
void (*update_tz)(struct thermal_zone_device *tz,
enum thermal_notify_event reason);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index fab11b98ca49..5401f03d6b6c 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -160,6 +160,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
return ERR_PTR(ret);
}
+ of_node_put(sensor_specs.np);
if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
sensor_specs.args[0] : 0)) {
pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
@@ -273,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
return true;
}
+static bool thermal_of_cm_lookup(struct device_node *cm_np,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
+{
+ for_each_child_of_node_scoped(cm_np, child) {
+ struct device_node *tr_np;
+ int count, i;
+
+ tr_np = of_parse_phandle(child, "trip", 0);
+ if (tr_np != trip->priv)
+ continue;
+
+ /* The trip has been found, look up the cdev. */
+ count = of_count_phandle_with_args(child, "cooling-device",
+ "#cooling-cells");
+ if (count <= 0)
+ pr_err("Add a cooling_device property with at least one device\n");
+
+ for (i = 0; i < count; i++) {
+ if (thermal_of_get_cooling_spec(child, i, cdev, c))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool thermal_of_should_bind(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev,
@@ -292,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
goto out;
/* Look up the trip and the cdev in the cooling maps. */
- for_each_child_of_node_scoped(cm_np, child) {
- struct device_node *tr_np;
- int count, i;
-
- tr_np = of_parse_phandle(child, "trip", 0);
- if (tr_np != trip->priv)
- continue;
-
- /* The trip has been found, look up the cdev. */
- count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
- if (count <= 0)
- pr_err("Add a cooling_device property with at least one device\n");
-
- for (i = 0; i < count; i++) {
- result = thermal_of_get_cooling_spec(child, i, cdev, c);
- if (result)
- break;
- }
-
- break;
- }
+ result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
of_node_put(cm_np);
out:
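Two changes land in this file: a device-node refcount leak fix (the added of_node_put() on sensor_specs.np) and the cooling-map walk factored out into thermal_of_cm_lookup(). The refcount rule behind the first: of_parse_phandle_with_args() returns the node with an elevated refcount that must be dropped on every iteration once the comparison is done. A hedged sketch with hypothetical names:

#include <linux/of.h>

static bool demo_sensor_matches(struct device_node *zone_np,
				struct device_node *sensor)
{
	struct of_phandle_args specs;
	bool match;

	if (of_parse_phandle_with_args(zone_np, "thermal-sensors",
				       "#thermal-sensor-cells", 0, &specs))
		return false;

	match = (specs.np == sensor);
	of_node_put(specs.np);	/* the parser took a reference; drop it */
	return match;
}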
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 4bdb2d45e0bf..dc1f456736dc 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -70,6 +70,9 @@ struct tb_ctl {
#define tb_ctl_dbg(ctl, format, arg...) \
dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+#define tb_ctl_dbg_once(ctl, format, arg...) \
+ dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
+
static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
@@ -265,7 +268,7 @@ static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
return res;
}
-static void tb_cfg_print_error(struct tb_ctl *ctl,
+static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
@@ -279,8 +282,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
- tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
- res->response_route, res->response_port);
+ tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
+ res->response_route, res->response_port, space);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
@@ -1072,7 +1075,7 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
return -ENODEV;
- tb_cfg_print_error(ctl, res);
+ tb_cfg_print_error(ctl, space, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index bf930a191472..7e08ca8f0895 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -140,5 +140,4 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
-
#endif
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index a1d0d8a33f20..f8328ca7e22e 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -168,6 +168,13 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
+ * For Path configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset in_hop_id value\n
+ * v[0] v[1] v[2] v[3]
+ *
* For Counter configuration space:
* Short format is: offset\n
* v[0]
@@ -191,14 +198,33 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+/*
+ * Path registers need to be written in double word pairs, and both words must
+ * be read back before being written. This writes one double word in path
+ * config space following the spec flow.
+ */
+static int path_write_one(struct tb_port *port, u32 val, u32 offset)
+{
+ u32 index = offset % PATH_LEN;
+ u32 offs = offset - index;
+ u32 data[PATH_LEN];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+ if (ret)
+ return ret;
+ data[index] = val;
+ return tb_port_write(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+}
+
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+ enum tb_cfg_space space, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
+ int long_fmt_len, ret = 0;
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
- int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
@@ -214,12 +240,21 @@ static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ if (space == TB_CFG_HOPS)
+ long_fmt_len = 4;
+ else
+ long_fmt_len = 5;
+
line = buf;
- while (parse_line(&line, &offset, &val, 2, 5)) {
- if (port)
- ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
- else
+ while (parse_line(&line, &offset, &val, 2, long_fmt_len)) {
+ if (port) {
+ if (space == TB_CFG_HOPS)
+ ret = path_write_one(port, val, offset);
+ else
+ ret = tb_port_write(port, &val, space, offset, 1);
+ } else {
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ }
if (ret)
break;
}
@@ -240,7 +275,16 @@ static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
- return regs_write(port->sw, port, user_buf, count, ppos);
+ return regs_write(port->sw, port, TB_CFG_PORT, user_buf, count, ppos);
+}
+
+static ssize_t path_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, TB_CFG_HOPS, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
@@ -249,7 +293,7 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
- return regs_write(sw, NULL, user_buf, count, ppos);
+ return regs_write(sw, NULL, TB_CFG_SWITCH, user_buf, count, ppos);
}
static bool parse_sb_line(char **line, u8 *reg, u8 *data, size_t data_size,
@@ -401,6 +445,7 @@ out:
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
+#define path_write NULL
#define switch_regs_write NULL
#define port_sb_regs_write NULL
#define retimer_sb_regs_write NULL
@@ -2243,7 +2288,7 @@ out_rpm_put:
return ret;
}
-DEBUGFS_ATTR_RO(path);
+DEBUGFS_ATTR_RW(path);
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
@@ -2368,6 +2413,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
+ if (sw->drom)
+ debugfs_create_blob("drom", 0400, debugfs_dir, &sw->drom_blob);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index eb241b270f79..9c1d65d26553 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -435,6 +435,29 @@ static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
return 0;
}
+static int tb_switch_drom_alloc(struct tb_switch *sw, size_t size)
+{
+ sw->drom = kzalloc(size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = sw->drom;
+ sw->drom_blob.size = size;
+#endif
+ return 0;
+}
+
+static void tb_switch_drom_free(struct tb_switch *sw)
+{
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = NULL;
+ sw->drom_blob.size = 0;
+#endif
+ kfree(sw->drom);
+ sw->drom = NULL;
+}
+
/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
@@ -447,9 +470,9 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
- sw->drom = kmalloc(len, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ res = tb_switch_drom_alloc(sw, len);
+ if (res)
+ return res;
res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
len);
@@ -464,8 +487,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
+ tb_switch_drom_free(sw);
return -EINVAL;
}
@@ -491,13 +513,15 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
- if (ret)
- goto err_free;
+ if (ret) {
+ tb_switch_drom_free(sw);
+ return ret;
+ }
/*
* Read UID from the minimal DROM because the one in NVM is just
@@ -505,11 +529,6 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
-
-err_free:
- kfree(sw->drom);
- sw->drom = NULL;
- return ret;
}
static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
@@ -522,15 +541,13 @@ static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
- if (ret) {
- kfree(sw->drom);
- sw->drom = NULL;
- }
+ if (ret)
+ tb_switch_drom_free(sw);
return ret;
}
@@ -552,19 +569,14 @@ static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
return -EIO;
}
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
if (ret)
- goto err;
-
- return 0;
+ tb_switch_drom_free(sw);
-err:
- kfree(sw->drom);
- sw->drom = NULL;
return ret;
}
@@ -646,9 +658,7 @@ static int tb_drom_parse(struct tb_switch *sw, u16 size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
-
+ tb_switch_drom_free(sw);
return ret;
}
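The helpers introduced above tie the DROM buffer's lifetime to its debugfs blob wrapper so the two can never go out of sync; debugfs_create_blob() in debugfs.c then exposes it read-only. A hedged sketch of the same pattern with made-up names:

#include <linux/debugfs.h>
#include <linux/slab.h>

struct demo_dev {
	void *buf;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper blob;	/* wraps buf for debugfs */
#endif
};

static int demo_buf_alloc(struct demo_dev *d, size_t size)
{
	d->buf = kzalloc(size, GFP_KERNEL);
	if (!d->buf)
		return -ENOMEM;
#ifdef CONFIG_DEBUG_FS
	d->blob.data = d->buf;
	d->blob.size = size;
#endif
	return 0;
}

static void demo_buf_free(struct demo_dev *d)
{
#ifdef CONFIG_DEBUG_FS
	d->blob.data = NULL;	/* clear before freeing the backing buffer */
	d->blob.size = 0;
#endif
	kfree(d->buf);
	d->buf = NULL;
}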
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index f760e54cd9bd..e1a5f6e3d0b6 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -581,10 +581,10 @@ int tb_path_activate(struct tb_path *path)
}
}
path->activated = true;
- tb_dbg(path->tb, "path activation complete\n");
+ tb_dbg(path->tb, "%s path activation complete\n", path->name);
return 0;
err:
- tb_WARN(path->tb, "path activation failed\n");
+ tb_WARN(path->tb, "%s path activation failed\n", path->name);
return res;
}
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index eeb64433ebbc..1f25529fe05d 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -472,7 +472,7 @@ struct tb_retimer_lookup {
u8 index;
};
-static int retimer_match(struct device *dev, void *data)
+static int retimer_match(struct device *dev, const void *data)
{
const struct tb_retimer_lookup *lookup = data;
struct tb_retimer *rt = tb_to_retimer(dev);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index a7c6919fbf97..390abcfe7188 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -20,6 +20,12 @@
#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
+ * How many times a bandwidth allocation request from the graphics
+ * driver is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES 3
+
+/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
* direction. This is 40G - 10% guard band bandwidth.
*/
@@ -69,14 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
}
struct tb_hotplug_event {
- struct work_struct work;
+ struct delayed_work work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
+ int retry;
};
+static void tb_scan_port(struct tb_port *port);
static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
@@ -90,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
ev->route = route;
ev->port = port;
ev->unplug = unplug;
- INIT_WORK(&ev->work, tb_handle_hotplug);
- queue_work(tb->wq, &ev->work);
+ INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+ queue_delayed_work(tb->wq, &ev->work, 0);
}
/* enumeration & hot plug handling */
@@ -961,7 +973,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim:
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -1238,8 +1250,6 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
tb_switch_configure_link(sw);
}
-static void tb_scan_port(struct tb_port *port);
-
/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
@@ -1727,7 +1737,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
break;
}
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/*
@@ -1864,12 +1874,76 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
+{
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb *tb = data;
+
+ mutex_lock(&tb->lock);
+ if (tb_tunnel_is_active(tunnel)) {
+ int consumed_up, consumed_down, ret;
+
+ tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
+
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+ &consumed_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to read consumed bandwidth, tearing down\n");
+ tb_deactivate_and_free_tunnel(tunnel);
+ } else {
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the
+ * consumption exceeds the threshold.
+ */
+ tb_configure_asym(tb, in, out, consumed_up,
+ consumed_down);
+ /*
+ * Update the domain with the new bandwidth
+ * estimation.
+ */
+ tb_recalc_estimated_bandwidth(tb);
+ /*
+ * In case a DP tunnel exists, change the host
+ * router's first-level children's TMU mode to
+ * HiFi for CL0s to work.
+ */
+ tb_increase_tmu_accuracy(tunnel);
+ }
+ } else {
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * This tunnel failed to establish. This means DPRX
+ * negotiation most likely did not complete, which
+ * happens either because there is no graphics driver
+ * loaded or because not all DP cables were connected
+ * to the discrete router.
+ *
+ * In both cases we remove the DP IN adapter from the
+ * available resources as it is not usable. This will
+ * also tear down the tunnel and try to re-use the
+ * released DP OUT.
+ *
+ * It will be added back only if there is hotplug for
+ * the DP IN again.
+ */
+ tb_tunnel_warn(tunnel, "not active, tearing down\n");
+ tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
+ }
+ mutex_unlock(&tb->lock);
+
+ tb_domain_put(tb);
+}
+
+static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
@@ -1921,47 +1995,29 @@ static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
- available_down);
+ available_down, tb_dp_tunnel_active,
+ tb_domain_get(tb));
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim_usb;
}
- if (tb_tunnel_activate(tunnel)) {
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+ ret = tb_tunnel_activate(tunnel);
+ if (ret && ret != -EINPROGRESS) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ list_del(&tunnel->list);
goto err_free;
}
- /* If fail reading tunnel's consumed bandwidth, tear it down */
- ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
- if (ret)
- goto err_deactivate;
-
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
-
- tb_reclaim_usb3_bandwidth(tb, in, out);
- /*
- * Transition the links to asymmetric if the consumption exceeds
- * the threshold.
- */
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
- /* Update the domain with the new bandwidth estimation */
- tb_recalc_estimated_bandwidth(tb);
-
- /*
- * In case of DP tunnel exists, change host router's 1st children
- * TMU mode to HiFi for CL0s to work.
- */
- tb_increase_tmu_accuracy(tunnel);
- return true;
+ return;
-err_deactivate:
- tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_domain_put(tb);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
@@ -1971,8 +2027,6 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
-
- return false;
}
static void tb_tunnel_dp(struct tb *tb)
@@ -2090,17 +2144,18 @@ static void tb_switch_exit_redrive(struct tb_switch *sw)
}
}
-static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason)
{
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
if (tb_port_is_dpin(port)) {
- tb_port_dbg(port, "DP IN resource unavailable\n");
+ tb_port_dbg(port, "DP IN resource unavailable: %s\n", reason);
in = port;
out = NULL;
} else {
- tb_port_dbg(port, "DP OUT resource unavailable\n");
+ tb_port_dbg(port, "DP OUT resource unavailable: %s\n", reason);
in = NULL;
out = port;
}
@@ -2182,7 +2237,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return 0;
}
@@ -2212,7 +2267,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"PCIe tunnel activation failed, aborting\n");
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return -EIO;
}
@@ -2271,7 +2326,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
@@ -2334,7 +2389,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
*/
static void tb_handle_hotplug(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
@@ -2406,7 +2461,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
- tb_dp_resource_unavailable(tb, port);
+ tb_dp_resource_unavailable(tb, port, "adapter unplug");
} else if (!port->port) {
tb_sw_dbg(sw, "xHCI disconnect request\n");
tb_switch_xhci_disconnect(sw);
@@ -2639,7 +2694,7 @@ fail:
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
int requested_bw, requested_up, requested_down, ret;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
@@ -2666,7 +2721,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
goto put_sw;
}
- tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
@@ -2719,12 +2774,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
- if (ret == -ENOBUFS)
+ if (ret == -ENOBUFS) {
tb_tunnel_warn(tunnel,
"not enough bandwidth available\n");
- else
+ } else if (ret == -ENOTCONN) {
+ tb_tunnel_dbg(tunnel, "not active yet\n");
+ /*
+ * We got bandwidth allocation request but the
+ * tunnel is not yet active. This means that
+ * tb_dp_tunnel_active() is not yet called for
+ * this tunnel. Allow it some time and retry
+ * this request a couple of times.
+ */
+ if (ev->retry < TB_BW_ALLOC_RETRIES) {
+ tb_tunnel_dbg(tunnel,
+ "retrying bandwidth allocation request\n");
+ tb_queue_dp_bandwidth_request(tb, ev->route,
+ ev->port,
+ ev->retry + 1,
+ msecs_to_jiffies(50));
+ } else {
+ tb_tunnel_dbg(tunnel,
+ "ran out of retries, failing the request\n");
+ }
+ } else {
tb_tunnel_warn(tunnel,
"failed to change bandwidth allocation\n");
+ }
} else {
tb_tunnel_dbg(tunnel,
"bandwidth allocation changed to %d/%d Mb/s\n",
@@ -2745,7 +2821,8 @@ unlock:
kfree(ev);
}
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay)
{
struct tb_hotplug_event *ev;
@@ -2756,8 +2833,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
ev->tb = tb;
ev->route = route;
ev->port = port;
- INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
- queue_work(tb->wq, &ev->work);
+ ev->retry = retry;
+ INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_delayed_work(tb->wq, &ev->work, delay);
}
static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2777,7 +2855,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
- tb_queue_dp_bandwidth_request(tb, route, error->port);
+ tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
break;
default:
@@ -2832,7 +2910,7 @@ static void tb_stop(struct tb *tb)
*/
if (tb_tunnel_is_dma(tunnel))
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
@@ -3028,7 +3106,7 @@ static int tb_resume_noirq(struct tb *tb)
if (tb_tunnel_is_usb3(tunnel))
usb3_delay = 500;
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/* Re-create our tunnels now */
@@ -3039,7 +3117,7 @@ static int tb_resume_noirq(struct tb *tb)
/* Only need to do it once */
usb3_delay = 0;
}
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
}
if (!list_empty(&tcm->tunnel_list)) {
/*
@@ -3149,7 +3227,7 @@ static int tb_runtime_resume(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
tb_switch_enter_redrive(tb->root_switch);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
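The switch of struct tb_hotplug_event from work_struct to delayed_work is what makes the bounded retry above possible: a request that arrives before tb_dp_tunnel_active() runs is simply requeued with a small delay. A self-contained sketch of that pattern (the demo_* names are illustrative):

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#define DEMO_RETRIES	3

struct demo_event {
	struct delayed_work work;
	int retry;
};

static bool demo_try(struct demo_event *ev)
{
	/* Stand-in for the real attempt, e.g. bandwidth allocation. */
	return ev->retry >= 2;	/* pretend it succeeds on the third try */
}

static void demo_handle(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, typeof(*ev), work.work);

	if (demo_try(ev) || ev->retry >= DEMO_RETRIES) {
		kfree(ev);	/* done, or out of retries */
		return;
	}
	/* Not ready yet: requeue the same event with a small delay. */
	ev->retry++;
	queue_delayed_work(system_wq, &ev->work, msecs_to_jiffies(50));
}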
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ddbf0cd78377..b54147a1ba87 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,6 +9,7 @@
#ifndef TB_H_
#define TB_H_
+#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
@@ -160,6 +161,7 @@ struct tb_switch_tmu {
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
* @clx: CLx states on the upstream link of the router
+ * @drom_blob: DROM debugfs blob wrapper
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -212,6 +214,9 @@ struct tb_switch {
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
unsigned int clx;
+#ifdef CONFIG_DEBUG_FS
+ struct debugfs_blob_wrapper drom_blob;
+#endif
};
/**
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 9475c6698c7d..1f4318249c22 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1382,8 +1382,8 @@ static void tb_test_tunnel_pcie(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_dp(struct kunit *test)
@@ -1406,7 +1406,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1421,7 +1421,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_chain(struct kunit *test)
@@ -1452,7 +1452,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1467,7 +1467,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
@@ -1502,7 +1502,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1517,7 +1517,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
@@ -1567,7 +1567,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1597,7 +1597,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
@@ -1637,7 +1637,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
- tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
+ tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
@@ -1645,7 +1645,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
- tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
+ tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
@@ -1653,7 +1653,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
- tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
+ tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
@@ -1661,8 +1661,8 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
@@ -1716,8 +1716,8 @@ static void tb_test_tunnel_usb3(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_port_on_path(struct kunit *test)
@@ -1750,7 +1750,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1783,7 +1783,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
port = &dev4->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
- tb_tunnel_free(dp_tunnel);
+ tb_tunnel_put(dp_tunnel);
}
static void tb_test_tunnel_dma(struct kunit *test)
@@ -1826,7 +1826,7 @@ static void tb_test_tunnel_dma(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
@@ -1863,7 +1863,7 @@ static void tb_test_tunnel_dma_rx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
@@ -1900,7 +1900,7 @@ static void tb_test_tunnel_dma_tx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
@@ -1966,7 +1966,7 @@ static void tb_test_tunnel_dma_chain(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
@@ -1993,7 +1993,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2005,7 +2005,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2017,7 +2017,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
@@ -2050,7 +2050,7 @@ static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
@@ -2083,7 +2083,7 @@ static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_pcie(struct kunit *test)
@@ -2116,7 +2116,7 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
@@ -2166,7 +2166,7 @@ static void tb_test_credit_alloc_without_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test)
@@ -2182,7 +2182,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2210,7 +2210,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
@@ -2243,7 +2243,7 @@ static void tb_test_credit_alloc_usb3(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
@@ -2279,7 +2279,7 @@ static void tb_test_credit_alloc_dma(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
@@ -2356,7 +2356,7 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
* Release the first DMA tunnel. That should make 14 buffers
* available for the next tunnel.
*/
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel1);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
@@ -2375,8 +2375,8 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel3);
- tb_tunnel_free(tunnel2);
+ tb_tunnel_put(tunnel3);
+ tb_tunnel_put(tunnel2);
}
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
@@ -2418,7 +2418,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2455,7 +2455,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2595,12 +2595,12 @@ static void tb_test_credit_alloc_all(struct kunit *test)
dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
- tb_tunnel_free(dma_tunnel2);
- tb_tunnel_free(dma_tunnel1);
- tb_tunnel_free(usb3_tunnel);
- tb_tunnel_free(dp_tunnel2);
- tb_tunnel_free(dp_tunnel1);
- tb_tunnel_free(pcie_tunnel);
+ tb_tunnel_put(dma_tunnel2);
+ tb_tunnel_put(dma_tunnel1);
+ tb_tunnel_put(usb3_tunnel);
+ tb_tunnel_put(dp_tunnel2);
+ tb_tunnel_put(dp_tunnel1);
+ tb_tunnel_put(pcie_tunnel);
}
static const u32 root_directory[] = {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 41cf6378ad25..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+/*
+ * According to the VESA spec, the DPRX negotiation shall complete
+ * within 5 seconds after the tunnel is established. Since at least
+ * i915 can runtime suspend if there is nothing connected, and polls
+ * for new connections every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT 12000
+#define TB_DPRX_WAIT_TIMEOUT 25
+#define TB_DPRX_POLL_DELAY 50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+ "DPRX capability read timeout in ms, -1 waits forever (default: "
+ __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
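A quick usage sketch for the dprx_timeout parameter added above (deployment details are an assumption, not part of the patch): the 0444 permissions make the parameter read-only at runtime, so it has to be set at load time, e.g. thunderbolt.dprx_timeout=-1 on the kernel command line to wait forever, or modprobe thunderbolt dprx_timeout=30000 to stretch the poll budget to 30 seconds.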
@@ -82,6 +100,9 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -155,7 +176,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
- tb_tunnel_free(tunnel);
+ kfree(tunnel);
return NULL;
}
@@ -163,10 +184,42 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
+ kref_init(&tunnel->kref);
return tunnel;
}
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_get(&tunnel->kref);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+ struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ int i;
+
+ if (tunnel->destroy)
+ tunnel->destroy(tunnel);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_put(&tunnel->kref, tb_tunnel_destroy);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
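A minimal sketch of the reference lifecycle these helpers establish (illustrative only; the allocator and its credit arguments are placeholders):

	struct tb_tunnel *t = tb_tunnel_alloc_dma(tb, nhi, dst, 1, 1, 1, 1);
					/* refcount == 1 via kref_init() */
	tb_tunnel_get(t);	/* async user, e.g. the DPRX poll work */
	/* ... the delayed work may now dereference t safely ... */
	tb_tunnel_put(t);	/* async user done, refcount back to 1 */
	tb_tunnel_put(t);	/* owner's final put -> tb_tunnel_destroy() */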
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
@@ -355,7 +408,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -404,7 +457,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -851,7 +904,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
return 0;
}
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
@@ -877,7 +930,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
@@ -889,6 +942,95 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
}
}
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+ return timeout_msec >= 0 ?
+ ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+ ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should already be set for
+ * an active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE)
+ return 0;
+
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+ struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+ struct tb *tb = tunnel->tb;
+
+ if (!tunnel->dprx_canceled) {
+ mutex_lock(&tb->lock);
+ if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+ tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+ if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+ queue_delayed_work(tb->wq, &tunnel->dprx_work,
+ msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+ mutex_unlock(&tb->lock);
+ return;
+ }
+ } else {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ }
+ mutex_unlock(&tb->lock);
+ }
+
+ if (tunnel->callback)
+ tunnel->callback(tunnel, tunnel->callback_data);
+}
+
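Back-of-the-envelope cadence for the poll above, assuming the default constants: each pass blocks for up to TB_DPRX_WAIT_TIMEOUT (25 ms) inside tb_dp_wait_dprx() and then re-arms after TB_DPRX_POLL_DELAY (50 ms), so the 12 s dprx_timeout budget allows roughly 12000 / (25 + 50) ≈ 160 attempts before the work gives up and invokes the callback with the tunnel still in TB_TUNNEL_ACTIVATING.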
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+ /*
+ * Bump up the reference to keep the tunnel around. It will be
+ * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+ */
+ tb_tunnel_get(tunnel);
+
+ tunnel->dprx_started = true;
+
+ if (tunnel->callback) {
+ tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+ queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+ return -EINPROGRESS;
+ }
+
+ return tb_dp_is_usb4(tunnel->src_port->sw) ?
+ tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+ }
+}
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -910,6 +1052,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
+ tb_dp_dprx_stop(tunnel);
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,10 +1063,13 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
if (ret)
return ret;
- if (tb_port_is_dpout(tunnel->dst_port))
- return tb_dp_port_enable(tunnel->dst_port, active);
+ if (tb_port_is_dpout(tunnel->dst_port)) {
+ ret = tb_dp_port_enable(tunnel->dst_port, active);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return active ? tb_dp_dprx_start(tunnel) : 0;
}
/**
@@ -1076,35 +1222,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
- struct tb_port *in = tunnel->src_port;
-
- /*
- * Wait for DPRX done. Normally it should be already set for
- * active tunnel.
- */
- do {
- u32 val;
- int ret;
-
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
- return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- tb_tunnel_dbg(tunnel, "DPRX read done\n");
- return 0;
- }
- usleep_range(100, 150);
- } while (ktime_before(ktime_get(), timeout));
-
- tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
- return -ETIMEDOUT;
-}
-
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
@@ -1168,32 +1285,39 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int ret;
if (tb_dp_is_usb4(sw)) {
- /*
- * On USB4 routers check if the bandwidth allocation
- * mode is enabled first and then read the bandwidth
- * through those registers.
- */
- ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
- if (ret < 0) {
- if (ret != -EOPNOTSUPP)
+ ret = tb_dp_wait_dprx(tunnel, 0);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ /*
+ * While we wait for DPRX to complete,
+ * the tunnel consumes as much as was
+ * initially reserved for it.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ } else {
+ /*
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
+ */
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ } else if (!ret) {
+ return 0;
+ }
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+ if (ret)
return ret;
- } else if (!ret) {
- return 0;
}
- /*
- * Then see if the DPRX negotiation is ready and if yes
- * return that bandwidth (it may be smaller than the
- * reduced one). According to VESA spec, the DPRX
- * negotiation shall compete in 5 seconds after tunnel
- * established. We give it 100ms extra just in case.
- */
- ret = tb_dp_wait_dprx(tunnel, 5100);
- if (ret)
- return ret;
- ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
- if (ret)
- return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1365,9 +1489,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1424,7 +1548,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1439,15 +1563,24 @@ err_free:
* %0 if no available bandwidth.
* @max_down: Maximum available downstream bandwidth for the DP tunnel.
* %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ * fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
*
* Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * It can call tb_tunnel_is_active() to check whether the activation
+ * succeeded (%false means it failed).
+ * The @callback is called without @tb->lock held.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down)
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -1461,9 +1594,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1472,6 +1605,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
+ tunnel->callback = callback;
+ tunnel->callback_data = callback_data;
+ INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
paths = tunnel->paths;
pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1500,7 +1636,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
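A hypothetical caller of the new asynchronous interface (function and variable names here are illustrative, not from this patch):

static void example_dp_done(struct tb_tunnel *tunnel, void *data)
{
	/* Runs without tb->lock held once the activation settles */
	if (!tb_tunnel_is_active(tunnel))
		tb_tunnel_dbg(tunnel, "DP activation failed\n");
}

static int example_establish_dp(struct tb *tb, struct tb_port *in,
				struct tb_port *out)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_dp(tb, in, out, 1, 0, 0,
				    example_dp_done, NULL);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		tb_tunnel_put(tunnel);
		return ret;
	}
	/* On -EINPROGRESS the outcome is reported via example_dp_done() */
	return 0;
}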
@@ -1620,7 +1756,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
}
}
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
{
struct tb_path_hop *hop;
@@ -1628,14 +1764,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
tb_dma_release_credits(hop);
}
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
- tb_dma_deinit_path(tunnel->paths[i]);
+ tb_dma_destroy_path(tunnel->paths[i]);
}
}
@@ -1681,7 +1817,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
- tunnel->deinit = tb_dma_deinit;
+ tunnel->destroy = tb_dma_destroy;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
@@ -1712,7 +1848,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1793,7 +1929,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
return min(up_max_rate, down_max_rate);
}
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
@@ -2024,7 +2160,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2038,7 +2174,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -2094,7 +2230,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
@@ -2103,7 +2239,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
@@ -2113,7 +2249,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
tunnel->allocated_up = min(max_rate, max_up);
tunnel->allocated_down = min(max_rate, max_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2125,31 +2261,6 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
/**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
- int i;
-
- if (!tunnel)
- return;
-
- if (tunnel->deinit)
- tunnel->deinit(tunnel);
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i])
- tb_path_free(tunnel->paths[i]);
- }
-
- kfree(tunnel->paths);
- kfree(tunnel);
-}
-
-/**
* tb_tunnel_is_invalid - check whether an activated path is still valid
* @tunnel: Tunnel to check
*/
@@ -2167,12 +2278,15 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
}
/**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
*
- * Return: 0 on success and negative errno in case if failure
+ * Return: 0 on success and negative errno in case of failure.
+ * Specifically returns %-EINPROGRESS if the tunnel activation is still
+ * in progress (DP tunnels use this while the DPRX capabilities read
+ * completes).
*/
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int res, i;
@@ -2189,8 +2303,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
}
}
- if (tunnel->init) {
- res = tunnel->init(tunnel);
+ tunnel->state = TB_TUNNEL_ACTIVATING;
+
+ if (tunnel->pre_activate) {
+ res = tunnel->pre_activate(tunnel);
if (res)
return res;
}
@@ -2203,10 +2319,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
- if (res)
+ if (res) {
+ if (res == -EINPROGRESS)
+ return res;
goto err;
+ }
}
+ tunnel->state = TB_TUNNEL_ACTIVE;
return 0;
err:
@@ -2216,27 +2336,6 @@ err:
}
/**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i]->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
- }
-
- return tb_tunnel_restart(tunnel);
-}
-
-/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
@@ -2253,6 +2352,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
+
+ if (tunnel->post_deactivate)
+ tunnel->post_deactivate(tunnel);
+
+ tunnel->state = TB_TUNNEL_INACTIVE;
}
/**
@@ -2279,18 +2383,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
return false;
}
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Returns true if tb_tunnel_activate() has been called for the tunnel */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (!tunnel->paths[i])
- return false;
- if (!tunnel->paths[i]->activated)
- return false;
- }
-
- return true;
+ return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}
/**
@@ -2307,7 +2403,7 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2328,7 +2424,7 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2351,7 +2447,7 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->alloc_bandwidth)
return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
@@ -2376,26 +2472,27 @@ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
{
int up_bw = 0, down_bw = 0;
- if (!tb_tunnel_is_active(tunnel))
- goto out;
-
- if (tunnel->consumed_bandwidth) {
+ /*
+ * Here we need to distinguish inactive tunnels from tunnels
+ * that are either fully active or whose activation has started.
+ * The latter matters for DP tunnels, where we must report the
+ * consumed bandwidth as the maximum we gave the tunnel until
+ * the graphics driver has completed the DPRX capabilities read.
+ */
+ if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
-
- tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
}
-out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
return 0;
}
@@ -2411,7 +2508,7 @@ out:
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
- return 0;
+ return -ENOTCONN;
if (tunnel->release_unused_bandwidth) {
int ret;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 1a27ccd08b86..8a0a0cb21a89 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -19,16 +19,33 @@ enum tb_tunnel_type {
};
/**
+ * enum tb_tunnel_state - State of a tunnel
+ * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() has not been called for the tunnel
+ * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() has been called but the tunnel is not yet fully active
+ * @TB_TUNNEL_ACTIVE: The tunnel is fully active
+ */
+enum tb_tunnel_state {
+ TB_TUNNEL_INACTIVE,
+ TB_TUNNEL_ACTIVATING,
+ TB_TUNNEL_ACTIVE,
+};
+
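The transitions between these states, as implemented in tunnel.c above (a summary of existing behavior, not an addition):

/*
 *  INACTIVE ----- tb_tunnel_activate() ------------------> ACTIVATING
 *  ACTIVATING --- paths up (non-DP) / DPRX done (DP) ----> ACTIVE
 *  any state ---- tb_tunnel_deactivate() ----------------> INACTIVE
 */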
+/**
* struct tb_tunnel - Tunnel between two ports
+ * @kref: Reference count
* @tb: Pointer to the domain
* @src_port: Source port of the tunnel
* @dst_port: Destination port of the tunnel. For discovered incomplete
* tunnels may be %NULL or null adapter port instead.
* @paths: All paths required by the tunnel
* @npaths: Number of paths in @paths
- * @init: Optional tunnel specific initialization
- * @deinit: Optional tunnel specific de-initialization
+ * @pre_activate: Optional tunnel specific initialization called before
+ * activation. Can touch hardware.
* @activate: Optional tunnel specific activation/deactivation
+ * @post_deactivate: Optional tunnel specific de-initialization called
+ * after deactivation. Can touch hardware.
+ * @destroy: Optional tunnel specific callback called when the tunnel
+ * memory is being released. Should not touch hardware.
* @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
* @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
* @alloc_bandwidth: Change tunnel bandwidth allocation
@@ -37,6 +54,7 @@ enum tb_tunnel_type {
* @reclaim_available_bandwidth: Reclaim back available bandwidth
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @state: Current state of the tunnel
* @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
* Only set if the bandwidth needs to be limited.
* @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
@@ -45,16 +63,24 @@ enum tb_tunnel_type {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
+ * @dprx_canceled: %true if the DPRX capabilities read poll was canceled
+ * @dprx_timeout: If set, the DPRX capabilities read poll work times out once this passes
+ * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
+ * @callback: Optional callback called when DP tunnel is fully activated
+ * @callback_data: Optional data for @callback
*/
struct tb_tunnel {
+ struct kref kref;
struct tb *tb;
struct tb_port *src_port;
struct tb_port *dst_port;
struct tb_path **paths;
size_t npaths;
- int (*init)(struct tb_tunnel *tunnel);
- void (*deinit)(struct tb_tunnel *tunnel);
+ int (*pre_activate)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ void (*post_deactivate)(struct tb_tunnel *tunnel);
+ void (*destroy)(struct tb_tunnel *tunnel);
int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
@@ -69,11 +95,18 @@ struct tb_tunnel {
int *available_down);
struct list_head list;
enum tb_tunnel_type type;
+ enum tb_tunnel_state state;
int max_up;
int max_down;
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
+ bool dprx_canceled;
+ ktime_t dprx_timeout;
+ struct delayed_work dprx_work;
+ void (*callback)(struct tb_tunnel *tunnel, void *data);
+ void *callback_data;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -86,7 +119,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down);
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
@@ -99,10 +134,24 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
-void tb_tunnel_free(struct tb_tunnel *tunnel);
+void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
-int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+
+/**
+ * tb_tunnel_is_active() - Is tunnel fully activated
+ * @tunnel: Tunnel to check
+ *
+ * Returns %true if @tunnel is fully activated. For non-DP tunnels
+ * this is the case once tb_tunnel_activate() returns successfully.
+ * For DP tunnels this returns %true only once the DPRX capabilities
+ * read has completed successfully.
+ */
+static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ return tunnel->state == TB_TUNNEL_ACTIVE;
+}
+
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 11a50c86a1e4..b0630e6d9472 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1026,7 +1026,7 @@ static int remove_missing_service(struct device *dev, void *data)
return 0;
}
-static int find_service(struct device *dev, void *data)
+static int find_service(struct device *dev, const void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index afbf7738c7c4..58b28be63c79 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -1154,7 +1154,7 @@ static char kgdbfdc_rbuf[4];
/* write buffer to allow compaction */
static unsigned int kgdbfdc_wbuflen;
-static char kgdbfdc_wbuf[4];
+static u8 kgdbfdc_wbuf[4];
static void __iomem *kgdbfdc_setup(void)
{
@@ -1215,7 +1215,7 @@ static int kgdbfdc_read_char(void)
/* push an FDC word from write buffer to TX FIFO */
static void kgdbfdc_push_one(void)
{
- const char *bufs[1] = { kgdbfdc_wbuf };
+ const u8 *bufs[1] = { kgdbfdc_wbuf };
struct fdc_word word;
void __iomem *regs;
unsigned int i;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 252849910588..363afe11974f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2224,7 +2224,7 @@ static int gsm_dlci_negotiate(struct gsm_dlci *dlci)
*
* Some control dlci can stay in ADM mode with other dlci working just
* fine. In that case we can just keep the control dlci open after the
- * DLCI_OPENING retries time out.
+ * DLCI_OPENING receives DM.
*/
static void gsm_dlci_t1(struct timer_list *t)
@@ -2243,16 +2243,19 @@ static void gsm_dlci_t1(struct timer_list *t)
}
break;
case DLCI_OPENING:
- if (dlci->retries) {
- dlci->retries--;
- gsm_command(dlci->gsm, dlci->addr, SABM|PF);
- mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (!dlci->addr && gsm->control == (DM | PF)) {
if (debug & DBG_ERRORS)
- pr_info("DLCI %d opening in ADM mode.\n",
- dlci->addr);
+ pr_info("DLCI 0 opening in ADM mode.\n");
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
+ } else if (dlci->retries) {
+ if (!dlci->addr || !gsm->dlci[0] ||
+ gsm->dlci[0]->state != DLCI_OPENING) {
+ dlci->retries--;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ }
+
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else {
gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
@@ -2308,7 +2311,9 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
dlci->retries = gsm->n2;
if (!need_pn) {
dlci->state = DLCI_OPENING;
- gsm_command(gsm, dlci->addr, SABM|PF);
+ if (!dlci->addr || !gsm->dlci[0] ||
+ gsm->dlci[0]->state != DLCI_OPENING)
+ gsm_command(gsm, dlci->addr, SABM|PF);
} else {
/* Configure DLCI before setup */
dlci->state = DLCI_CONFIGURE;
@@ -4251,7 +4256,7 @@ static const struct tty_port_operations gsm_port_ops = {
static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct gsm_mux *gsm;
- struct gsm_dlci *dlci;
+ struct gsm_dlci *dlci, *dlci0;
unsigned int line = tty->index;
unsigned int mux = mux_line_to_num(line);
bool alloc = false;
@@ -4274,10 +4279,20 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
perspective as we don't have to worry about this
if DLCI0 is lost */
mutex_lock(&gsm->mutex);
- if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
+
+ dlci0 = gsm->dlci[0];
+ if (dlci0 && dlci0->state != DLCI_OPEN) {
mutex_unlock(&gsm->mutex);
- return -EL2NSYNC;
+
+ if (dlci0->state == DLCI_OPENING)
+ wait_event(gsm->event, dlci0->state != DLCI_OPENING);
+
+ if (dlci0->state != DLCI_OPEN)
+ return -EL2NSYNC;
+
+ mutex_lock(&gsm->mutex);
}
+
dlci = gsm->dlci[line];
if (dlci == NULL) {
alloc = true;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index df08f13052ff..8bb1a01fef2a 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -798,7 +798,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
/* We refuse fsnotify events on ptmx, since it's a shared resource */
- filp->f_mode |= FMODE_NONOTIFY;
+ file_set_fsnotify_mode(filp, FMODE_NONOTIFY);
retval = tty_alloc_file(filp);
if (retval)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index e5310c65cf52..b861585ca02a 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -231,8 +231,8 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p);
int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485);
-void serial8250_em485_start_tx(struct uart_8250_port *p);
-void serial8250_em485_stop_tx(struct uart_8250_port *p);
+void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier);
+void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier);
void serial8250_em485_destroy(struct uart_8250_port *p);
extern struct serial_rs485 serial8250_em485_supported;
@@ -374,6 +374,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
#ifdef CONFIG_SERIAL_8250_DMA
extern int serial8250_tx_dma(struct uart_8250_port *);
+extern void serial8250_tx_dma_flush(struct uart_8250_port *);
extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
@@ -406,6 +407,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
return -1;
}
+static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
static inline int serial8250_rx_dma(struct uart_8250_port *p)
{
return -1;
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index fdb53b54e99e..0609582a62f7 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -46,7 +46,7 @@ struct bcm2835aux_data {
u32 cntl;
};
-static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
+static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up, bool toggle_ier)
{
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev);
@@ -65,7 +65,7 @@ static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
serial8250_out_MCR(up, UART_MCR_RTS);
}
-static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up)
+static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up, bool toggle_ier)
{
if (up->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
serial8250_out_MCR(up, 0);
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index d0b18358859e..742004d63c6f 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1056,8 +1056,7 @@ static int brcmuart_probe(struct platform_device *pdev)
}
/* setup HR timer */
- hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->hrt.function = brcmuart_hrtimer_func;
+ hrtimer_setup(&priv->hrt, brcmuart_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
up.port.shutdown = brcmuart_shutdown;
up.port.startup = brcmuart_startup;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 5f9f06911795..6f676bb37ac3 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,6 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
uart_port_lock_irqsave(port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
- up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
uart_port_unlock_irqrestore(port, flags);
}
@@ -812,6 +811,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->dl_write = up->dl_write;
if (uart->port.type != PORT_8250_CIR) {
+ if (uart_console_registered(&uart->port))
+ pm_runtime_get_sync(uart->port.dev);
+
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
&uart->capabilities);
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index d215c494ee24..f245a84f4a50 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -149,6 +149,22 @@ err:
return ret;
}
+void serial8250_tx_dma_flush(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (!dma->tx_running)
+ return;
+
+ /*
+ * kfifo_reset() has been called by the serial core, avoid
+ * advancing and underflowing in __dma_tx_complete().
+ */
+ dma->tx_size = 0;
+
+ dmaengine_terminate_async(dma->txchan);
+}
+
int serial8250_rx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 64aed7efc569..11c860ea80f6 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -110,7 +110,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
spin_lock_init(&port->lock);
if (resource_type(&resource) == IORESOURCE_IO) {
- port->iotype = UPIO_PORT;
port->iobase = resource.start;
} else {
port->mapbase = resource.start;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 9eb9aa766811..c2b75e3f106d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -365,7 +365,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
if (up->port.rs485.flags & SER_RS485_ENABLED &&
up->port.rs485_config == serial8250_em485_config)
- serial8250_em485_stop_tx(up);
+ serial8250_em485_stop_tx(up, true);
}
/*
@@ -412,7 +412,13 @@ static void omap_8250_set_termios(struct uart_port *port,
*/
uart_update_timeout(port, termios->c_cflag, baud);
- up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ /*
+ * Specify which conditions may be considered for error
+ * handling and the ignoring of characters. The actual
+ * ignoring of characters only occurs if the bit is set
+ * in @ignore_status_mask as well.
+ */
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | PARMRK))
@@ -838,7 +844,6 @@ static void omap_8250_unthrottle(struct uart_port *port)
if (up->dma)
up->dma->rx_dma(up);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
- port->read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
uart_port_unlock_irqrestore(port, flags);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3c3f7c926afb..df4d0d832e54 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -64,23 +64,17 @@
#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
-#define PCI_VENDOR_ID_WCH 0x4348
-#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
-#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
-#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
-#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
-#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
-#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
+
+#define PCI_DEVICE_ID_WCHCN_CH352_2S 0x3253
+#define PCI_DEVICE_ID_WCHCN_CH355_4S 0x7173
+
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
-#define PCIE_VENDOR_ID_WCH 0x1c00
-#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
-#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
-#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
-#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
+#define PCI_DEVICE_ID_WCHIC_CH384_4S 0x3470
+#define PCI_DEVICE_ID_WCHIC_CH384_8S 0x3853
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
@@ -2817,80 +2811,80 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
},
/* WCH CH353 1S1P card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_1S1P,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_1S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 2S1P card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_2S1P,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 4S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_4S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 2S1PF card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_2S1PF,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH352 2S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH352_2S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH352_2S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH355 4S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH355_4S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH355_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch355_setup,
},
/* WCH CH382 2S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH382_2S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH382_2S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH382 2S1P card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH382_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH384 4S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH384_4S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH384_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH384 8S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH384_8S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH384_8S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_wch_ch38x_init,
@@ -3967,11 +3961,11 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
/* WCH CH353 2S1P */
- { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHCN, 0x7053), REPORT_CONFIG(PARPORT_SERIAL), },
/* WCH CH353 1S1P */
- { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHCN, 0x5053), REPORT_CONFIG(PARPORT_SERIAL), },
/* WCH CH382 2S1P */
- { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHIC, 0x3250), REPORT_CONFIG(PARPORT_SERIAL), },
/* Intel platforms with MID UART */
{ PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
@@ -6044,27 +6038,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
* WCH CH353 series devices: The 2S1P is handled by parport_serial
* so not listed here.
*/
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_4_115200 },
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH355_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_4_115200 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch382_2 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_8S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_8 },
/*
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 838f181f929b..e9c51d4e447d 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -78,6 +78,12 @@
#define UART_TX_BYTE_FIFO 0x00
#define UART_FIFO_CTL 0x02
+#define UART_MODEM_CTL_REG 0x04
+#define UART_MODEM_CTL_RTS_SET BIT(1)
+
+#define UART_LINE_STAT_REG 0x05
+#define UART_LINE_XMIT_CHECK_MASK GENMASK(6, 5)
+
#define UART_ACTV_REG 0x11
#define UART_BLOCK_SET_ACTIVE BIT(0)
@@ -94,6 +100,7 @@
#define UART_BIT_SAMPLE_CNT_16 16
#define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8)
#define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8)
+#define FRAC_DIV_TX_END_POINT_MASK GENMASK(23, 20)
#define UART_WAKE_REG 0x8C
#define UART_WAKE_MASK_REG 0x90
@@ -134,6 +141,11 @@
#define UART_BST_STAT_LSR_FRAME_ERR 0x8000000
#define UART_BST_STAT_LSR_THRE 0x20000000
+#define GET_MODEM_CTL_RTS_STATUS(reg) ((reg) & UART_MODEM_CTL_RTS_SET)
+#define GET_RTS_PIN_STATUS(val) (((val) & TIOCM_RTS) >> 1)
+#define RTS_TOGGLE_STATUS_MASK(val, reg) (GET_MODEM_CTL_RTS_STATUS(reg) \
+ != GET_RTS_PIN_STATUS(val))
+
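Why the shift in GET_RTS_PIN_STATUS() lines up (a sanity note, not part of the patch): TIOCM_RTS is bit 2 (0x004), so shifting it right by one lands on bit 1, the same position as UART_MODEM_CTL_RTS_SET. RTS_TOGGLE_STATUS_MASK() therefore evaluates true exactly when the RTS state requested through mctrl differs from the one latched in UART_MODEM_CTL_REG, i.e. when RTS is about to toggle.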
struct pci1xxxx_8250 {
unsigned int nr;
u8 dev_rev;
@@ -254,6 +266,47 @@ static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
port->membase + UART_BAUD_CLK_DIVISOR_REG);
}
+static void pci1xxxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ u32 fract_div_cfg_reg;
+ u32 line_stat_reg;
+ u32 modem_ctl_reg;
+ u32 adcl_cfg_reg;
+
+ adcl_cfg_reg = readl(port->membase + ADCL_CFG_REG);
+
+ /* HW is responsible in the ADCL_EN case */
+ if ((adcl_cfg_reg & (ADCL_CFG_EN | ADCL_CFG_PIN_SEL)))
+ return;
+
+ modem_ctl_reg = readl(port->membase + UART_MODEM_CTL_REG);
+
+ serial8250_do_set_mctrl(port, mctrl);
+
+ if (RTS_TOGGLE_STATUS_MASK(mctrl, modem_ctl_reg)) {
+ line_stat_reg = readl(port->membase + UART_LINE_STAT_REG);
+ if (line_stat_reg & UART_LINE_XMIT_CHECK_MASK) {
+ fract_div_cfg_reg = readl(port->membase +
+ FRAC_DIV_CFG_REG);
+
+ writel((fract_div_cfg_reg &
+ ~(FRAC_DIV_TX_END_POINT_MASK)),
+ port->membase + FRAC_DIV_CFG_REG);
+
+ /* Enable ADC and set the nRTS pin */
+ writel((adcl_cfg_reg | (ADCL_CFG_EN |
+ ADCL_CFG_PIN_SEL)),
+ port->membase + ADCL_CFG_REG);
+
+ /* Revert to the original settings */
+ writel(adcl_cfg_reg, port->membase + ADCL_CFG_REG);
+
+ writel(fract_div_cfg_reg, port->membase +
+ FRAC_DIV_CFG_REG);
+ }
+ }
+}
+
static int pci1xxxx_rs485_config(struct uart_port *port,
struct ktermios *termios,
struct serial_rs485 *rs485)
@@ -631,9 +684,14 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
port->port.rs485_config = pci1xxxx_rs485_config;
port->port.rs485_supported = pci1xxxx_rs485_supported;
- /* From C0 rev Burst operation is supported */
+ /*
+ * C0 and later revisions support Burst operation.
+ * The RTS workaround in set_mctrl applies only to B0.
+ */
if (rev >= 0xC0)
port->port.handle_irq = pci1xxxx_handle_irq;
+ else if (rev == 0xB0)
+ port->port.set_mctrl = pci1xxxx_set_mctrl;
ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
if (ret < 0)
diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c
index 8bdc1879d952..c0343bfb8064 100644
--- a/drivers/tty/serial/8250/8250_platform.c
+++ b/drivers/tty/serial/8250/8250_platform.c
@@ -112,7 +112,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uart_8250_port uart = { };
struct resource *regs;
- unsigned char iotype;
int ret, line;
regs = platform_get_mem_or_io(pdev, 0);
@@ -122,13 +121,11 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
switch (resource_type(regs)) {
case IORESOURCE_IO:
uart.port.iobase = regs->start;
- iotype = UPIO_PORT;
break;
case IORESOURCE_MEM:
uart.port.mapbase = regs->start;
uart.port.mapsize = resource_size(regs);
uart.port.flags = UPF_IOREMAP;
- iotype = UPIO_MEM;
break;
default:
return -EINVAL;
@@ -147,12 +144,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
line = serial8250_register_8250_port(&uart);
if (line < 0)
return line;
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 7c06ae79d8e2..7a837fdf9df1 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -436,7 +436,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart, *port;
int ret, flags = dev_id->driver_data;
- unsigned char iotype;
long line;
if (flags & UNKNOWN_DEV) {
@@ -448,14 +447,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
uart.port.iobase = pnp_port_start(dev, 2);
- iotype = UPIO_PORT;
} else if (pnp_port_valid(dev, 0)) {
uart.port.iobase = pnp_port_start(dev, 0);
- iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
uart.port.mapsize = pnp_mem_len(dev, 0);
- iotype = UPIO_MEM;
uart.port.flags = UPF_IOREMAP;
} else
return -ENODEV;
@@ -471,12 +467,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
if (flags & CIR_PORT) {
uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.type = PORT_8250_CIR;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 649e74e9b52f..c57f44882abb 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -566,19 +566,17 @@ static int serial8250_em485_init(struct uart_8250_port *p)
if (!p->em485)
return -ENOMEM;
- hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx;
- p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
+ hrtimer_setup(&p->em485->stop_tx_timer, &serial8250_em485_handle_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&p->em485->start_tx_timer, &serial8250_em485_handle_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
p->em485->port = p;
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
deassert_rts:
if (p->em485->tx_stopped)
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
return 0;
}
@@ -1390,7 +1388,6 @@ static void serial8250_stop_rx(struct uart_port *port)
serial8250_rpm_get(up);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
serial_port_out(port, UART_IER, up->ier);
serial8250_rpm_put(up);
@@ -1399,10 +1396,11 @@ static void serial8250_stop_rx(struct uart_port *port)
/**
* serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
* @p: uart 8250 port
+ * @toggle_ier: true to allow enabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to stop rs485 transmission.
*/
-void serial8250_em485_stop_tx(struct uart_8250_port *p)
+void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(p);
@@ -1423,8 +1421,10 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_and_reinit_fifos(p);
- p->ier |= UART_IER_RLSI | UART_IER_RDI;
- serial_port_out(&p->port, UART_IER, p->ier);
+ if (toggle_ier) {
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+ }
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
@@ -1439,7 +1439,7 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
serial8250_rpm_get(p);
uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->stop_tx_timer) {
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
@@ -1471,7 +1471,7 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
em485->active_timer = &em485->stop_tx_timer;
hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
} else {
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
@@ -1560,6 +1560,7 @@ static inline void __start_tx(struct uart_port *port)
/**
* serial8250_em485_start_tx() - generic ->rs485_start_tx() callback
* @up: uart 8250 port
+ * @toggle_ier: true to allow disabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to start rs485 transmission.
* Assumes that setting the RTS bit in the MCR register means RTS is high.
@@ -1567,11 +1568,11 @@ static inline void __start_tx(struct uart_port *port)
* stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the
* UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.)
*/
-void serial8250_em485_start_tx(struct uart_8250_port *up)
+void serial8250_em485_start_tx(struct uart_8250_port *up, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(up);
- if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && toggle_ier)
serial8250_stop_rx(&up->port);
if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
@@ -1605,7 +1606,7 @@ static bool start_tx_rs485(struct uart_port *port)
if (em485->tx_stopped) {
em485->tx_stopped = false;
- up->rs485_start_tx(up);
+ up->rs485_start_tx(up, true);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
@@ -1931,7 +1932,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
*/
if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
(port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
- !(port->read_status_mask & UART_LSR_DR))
+ !(up->ier & (UART_IER_RLSI | UART_IER_RDI)))
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
@@ -2079,11 +2080,20 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
serial8250_rpm_put(up);
}
-static void wait_for_lsr(struct uart_8250_port *up, int bits)
+/* Returns true if @bits were set, false on timeout */
+static bool wait_for_lsr(struct uart_8250_port *up, int bits)
{
- unsigned int status, tmout = 10000;
+ unsigned int status, tmout;
+
+ /*
+ * Wait for a character to be sent. Fall back to a safe default
+ * timeout value if @frame_time is not available.
+ */
+ if (up->port.frame_time)
+ tmout = up->port.frame_time * 2 / NSEC_PER_USEC;
+ else
+ tmout = 10000;
- /* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
status = serial_lsr_in(up);
@@ -2094,11 +2104,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
udelay(1);
touch_nmi_watchdog();
}
+
+ return (tmout != 0);
}
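A worked example of the frame-time based timeout, assuming 8N1 framing: at 115200 baud a 10-bit frame takes about 86.8 us, so port->frame_time ≈ 86806 ns and tmout = frame_time * 2 / NSEC_PER_USEC ≈ 173; since the loop delays 1 us per iteration, that is roughly two character times. At 9600 baud the same formula gives about 2083 us.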
-/*
- * Wait for transmitter & holding register to empty
- */
+/* Wait for transmitter and holding register to empty with timeout */
static void wait_for_xmitr(struct uart_8250_port *up, int bits)
{
unsigned int tmout;
@@ -2543,6 +2553,14 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
+static void serial8250_flush_buffer(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ if (up->dma)
+ serial8250_tx_dma_flush(up);
+}
+
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -2786,7 +2804,13 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
*/
uart_update_timeout(port, termios->c_cflag, baud);
- port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ /*
+ * Specify which conditions may be considered for error
+ * handling and the ignoring of characters. The actual
+ * ignoring of characters only occurs if the bit is set
+ * in @ignore_status_mask as well.
+ */
+ port->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
@@ -3226,6 +3250,7 @@ static const struct uart_ops serial8250_pops = {
.break_ctl = serial8250_break_ctl,
.startup = serial8250_startup,
.shutdown = serial8250_shutdown,
+ .flush_buffer = serial8250_flush_buffer,
.set_termios = serial8250_set_termios,
.set_ldisc = serial8250_set_ldisc,
.pm = serial8250_pm,
@@ -3250,7 +3275,7 @@ void serial8250_init_port(struct uart_8250_port *up)
port->ops = &serial8250_pops;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
- up->cur_iotype = 0xFF;
+ up->cur_iotype = UPIO_UNKNOWN;
}
EXPORT_SYMBOL_GPL(serial8250_init_port);
@@ -3285,10 +3310,15 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
{
+ serial_port_out(port, UART_TX, ch);
+}
+
+static void serial8250_console_wait_putchar(struct uart_port *port, unsigned char ch)
+{
struct uart_8250_port *up = up_to_u8250p(port);
wait_for_xmitr(up, UART_LSR_THRE);
- serial_port_out(port, UART_TX, ch);
+ serial8250_console_putchar(port, ch);
}
/*
@@ -3317,6 +3347,16 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
+static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ if (wait_for_lsr(up, UART_LSR_THRE))
+ return;
+ }
+}
+
/*
* Print a string to the serial port using the device FIFO
*
@@ -3326,24 +3366,34 @@ static void serial8250_console_restore(struct uart_8250_port *up)
static void serial8250_console_fifo_write(struct uart_8250_port *up,
const char *s, unsigned int count)
{
- int i;
const char *end = s + count;
unsigned int fifosize = up->tx_loadsz;
+ struct uart_port *port = &up->port;
+ unsigned int tx_count = 0;
bool cr_sent = false;
+ unsigned int i;
while (s != end) {
- wait_for_lsr(up, UART_LSR_THRE);
+ /* Allow timeout for each byte of a possibly full FIFO */
+ fifo_wait_for_lsr(up, fifosize);
for (i = 0; i < fifosize && s != end; ++i) {
if (*s == '\n' && !cr_sent) {
- serial_out(up, UART_TX, '\r');
+ serial8250_console_putchar(port, '\r');
cr_sent = true;
} else {
- serial_out(up, UART_TX, *s++);
+ serial8250_console_putchar(port, *s++);
cr_sent = false;
}
}
+ tx_count = i;
}
+
+ /*
+ * Allow timeout for each byte written since the caller will only wait
+ * for UART_LSR_BOTH_EMPTY using the timeout of a single character
+ */
+ fifo_wait_for_lsr(up, tx_count);
}
/*
@@ -3385,7 +3435,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (em485) {
if (em485->tx_stopped)
- up->rs485_start_tx(up);
+ up->rs485_start_tx(up, false);
mdelay(port->rs485.delay_rts_before_send);
}
@@ -3412,7 +3462,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (likely(use_fifo))
serial8250_console_fifo_write(up, s, count);
else
- uart_console_write(port, s, count, serial8250_console_putchar);
+ uart_console_write(port, s, count, serial8250_console_wait_putchar);
/*
* Finally, wait for transmitter to become empty
@@ -3423,7 +3473,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
if (em485->tx_stopped)
- up->rs485_stop_tx(up);
+ up->rs485_stop_tx(up, false);
}
serial_port_out(port, UART_IER, ier);
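The reworked wait_for_lsr() derives its timeout from the character frame time instead of a fixed 10 ms, and reports whether the requested bits actually appeared. A minimal sketch of just the timeout derivation, assuming frame_time carries nanoseconds per character as in struct uart_port:

/*
 * Sketch of the timeout computation used above. Two frame times give
 * headroom for one character in flight plus one being loaded; the
 * result is in microseconds since the wait loop polls with udelay(1).
 */
static unsigned int lsr_timeout_us(unsigned int frame_time_ns)
{
	if (frame_time_ns)
		return frame_time_ns * 2 / NSEC_PER_USEC;
	return 10000;	/* safe default: the old fixed 10 ms */
}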
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 45f0f779fbf9..976dae3bb1bb 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -128,7 +128,7 @@ config SERIAL_SB1250_DUART_CONSOLE
config SERIAL_ATMEL
bool "AT91 on-chip serial port support"
depends on COMMON_CLK
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
select MFD_AT91_USART
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index b9c3c3bed0c1..d47a62d1c9f7 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -24,8 +24,6 @@
#include <linux/io.h>
#include <linux/altera_jtaguart.h>
-#define DRV_NAME "altera_jtaguart"
-
/*
* Altera JTAG UART register definitions according to the Altera JTAG UART
* datasheet: https://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
@@ -173,7 +171,7 @@ static int altera_jtaguart_startup(struct uart_port *port)
int ret;
ret = request_irq(port->irq, altera_jtaguart_interrupt, 0,
- DRV_NAME, port);
+ dev_name(port->dev), port);
if (ret) {
dev_err(port->dev, "unable to attach Altera JTAG UART %d interrupt vector=%d\n",
port->line, port->irq);
@@ -365,7 +363,7 @@ OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup);
static struct uart_driver altera_jtaguart_driver = {
.owner = THIS_MODULE,
- .driver_name = "altera_jtaguart",
+ .driver_name = KBUILD_MODNAME,
.dev_name = "ttyJ",
.major = ALTERA_JTAGUART_MAJOR,
.minor = ALTERA_JTAGUART_MINOR,
@@ -451,7 +449,7 @@ static struct platform_driver altera_jtaguart_platform_driver = {
.probe = altera_jtaguart_probe,
.remove = altera_jtaguart_remove,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(altera_jtaguart_match),
},
};
@@ -481,4 +479,4 @@ module_exit(altera_jtaguart_exit);
MODULE_DESCRIPTION("Altera JTAG UART driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index c94655453c33..1759137121cc 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -24,7 +24,6 @@
#include <linux/io.h>
#include <linux/altera_uart.h>
-#define DRV_NAME "altera_uart"
#define SERIAL_ALTERA_MAJOR 204
#define SERIAL_ALTERA_MINOR 213
@@ -518,7 +517,7 @@ OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup);
*/
static struct uart_driver altera_uart_driver = {
.owner = THIS_MODULE,
- .driver_name = DRV_NAME,
+ .driver_name = KBUILD_MODNAME,
.dev_name = "ttyAL",
.major = SERIAL_ALTERA_MAJOR,
.minor = SERIAL_ALTERA_MINOR,
@@ -619,7 +618,7 @@ static struct platform_driver altera_uart_platform_driver = {
.probe = altera_uart_probe,
.remove = altera_uart_remove,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(altera_uart_match),
},
};
@@ -649,5 +648,5 @@ module_exit(altera_uart_exit);
MODULE_DESCRIPTION("Altera UART driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 69b7a3e1e418..98f178bdbcbe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -248,6 +248,13 @@ struct pl011_dmatx_data {
bool queued;
};
+enum pl011_rs485_tx_state {
+ OFF,
+ WAIT_AFTER_RTS,
+ SEND,
+ WAIT_AFTER_SEND,
+};
+
/*
* We wrap our port structure around the generic uart_port.
*/
@@ -261,8 +268,10 @@ struct uart_amba_port {
unsigned int fifosize; /* vendor-specific */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
- bool rs485_tx_started;
- unsigned int rs485_tx_drain_interval; /* usecs */
+ ktime_t rs485_tx_drain_interval; /* nanoseconds */
+ enum pl011_rs485_tx_state rs485_tx_state;
+ struct hrtimer trigger_start_tx;
+ struct hrtimer trigger_stop_tx;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
unsigned int dmacr; /* dma control reg */
@@ -1260,30 +1269,31 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
- /*
- * To be on the safe side only time out after twice as many iterations
- * as fifo size.
- */
- const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
struct uart_port *port = &uap->port;
- int i = 0;
u32 cr;
- /* Wait until hardware tx queue is empty */
- while (!pl011_tx_empty(port)) {
- if (i > MAX_TX_DRAIN_ITERS) {
- dev_warn(port->dev,
- "timeout while draining hardware tx queue\n");
- break;
- }
+ if (uap->rs485_tx_state == SEND)
+ uap->rs485_tx_state = WAIT_AFTER_SEND;
- udelay(uap->rs485_tx_drain_interval);
- i++;
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ /* Schedule hrtimer if tx queue not empty */
+ if (!pl011_tx_empty(port)) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ uap->rs485_tx_drain_interval,
+ HRTIMER_MODE_REL);
+ return;
+ }
+ if (port->rs485.delay_rts_after_send > 0) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ ms_to_ktime(port->rs485.delay_rts_after_send),
+ HRTIMER_MODE_REL);
+ return;
+ }
+ /* Continue without any delay */
+ } else if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ hrtimer_try_to_cancel(&uap->trigger_start_tx);
}
- if (port->rs485.delay_rts_after_send)
- mdelay(port->rs485.delay_rts_after_send);
-
cr = pl011_read(uap, REG_CR);
if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
@@ -1296,7 +1306,7 @@ static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
cr |= UART011_CR_RXE;
pl011_write(cr, uap, REG_CR);
- uap->rs485_tx_started = false;
+ uap->rs485_tx_state = OFF;
}
static void pl011_stop_tx(struct uart_port *port)
@@ -1304,11 +1314,18 @@ static void pl011_stop_tx(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ pl011_rs485_tx_stop(uap);
+ return;
+ }
+
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
pl011_dma_tx_stop(uap);
- if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state != OFF)
pl011_rs485_tx_stop(uap);
}
@@ -1328,10 +1345,19 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap)
struct uart_port *port = &uap->port;
u32 cr;
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ hrtimer_try_to_cancel(&uap->trigger_stop_tx);
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ /* uap->rs485_tx_state == OFF */
/* Enable transmitter */
cr = pl011_read(uap, REG_CR);
cr |= UART011_CR_TXE;
-
/* Disable receiver if half-duplex */
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
cr &= ~UART011_CR_RXE;
@@ -1343,10 +1369,14 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap)
pl011_write(cr, uap, REG_CR);
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
+ if (port->rs485.delay_rts_before_send > 0) {
+ uap->rs485_tx_state = WAIT_AFTER_RTS;
+ hrtimer_start(&uap->trigger_start_tx,
+ ms_to_ktime(port->rs485.delay_rts_before_send),
+ HRTIMER_MODE_REL);
+ } else {
+ uap->rs485_tx_state = SEND;
+ }
}
static void pl011_start_tx(struct uart_port *port)
@@ -1355,13 +1385,44 @@ static void pl011_start_tx(struct uart_port *port)
container_of(port, struct uart_amba_port, port);
if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
+ uap->rs485_tx_state != SEND) {
pl011_rs485_tx_start(uap);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ return;
+ }
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
+static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_start_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ pl011_start_tx(&uap->port);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_stop_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND)
+ pl011_rs485_tx_stop(uap);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
static void pl011_stop_rx(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1953,7 +2014,7 @@ static void pl011_shutdown(struct uart_port *port)
pl011_dma_shutdown(uap);
- if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
+ if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_state != OFF)
pl011_rs485_tx_stop(uap);
free_irq(uap->port.irq, uap);
@@ -2098,7 +2159,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* with the given baud rate. We use this as the poll interval when we
* wait for the tx queue to empty.
*/
- uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
+ uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));
pl011_setup_status_masks(port, termios);
@@ -2806,6 +2867,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return -EINVAL;
}
}
+ hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
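The pl011 conversion replaces mdelay() busy-waits with two hrtimers driven by a four-state machine (OFF, WAIT_AFTER_RTS, SEND, WAIT_AFTER_SEND). A condensed sketch of the start side, reusing the same state names; the real driver additionally cancels the opposing timer when states overlap:

static void rs485_start_sketch(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;

	if (port->rs485.delay_rts_before_send > 0) {
		/* RTS is asserted now; data goes out when the timer fires */
		uap->rs485_tx_state = WAIT_AFTER_RTS;
		hrtimer_start(&uap->trigger_start_tx,
			      ms_to_ktime(port->rs485.delay_rts_before_send),
			      HRTIMER_MODE_REL);
	} else {
		uap->rs485_tx_state = SEND;
	}
}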
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 0cf05ac18993..f44f9d20a974 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1727,26 +1727,16 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
/* DMA/PDC usage specification */
if (of_property_read_bool(np, "atmel,use-dma-rx")) {
- if (of_property_read_bool(np, "dmas")) {
- atmel_port->use_dma_rx = true;
- atmel_port->use_pdc_rx = false;
- } else {
- atmel_port->use_dma_rx = false;
- atmel_port->use_pdc_rx = true;
- }
+ atmel_port->use_dma_rx = of_property_present(np, "dmas");
+ atmel_port->use_pdc_rx = !atmel_port->use_dma_rx;
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = false;
}
if (of_property_read_bool(np, "atmel,use-dma-tx")) {
- if (of_property_read_bool(np, "dmas")) {
- atmel_port->use_dma_tx = true;
- atmel_port->use_pdc_tx = false;
- } else {
- atmel_port->use_dma_tx = false;
- atmel_port->use_pdc_tx = true;
- }
+ atmel_port->use_dma_tx = of_property_present(np, "dmas");
+ atmel_port->use_pdc_tx = !atmel_port->use_dma_tx;
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = false;
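of_property_read_bool() is meant for true boolean properties; of_property_present() expresses a bare existence test, which is all the "dmas" check needs. A minimal sketch of the collapsed selection logic:

/* Sketch: prefer DMA when a "dmas" phandle list exists, else use the PDC */
static void pick_rx_engine(struct device_node *np, bool *use_dma, bool *use_pdc)
{
	*use_dma = of_property_present(np, "dmas");
	*use_pdc = !*use_dma;
}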
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 57b0632a3db6..c91b9d9818cd 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -245,7 +245,7 @@
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
-#define UART_NR 8
+#define UART_NR 12
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
@@ -1965,6 +1965,11 @@ static void lpuart32_shutdown(struct uart_port *port)
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);
+ /* flush Rx/Tx FIFO */
+ temp = lpuart32_read(port, UARTFIFO);
+ temp |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(port, temp, UARTFIFO);
+
uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 17f70e4bee43..9a1afe409b98 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2582,10 +2582,10 @@ static int imx_uart_probe(struct platform_device *pdev)
imx_uart_writel(sport, ucr3, UCR3);
}
- hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- sport->trigger_start_tx.function = imx_trigger_start_tx;
- sport->trigger_stop_tx.function = imx_trigger_stop_tx;
+ hrtimer_setup(&sport->trigger_start_tx, imx_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&sport->trigger_stop_tx, imx_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
@@ -2692,7 +2692,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
u32 ucr3;
- uart_port_lock(&sport->port);
+ uart_port_lock_irq(&sport->port);
ucr3 = imx_uart_readl(sport, UCR3);
if (on) {
@@ -2714,7 +2714,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
imx_uart_writel(sport, ucr1, UCR1);
}
- uart_port_unlock(&sport->port);
+ uart_port_unlock_irq(&sport->port);
}
static int imx_uart_suspend_noirq(struct device *dev)
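hrtimer_setup() folds hrtimer_init() plus the manual .function assignment into one call, so a timer can no longer be armed before its callback is set. A minimal sketch with a hypothetical callback name:

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* ... */
	return HRTIMER_NORESTART;
}

static void example_setup(struct hrtimer *t)
{
	/* replaces: hrtimer_init(t, ...); t->function = example_timer_fn; */
	hrtimer_setup(t, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}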
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index e93850f6447a..2833708e369f 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -27,18 +27,6 @@
#include <linux/kgdb.h>
#include <linux/kdb.h>
-static int kgdb_nmi_knock = 1;
-module_param_named(knock, kgdb_nmi_knock, int, 0600);
-MODULE_PARM_DESC(knock, "if set to 1 (default), the special '$3#33' command " \
- "must be used to enter the debugger; when set to 0, " \
- "hitting return key is enough to enter the debugger; " \
- "when set to -1, the debugger is entered immediately " \
- "upon NMI");
-
-static char *kgdb_nmi_magic = "$3#33";
-module_param_named(magic, kgdb_nmi_magic, charp, 0600);
-MODULE_PARM_DESC(magic, "magic sequence to enter NMI debugger (default $3#33)");
-
static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0);
static int kgdb_nmi_console_setup(struct console *co, char *options)
@@ -95,95 +83,6 @@ struct kgdb_nmi_tty_priv {
static struct tty_port *kgdb_nmi_port;
-static void kgdb_tty_recv(int ch)
-{
- struct kgdb_nmi_tty_priv *priv;
- char c = ch;
-
- if (!kgdb_nmi_port || ch < 0)
- return;
- /*
- * Can't use port->tty->driver_data as tty might be not there. Timer
- * will check for tty and will get the ref, but here we don't have to
- * do that, and actually, we can't: we're in NMI context, no locks are
- * possible.
- */
- priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port);
- kfifo_in(&priv->fifo, &c, 1);
-}
-
-static int kgdb_nmi_poll_one_knock(void)
-{
- static int n;
- int c;
- const char *magic = kgdb_nmi_magic;
- size_t m = strlen(magic);
- bool printch = false;
-
- c = dbg_io_ops->read_char();
- if (c == NO_POLL_CHAR)
- return c;
-
- if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) {
- return 1;
- } else if (c == magic[n]) {
- n = (n + 1) % m;
- if (!n)
- return 1;
- printch = true;
- } else {
- n = 0;
- }
-
- if (atomic_read(&kgdb_nmi_num_readers)) {
- kgdb_tty_recv(c);
- return 0;
- }
-
- if (printch) {
- kdb_printf("%c", c);
- return 0;
- }
-
- kdb_printf("\r%s %s to enter the debugger> %*s",
- kgdb_nmi_knock ? "Type" : "Hit",
- kgdb_nmi_knock ? magic : "<return>", (int)m, "");
- while (m--)
- kdb_printf("\b");
- return 0;
-}
-
-/**
- * kgdb_nmi_poll_knock - Check if it is time to enter the debugger
- *
- * "Serial ports are often noisy, especially when muxed over another port (we
- * often use serial over the headset connector). Noise on the async command
- * line just causes characters that are ignored, on a command line that blocked
- * execution noise would be catastrophic." -- Colin Cross
- *
- * So, this function implements KGDB/KDB knocking on the serial line: we won't
- * enter the debugger until we receive a known magic phrase (which is actually
- * "$3#33", known as "escape to KDB" command. There is also a relaxed variant
- * of knocking, i.e. just pressing the return key is enough to enter the
- * debugger. And if knocking is disabled, the function always returns 1.
- */
-bool kgdb_nmi_poll_knock(void)
-{
- if (kgdb_nmi_knock < 0)
- return true;
-
- while (1) {
- int ret;
-
- ret = kgdb_nmi_poll_one_knock();
- if (ret == NO_POLL_CHAR)
- return false;
- else if (ret == 1)
- break;
- }
- return true;
-}
-
/*
* The tasklet is cheap, it does not cause wakeups when reschedules itself,
* instead it waits for the next tick.
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index f55aa353aed9..2204cc3e3b07 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1621,7 +1621,7 @@ mpc52xx_console_setup(struct console *co, char *options)
(void *)port->mapbase, port->membase,
port->irq, port->uartclk);
- /* Setup the port parameters accoding to options */
+ /* Setup the port parameters according to options */
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a3093e09309f..560f45ed19ae 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -314,6 +314,7 @@
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_GPIOS_PER_BANK 4
+#define SC16IS7XX_POLL_PERIOD_MS 10
#define SC16IS7XX_RECONF_MD BIT(0)
#define SC16IS7XX_RECONF_IER BIT(1)
#define SC16IS7XX_RECONF_RS485 BIT(2)
@@ -348,6 +349,8 @@ struct sc16is7xx_port {
u8 mctrl_mask;
struct kthread_worker kworker;
struct task_struct *kworker_task;
+ struct kthread_delayed_work poll_work;
+ bool polling;
struct sc16is7xx_one p[];
};
@@ -861,6 +864,18 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void sc16is7xx_poll_proc(struct kthread_work *ws)
+{
+ struct sc16is7xx_port *s = container_of(ws, struct sc16is7xx_port, poll_work.work);
+
+ /* Reuse standard IRQ handler. Interrupt ID is unused in this context. */
+ sc16is7xx_irq(0, s);
+
+ /* Set up the delay based on SC16IS7XX_POLL_PERIOD_MS */
+ kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+ msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+}
+
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
@@ -1149,6 +1164,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
static int sc16is7xx_startup(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
unsigned long flags;
@@ -1211,6 +1227,10 @@ static int sc16is7xx_startup(struct uart_port *port)
sc16is7xx_enable_ms(port);
uart_port_unlock_irqrestore(port, flags);
+ if (s->polling)
+ kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+ msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+
return 0;
}
@@ -1232,6 +1252,9 @@ static void sc16is7xx_shutdown(struct uart_port *port)
sc16is7xx_power(port, 0);
+ if (s->polling)
+ kthread_cancel_delayed_work_sync(&s->poll_work);
+
kthread_flush_worker(&s->kworker);
}
@@ -1538,6 +1561,11 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
+ s->polling = (irq <= 0);
+ if (s->polling)
+ dev_dbg(dev,
+ "No interrupt pin definition, falling back to polling mode\n");
+
s->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
@@ -1665,6 +1693,12 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
goto out_ports;
#endif
+ if (s->polling) {
+ /* Initialize kernel thread for polling */
+ kthread_init_delayed_work(&s->poll_work, sc16is7xx_poll_proc);
+ return 0;
+ }
+
/*
* Setup interrupt. We first try to acquire the IRQ line as level IRQ.
* If that succeeds, we can allow sharing the interrupt as well.
@@ -1724,6 +1758,9 @@ void sc16is7xx_remove(struct device *dev)
sc16is7xx_power(&s->p[i].port, 0);
}
+ if (s->polling)
+ kthread_cancel_delayed_work_sync(&s->poll_work);
+
kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
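With no usable IRQ, the driver now services the chip from a self-rearming delayed work on its existing kthread worker. A minimal sketch of that shape, with hypothetical struct and helper names:

struct poll_ctx {
	struct kthread_worker *worker;
	struct kthread_delayed_work work;
};

static void poll_fn(struct kthread_work *ws)
{
	struct poll_ctx *ctx = container_of(ws, struct poll_ctx, work.work);

	/* ... run the same servicing the IRQ handler would ... */

	/* re-arm; sc16is7xx uses a 10 ms period */
	kthread_queue_delayed_work(ctx->worker, &ctx->work,
				   msecs_to_jiffies(10));
}

On teardown the work must be stopped with kthread_cancel_delayed_work_sync(), as the diff does in both the shutdown and remove paths.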
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 74fa02b23772..92f7e752f862 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -790,7 +790,6 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
- int ret = -ENODEV;
/* Initialize structure in case we error out later to prevent any stack info leakage. */
*retinfo = (struct serial_struct){};
@@ -799,10 +798,10 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
* Ensure the state we copy is consistent and no hardware changes
* occur as we go
*/
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return -ENODEV;
retinfo->type = uport->type;
retinfo->line = uport->line;
@@ -823,10 +822,7 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
retinfo->iomem_reg_shift = uport->regshift;
retinfo->iomem_base = (void *)(unsigned long)uport->mapbase;
- ret = 0;
-out:
- mutex_unlock(&port->mutex);
- return ret;
+ return 0;
}
static int uart_get_info_user(struct tty_struct *tty,
@@ -838,6 +834,61 @@ static int uart_get_info_user(struct tty_struct *tty,
return uart_get_info(port, ss) < 0 ? -EIO : 0;
}
+static int uart_change_port(struct uart_port *uport,
+ const struct serial_struct *new_info,
+ unsigned long new_port)
+{
+ unsigned long old_iobase, old_mapbase;
+ unsigned int old_type, old_iotype, old_hub6, old_shift;
+ int retval;
+
+ old_iobase = uport->iobase;
+ old_mapbase = uport->mapbase;
+ old_type = uport->type;
+ old_hub6 = uport->hub6;
+ old_iotype = uport->iotype;
+ old_shift = uport->regshift;
+
+ if (old_type != PORT_UNKNOWN && uport->ops->release_port)
+ uport->ops->release_port(uport);
+
+ uport->iobase = new_port;
+ uport->type = new_info->type;
+ uport->hub6 = new_info->hub6;
+ uport->iotype = new_info->io_type;
+ uport->regshift = new_info->iomem_reg_shift;
+ uport->mapbase = (unsigned long)new_info->iomem_base;
+
+ if (uport->type == PORT_UNKNOWN || !uport->ops->request_port)
+ return 0;
+
+ retval = uport->ops->request_port(uport);
+ if (retval == 0)
+ return 0; /* succeeded => done */
+
+ /*
+ * If we fail to request resources for the new port, try to restore the
+ * old settings.
+ */
+ uport->iobase = old_iobase;
+ uport->type = old_type;
+ uport->hub6 = old_hub6;
+ uport->iotype = old_iotype;
+ uport->regshift = old_shift;
+ uport->mapbase = old_mapbase;
+
+ if (old_type == PORT_UNKNOWN)
+ return retval;
+
+ retval = uport->ops->request_port(uport);
+ /* If we failed to restore the old settings, we fail like this. */
+ if (retval)
+ uport->type = PORT_UNKNOWN;
+
+ /* We failed anyway. */
+ return -EBUSY;
+}
+
static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
struct uart_state *state,
struct serial_struct *new_info)
@@ -847,7 +898,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
- int retval = 0;
+ int retval;
if (!uport)
return -EIO;
@@ -886,13 +937,10 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
if (!(uport->flags & UPF_FIXED_PORT)) {
unsigned int uartclk = new_info->baud_base * 16;
/* check needs to be done here before other settings made */
- if (uartclk == 0) {
- retval = -EINVAL;
- goto exit;
- }
+ if (uartclk == 0)
+ return -EINVAL;
}
if (!capable(CAP_SYS_ADMIN)) {
- retval = -EPERM;
if (change_irq || change_port ||
(new_info->baud_base != uport->uartclk / 16) ||
(close_delay != port->close_delay) ||
@@ -900,7 +948,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
(new_info->xmit_fifo_size &&
new_info->xmit_fifo_size != uport->fifosize) ||
(((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
- goto exit;
+ return -EPERM;
uport->flags = ((uport->flags & ~UPF_USR_MASK) |
(new_flags & UPF_USR_MASK));
uport->custom_divisor = new_info->custom_divisor;
@@ -910,30 +958,24 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
if (change_irq || change_port) {
retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
if (retval)
- goto exit;
+ return retval;
}
- /*
- * Ask the low level driver to verify the settings.
- */
- if (uport->ops->verify_port)
+ /* Ask the low level driver to verify the settings. */
+ if (uport->ops->verify_port) {
retval = uport->ops->verify_port(uport, new_info);
+ if (retval)
+ return retval;
+ }
if ((new_info->irq >= irq_get_nr_irqs()) || (new_info->irq < 0) ||
(new_info->baud_base < 9600))
- retval = -EINVAL;
-
- if (retval)
- goto exit;
+ return -EINVAL;
if (change_port || change_irq) {
- retval = -EBUSY;
-
- /*
- * Make sure that we are the sole user of this port.
- */
+ /* Make sure that we are the sole user of this port. */
if (tty_port_users(port) > 1)
- goto exit;
+ return -EBUSY;
/*
* We need to shutdown the serial port at the old
@@ -943,69 +985,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
if (change_port) {
- unsigned long old_iobase, old_mapbase;
- unsigned int old_type, old_iotype, old_hub6, old_shift;
-
- old_iobase = uport->iobase;
- old_mapbase = uport->mapbase;
- old_type = uport->type;
- old_hub6 = uport->hub6;
- old_iotype = uport->iotype;
- old_shift = uport->regshift;
-
- /*
- * Free and release old regions
- */
- if (old_type != PORT_UNKNOWN && uport->ops->release_port)
- uport->ops->release_port(uport);
-
- uport->iobase = new_port;
- uport->type = new_info->type;
- uport->hub6 = new_info->hub6;
- uport->iotype = new_info->io_type;
- uport->regshift = new_info->iomem_reg_shift;
- uport->mapbase = (unsigned long)new_info->iomem_base;
-
- /*
- * Claim and map the new regions
- */
- if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
- retval = uport->ops->request_port(uport);
- } else {
- /* Always success - Jean II */
- retval = 0;
- }
-
- /*
- * If we fail to request resources for the
- * new port, try to restore the old settings.
- */
- if (retval) {
- uport->iobase = old_iobase;
- uport->type = old_type;
- uport->hub6 = old_hub6;
- uport->iotype = old_iotype;
- uport->regshift = old_shift;
- uport->mapbase = old_mapbase;
-
- if (old_type != PORT_UNKNOWN) {
- retval = uport->ops->request_port(uport);
- /*
- * If we failed to restore the old settings,
- * we fail like this.
- */
- if (retval)
- uport->type = PORT_UNKNOWN;
-
- /*
- * We failed anyway.
- */
- retval = -EBUSY;
- }
-
- /* Added to return the correct error -Ram Gupta */
- goto exit;
- }
+ retval = uart_change_port(uport, new_info, new_port);
+ if (retval)
+ return retval;
}
if (change_irq)
@@ -1021,9 +1003,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
uport->fifosize = new_info->xmit_fifo_size;
check_and_exit:
- retval = 0;
if (uport->type == PORT_UNKNOWN)
- goto exit;
+ return 0;
+
if (tty_port_initialized(port)) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
@@ -1039,15 +1021,17 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
uart_change_line_settings(tty, state, NULL);
}
- } else {
- retval = uart_startup(tty, state, true);
- if (retval == 0)
- tty_port_set_initialized(port, true);
- if (retval > 0)
- retval = 0;
+
+ return 0;
}
- exit:
- return retval;
+
+ retval = uart_startup(tty, state, true);
+ if (retval < 0)
+ return retval;
+ if (retval == 0)
+ tty_port_set_initialized(port, true);
+
+ return 0;
}
static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
@@ -2365,9 +2349,9 @@ struct uart_match {
struct uart_driver *driver;
};
-static int serial_match_port(struct device *dev, void *data)
+static int serial_match_port(struct device *dev, const void *data)
{
- struct uart_match *match = data;
+ const struct uart_match *match = data;
struct tty_driver *tty_drv = match->driver->tty_driver;
dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) +
match->port->line;
@@ -3061,26 +3045,25 @@ static ssize_t console_store(struct device *dev,
if (ret)
return ret;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
uport = uart_port_check(state);
- if (uport) {
- oldconsole = uart_console_registered(uport);
- if (oldconsole && !newconsole) {
- ret = unregister_console(uport->cons);
- } else if (!oldconsole && newconsole) {
- if (uart_console(uport)) {
- uport->console_reinit = 1;
- register_console(uport->cons);
- } else {
- ret = -ENOENT;
- }
- }
- } else {
- ret = -ENXIO;
+ if (!uport)
+ return -ENXIO;
+
+ oldconsole = uart_console_registered(uport);
+ if (oldconsole && !newconsole) {
+ ret = unregister_console(uport->cons);
+ if (ret < 0)
+ return ret;
+ } else if (!oldconsole && newconsole) {
+ if (!uart_console(uport))
+ return -ENOENT;
+
+ uport->console_reinit = 1;
+ register_console(uport->cons);
}
- mutex_unlock(&port->mutex);
- return ret < 0 ? ret : count;
+ return count;
}
static DEVICE_ATTR_RO(uartclk);
@@ -3136,7 +3119,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
{
struct uart_state *state;
struct tty_port *port;
- int ret = 0;
struct device *tty_dev;
int num_groups;
@@ -3146,11 +3128,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
state = drv->state + uport->line;
port = &state->port;
- mutex_lock(&port->mutex);
- if (state->uart_port) {
- ret = -EINVAL;
- goto out;
- }
+ guard(mutex)(&port->mutex);
+ if (state->uart_port)
+ return -EINVAL;
/* Link the port to the driver state table and vice versa */
atomic_set(&state->refcount, 1);
@@ -3170,10 +3150,8 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->minor = drv->tty_driver->minor_start + uport->line;
uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
drv->tty_driver->name_base + uport->line);
- if (!uport->name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!uport->name)
+ return -ENOMEM;
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
@@ -3189,10 +3167,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups),
GFP_KERNEL);
- if (!uport->tty_groups) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!uport->tty_groups)
+ return -ENOMEM;
+
uport->tty_groups[0] = &tty_dev_attr_group;
if (uport->attr_group)
uport->tty_groups[1] = uport->attr_group;
@@ -3215,10 +3192,7 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->line);
}
- out:
- mutex_unlock(&port->mutex);
-
- return ret;
+ return 0;
}
/**
@@ -3384,7 +3358,7 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
struct serial_ctrl_device *ctrl_dev, *new_ctrl_dev = NULL;
int ret;
- mutex_lock(&port_mutex);
+ guard(mutex)(&port_mutex);
/*
* Prevent serial_port_runtime_resume() from trying to use the port
@@ -3396,10 +3370,8 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
ctrl_dev = serial_core_ctrl_find(drv, port->dev, port->ctrl_id);
if (!ctrl_dev) {
new_ctrl_dev = serial_core_ctrl_device_add(port);
- if (IS_ERR(new_ctrl_dev)) {
- ret = PTR_ERR(new_ctrl_dev);
- goto err_unlock;
- }
+ if (IS_ERR(new_ctrl_dev))
+ return PTR_ERR(new_ctrl_dev);
ctrl_dev = new_ctrl_dev;
}
@@ -3420,8 +3392,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
if (ret)
goto err_unregister_port_dev;
- mutex_unlock(&port_mutex);
-
return 0;
err_unregister_port_dev:
@@ -3430,9 +3400,6 @@ err_unregister_port_dev:
err_unregister_ctrl_dev:
serial_base_ctrl_device_remove(new_ctrl_dev);
-err_unlock:
- mutex_unlock(&port_mutex);
-
return ret;
}
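The serial_core cleanups rely on the scope-based helpers from <linux/cleanup.h>: guard(mutex) releases the lock on every return path, which is what lets the goto/unlock bookkeeping above collapse into direct returns. A minimal sketch:

static int set_flag_once(struct mutex *lock, bool *flag)
{
	guard(mutex)(lock);	/* unlocked automatically at any return */

	if (*flag)
		return -EBUSY;	/* early return, no explicit unlock */

	*flag = true;
	return 0;
}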
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index d35f1d24156c..2fc48cd63f6c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -173,6 +173,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
* The caller is responsible to initialize the following fields of the @port
* ->dev (must be valid)
* ->flags
+ * ->iobase
* ->mapbase
* ->mapsize
* ->regshift (if @use_defaults is false)
@@ -214,7 +215,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
/* Read the registers I/O access type (default: MMIO 8-bit) */
ret = device_property_read_u32(dev, "reg-io-width", &value);
if (ret) {
- port->iotype = UPIO_MEM;
+ port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
} else {
switch (value) {
case 1:
@@ -227,15 +228,16 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
break;
default:
- if (!use_defaults) {
- dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
- return -EINVAL;
- }
port->iotype = UPIO_UNKNOWN;
break;
}
}
+ if (!use_defaults && port->iotype == UPIO_UNKNOWN) {
+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ return -EINVAL;
+ }
+
/* Read the address mapping base offset (default: no offset) */
ret = device_property_read_u32(dev, "reg-offset", &value);
if (ret)
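With the added rule, a port that supplies an I/O port base but no "reg-io-width" property now defaults to port I/O instead of MMIO; the selection reduces to one line:

/* Sketch of the new default when "reg-io-width" is absent */
port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;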
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 924b803af440..b72c3bc19bfa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -165,6 +165,8 @@ struct sci_port {
static struct sci_port sci_ports[SCI_NPORTS];
static unsigned long sci_ports_in_use;
static struct uart_driver sci_uart_driver;
+static bool sci_uart_earlycon;
+static bool sci_uart_earlycon_dev_probing;
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
@@ -1700,8 +1702,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = sci_dma_rx_timer_fn;
+ hrtimer_setup(&s->rx_timer, sci_dma_rx_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->chan_rx_saved = s->chan_rx = chan;
@@ -3056,10 +3057,6 @@ static int sci_init_single(struct platform_device *dev,
ret = sci_init_clocks(sci_port, &dev->dev);
if (ret < 0)
return ret;
-
- port->dev = &dev->dev;
-
- pm_runtime_enable(&dev->dev);
}
port->type = p->type;
@@ -3086,11 +3083,6 @@ static int sci_init_single(struct platform_device *dev,
return 0;
}
-static void sci_cleanup_single(struct sci_port *port)
-{
- pm_runtime_disable(port->port.dev);
-}
-
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
static void serial_console_putchar(struct uart_port *port, unsigned char ch)
@@ -3260,8 +3252,6 @@ static void sci_remove(struct platform_device *dev)
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
- sci_cleanup_single(port);
-
if (port->port.fifosize > 1)
device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
@@ -3396,7 +3386,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
static int sci_probe_single(struct platform_device *dev,
unsigned int index,
struct plat_sci_port *p,
- struct sci_port *sciport)
+ struct sci_port *sciport,
+ struct resource *sci_res)
{
int ret;
@@ -3425,6 +3416,11 @@ static int sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
+ sciport->port.dev = &dev->dev;
+ ret = devm_pm_runtime_enable(&dev->dev);
+ if (ret)
+ return ret;
+
sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
if (IS_ERR(sciport->gpios))
return PTR_ERR(sciport->gpios);
@@ -3438,18 +3434,37 @@ static int sci_probe_single(struct platform_device *dev,
sciport->port.flags |= UPF_HARD_FLOW;
}
- ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
- if (ret) {
- sci_cleanup_single(sciport);
- return ret;
+ if (sci_uart_earlycon && sci_ports[0].port.mapbase == sci_res->start) {
+ /*
+ * In case:
+ * - this is the earlycon port (mapped at index 0 in sci_ports[]) and
+ * - it now maps to an alias other than zero and
+ * - the earlycon is still alive (e.g., "earlycon keep_bootcon" is
+ * available in bootargs)
+ *
+ * we need to avoid disabling clocks and PM domains through the runtime
+ * PM APIs called in __device_attach(). For this, increment the runtime
+ * PM reference counter (the clocks and PM domains were already enabled
+ * by the bootloader). Otherwise the earlycon may access the HW when it
+ * has no clocks enabled, leading to failures (infinite loop in
+ * sci_poll_put_char()).
+ */
+ pm_runtime_get_noresume(&dev->dev);
+
+ /*
+ * Skip cleaning up sci_port[0] in early_console_exit(); this
+ * port is the same as the earlycon one.
+ */
+ sci_uart_earlycon_dev_probing = true;
}
- return 0;
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
static int sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p;
+ struct resource *res;
struct sci_port *sp;
unsigned int dev_id;
int ret;
@@ -3479,9 +3494,29 @@ static int sci_probe(struct platform_device *dev)
}
sp = &sci_ports[dev_id];
+
+ /*
+ * In case:
+ * - the probed port alias is zero (as the one used by earlycon), and
+ * - the earlycon is still active (e.g., "earlycon keep_bootcon" in
+ * bootargs)
+ *
+ * defer the probe of this serial port. This is a debug scenario and
+ * the user must be aware of it.
+ *
+ * Except when the probed port is the same as the earlycon port.
+ */
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start)
+ return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n");
+
platform_set_drvdata(dev, sp);
- ret = sci_probe_single(dev, dev_id, p, sp);
+ ret = sci_probe_single(dev, dev_id, p, sp, res);
if (ret)
return ret;
@@ -3562,7 +3597,23 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
-static struct plat_sci_port port_cfg __initdata;
+static struct plat_sci_port port_cfg;
+
+static int early_console_exit(struct console *co)
+{
+ struct sci_port *sci_port = &sci_ports[0];
+
+ /*
+ * Clean the slot used by earlycon. A new SCI device might
+ * map to this slot.
+ */
+ if (!sci_uart_earlycon_dev_probing) {
+ memset(sci_port, 0, sizeof(*sci_port));
+ sci_uart_earlycon = false;
+ }
+
+ return 0;
+}
static int __init early_console_setup(struct earlycon_device *device,
int type)
@@ -3571,15 +3622,18 @@ static int __init early_console_setup(struct earlycon_device *device,
return -ENODEV;
device->port.type = type;
- memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
+ sci_ports[0].port = device->port;
port_cfg.type = type;
sci_ports[0].cfg = &port_cfg;
sci_ports[0].params = sci_probe_regmap(&port_cfg);
+ sci_uart_earlycon = true;
port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
sci_serial_out(&sci_ports[0].port, SCSCR,
SCSCR_RE | SCSCR_TE | port_cfg.scscr);
device->con->write = serial_console_write;
+ device->con->exit = early_console_exit;
+
return 0;
}
static int __init sci_early_console_setup(struct earlycon_device *device,
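The sh-sci earlycon registers a console ->exit() hook so the sci_ports[0] slot it borrowed can be reclaimed when the boot console goes away. A minimal sketch of wiring such a hook, with hypothetical names (serial_console_write is the driver's existing console write path):

static int example_earlycon_exit(struct console *co)
{
	/* release whatever the earlycon borrowed, unless a probed
	 * device has meanwhile taken ownership of it */
	return 0;
}

static int __init example_earlycon_setup(struct earlycon_device *device,
					 const char *options)
{
	device->con->write = serial_console_write;	/* as in the diff */
	device->con->exit = example_earlycon_exit;
	return 0;
}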
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 7dc254546075..1ec5d8c3aef8 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1051,14 +1051,14 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state)
stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
else
stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int stm32_usart_startup(struct uart_port *port)
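stm32 switches from raw spin_lock_irqsave(&port->lock, ...) to the uart_port_lock_irqsave() wrappers, which take the same lock but also cooperate with the console ownership machinery. The pattern, in a minimal sketch:

static void poke_registers(struct uart_port *port)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);	/* not spin_lock_irqsave() */
	/* ... touch port registers ... */
	uart_port_unlock_irqrestore(port, flags);
}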
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index beb151be4d32..fe457bf1e15b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -287,7 +287,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
continue;
}
- if (uart_handle_sysrq_char(port, data))
+ if (uart_prepare_sysrq_char(port, data))
continue;
if (is_rxbs_support) {
@@ -454,7 +454,7 @@ static void cdns_uart_handle_tx(void *dev_id)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
- cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
hrtimer_start(&cdns_uart->tx_timer,
ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
}
@@ -495,7 +495,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
if (!cdns_uart->rs485_tx_started) {
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback);
cdns_rs485_tx_setup(cdns_uart);
return hrtimer_start(&cdns_uart->tx_timer,
ms_to_ktime(port->rs485.delay_rts_before_send),
@@ -1380,9 +1380,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int imr, ctrl;
int locked = 1;
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
+ if (oops_in_progress)
locked = uart_port_trylock_irqsave(port, &flags);
else
uart_port_lock_irqsave(port, &flags);
@@ -1628,8 +1626,8 @@ static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
writel(val, port->membase + CDNS_UART_MODEMCR);
/* Timer setup */
- hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_setup(&cdns_uart->tx_timer, &cdns_rs485_tx_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Disable transmitter and set up Rx */
cdns_uart_stop_tx(port);
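The cdns conversion defers sysrq processing: uart_prepare_sysrq_char() only records the magic character while the port lock is held, and uart_unlock_and_check_sysrq() runs the handler after dropping it, which is why the console write path can lose its port->sysrq special case. A condensed sketch of an RX handler using the pair; the FIFO helper is hypothetical:

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	int ch;

	uart_port_lock(port);
	while ((ch = example_read_rx(port)) >= 0) {	/* hypothetical helper */
		if (uart_prepare_sysrq_char(port, ch))
			continue;	/* recorded; handled after unlock */
		/* ... push ch to the tty layer ... */
	}
	uart_unlock_and_check_sysrq(port);	/* may invoke the sysrq handler */
	return IRQ_HANDLED;
}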
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dcb1769c3625..449dbd216460 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2622,14 +2622,13 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
- char comm[TASK_COMM_LEN];
int flags;
flags = ss->flags & ASYNC_DEPRECATED;
if (flags)
pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
- __func__, get_task_comm(comm, current), flags);
+ __func__, current->comm, flags);
if (!tty->ops->set_serial)
return -ENOTTY;
@@ -3618,7 +3617,7 @@ void console_sysfs_notify(void)
sysfs_notify(&consdev->kobj, NULL, "active");
}
-static struct ctl_table tty_table[] = {
+static const struct ctl_table tty_table[] = {
{
.procname = "legacy_tiocsti",
.data = &tty_legacy_tiocsti,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 564341f1a74f..0bd6544e30a6 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -192,6 +192,20 @@ int set_selection_user(const struct tiocl_selection __user *sel,
if (copy_from_user(&v, sel, sizeof(*sel)))
return -EFAULT;
+ /*
+ * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
+ * use without CAP_SYS_ADMIN as they do not modify the selection.
+ */
+ switch (v.sel_mode) {
+ case TIOCL_SELCLEAR:
+ case TIOCL_SELPOINTER:
+ case TIOCL_SELMOUSEREPORT:
+ break;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ }
+
return set_selection_kernel(&v, tty);
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 96842ce817af..be5564ed8c01 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3345,8 +3345,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type) {
case TIOCL_SETSEL:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return set_selection_user(param, tty);
case TIOCL_PASTESEL:
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 796e37a1d859..3438269a5440 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -1439,6 +1439,7 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_dev_info *dev_info = &hba->dev_info;
struct scsi_device *sdev;
+ unsigned int memflags;
unsigned int rtt;
int ret;
@@ -1458,14 +1459,16 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
ufshcd_rpm_get_sync(hba);
+ memflags = memalloc_noio_save();
shost_for_each_device(sdev, hba->host)
- blk_mq_freeze_queue(sdev->request_queue);
+ blk_mq_freeze_queue_nomemsave(sdev->request_queue);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
shost_for_each_device(sdev, hba->host)
- blk_mq_unfreeze_queue(sdev->request_queue);
+ blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
+ memalloc_noio_restore(memflags);
ufshcd_rpm_put_sync(hba);
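Freezing the queues can trigger memory allocations that recurse back into block I/O; the _nomemsave/_nomemrestore freeze variants skip the per-queue NOIO handling so one explicit bracket can cover the whole frozen region. The shape, as a sketch:

unsigned int memflags = memalloc_noio_save();	/* allocations become NOIO */

shost_for_each_device(sdev, hba->host)
	blk_mq_freeze_queue_nomemsave(sdev->request_queue);

/* ... perform the attribute write while all queues are quiesced ... */

shost_for_each_device(sdev, hba->host)
	blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
memalloc_noio_restore(memflags);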
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 6c09d97ae006..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
@@ -216,6 +218,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
return;
bsg_remove_queue(hba->bsg_queue);
+ hba->bsg_queue = NULL;
device_del(bsg_dev);
put_device(bsg_dev);
@@ -257,6 +260,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
+ device_del(bsg_dev);
goto out;
}
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index a714dad82cd1..694ff7578fc1 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -17,20 +17,14 @@ static const struct ufs_crypto_alg_entry {
},
};
-static int ufshcd_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot)
+static void ufshcd_program_key(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
- int err = 0;
ufshcd_hold(hba);
- if (hba->vops && hba->vops->program_key) {
- err = hba->vops->program_key(hba, cfg, slot);
- goto out;
- }
-
/* Ensure that CFGE is cleared before programming the key */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
for (i = 0; i < 16; i++) {
@@ -43,17 +37,14 @@ static int ufshcd_program_key(struct ufs_hba *hba,
/* Dword 16 must be written last */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
-out:
ufshcd_release(hba);
- return err;
}
static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
@@ -61,7 +52,6 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
int i;
int cap_idx = -1;
union ufs_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
@@ -89,25 +79,25 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
memcpy(cfg.crypto_key, key->raw, key->size);
}
- err = ufshcd_program_key(hba, &cfg, slot);
+ ufshcd_program_key(hba, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
union ufs_crypto_cfg_entry cfg = {};
- return ufshcd_program_key(hba, &cfg, slot);
+ ufshcd_program_key(hba, &cfg, slot);
+ return 0;
}
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9c26e8767515..464f13da259a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -623,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -1447,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
@@ -1465,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1487,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1508,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1569,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1577,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1613,7 +1610,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1677,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1697,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1791,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
+ spin_lock_init(&hba->clk_scaling.lock);
+
hba->clk_scaling.workq = alloc_ordered_workqueue(
"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
@@ -1811,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1858,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1874,11 +1870,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
@@ -1907,17 +1903,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
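
ufshcd_hold() still drops clk_gating.lock around flush_work() because the
ungate work acquires the same lock; flushing while holding it would deadlock.
A reduced sketch of that drop-flush-retake pattern, with illustrative names
and an assumed CLKS_ON state value:

	enum { CLKS_OFF, CLKS_ON };	/* illustrative stand-ins */

	struct gate {
		spinlock_t lock;
		struct work_struct ungate_work;
		int state;
	};

	static void wait_until_ungated(struct gate *g)
	{
		unsigned long flags;

		spin_lock_irqsave(&g->lock, flags);
		while (g->state != CLKS_ON) {
			/* the work item takes g->lock itself, so drop it first */
			spin_unlock_irqrestore(&g->lock, flags);
			flush_work(&g->ungate_work);
			spin_lock_irqsave(&g->lock, flags);
		}
		spin_unlock_irqrestore(&g->lock, flags);
	}
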
@@ -1925,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * If a request to turn the clocks back on raced with this work, the
+ * gating state will already be REQ_CLKS_ON. In that case, save time
+ * by skipping the actual gating and exit after setting the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1957,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work)
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1977,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2013,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
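
The guard() form used in ufshcd_release() differs from scoped_guard(): it
binds the lock to the enclosing function scope, so it is released
automatically on every return, which is what lets the explicit unlock and the
flags variable disappear. Sketch with illustrative names:

	struct gate_state {
		spinlock_t lock;
		unsigned long delay_ms;
	};

	static void set_delay(struct gate_state *gs, unsigned long value)
	{
		guard(spinlock_irqsave)(&gs->lock);	/* held until return */
		gs->delay_ms = value;
	}
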
@@ -2032,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2064,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2072,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2082,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
@@ -2154,19 +2149,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2182,18 +2175,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2201,7 +2193,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2418,12 +2409,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
else
hba->lsdb_sup = true;
- if (!hba->mcq_sup)
- return 0;
-
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
- hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
- hba->mcq_capabilities);
return 0;
}
@@ -3118,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -4812,20 +4803,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
- unsigned long flags;
int err;
- /*
- * Obtain the host lock to prevent that the controller is disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_disable_irq(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
+ ufshcd_enable_irq(hba);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -5195,12 +5180,12 @@ set_qdepth:
}
/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * ufshcd_sdev_init - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: 0 (success).
*/
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
+static int ufshcd_sdev_init(struct scsi_device *sdev)
{
struct ufs_hba *hba;
@@ -5243,14 +5228,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_device_configure - adjust SCSI device configurations
+ * ufshcd_sdev_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
* @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ufshcd_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
@@ -5281,10 +5266,10 @@ static int ufshcd_device_configure(struct scsi_device *sdev,
}
/**
- * ufshcd_slave_destroy - remove SCSI device configurations
+ * ufshcd_sdev_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
+static void ufshcd_sdev_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
@@ -5994,24 +5979,6 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
-}
-
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6232,7 +6199,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
ufs_debugfs_exception_event(hba, status);
}
@@ -8133,31 +8100,6 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
-static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u32 ext_ufs_feature;
- u32 ext_iid_en = 0;
- int err;
-
- /* Only UFS-4.0 and above may support EXT_IID */
- if (dev_info->wspecversion < 0x400)
- goto out;
-
- ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
- goto out;
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
- if (err)
- dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
-
-out:
- dev_info->b_ext_iid_en = ext_iid_en;
-}
-
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8259,7 +8201,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -8351,9 +8295,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
- if (hba->ext_iid_sup)
- ufshcd_ext_iid_probe(hba, desc_buf);
-
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8941,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -8967,9 +8908,9 @@ static const struct scsi_host_template ufshcd_driver_template = {
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
- .slave_alloc = ufshcd_slave_alloc,
- .device_configure = ufshcd_device_configure,
- .slave_destroy = ufshcd_slave_destroy,
+ .sdev_init = ufshcd_sdev_init,
+ .sdev_configure = ufshcd_sdev_configure,
+ .sdev_destroy = ufshcd_sdev_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
@@ -9155,7 +9096,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9205,12 +9145,11 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
@@ -10293,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10321,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10347,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to add ufshcd dealloc action\n");
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
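
devm_add_action_or_reset() is what makes a dedicated dealloc call
unnecessary: it registers a callback with devres and, if the registration
itself fails, runs the callback immediately so the reference can never leak.
The pattern in isolation, with illustrative names:

	static void my_release(void *data)
	{
		kfree(data);
	}

	static int my_probe(struct device *dev)
	{
		void *buf = kzalloc(64, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/*
		 * my_release(buf) runs at device teardown; if registration
		 * fails it runs right away, so no error-path kfree() is
		 * needed here.
		 */
		return devm_add_action_or_reset(dev, my_release, buf);
	}
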
@@ -10411,7 +10361,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
@@ -10476,6 +10425,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10589,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM if
- * not set by the host controller drivers.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- if (!hba->rpm_lvl)
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- if (!hba->spm_lvl)
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 91e94fe990b4..2cfa1774944b 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -112,11 +112,18 @@ static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
qcom_ice_enable(host->ice);
}
+static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */
+
static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
struct ufs_hba *hba = host->hba;
+ struct blk_crypto_profile *profile = &hba->crypto_profile;
struct device *dev = hba->dev;
struct qcom_ice *ice;
+ union ufs_crypto_capabilities caps;
+ union ufs_crypto_cap_entry cap;
+ int err;
+ int i;
ice = of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
@@ -128,8 +135,38 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
return PTR_ERR_OR_ZERO(ice);
host->ice = ice;
- hba->caps |= UFSHCD_CAP_CRYPTO;
+ /* Initialize the blk_crypto_profile */
+
+ caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
+
+ /* The number of keyslots supported is (CFGC+1) */
+ err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
+ if (err)
+ return err;
+
+ profile->ll_ops = ufs_qcom_crypto_ops;
+ profile->max_dun_bytes_supported = 8;
+ profile->dev = dev;
+
+ /*
+ * Currently this driver only supports AES-256-XTS. All known versions
+ * of ICE support it, but to be safe make sure it is really declared in
+ * the crypto capability registers. The crypto capability registers
+ * also give the supported data unit size(s).
+ */
+ for (i = 0; i < caps.num_crypto_cap; i++) {
+ cap.reg_val = cpu_to_le32(ufshcd_readl(hba,
+ REG_UFS_CRYPTOCAP +
+ i * sizeof(__le32)));
+ if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS &&
+ cap.key_size == UFS_CRYPTO_KEY_SIZE_256)
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
+ cap.sdus_mask * 512;
+ }
+
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE;
return 0;
}
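
The sdus_mask arithmetic above relies on two matching encodings: bit i of the
UFS supported-data-unit-size field means a data unit of 512 << i bytes, and
blk-crypto's modes_supported[] is a bitmask keyed by the data unit size in
bytes, so multiplying the hardware mask by 512 lands every bit in the right
position. A worked example under those assumptions:

	/*
	 * sdus_mask = 0x3 (512- and 1024-byte data units supported)
	 * 0x3 * 512 = 0x600 = BIT(9) | BIT(10), i.e. 512 and 1024 bytes
	 */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
		cap.sdus_mask * 512;
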
@@ -149,34 +186,49 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
return 0;
}
-static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg,
- int slot)
+static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- union ufs_crypto_cap_entry cap;
- bool config_enable =
- cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
+ int err;
/* Only AES-256-XTS has been tested so far. */
- cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
- cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
+ if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
return -EOPNOTSUPP;
- if (config_enable)
- return qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
- else
- return qcom_ice_evict_key(host->ice, slot);
+ ufshcd_hold(hba);
+ err = qcom_ice_program_key(host->ice,
+ QCOM_ICE_CRYPTO_ALG_AES_XTS,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256,
+ key->raw,
+ key->crypto_cfg.data_unit_size / 512,
+ slot);
+ ufshcd_release(hba);
+ return err;
}
-#else
+static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ ufshcd_hold(hba);
+ err = qcom_ice_evict_key(host->ice, slot);
+ ufshcd_release(hba);
+ return err;
+}
-#define ufs_qcom_ice_program_key NULL
+static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
+ .keyslot_program = ufs_qcom_ice_keyslot_program,
+ .keyslot_evict = ufs_qcom_ice_keyslot_evict,
+};
+
+#else
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -1730,15 +1782,19 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
+struct ufs_qcom_irq {
+ unsigned int irq;
+ unsigned int idx;
+ struct ufs_hba *hba;
+};
+
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct msi_desc *desc = data;
- struct device *dev = msi_desc_to_dev(desc);
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 id = desc->msi_index;
- struct ufs_hw_queue *hwq = &hba->uhq[id];
+ struct ufs_qcom_irq *qi = data;
+ struct ufs_hba *hba = qi->hba;
+ struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
- ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
@@ -1747,8 +1803,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
+ struct ufs_qcom_irq *qi;
int nr_irqs, ret;
if (host->esi_enabled)
@@ -1759,47 +1814,47 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- return ret;
+ goto cleanup;
}
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- ret = devm_request_irq(hba->dev, desc->irq,
- ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", desc);
+ for (int idx = 0; idx < nr_irqs; idx++) {
+ qi[idx].irq = msi_get_virq(hba->dev, idx);
+ qi[idx].idx = idx;
+ qi[idx].hba = hba;
+
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, desc->irq, ret);
- failed_desc = desc;
- break;
+ __func__, qi[idx].irq, ret);
+ qi[idx].irq = 0;
+ goto cleanup;
}
}
- msi_unlock_descs(hba->dev);
- if (ret) {
- /* Rewind */
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (desc == failed_desc)
- break;
- devm_free_irq(hba->dev, desc->irq, hba);
- }
- msi_unlock_descs(hba->dev);
- platform_device_msi_free_irqs_all(hba->dev);
- } else {
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0)
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
}
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
+ return 0;
+cleanup:
+ for (int idx = 0; qi[idx].irq; idx++)
+ devm_free_irq(hba->dev, qi[idx].irq, qi + idx);
+ platform_device_msi_free_irqs_all(hba->dev);
+ devm_kfree(hba->dev, qi);
return ret;
}
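
Two details make this unwind safe: devm_kcalloc() zero-fills the array and
the failing slot's ->irq is reset to 0 before the goto, so a zero entry
terminates the loop after exactly the vectors that were requested; and the
dev_id passed to devm_free_irq() must be the same qi + idx cookie given to
devm_request_irq(), since freeing a shared IRQ handler matches on it:

	for (int idx = 0; qi[idx].irq; idx++)
		devm_free_irq(hba->dev, qi[idx].irq, qi + idx);
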
@@ -1826,7 +1881,6 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
- .program_key = ufs_qcom_ice_program_key,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index ea39c5d5b8cf..9cfcaad23cf9 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
}
/**
@@ -605,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 505572d4fa87..ffe5d1d2b215 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
err = ufshcd_alloc_host(dev, &hba);
if (err) {
dev_err(dev, "Allocation failed\n");
- goto out;
+ return err;
}
hba->vops = vops;
@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
if (err) {
dev_err(dev, "%s: clock parse failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_parse_regulator_info(hba);
if (err) {
dev_err(dev, "%s: regulator init failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
ufshcd_init_lanes_per_dir(hba);
@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_parse_operating_points(hba);
if (err) {
dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
err);
- goto dealloc_host;
+ return err;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 004a549c6c7d..d93ed4e86a17 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -565,7 +565,7 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
- ret = -EIO;
+ ret = EPOLLERR;
mutex_unlock(&idev->info_lock);
if (ret)
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index c70dd81bfc61..31aa75110ba5 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -167,6 +167,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
}
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
+ if (!uioinfo->name)
+ return -ENOMEM;
uioinfo->version = "devicetree";
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 3976360d0096..1b19b5647495 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -296,51 +296,51 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
- pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
- if (pdata->recv_buf == NULL) {
- ret = -ENOMEM;
- goto fail_free_ring;
+ if (channel->device_id == HV_NIC) {
+ pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
+ if (!pdata->recv_buf) {
+ ret = -ENOMEM;
+ goto fail_free_ring;
+ }
+
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
+ RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+ if (ret) {
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
+ goto fail_close;
+ }
+
+ /* put Global Physical Address Label in name */
+ snprintf(pdata->recv_name, sizeof(pdata->recv_name),
+ "recv:%u", pdata->recv_gpadl.gpadl_handle);
+ pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
+ pdata->info.mem[RECV_BUF_MAP].addr = (uintptr_t)pdata->recv_buf;
+ pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
+ pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+
+ pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
+ if (!pdata->send_buf) {
+ ret = -ENOMEM;
+ goto fail_close;
+ }
+
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
+ SEND_BUFFER_SIZE, &pdata->send_gpadl);
+ if (ret) {
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
+ goto fail_close;
+ }
+
+ snprintf(pdata->send_name, sizeof(pdata->send_name),
+ "send:%u", pdata->send_gpadl.gpadl_handle);
+ pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
+ pdata->info.mem[SEND_BUF_MAP].addr = (uintptr_t)pdata->send_buf;
+ pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
+ pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
}
- ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
- RECV_BUFFER_SIZE, &pdata->recv_gpadl);
- if (ret) {
- if (!pdata->recv_gpadl.decrypted)
- vfree(pdata->recv_buf);
- goto fail_close;
- }
-
- /* put Global Physical Address Label in name */
- snprintf(pdata->recv_name, sizeof(pdata->recv_name),
- "recv:%u", pdata->recv_gpadl.gpadl_handle);
- pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
- pdata->info.mem[RECV_BUF_MAP].addr
- = (uintptr_t)pdata->recv_buf;
- pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
- pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
- pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
- if (pdata->send_buf == NULL) {
- ret = -ENOMEM;
- goto fail_close;
- }
-
- ret = vmbus_establish_gpadl(channel, pdata->send_buf,
- SEND_BUFFER_SIZE, &pdata->send_gpadl);
- if (ret) {
- if (!pdata->send_gpadl.decrypted)
- vfree(pdata->send_buf);
- goto fail_close;
- }
-
- snprintf(pdata->send_name, sizeof(pdata->send_name),
- "send:%u", pdata->send_gpadl.gpadl_handle);
- pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
- pdata->info.mem[SEND_BUF_MAP].addr
- = (uintptr_t)pdata->send_buf;
- pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
- pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
pdata->info.priv = pdata;
pdata->device = dev;
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 0dd85d2635b9..47d06af33747 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
- struct usb_endpoint_descriptor *in, *out;
+ static const u8 ep_addrs[] = {
+ CXACRU_EP_CMD + USB_DIR_IN,
+ CXACRU_EP_CMD + USB_DIR_OUT,
+ 0};
int ret;
/* instance init */
@@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
}
if (usb_endpoint_xfer_int(&cmd_ep->desc))
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- NULL, NULL, &in, &out);
+ ret = usb_check_int_endpoints(intf, ep_addrs);
else
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- &in, &out, NULL, NULL);
+ ret = usb_check_bulk_endpoints(intf, ep_addrs);
- if (ret) {
+ if (!ret) {
usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
ret = -ENODEV;
goto fail;
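
usb_check_int_endpoints() and usb_check_bulk_endpoints() take a
zero-terminated list of endpoint addresses and return true only when every
listed endpoint exists on the interface with the expected transfer type,
hence the inverted error test above compared with
usb_find_common_endpoints(). Usage sketch with hypothetical endpoint numbers:

	static const u8 eps[] = {
		0x01 | USB_DIR_IN,	/* bulk IN at address 1 */
		0x01,			/* bulk OUT at address 1 */
		0			/* terminator */
	};

	if (!usb_check_bulk_endpoints(intf, eps))
		return -ENODEV;		/* required endpoints missing */
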
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 4a3f0f958256..97edf767ecee 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>
@@ -1671,12 +1672,12 @@ static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
"SupDir IN: %s, OUT: %s\n",
pep->name, 1024,
- (pep->endpoint.caps.type_control) ? "yes" : "no",
- (pep->endpoint.caps.type_int) ? "yes" : "no",
- (pep->endpoint.caps.type_bulk) ? "yes" : "no",
- (pep->endpoint.caps.type_iso) ? "yes" : "no",
- (pep->endpoint.caps.dir_in) ? "yes" : "no",
- (pep->endpoint.caps.dir_out) ? "yes" : "no");
+ str_yes_no(pep->endpoint.caps.type_control),
+ str_yes_no(pep->endpoint.caps.type_int),
+ str_yes_no(pep->endpoint.caps.type_bulk),
+ str_yes_no(pep->endpoint.caps.type_iso),
+ str_yes_no(pep->endpoint.caps.dir_in),
+ str_yes_no(pep->endpoint.caps.dir_out));
INIT_LIST_HEAD(&pep->pending_list);
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 465e9267b49c..98980a23e1c2 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -529,9 +529,7 @@ int cdns_resume(struct cdns *cdns)
int ret = 0;
if (cdns_power_is_lost(cdns)) {
- if (cdns->role_sw) {
- cdns->role = cdns_role_get(cdns->role_sw);
- } else {
+ if (!cdns->role_sw) {
real_role = cdns_hw_role_state_machine(cdns);
if (real_role != cdns->role) {
ret = cdns_hw_role_switch(cdns);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f2801700be8e..1a7fc638213e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -370,25 +370,29 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(data->pinctrl) == -ENODEV)
data->pinctrl = NULL;
- else if (IS_ERR(data->pinctrl))
- return dev_err_probe(dev, PTR_ERR(data->pinctrl),
+ else if (IS_ERR(data->pinctrl)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->pinctrl),
"pinctrl get failed\n");
+ goto err_put;
+ }
data->hsic_pad_regulator =
devm_regulator_get_optional(dev, "hsic");
if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
/* no pad regulator is needed */
data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator))
- return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
"Get HSIC pad regulator error\n");
+ goto err_put;
+ }
if (data->hsic_pad_regulator) {
ret = regulator_enable(data->hsic_pad_regulator);
if (ret) {
dev_err(dev,
"Failed to enable HSIC pad regulator\n");
- return ret;
+ goto err_put;
}
}
}
@@ -402,13 +406,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_idle lookup failed, err=%ld\n",
PTR_ERR(pinctrl_hsic_idle));
- return PTR_ERR(pinctrl_hsic_idle);
+ ret = PTR_ERR(pinctrl_hsic_idle);
+ goto err_put;
}
ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle);
if (ret) {
dev_err(dev, "hsic_idle select failed, err=%d\n", ret);
- return ret;
+ goto err_put;
}
data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl,
@@ -417,7 +422,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_active lookup failed, err=%ld\n",
PTR_ERR(data->pinctrl_hsic_active));
- return PTR_ERR(data->pinctrl_hsic_active);
+ ret = PTR_ERR(data->pinctrl_hsic_active);
+ goto err_put;
}
}
@@ -527,6 +533,8 @@ disable_hsic_regulator:
if (pdata.flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
+err_put:
+ put_device(data->usbmisc_data->dev);
return ret;
}
@@ -551,6 +559,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
+ put_device(data->usbmisc_data->dev);
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 0cce19208370..ced6076a8248 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
@@ -56,7 +57,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
if (ret) {
dev_err(dev,
"Failed to %s vbus regulator, ret=%d\n",
- enable ? "enable" : "disable", ret);
+ str_enable_disable(enable), ret);
return ret;
}
priv->enabled = enable;
@@ -256,8 +257,14 @@ static int ci_ehci_hub_control(
struct device *dev = hcd->self.controller;
struct ci_hdrc *ci = dev_get_drvdata(dev);
- port_index = wIndex & 0xff;
- port_index -= (port_index > 0);
+ /*
+ * Avoid out-of-bounds values while calculating the port index
+ * from wIndex. The compiler doesn't like pointers to invalid
+ * addresses, even if they are never used.
+ */
+ port_index = (wIndex - 1) & 0xff;
+ if (port_index >= HCS_N_PORTS_MAX)
+ port_index = 0;
status_reg = &ehci->regs->port_status[port_index];
spin_lock_irqsave(&ehci->lock, flags);
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index c17516c29b63..a093544482d5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -424,8 +424,7 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
/* Initialize timers */
static int ci_otg_init_timers(struct ci_hdrc *ci)
{
- hrtimer_init(&ci->otg_fsm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ci->otg_fsm_hrtimer.function = ci_otg_hrtimer_func;
+ hrtimer_setup(&ci->otg_fsm_hrtimer, ci_otg_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6b37d1c47fce..c2ecfa3c8349 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
- struct usb_cdc_notification *dr = urb->transfer_buffer;
+ struct usb_cdc_notification *dr;
unsigned int current_size = urb->actual_length;
unsigned int expected_size, copy_size, alloc_size;
int retval;
@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
usb_mark_last_busy(acm->dev);
- if (acm->nb_index)
+ if (acm->nb_index == 0) {
+ /*
+ * The first chunk of a message must contain at least the
+ * notification header with the length field, otherwise we
+ * can't get an expected_size.
+ */
+ if (current_size < sizeof(struct usb_cdc_notification)) {
+ dev_dbg(&acm->control->dev, "urb too short\n");
+ goto exit;
+ }
+ dr = urb->transfer_buffer;
+ } else {
dr = (struct usb_cdc_notification *)acm->notification_buffer;
-
+ }
/* size = notification-header + (optional) data */
expected_size = sizeof(struct usb_cdc_notification) +
le16_to_cpu(dr->wLength);
- if (current_size < expected_size) {
+ if (acm->nb_index != 0 || current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
u8 *new_buffer;
@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
- { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 5a2e43331064..e2527faa6592 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -87,7 +87,7 @@
/* Get two-int array: [0]=vendor ID, [1]=product ID: */
#define LPIOC_GET_VID_PID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_VID_PID, len)
/* Perform class specific soft reset */
-#define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0);
+#define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0)
/*
* A DEVICE_ID string may include the printer's serial number.
@@ -1337,11 +1337,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+
/* Don't unnecessarily set the interface if there's a single alt. */
if (usblp->intf->num_altsetting > 1) {
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
if (r < 0) {
printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 871cf199b6bf..fc0845f681be 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -41,6 +41,12 @@ const char *usb_ep_type_string(int ep_type)
}
EXPORT_SYMBOL_GPL(usb_ep_type_string);
+/**
+ * usb_otg_state_string() - returns human readable name of OTG state.
+ * @state: the OTG state to return the human readable name of. If it's not
+ * any of the states defined in usb_otg_state enum, 'UNDEFINED' will be
+ * returned.
+ */
const char *usb_otg_state_string(enum usb_otg_state state)
{
static const char *const names[] = {
@@ -179,6 +185,14 @@ static const char *const usb_dr_modes[] = {
[USB_DR_MODE_OTG] = "otg",
};
+/**
+ * usb_get_dr_mode_from_string() - Get dual role mode for given string
+ * @str: String to find the corresponding dual role mode for
+ *
+ * This function performs a lookup for the given string and returns the
+ * corresponding enum usb_dr_mode. If no match for the string could be found,
+ * 'USB_DR_MODE_UNKNOWN' is returned.
+ */
static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
{
int ret;
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index c84b4a700084..aa710b50791b 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/usb/role.h>
#define USB_GPIO_DEB_MS 20 /* ms */
@@ -111,7 +112,7 @@ static void usb_conn_detect_cable(struct work_struct *work)
if (info->vbus)
dev_dbg(info->dev, "vbus regulator is %s\n",
- regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
+ str_enabled_disabled(regulator_is_enabled(info->vbus)));
power_supply_changed(info->charger);
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25a00f974934..f7bf8d1de3ad 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -9,6 +9,7 @@
#include <linux/usb/quirks.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/device.h>
#include <asm/byteorder.h>
#include "usb.h"
@@ -18,12 +19,6 @@
#define USB_MAXCONFIG 8 /* Arbitrary limit */
-
-static inline const char *plural(int n)
-{
- return (n == 1 ? "" : "s");
-}
-
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
@@ -484,7 +479,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "endpoint");
+ n, str_plural(n), "endpoint");
return retval;
skip_to_next_endpoint_or_interface_descriptor:
@@ -563,7 +558,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
alt->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "interface");
+ n, str_plural(n), "interface");
buffer += i;
size -= i;
@@ -605,7 +600,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
dev_notice(ddev, "config %d interface %d altsetting %d has %d "
"endpoint descriptor%s, different from the interface "
"descriptor's value: %d\n",
- cfgno, inum, asnum, n, plural(n), num_ep_orig);
+ cfgno, inum, asnum, n, str_plural(n), num_ep_orig);
return buffer - buffer0;
skip_to_next_interface_descriptor:
@@ -664,7 +659,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if (size2 < sizeof(struct usb_descriptor_header)) {
dev_notice(ddev, "config %d descriptor has %d excess "
"byte%s, ignoring\n",
- cfgno, size2, plural(size2));
+ cfgno, size2, str_plural(size2));
break;
}
@@ -754,7 +749,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if (n != nintf)
dev_notice(ddev, "config %d has %d interface%s, different from "
"the descriptor's value: %d\n",
- cfgno, n, plural(n), nintf_orig);
+ cfgno, n, str_plural(n), nintf_orig);
else if (n == 0)
dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
config->desc.bNumInterfaces = nintf = n;
@@ -798,7 +793,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
config->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "configuration");
+ n, str_plural(n), "configuration");
buffer += i;
size -= i;
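
str_plural() from <linux/string_choices.h> is a drop-in replacement for the
local plural() helpers removed here; per its mainline definition it returns
"" for a count of one and "s" otherwise:

	static inline const char *str_plural(size_t num)
	{
		return num == 1 ? "" : "s";
	}

	/* e.g. "skipped 1 descriptor" vs "skipped 3 descriptors" */
	dev_dbg(ddev, "skipped %d descriptor%s\n", n, str_plural(n));
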
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f203fdbfb6f6..460d4dde5994 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1086,15 +1086,14 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
pr_info("%s: registered new interface driver %s\n",
usbcore_name, new_driver->name);
-out:
- return retval;
+ return 0;
out_newid:
driver_unregister(&new_driver->driver);
-
+out:
pr_err("%s: error %d registering interface driver %s\n",
usbcore_name, retval, new_driver->name);
- goto out;
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_register_driver);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index b134bff5c3fe..9c6ae5e1198b 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -21,14 +21,10 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/string_choices.h>
#include <uapi/linux/usb/audio.h>
#include "usb.h"
-static inline const char *plural(int n)
-{
- return (n == 1 ? "" : "s");
-}
-
static int is_rndis(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_COMM
@@ -194,18 +190,18 @@ int usb_choose_configuration(struct usb_device *udev)
if (insufficient_power > 0)
dev_info(&udev->dev, "rejected %d configuration%s "
"due to insufficient available bus power\n",
- insufficient_power, plural(insufficient_power));
+ insufficient_power, str_plural(insufficient_power));
if (best) {
i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
- i, num_configs, plural(num_configs));
+ i, num_configs, str_plural(num_configs));
} else {
i = -1;
dev_warn(&udev->dev,
"no configuration chosen from %d choice%s\n",
- num_configs, plural(num_configs));
+ num_configs, str_plural(num_configs));
}
return i;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a08f3f228e6d..56b534f59907 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -422,7 +422,12 @@ static int suspend_common(struct device *dev, pm_message_t msg)
bool do_wakeup;
int retval;
- do_wakeup = PMSG_IS_AUTO(msg) ? true : device_may_wakeup(dev);
+ if (PMSG_IS_AUTO(msg))
+ do_wakeup = true;
+ else if (PMSG_NO_WAKEUP(msg))
+ do_wakeup = false;
+ else
+ do_wakeup = device_may_wakeup(dev);
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
@@ -521,6 +526,11 @@ static int hcd_pci_suspend(struct device *dev)
return suspend_common(dev, PMSG_SUSPEND);
}
+static int hcd_pci_freeze(struct device *dev)
+{
+ return suspend_common(dev, PMSG_FREEZE);
+}
+
static int hcd_pci_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -590,6 +600,7 @@ static int hcd_pci_restore(struct device *dev)
#else
#define hcd_pci_suspend NULL
+#define hcd_pci_freeze NULL
#define hcd_pci_suspend_noirq NULL
#define hcd_pci_poweroff_late NULL
#define hcd_pci_resume_noirq NULL
@@ -624,7 +635,7 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = hcd_pci_suspend,
+ .freeze = hcd_pci_freeze,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
.thaw = hcd_pci_resume,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0b2490347b9f..a75cf1f6d741 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -415,7 +415,7 @@ ascii2desc(char const *s, u8 *buf, unsigned len)
static unsigned
rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
- char buf[100];
+ char buf[160];
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4b93c0bd1d4b..dcba4281ea48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
@@ -1496,7 +1497,7 @@ static int hub_configure(struct usb_hub *hub,
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
- (maxchild == 1) ? "" : "s");
+ str_plural(maxchild));
hub->ports = kcalloc(maxchild, sizeof(struct usb_port *), GFP_KERNEL);
if (!hub->ports) {
@@ -1848,6 +1849,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
hdev = interface_to_usbdev(intf);
/*
+ * The USB 2.0 spec prohibits hubs from having more than one
+ * configuration or interface, and we rely on this prohibition.
+ * Refuse to accept a device that violates it.
+ */
+ if (hdev->descriptor.bNumConfigurations > 1 ||
+ hdev->actconfig->desc.bNumInterfaces > 1) {
+ dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
+ return -EINVAL;
+ }
+
+ /*
* Set default autosuspend delay as 0 to speedup bus suspend,
* based on the below considerations:
*
@@ -2663,13 +2675,13 @@ int usb_new_device(struct usb_device *udev)
err = sysfs_create_link(&udev->dev.kobj,
&port_dev->dev.kobj, "port");
if (err)
- goto fail;
+ goto out_del_dev;
err = sysfs_create_link(&port_dev->dev.kobj,
&udev->dev.kobj, "device");
if (err) {
sysfs_remove_link(&udev->dev.kobj, "port");
- goto fail;
+ goto out_del_dev;
}
if (!test_and_set_bit(port1, hub->child_usage_bits))
@@ -2683,6 +2695,8 @@ int usb_new_device(struct usb_device *udev)
pm_runtime_put_sync_autosuspend(&udev->dev);
return err;
+out_del_dev:
+ device_del(&udev->dev);
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
pm_runtime_disable(&udev->dev);
@@ -4137,14 +4151,14 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
break;
default:
dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
if (udev->state != USB_STATE_CONFIGURED) {
dev_dbg(&udev->dev, "%s: Can't %s %s state "
"for unconfigured device.\n",
- __func__, enable ? "enable" : "disable",
+ __func__, str_enable_disable(enable),
usb3_lpm_names[state]);
return 0;
}
@@ -4170,8 +4184,7 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
}
if (ret < 0) {
dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
- enable ? "Enable" : "Disable",
- usb3_lpm_names[state]);
+ str_enable_disable(enable), usb3_lpm_names[state]);
return -EBUSY;
}
return 0;
@@ -4696,7 +4709,6 @@ void usb_ep0_reinit(struct usb_device *udev)
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
@@ -4802,7 +4814,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
/* Start with invalid values in case the transfer fails */
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
- rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, size,
@@ -6054,6 +6066,36 @@ void usb_hub_cleanup(void)
} /* usb_hub_cleanup() */
/**
+ * hub_hc_release_resources - clear resources used by host controller
+ * @udev: pointer to device being released
+ *
+ * Context: task context, might sleep
+ *
+ * This function releases host controller resources in the correct order
+ * before any operation is performed on a resuming USB device. Resources
+ * allocated for devices in the tree must be released starting from the
+ * last USB device in the tree and working toward the root hub. The
+ * function is used only while resuming a device that requires
+ * reinitialization, that is, when the udev->reset_resume flag is set.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ */
+static void hub_hc_release_resources(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int i;
+
+ /* Release resources for all children before this device */
+ for (i = 0; i < udev->maxchild; i++)
+ if (hub->ports[i]->child)
+ hub_hc_release_resources(hub->ports[i]->child);
+
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
+}
+
+/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
@@ -6117,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ if (udev->reset_resume)
+ hub_hc_release_resources(udev);
+
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
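The leaf-to-root ordering described in the hub_hc_release_resources() kernel-doc above is plain post-order recursion. A standalone sketch with a hypothetical node type (not the driver's structures):

struct node {
	struct node *child[8];
	int nchild;
};

static void release_subtree(struct node *n, void (*release)(struct node *))
{
	int i;

	/* Post-order: children are released before their parent, so the
	 * deepest devices are torn down first and the root hub last. */
	for (i = 0; i < n->nchild; i++)
		if (n->child[i])
			release_subtree(n->child[i], release);
	release(n);
}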
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 45d7af00f8d1..f54198171b6a 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -9,6 +9,7 @@
#include <linux/kstrtox.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
@@ -25,7 +26,7 @@ static ssize_t early_stop_show(struct device *dev,
{
struct usb_port *port_dev = to_usb_port(dev);
- return sysfs_emit(buf, "%s\n", port_dev->early_stop ? "yes" : "no");
+ return sysfs_emit(buf, "%s\n", str_yes_no(port_dev->early_stop));
}
static ssize_t early_stop_store(struct device *dev, struct device_attribute *attr,
@@ -453,10 +454,11 @@ static int usb_port_runtime_suspend(struct device *dev)
static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *udev = port_dev->child;
- if (port_dev->child) {
- usb_disable_usb2_hardware_lpm(port_dev->child);
- usb_unlocked_disable_lpm(port_dev->child);
+ if (udev && !udev->port_is_suspended) {
+ usb_disable_usb2_hardware_lpm(udev);
+ usb_unlocked_disable_lpm(udev);
}
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 13171454f959..8efbacc5bc34 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Prolific Single-LUN Mass Storage Card Reader */
+ { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_NO_LPM },
+
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -394,6 +398,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* TOSHIBA TransMemory-Mx */
+ { USB_DEVICE(0x0930, 0x1408), .driver_info = USB_QUIRK_NO_LPM },
+
/* NVIDIA Jetson devices in Force Recovery mode */
{ USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -432,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
+ { USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -522,6 +532,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Teclast disk */
+ { USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
+
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index b4cba23831ac..23f3cb1989f4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -854,7 +854,7 @@ static const struct attribute_group dev_string_attr_grp = {
static ssize_t
descriptors_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -890,11 +890,11 @@ descriptors_read(struct file *filp, struct kobject *kobj,
}
return count - nleft;
}
-static BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
+static const BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
static ssize_t
bos_descriptors_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -913,12 +913,12 @@ bos_descriptors_read(struct file *filp, struct kobject *kobj,
}
return n;
}
-static BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
+static const BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
/* When modifying this list, be sure to modify dev_bin_attrs_are_visible()
* accordingly.
*/
-static struct bin_attribute *dev_bin_attrs[] = {
+static const struct bin_attribute *const dev_bin_attrs[] = {
&bin_attr_descriptors,
&bin_attr_bos_descriptors,
NULL
@@ -944,7 +944,7 @@ static umode_t dev_bin_attrs_are_visible(struct kobject *kobj,
}
static const struct attribute_group dev_bin_attr_grp = {
- .bin_attrs = dev_bin_attrs,
+ .bin_attrs_new = dev_bin_attrs,
.is_bin_visible = dev_bin_attrs_are_visible,
};
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 03c22114214b..935c0efea0b6 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -213,8 +213,7 @@ usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
 * not connectable, the port will not be used.
*/
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status) && pld)
+ if (acpi_get_physical_device_location(handle, &pld) && pld)
port_dev->location = USB_ACPI_LOCATION_VALID |
pld->group_token << 8 | pld->group_position;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e7bf9cc635be..bd4c788f03bc 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4615,6 +4615,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->driver = NULL;
+ hsotg->gadget.dev.of_node = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
hsotg->enabled = 0;
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 238c6fd50e75..2a542a99ec44 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1459,8 +1459,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
- hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- qh->wait_timer.function = &dwc2_wait_timer_fn;
+ hrtimer_setup(&qh->wait_timer, &dwc2_wait_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
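This hrtimer conversion, like the ones in fotg210-hcd.c and f_ncm.c further down, is mechanical: hrtimer_setup() folds the old two-step initialization into one call. A minimal sketch, assuming the current upstream signature and a hypothetical callback:

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* One-shot timer: do the work, don't rearm. */
	return HRTIMER_NORESTART;
}

static void demo_timer_init(struct hrtimer *t)
{
	/* Old pattern:
	 *	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	t->function = demo_timer_fn;
	 * New pattern: */
	hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}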
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f219c82e9619..66a08b527165 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
}
}
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
+ * and they can be set after core initialization.
+ */
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
+ if (DWC3_GCTL_PRTCAP(reg) != mode)
+ dwc3_enable_susphy(dwc, false);
+ }
+
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work)
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_prtcap(dwc, desired_dr_role);
+ dwc3_set_prtcap(dwc, desired_dr_role, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
*/
reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
- * cleared after power-on reset, and it can be set after core
- * initialization.
- */
+ /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
if (dwc->u2ss_inp3_quirk)
@@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
break;
}
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
- * after power-on reset, and it can be set after core initialization.
- */
+ /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
@@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc)
goto err_exit_usb3_phy;
}
+ /*
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
+ * coreConsultant configuration. So default value will be '0' when the
+ * core is reset. Application needs to set it to '1' after the core
+ * initialization is completed.
+ *
+ * Certain PHYs must be in the P0 power state during initialization.
+ * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are
+ * cleared prior to PHY init so that the PHY remains in the P0 state.
+ *
+ * After phy initialization, some phy operations can only be executed
+ * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
+ * blocking phy ops.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ dwc3_enable_susphy(dwc, true);
+
return 0;
err_exit_usb3_phy:
@@ -1479,6 +1494,26 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
}
+ /*
+ * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
+ * prior. When an active endpoint not currently cached in the host
+ * controller is chosen to be cached to the same index as an endpoint
+ * receiving NAKs, the endpoint receiving NAKs enters continuous
+ * retry mode. This prevents it from being evicted from the host
+ * controller cache, blocking the new endpoint from being cached and
+ * serviced.
+ *
+ * To resolve this, for controller versions 1.70a and 1.80a, set the
+ * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
+ * disables the USB2.0 internal retry feature. The GUCTL3[16] register
+ * function is available only from version 1.70a.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
+
return 0;
err_power_off_phy:
@@ -1568,7 +1603,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
@@ -1580,7 +1615,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
@@ -1625,7 +1660,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
/* de-assert DRVVBUS for HOST and OTG mode */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}
static void dwc3_get_software_properties(struct dwc3 *dwc)
@@ -1664,8 +1699,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 tx_thr_num_pkt_prd = 0;
u8 tx_max_burst_prd = 0;
u8 tx_fifo_resize_max_num;
- const char *usb_psy_name;
- int ret;
/* default to highest possible threshold */
lpm_nyet_threshold = 0xf;
@@ -1700,13 +1733,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
- ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
- if (ret >= 0) {
- dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
- if (!dwc->usb_psy)
- dev_err(dev, "couldn't get usb power supply\n");
- }
-
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1824,8 +1850,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
dwc->tx_max_burst_prd = tx_max_burst_prd;
- dwc->imod_interval = 0;
-
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
@@ -1843,21 +1867,19 @@ static void dwc3_check_params(struct dwc3 *dwc)
unsigned int hwparam_gen =
DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
- /* Check for proper value of imod_interval */
- if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
- dev_warn(dwc->dev, "Interrupt moderation not supported\n");
- dwc->imod_interval = 0;
- }
-
/*
+ * Enable IMOD for all supporting controllers.
+ *
+ * Particularly, DWC_usb3 v3.00a must enable this feature for
+ * the following reason:
+ *
* Workaround for STAR 9000961433 which affects only version
* 3.00a of the DWC_usb3 core. This prevents the controller
* interrupt from being masked while handling events. IMOD
* allows us to work around this issue. Enable it for the
* affected version.
*/
- if (!dwc->imod_interval &&
- DWC3_VER_IS(DWC3, 300A))
+ if (dwc3_has_imod(dwc))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -2109,6 +2131,23 @@ static int dwc3_get_num_ports(struct dwc3 *dwc)
return 0;
}
+static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
+{
+ struct power_supply *usb_psy;
+ const char *usb_psy_name;
+ int ret;
+
+ ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
+ if (ret < 0)
+ return NULL;
+
+ usb_psy = power_supply_get_by_name(usb_psy_name);
+ if (!usb_psy)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return usb_psy;
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2165,6 +2204,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_software_properties(dwc);
+ dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
+ if (IS_ERR(dwc->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
+
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset)) {
ret = PTR_ERR(dwc->reset);
@@ -2425,7 +2468,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
dwc3_gadget_resume(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
@@ -2433,7 +2476,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
break;
}
/* Restore GUSB2PHYCFG bits that were modified in suspend */
@@ -2462,7 +2505,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, dwc->current_dr_role);
+ dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
dwc3_otg_init(dwc);
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
@@ -2589,12 +2632,15 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto out;
ret = dwc3_resume_common(dwc, PMSG_RESUME);
if (ret)
pm_runtime_set_suspended(dev);
+out:
pm_runtime_enable(dev);
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ee73789326bc..aaa39e663f60 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -425,6 +425,7 @@
/* Global User Control Register 3 */
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
+#define DWC3_GUCTL3_USB20_RETRY_DISABLE BIT(16)
/* Device Configuration Register */
#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */
@@ -464,6 +465,7 @@
#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
/* These apply for core versions 1.94a and later */
+#define DWC3_DCTL_NYET_THRES_MASK (0xf << 20)
#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20)
#define DWC3_DCTL_KEEP_CONNECT BIT(19)
@@ -715,6 +717,7 @@ struct dwc3_event_buffer {
/**
* struct dwc3_ep - device side endpoint representation
* @endpoint: usb endpoint
+ * @nostream_work: work for handling bulk NoStream
* @cancelled_list: list of cancelled requests for this endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
@@ -741,6 +744,7 @@ struct dwc3_event_buffer {
*/
struct dwc3_ep {
struct usb_ep endpoint;
+ struct delayed_work nostream_work;
struct list_head cancelled_list;
struct list_head pending_list;
struct list_head started_list;
@@ -763,7 +767,7 @@ struct dwc3_ep {
#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
-#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
+#define DWC3_EP_STREAM_PRIMED BIT(10)
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
#define DWC3_EP_TXFIFO_RESIZED BIT(12)
#define DWC3_EP_DELAY_STOP BIT(13)
@@ -956,7 +960,6 @@ struct dwc3_request {
struct usb_request request;
struct list_head list;
struct dwc3_ep *dep;
- struct scatterlist *sg;
struct scatterlist *start_sg;
unsigned int num_pending_sgs;
@@ -1555,7 +1558,7 @@ struct dwc3_gadget_ep_cmd_params {
#define DWC3_HAS_OTG BIT(3)
/* prototypes */
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index d76ae676783c..7977860932b1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc)
* block "Initialize GCTL for OTG operation".
*/
/* GCTL.PrtCapDir=2'b11 */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* GUSB2PHYCFG0.SusPHY=0 */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc)
dwc3_drd_update(dwc);
} else {
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 5e3d1741701f..c158364bc03e 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -108,6 +108,9 @@
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+#define USBSS_DEBUG_CFG_OFF 0x0
+#define USBSS_DEBUG_CFG_DISABLED 0x7
+
struct dwc3_am62 {
struct device *dev;
void __iomem *usbss;
@@ -117,6 +120,7 @@ struct dwc3_am62 {
unsigned int offset;
unsigned int vbus_divider;
u32 wakeup_stat;
+ void __iomem *phy_regs;
};
static const int dwc3_ti_rate_table[] = { /* in KHZ */
@@ -166,6 +170,7 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
if (ret)
return ret;
+ of_node_put(args.np);
am62->offset = args.args[0];
/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
@@ -184,15 +189,47 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
return 0;
}
+static int dwc3_ti_init(struct dwc3_am62 *am62)
+{
+ int ret;
+ u32 reg;
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(am62);
+ if (ret)
+ return ret;
+
+ /* Workaround Errata i2409 */
+ if (am62->phy_regs) {
+ reg = readl(am62->phy_regs + USB_PHY_PLL_REG12);
+ reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
+ writel(reg, am62->phy_regs + USB_PHY_PLL_REG12);
+ }
+
+ /* VBUS divider select */
+ reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
+ if (am62->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
+
+ clk_prepare_enable(am62->usb2_refclk);
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
+
+ return 0;
+}
+
static int dwc3_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_am62 *am62;
unsigned long rate;
- void __iomem *phy;
int i, ret;
- u32 reg;
am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
if (!am62)
@@ -228,29 +265,17 @@ static int dwc3_ti_probe(struct platform_device *pdev)
am62->rate_code = i;
- /* Read the syscon property and set the rate code */
- ret = phy_syscon_pll_refclk(am62);
- if (ret)
- return ret;
-
- /* Workaround Errata i2409 */
- phy = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(phy)) {
+ am62->phy_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(am62->phy_regs)) {
dev_err(dev, "can't map PHY IOMEM resource. Won't apply i2409 fix.\n");
- phy = NULL;
- } else {
- reg = readl(phy + USB_PHY_PLL_REG12);
- reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
- writel(reg, phy + USB_PHY_PLL_REG12);
+ am62->phy_regs = NULL;
}
- /* VBUS divider select */
am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
- reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
- if (am62->vbus_divider)
- reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
- dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
+ ret = dwc3_ti_init(am62);
+ if (ret)
+ return ret;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -258,7 +283,6 @@ static int dwc3_ti_probe(struct platform_device *pdev)
* Don't ignore its dependencies with its children
*/
pm_suspend_ignore_children(dev, false);
- clk_prepare_enable(am62->usb2_refclk);
pm_runtime_get_noresume(dev);
ret = of_platform_populate(node, NULL, NULL, dev);
@@ -267,11 +291,6 @@ static int dwc3_ti_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- /* Set mode valid bit to indicate role is valid */
- reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
- reg |= USBSS_MODE_VALID;
- dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
-
/* Device has capability to wakeup system from sleep */
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
@@ -309,6 +328,7 @@ static void dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_set_suspended(dev);
}
@@ -338,6 +358,9 @@ static int dwc3_ti_suspend_common(struct device *dev)
dwc3_ti_writel(am62, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR);
}
+ /* Track whether the module resets across suspend */
+ dwc3_ti_writel(am62, USBSS_DEBUG_CFG, USBSS_DEBUG_CFG_DISABLED);
+
clk_disable_unprepare(am62->usb2_refclk);
return 0;
@@ -348,7 +371,14 @@ static int dwc3_ti_resume_common(struct device *dev)
struct dwc3_am62 *am62 = dev_get_drvdata(dev);
u32 reg;
- clk_prepare_enable(am62->usb2_refclk);
+ reg = dwc3_ti_readl(am62, USBSS_DEBUG_CFG);
+ if (reg != USBSS_DEBUG_CFG_DISABLED) {
+ /* lost power/context */
+ dwc3_ti_init(am62);
+ } else {
+ dwc3_ti_writel(am62, USBSS_DEBUG_CFG, USBSS_DEBUG_CFG_OFF);
+ clk_prepare_enable(am62->usb2_refclk);
+ }
if (device_may_wakeup(dev)) {
/* Clear wakeup config enable bits */
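The suspend/resume hunk above turns the otherwise-unused USBSS_DEBUG_CFG register into a canary for detecting loss of hardware context. The pattern in isolation, with hypothetical register and value names:

#include <linux/io.h>

#define SCRATCH_REG	0x0	/* hypothetical: any register unused at runtime */
#define CANARY		0x7	/* hypothetical: any nonzero marker value */

static void canary_arm(void __iomem *base)
{
	/* Park a marker that survives suspend but not a module reset. */
	writel(CANARY, base + SCRATCH_REG);
}

static bool canary_lost_context(void __iomem *base)
{
	if (readl(base + SCRATCH_REG) != CANARY)
		return true;			/* power/context lost */
	writel(0, base + SCRATCH_REG);		/* clear for next cycle */
	return false;
}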
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index b261c46124c6..fe74d11bb629 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -457,7 +457,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct device *dev = &pdev->dev;
- struct regulator *vbus_reg = NULL;
+ struct regulator *vbus_reg;
int ret;
int irq;
@@ -483,12 +483,11 @@ static int dwc3_omap_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- if (of_property_read_bool(node, "vbus-supply")) {
- vbus_reg = devm_regulator_get(dev, "vbus");
- if (IS_ERR(vbus_reg)) {
- dev_err(dev, "vbus init failed\n");
- return PTR_ERR(vbus_reg);
- }
+ vbus_reg = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(vbus_reg)) {
+ if (PTR_ERR(vbus_reg) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vbus_reg), "vbus init failed\n");
+ vbus_reg = NULL;
}
omap->dev = dev;
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index e16c3237180e..ef7c43008946 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -309,7 +309,6 @@ static void st_dwc3_remove(struct platform_device *pdev)
reset_control_assert(dwc3_data->rstc_rst);
}
-#ifdef CONFIG_PM_SLEEP
static int st_dwc3_suspend(struct device *dev)
{
struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
@@ -343,9 +342,8 @@ static int st_dwc3_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
static const struct of_device_id st_dwc3_match[] = {
{ .compatible = "st,stih407-dwc3" },
@@ -360,7 +358,7 @@ static struct platform_driver st_dwc3_driver = {
.driver = {
.name = "usb-st-dwc3",
.of_match_table = st_dwc3_match,
- .pm = &st_dwc3_dev_pm_ops,
+ .pm = pm_sleep_ptr(&st_dwc3_dev_pm_ops),
},
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 83dc7304d701..89a4dc8ebf94 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -996,8 +996,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
/*
* All stream eps will reinitiate stream on NoStream
- * rejection until we can determine that the host can
- * prime after the first transfer.
+ * rejection.
*
* However, if the controller is capable of
* TXF_FLUSH_BYPASS, then IN direction endpoints will
@@ -2630,10 +2629,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
u32 reg;
u32 timeout = 2000;
+ u32 saved_config = 0;
if (pm_runtime_suspended(dwc->dev))
return 0;
+ /*
+ * When operating at USB 2.0 speeds (HS/FS), ensure that
+ * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
+ * or stopping the controller. This resolves timeout issues that occur
+ * during frequent role switches between host and device modes.
+ *
+ * Save and clear these settings, then restore them after completing the
+ * controller start or stop sequence.
+ *
+ * This solution was discovered through experimentation as it is not
+ * mentioned in the dwc3 programming guide. It has been tested on
+ * Exynos platforms.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ }
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
@@ -2661,6 +2688,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
+ if (saved_config) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= saved_config;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
if (!timeout)
return -ETIMEDOUT;
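The GUSB2PHYCFG handling above is a save/clear/restore bracket around the run/stop sequence. The shape of the pattern, reduced to its essentials with hypothetical register names:

#include <linux/io.h>

#define PHYCFG		0x0		/* hypothetical register offset */
#define SUSPEND_BITS	0x3		/* hypothetical suspend-feature bits */

static void run_stop_with_phy_awake(void __iomem *base)
{
	u32 reg, saved;

	reg = readl(base + PHYCFG);
	saved = reg & SUSPEND_BITS;	/* remember only what was set */
	if (saved)
		writel(reg & ~SUSPEND_BITS, base + PHYCFG);

	/* ... issue run/stop and poll for controller halt here ... */

	if (saved)			/* restore exactly what was cleared */
		writel(readl(base + PHYCFG) | saved, base + PHYCFG);
}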
@@ -2740,6 +2773,8 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
__dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
+
return ret;
}
@@ -3298,6 +3333,50 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
return dwc3_alloc_trb_pool(dep);
}
+#define nostream_work_to_dep(w) (container_of(to_delayed_work(w), struct dwc3_ep, nostream_work))
+static void dwc3_nostream_work(struct work_struct *work)
+{
+ struct dwc3_ep *dep = nostream_work_to_dep(work);
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->flags & DWC3_EP_STREAM_PRIMED)
+ goto out;
+
+ if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
+ (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
+ goto out;
+ /*
+ * If the host rejects a stream due to no active stream, by the
+ * USB and xHCI spec, the endpoint will be put back to idle
+ * state. When the host is ready (buffer added/updated), it will
+ * prime the endpoint to inform the usb device controller. This
+ * triggers the device controller to issue ERDY to restart the
+ * stream. However, some hosts don't follow this and keep the
+ * endpoint in the idle state. No prime arrives even though the host
+ * streams are updated, and the device controller is never triggered
+ * to generate ERDY to move the next stream data. To work around this
+ * and maintain compatibility with various hosts, force the stream to
+ * reinitiate until the host is ready
+ * instead of waiting for the host to prime the endpoint.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
+ unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
+
+ dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
+ } else {
+ dep->flags |= DWC3_EP_DELAY_START;
+ dwc3_stop_active_transfer(dep, true, true);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+out:
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
struct dwc3_ep *dep;
@@ -3343,6 +3422,7 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
INIT_LIST_HEAD(&dep->pending_list);
INIT_LIST_HEAD(&dep->started_list);
INIT_LIST_HEAD(&dep->cancelled_list);
+ INIT_DELAYED_WORK(&dep->nostream_work, dwc3_nostream_work);
dwc3_debugfs_create_endpoint_dir(dep);
@@ -3742,66 +3822,27 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
- struct dwc3 *dwc = dep->dwc;
-
if (event->status == DEPEVT_STREAMEVT_FOUND) {
- dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
- goto out;
+ cancel_delayed_work(&dep->nostream_work);
+ dep->flags |= DWC3_EP_STREAM_PRIMED;
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+ return;
}
/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
switch (event->parameters) {
case DEPEVT_STREAM_PRIME:
- /*
- * If the host can properly transition the endpoint state from
- * idle to prime after a NoStream rejection, there's no need to
- * force restarting the endpoint to reinitiate the stream. To
- * simplify the check, assume the host follows the USB spec if
- * it primed the endpoint more than once.
- */
- if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
- if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
- dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
- else
- dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
- }
-
+ cancel_delayed_work(&dep->nostream_work);
+ dep->flags |= DWC3_EP_STREAM_PRIMED;
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
break;
case DEPEVT_STREAM_NOSTREAM:
- if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
- !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
- (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
- !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
- break;
-
- /*
- * If the host rejects a stream due to no active stream, by the
- * USB and xHCI spec, the endpoint will be put back to idle
- * state. When the host is ready (buffer added/updated), it will
- * prime the endpoint to inform the usb device controller. This
- * triggers the device controller to issue ERDY to restart the
- * stream. However, some hosts don't follow this and keep the
- * endpoint in the idle state. No prime will come despite host
- * streams are updated, and the device controller will not be
- * triggered to generate ERDY to move the next stream data. To
- * workaround this and maintain compatibility with various
- * hosts, force to reinitiate the stream until the host is ready
- * instead of waiting for the host to prime the endpoint.
- */
- if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
- unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
-
- dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
- } else {
- dep->flags |= DWC3_EP_DELAY_START;
- dwc3_stop_active_transfer(dep, true, true);
- return;
- }
+ dep->flags &= ~DWC3_EP_STREAM_PRIMED;
+ if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM)
+ queue_delayed_work(system_wq, &dep->nostream_work,
+ msecs_to_jiffies(100));
break;
}
-
-out:
- dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
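Structurally, the PRIME/NOSTREAM rework above converts the inline workaround into a cancel-or-fire debounce: a NoStream event arms a 100 ms delayed work, and a prime arriving in time disarms it. The pattern in isolation, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct nostream_debounce {
	struct delayed_work work;	/* body performs the forced restart */
};

static void on_prime(struct nostream_debounce *d)
{
	/* Host recovered on its own: drop the pending forced restart. */
	cancel_delayed_work(&d->work);
}

static void on_nostream(struct nostream_debounce *d)
{
	/* Give the host 100 ms to prime before forcing a restart. */
	queue_delayed_work(system_wq, &d->work, msecs_to_jiffies(100));
}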
@@ -4195,8 +4236,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
- if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) {
+ reg &= ~DWC3_DCTL_NYET_THRES_MASK;
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
+ }
dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
@@ -4458,14 +4501,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
DWC3_GEVNTSIZ_SIZE(evt->length));
+ evt->flags &= ~DWC3_EVENT_PENDING;
+ /*
+ * Add an explicit write memory barrier to make sure that clearing
+ * DWC3_EVENT_PENDING is observed in dwc3_check_event_buf().
+ */
+ wmb();
+
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
- /* Keep the clearing of DWC3_EVENT_PENDING at the end */
- evt->flags &= ~DWC3_EVENT_PENDING;
-
return ret;
}
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index 49f25a70b32e..7fb4d4715e9f 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
@@ -119,8 +120,8 @@ void fotg210_vbus(struct fotg210 *fotg, bool enable)
ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
if (ret)
dev_err(fotg->dev, "failed to %s VBUS\n",
- enable ? "enable" : "disable");
- dev_info(fotg->dev, "%s: %s VBUS\n", __func__, enable ? "enable" : "disable");
+ str_enable_disable(enable));
+ dev_info(fotg->dev, "%s: %s VBUS\n", __func__, str_enable_disable(enable));
}
static int fotg210_probe(struct platform_device *pdev)
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 3d404d19a205..64c4965a160f 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -4901,8 +4901,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
*/
fotg210->need_io_watchdog = 1;
- hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fotg210->hrtimer.function = fotg210_hrtimer_func;
+ hrtimer_setup(&fotg210->hrtimer, fotg210_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 566ff0b1282a..76521555e3c1 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -211,6 +211,8 @@ config USB_F_MIDI
config USB_F_MIDI2
tristate
+ select SND_UMP
+ select SND_UMP_LEGACY_RAWMIDI
config USB_F_HID
tristate
@@ -445,8 +447,6 @@ config USB_CONFIGFS_F_MIDI2
depends on USB_CONFIGFS
depends on SND
select USB_LIBCOMPOSITE
- select SND_UMP
- select SND_UMP_LEGACY_RAWMIDI
select USB_F_MIDI2
help
The MIDI 2.0 function driver provides the generic emulated
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index bdda8c74602d..869ad99afb48 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
usb_gadget_set_remote_wakeup(gadget, 0);
done:
- if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
- usb_gadget_set_selfpowered(gadget);
- else
+ if (power > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
- usb_gadget_set_selfpowered(gadget);
+ if (cdev->config &&
+ cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)
+ usb_gadget_set_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
- if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, maxpower);
} else {
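The same self-powered decision now appears in set_config(), composite_suspend() and composite_resume(). A hypothetical helper capturing it (not part of the patch), assuming USB_SELF_POWER_VBUS_MAX_DRAW is the 100 mA bus-power threshold:

#include <linux/usb/composite.h>

static bool may_report_selfpowered(const struct usb_configuration *c,
				   unsigned int power_mA)
{
	/* Drawing more than VBUS allows means bus-powered... */
	if (power_mA > USB_SELF_POWER_VBUS_MAX_DRAW)
		return false;
	/* ...as does a configuration that doesn't advertise self-power. */
	if (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
		return false;
	return true;
}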
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6499a88d346c..fba2a56dae97 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -827,11 +827,15 @@ static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
{
struct gadget_string *string = to_gadget_string(item);
int size = min(sizeof(string->string), len + 1);
+ ssize_t cpy_len;
if (len > USB_MAX_STRING_LEN)
return -EINVAL;
- return strscpy(string->string, page, size);
+ cpy_len = strscpy(string->string, page, size);
+ if (cpy_len > 0 && string->string[cpy_len - 1] == '\n')
+ string->string[cpy_len - 1] = 0;
+ return len;
}
CONFIGFS_ATTR(gadget_string_, s);
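The store-side trim relies on strscpy() returning the number of bytes copied, excluding the terminating NUL, or -E2BIG on truncation. A minimal sketch of the echo-friendly newline strip, with hypothetical buffer names:

#include <linux/string.h>

static void store_trimmed(char *dst, size_t dstsz, const char *src)
{
	ssize_t n = strscpy(dst, src, dstsz);

	/* Drop the trailing newline that `echo` (without -n) appends. */
	if (n > 0 && dst[n - 1] == '\n')
		dst[n - 1] = '\0';
}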
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 6cb7771e8a69..80841de845b0 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
+#include <linux/string_choices.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
@@ -387,8 +388,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->wLength = 0;
req->length = sizeof *event;
- DBG(cdev, "notify connect %s\n",
- ecm->is_open ? "true" : "false");
+ DBG(cdev, "notify connect %s\n", str_true_false(ecm->is_open));
ecm->notify_state = ECM_NOTIFY_SPEED;
break;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ad79eb0f729b..2dea9e42a0f8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2285,7 +2285,7 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
struct usb_gadget_strings **lang;
int first_id;
- if (WARN_ON(ffs->state != FFS_ACTIVE
+ if ((ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 837fcdfa3840..da82598fcef8 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
/* Our transmit completed. See if there's more to go.
* f_midi_transmit eats req, don't queue it again. */
req->length = 0;
- f_midi_transmit(midi);
+ queue_work(system_highpri_wq, &midi->work);
return;
}
break;
@@ -907,6 +907,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENODEV;
+ /*
+ * Reset wMaxPacketSize to the maximum FS bulk packet size before the
+ * endpoints are claimed. This ensures wMaxPacketSize does not exceed
+ * the limit during bind retries, where the dwc3 TX/RX FIFOs may still
+ * be configured with a 512-byte maxpacket size for IN/OUT endpoints
+ * that support HS speed only.
+ */
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
+
/* allocate instance-specific endpoints */
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
if (!midi->in_ep)
@@ -1000,11 +1009,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
}
/* configure the endpoint descriptors ... */
- ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
- ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
+ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+ ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
- ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
- ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
+ ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+ ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
/* ... and add them to the list */
endpoint_descriptor_index = i;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 8e761249d672..58b0dd575af3 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
+#include <linux/string_choices.h>
#include <linux/usb/cdc.h>
@@ -558,7 +559,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
req->length = sizeof *event;
DBG(cdev, "notify connect %s\n",
- ncm->is_open ? "true" : "false");
+ str_true_false(ncm->is_open));
ncm->notify_state = NCM_NOTIFY_NONE;
break;
@@ -1558,8 +1559,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- ncm->task_timer.function = ncm_tx_timeout;
+ hrtimer_setup(&ncm->task_timer, ncm_tx_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 15bb3aa12aa8..5a2e1237f85c 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
@@ -50,7 +51,7 @@ static int bot_enqueue_cmd_cbw(struct f_uas *fu)
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+ ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
@@ -62,10 +63,11 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct f_uas *fu = cmd->fu;
transport_generic_free_cmd(&cmd->se_cmd, 0);
- if (req->status < 0) {
- pr_err("ERR %s(%d)\n", __func__, __LINE__);
+ if (req->status == -ESHUTDOWN)
return;
- }
+
+ if (req->status < 0)
+ pr_err("ERR %s(%d)\n", __func__, __LINE__);
/* CSW completed, wait for next CBW */
bot_enqueue_cmd_cbw(fu);
@@ -136,7 +138,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
}
req->complete = bot_err_compl;
req->context = cmd;
- req->buf = fu->cmd.buf;
+ req->buf = fu->cmd[0].buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
@@ -196,6 +198,11 @@ static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
if (req->status < 0)
pr_err("ERR %s(%d)\n", __func__, __LINE__);
+ if (req->status == -ESHUTDOWN) {
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ return;
+ }
+
bot_send_status(cmd, true);
}
@@ -244,11 +251,8 @@ static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
- init_completion(&cmd->write_complete);
cmd->fu = fu;
if (!cmd->data_len) {
@@ -256,22 +260,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
@@ -279,8 +267,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
- wait_for_completion(&cmd->write_complete);
- target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -292,14 +278,31 @@ static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
struct f_uas *fu = req->context;
int ret;
+ if (req->status == -ESHUTDOWN)
+ return;
+
fu->flags &= ~USBG_BOT_CMD_PEND;
- if (req->status < 0)
+ if (req->status < 0) {
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+
+ dev_err(&gadget->dev, "BOT command req err (%d)\n", req->status);
+ bot_enqueue_cmd_cbw(fu);
return;
+ }
ret = bot_submit_command(fu, req->buf, req->actual);
- if (ret)
+ if (ret) {
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
+ if (!(fu->flags & USBG_BOT_WEDGED))
+ usb_ep_set_wedge(fu->ep_in);
+
+ fu->flags |= USBG_BOT_WEDGED;
+ bot_enqueue_cmd_cbw(fu);
+ } else if (fu->flags & USBG_BOT_WEDGED) {
+ fu->flags &= ~USBG_BOT_WEDGED;
+ usb_ep_clear_halt(fu->ep_in);
+ }
}
static int bot_prepare_reqs(struct f_uas *fu)
@@ -314,8 +317,8 @@ static int bot_prepare_reqs(struct f_uas *fu)
if (!fu->bot_req_out)
goto err_out;
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
+ fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+ if (!fu->cmd[0].req)
goto err_cmd;
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
@@ -327,27 +330,27 @@ static int bot_prepare_reqs(struct f_uas *fu)
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
+ fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+ if (!fu->cmd[0].buf)
goto err_buf;
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
+ fu->cmd[0].req->complete = bot_cmd_complete;
+ fu->cmd[0].req->buf = fu->cmd[0].buf;
+ fu->cmd[0].req->length = fu->ep_out->maxpacket;
+ fu->cmd[0].req->context = fu;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
+ kfree(fu->cmd[0].buf);
+ fu->cmd[0].buf = NULL;
err_buf:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
+ fu->cmd[0].req = NULL;
err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
@@ -372,16 +375,16 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
- kfree(fu->cmd.buf);
+ kfree(fu->cmd[0].buf);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
+ fu->cmd[0].req = NULL;
fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
+ fu->cmd[0].buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
@@ -441,14 +444,10 @@ static int usbg_bot_setup(struct usb_function *f,
pr_err("No LUNs configured?\n");
return -EINVAL;
}
- /*
- * If 4 LUNs are present we return 3 i.e. LUN 0..3 can be
- * accessed. The upper limit is 0xf
- */
luns--;
- if (luns > 0xf) {
+ if (luns > US_BULK_MAX_LUN_LIMIT) {
pr_info_once("Limiting the number of luns to 16\n");
- luns = 0xf;
+ luns = US_BULK_MAX_LUN_LIMIT;
}
ret_lun = cdev->req->buf;
*ret_lun = luns;
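For context on the luns-- above: the BOT Get Max LUN request returns the highest LUN index rather than a count, so four configured LUNs are reported as 3, and the value is capped at 0xf. A worked sketch, assuming US_BULK_MAX_LUN_LIMIT is that cap:

#include <linux/types.h>

static u8 bot_max_lun(unsigned int nluns)	/* hypothetical helper */
{
	u8 max_lun = nluns - 1;			/* highest LUN index */

	if (max_lun > US_BULK_MAX_LUN_LIMIT)	/* 0xf per the BOT spec */
		max_lun = US_BULK_MAX_LUN_LIMIT;
	return max_lun;
}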
@@ -457,6 +456,11 @@ static int usbg_bot_setup(struct usb_function *f,
case US_BULK_RESET_REQUEST:
/* XXX maybe we should remove previous requests for IN + OUT */
+ if (fu->flags & USBG_BOT_WEDGED) {
+ fu->flags &= ~USBG_BOT_WEDGED;
+ usb_ep_clear_halt(fu->ep_in);
+ }
+
bot_enqueue_cmd_cbw(fu);
return 0;
}
@@ -465,6 +469,45 @@ static int usbg_bot_setup(struct usb_function *f,
/* Start uas.c code */
+static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
+{
+ switch (code) {
+ case TMR_FUNCTION_FAILED:
+ return RC_TMF_FAILED;
+ case TMR_FUNCTION_COMPLETE:
+ case TMR_TASK_DOES_NOT_EXIST:
+ return RC_TMF_COMPLETE;
+ case TMR_LUN_DOES_NOT_EXIST:
+ return RC_INCORRECT_LUN;
+ case TMR_FUNCTION_REJECTED:
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ default:
+ return RC_TMF_NOT_SUPPORTED;
+ }
+}
+
+static unsigned char uasp_to_tcm_func(int code)
+{
+ switch (code) {
+ case TMF_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case TMF_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case TMF_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case TMF_LOGICAL_UNIT_RESET:
+ return TMR_LUN_RESET;
+ case TMF_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ case TMF_I_T_NEXUS_RESET:
+ case TMF_QUERY_TASK:
+ case TMF_QUERY_TASK_SET:
+ case TMF_QUERY_ASYNC_EVENT:
+ default:
+ return TMR_UNKNOWN;
+ }
+}
+
static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
{
/* We have either all three allocated or none */
@@ -482,10 +525,14 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
static void uasp_free_cmdreq(struct f_uas *fu)
{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
+ int i;
+
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
+ kfree(fu->cmd[i].buf);
+ fu->cmd[i].req = NULL;
+ fu->cmd[i].buf = NULL;
+ }
}
static void uasp_cleanup_old_alt(struct f_uas *fu)
@@ -500,7 +547,7 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
usb_ep_disable(fu->ep_status);
usb_ep_disable(fu->ep_cmd);
- for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
+ for (i = 0; i < USBG_NUM_CMDS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
uasp_free_cmdreq(fu);
}
@@ -512,7 +559,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
@@ -532,6 +579,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
}
stream->req_in->is_last = 1;
+ stream->req_in->stream_id = cmd->tag;
stream->req_in->complete = uasp_status_data_cmpl;
stream->req_in->length = se_cmd->data_length;
stream->req_in->context = cmd;
@@ -544,7 +592,7 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct sense_iu *iu = &cmd->sense_iu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
cmd->state = UASP_QUEUE_COMMAND;
iu->iu_id = IU_ID_STATUS;
@@ -556,20 +604,76 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
iu->status = se_cmd->scsi_status;
stream->req_status->is_last = 1;
+ stream->req_status->stream_id = cmd->tag;
stream->req_status->context = cmd;
stream->req_status->length = se_cmd->scsi_sense_length + 16;
stream->req_status->buf = iu;
stream->req_status->complete = uasp_status_data_cmpl;
}
+static void uasp_prepare_response(struct usbg_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct response_iu *rsp_iu = &cmd->response_iu;
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
+
+ cmd->state = UASP_QUEUE_COMMAND;
+ rsp_iu->iu_id = IU_ID_RESPONSE;
+ rsp_iu->tag = cpu_to_be16(cmd->tag);
+
+ if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
+ rsp_iu->response_code = cmd->tmr_rsp;
+ else
+ rsp_iu->response_code =
+ tcm_to_uasp_response(se_cmd->se_tmr_req->response);
+
+ /*
+ * The UASP driver must support all the task management functions listed
+ * in Table 20 of UAS-r04. To remain compliant while indicating that
+ * the TMR did not go through, report RC_TMF_FAILED instead of
+ * RC_TMF_NOT_SUPPORTED and print a warning to the user.
+ */
+ switch (cmd->tmr_func) {
+ case TMF_ABORT_TASK:
+ case TMF_ABORT_TASK_SET:
+ case TMF_CLEAR_TASK_SET:
+ case TMF_LOGICAL_UNIT_RESET:
+ case TMF_CLEAR_ACA:
+ case TMF_I_T_NEXUS_RESET:
+ case TMF_QUERY_TASK:
+ case TMF_QUERY_TASK_SET:
+ case TMF_QUERY_ASYNC_EVENT:
+ if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_warn(&gadget->dev, "TMF function %d not supported\n",
+ cmd->tmr_func);
+ rsp_iu->response_code = RC_TMF_FAILED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ stream->req_status->is_last = 1;
+ stream->req_status->stream_id = cmd->tag;
+ stream->req_status->context = cmd;
+ stream->req_status->length = sizeof(struct response_iu);
+ stream->req_status->buf = rsp_iu;
+ stream->req_status->complete = uasp_status_data_cmpl;
+}
+
+static void usbg_release_cmd(struct se_cmd *se_cmd);
+static int uasp_send_tm_response(struct usbg_cmd *cmd);
+
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
- struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
int ret;
- if (req->status < 0)
+ if (req->status == -ESHUTDOWN)
goto cleanup;
switch (cmd->state) {
@@ -600,8 +704,37 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
break;
case UASP_QUEUE_COMMAND:
- transport_generic_free_cmd(&cmd->se_cmd, 0);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ /*
+ * Overlapped command detected and cancelled.
+ * So send overlapped attempted status.
+ */
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
+ req->status == -ECONNRESET) {
+ uasp_send_tm_response(cmd);
+ return;
+ }
+
+ hash_del(&stream->node);
+
+ /*
+ * If no command submitted to target core here, just free the
+ * bitmap index. This is for the cases where f_tcm handles
+ * status response instead of the target core.
+ */
+ if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
+ cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
+ struct se_session *se_sess;
+
+ se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
+ cmd->se_cmd.map_tag,
+ cmd->se_cmd.map_cpu);
+ } else {
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+
+ usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
+ complete(&stream->cmd_completion);
break;
default:
@@ -610,27 +743,38 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
return;
cleanup:
+ hash_del(&stream->node);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int uasp_send_status_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
struct sense_iu *iu = &cmd->sense_iu;
iu->tag = cpu_to_be16(cmd->tag);
- stream->req_status->complete = uasp_status_data_cmpl;
- stream->req_status->context = cmd;
cmd->fu = fu;
uasp_prepare_status(cmd);
return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
}
+static int uasp_send_tm_response(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
+ struct response_iu *iu = &cmd->response_iu;
+
+ iu->tag = cpu_to_be16(cmd->tag);
+ cmd->fu = fu;
+ uasp_prepare_response(cmd);
+ return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
+}
+
static int uasp_send_read_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
struct sense_iu *iu = &cmd->sense_iu;
int ret;
@@ -674,11 +818,10 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
struct sense_iu *iu = &cmd->sense_iu;
int ret;
- init_completion(&cmd->write_complete);
cmd->fu = fu;
iu->tag = cpu_to_be16(cmd->tag);
@@ -710,36 +853,31 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
pr_err("%s(%d)\n", __func__, __LINE__);
}
- wait_for_completion(&cmd->write_complete);
- target_execute_cmd(se_cmd);
cleanup:
return ret;
}
-static int usbg_submit_command(struct f_uas *, void *, unsigned int);
+static int usbg_submit_command(struct f_uas *, struct usb_request *);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
- int ret;
- if (req->status < 0)
+ if (req->status == -ESHUTDOWN)
return;
- ret = usbg_submit_command(fu, req->buf, req->actual);
- /*
- * Once we tune for performance enqueue the command req here again so
- * we can receive a second command while we processing this one. Pay
- * attention to properly sync STAUS endpoint with DATA IN + OUT so you
- * don't break HS.
- */
- if (!ret)
+ if (req->status < 0) {
+ usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ }
+
+ usbg_submit_command(fu, req);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
{
+ init_completion(&stream->cmd_completion);
+
stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!stream->req_in)
goto out;
@@ -764,66 +902,48 @@ out:
return -ENOMEM;
}
-static int uasp_alloc_cmd(struct f_uas *fu)
+static int uasp_alloc_cmd(struct f_uas *fu, int i)
{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
+ fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
+ if (!fu->cmd[i].req)
goto err;
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
+ fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
+ if (!fu->cmd[i].buf)
goto err_buf;
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
+ fu->cmd[i].req->complete = uasp_cmd_complete;
+ fu->cmd[i].req->buf = fu->cmd[i].buf;
+ fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
+ fu->cmd[i].req->context = fu;
return 0;
err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
err:
return -ENOMEM;
}
-static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
-{
- int i;
-
- for (i = 0; i < max_streams; i++) {
- struct uas_stream *s = &fu->stream[i];
-
- s->req_in->stream_id = i + 1;
- s->req_out->stream_id = i + 1;
- s->req_status->stream_id = i + 1;
- }
-}
-
static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
- int max_streams;
- if (fu->flags & USBG_USE_STREAMS)
- max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
- max_streams = 1;
-
- for (i = 0; i < max_streams; i++) {
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
if (ret)
goto err_cleanup;
}
- ret = uasp_alloc_cmd(fu);
- if (ret)
- goto err_free_stream;
- uasp_setup_stream_res(fu, max_streams);
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
+ ret = uasp_alloc_cmd(fu, i);
+ if (ret)
+ goto err_free_stream;
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
+ ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+ }
return 0;
@@ -914,6 +1034,8 @@ static int get_cmd_dir(const unsigned char *cdb)
case READ_TOC:
case READ_FORMAT_CAPACITIES:
case REQUEST_SENSE:
+ case ATA_12:
+ case ATA_16:
ret = DMA_FROM_DEVICE;
break;
@@ -957,7 +1079,18 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (req->status < 0) {
+ cmd->state = UASP_QUEUE_COMMAND;
+
+ if (req->status == -ESHUTDOWN) {
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
+
+ hash_del(&stream->node);
+ target_put_sess_cmd(se_cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ return;
+ }
+
+ if (req->status) {
pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
goto cleanup;
}
@@ -969,11 +1102,22 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
se_cmd->data_length);
}
- complete(&cmd->write_complete);
+ cmd->flags |= USBG_CMD_PENDING_DATA_WRITE;
+ queue_work(cmd->fu->tpg->workqueue, &cmd->work);
return;
cleanup:
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ target_put_sess_cmd(se_cmd);
+
+ /* Command was aborted due to overlapped tag */
+ if (cmd->state == UASP_QUEUE_COMMAND &&
+ cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
+ uasp_send_tm_response(cmd);
+ return;
+ }
+
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD, 0);
}
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -995,9 +1139,12 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
}
req->is_last = 1;
+ req->stream_id = cmd->tag;
req->complete = usbg_data_write_cmpl;
req->length = se_cmd->data_length;
req->context = cmd;
+
+ cmd->state = UASP_SEND_STATUS;
return 0;
}
@@ -1037,36 +1184,153 @@ static int usbg_send_read_response(struct se_cmd *se_cmd)
return uasp_send_read_response(cmd);
}
-static void usbg_cmd_work(struct work_struct *work)
+static void usbg_aborted_task(struct se_cmd *se_cmd);
+
+static void usbg_submit_tmr(struct usbg_cmd *cmd)
+{
+ struct se_session *se_sess;
+ struct se_cmd *se_cmd;
+ int flags = TARGET_SCF_ACK_KREF;
+
+ se_cmd = &cmd->se_cmd;
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+
+ target_submit_tmr(se_cmd, se_sess,
+ cmd->response_iu.add_response_info,
+ cmd->unpacked_lun, NULL, uasp_to_tcm_func(cmd->tmr_func),
+ GFP_ATOMIC, cmd->tag, flags);
+}
+
+static void usbg_submit_cmd(struct usbg_cmd *cmd)
{
- struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
+ /*
+ * Note: each command runs in its own work item, and each stage of
+ * the command is processed sequentially. Should this no longer be
+ * the case, locking is needed.
+ */
+ if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
+ target_execute_cmd(&cmd->se_cmd);
+ cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
+ return;
+ }
+
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- __target_init_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun, NULL);
+ if (dir < 0)
goto out;
- }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
cmd->sense_iu.sense, cmd->unpacked_lun, 0,
cmd->prio_attr, dir, flags);
+
return;
out:
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun, NULL);
transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+}
+
+static void usbg_cmd_work(struct work_struct *work)
+{
+ struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+
+ /*
+ * Failure is detected by f_tcm here. If we already know the failing
+ * response, skip submitting the command to the target core and send
+ * the USB response to the host directly.
+ */
+ if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
+ goto skip;
+
+ if (cmd->tmr_func)
+ usbg_submit_tmr(cmd);
+ else
+ usbg_submit_cmd(cmd);
+
+ return;
+
+skip:
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
+ struct f_uas *fu = cmd->fu;
+ struct se_session *se_sess;
+ struct uas_stream *stream = NULL;
+ struct hlist_node *tmp;
+ struct usbg_cmd *active_cmd = NULL;
+
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+
+ hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
+ int i = stream - &fu->stream[0];
+
+ active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
+ if (active_cmd->tag == cmd->tag)
+ break;
+ }
+
+ /* Sanity check */
+ if (!stream || (active_cmd && active_cmd->tag != cmd->tag)) {
+ usbg_submit_command(cmd->fu, cmd->req);
+ return;
+ }
+
+ reinit_completion(&stream->cmd_completion);
+
+ /*
+ * A UASP command consists of the command, data, and status
+ * stages, each operating sequentially from different endpoints.
+ *
+ * Each USB endpoint operates independently, and depending on
+ * hardware implementation, a completion callback for a transfer
+ * from one endpoint may not reflect the order of completion on
+ * the wire. This is particularly true for devices with
+ * endpoints that have independent interrupts and event buffers.
+ *
+ * The driver must still detect misbehaving hosts and respond
+ * with an overlap status. To reduce false overlap failures,
+ * give the active command with the matching stream ID a brief
+ * 1ms to complete before responding with an overlap failure.
+ * Overlap failure should be rare.
+ */
+ wait_for_completion_timeout(&stream->cmd_completion, msecs_to_jiffies(1));
+
+ /* If the previous stream is completed, retry the command. */
+ if (!hash_hashed(&stream->node)) {
+ usbg_submit_command(cmd->fu, cmd->req);
+ return;
+ }
+
+ /*
+ * The command isn't submitted to the target core, so we're safe
+ * to remove the bitmap index from the session tag pool.
+ */
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
+ cmd->se_cmd.map_tag,
+ cmd->se_cmd.map_cpu);
+
+ /*
+ * Overlapped command tag detected. Cancel any pending transfer of
+ * the command submitted to the target core.
+ */
+ active_cmd->tmr_rsp = RC_OVERLAPPED_TAG;
+ usbg_aborted_task(&active_cmd->se_cmd);
+
+ /* Send the response after the transfer is aborted. */
+ return;
+ }
+
+ uasp_send_tm_response(cmd);
}
static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
@@ -1084,6 +1348,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.map_cpu = cpu;
+ cmd->se_cmd.cpuid = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
@@ -1092,50 +1357,82 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
static void usbg_release_cmd(struct se_cmd *);
-static int usbg_submit_command(struct f_uas *fu,
- void *cmdbuf, unsigned int len)
+static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
{
- struct command_iu *cmd_iu = cmdbuf;
+ struct iu *iu = req->buf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
+ struct uas_stream *stream;
+ struct hlist_node *tmp;
+ struct command_iu *cmd_iu;
u32 cmd_len;
u16 scsi_tag;
- if (cmd_iu->iu_id != IU_ID_COMMAND) {
- pr_err("Unsupported type %d\n", cmd_iu->iu_id);
- return -EINVAL;
- }
-
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
return -EINVAL;
}
- cmd_len = (cmd_iu->len & ~0x3) + 16;
- if (cmd_len > USBG_MAX_CMD)
- return -EINVAL;
-
- scsi_tag = be16_to_cpup(&cmd_iu->tag);
+ scsi_tag = be16_to_cpup(&iu->tag);
cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
- memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
- if (fu->flags & USBG_USE_STREAMS) {
- if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
- goto err;
- if (!cmd->tag)
- cmd->stream = &fu->stream[0];
- else
- cmd->stream = &fu->stream[cmd->tag - 1];
- } else {
- cmd->stream = &fu->stream[0];
+ cmd->req = req;
+ cmd->fu = fu;
+ cmd->tag = scsi_tag;
+ cmd->se_cmd.tag = scsi_tag;
+ cmd->tmr_func = 0;
+ cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
+ cmd->flags = 0;
+
+ cmd_iu = (struct command_iu *)iu;
+
+ /* Command and Task Management IUs share the same LUN offset */
+ cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
+
+ if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
+ cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
+ goto skip;
+ }
+
+ hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
+ struct usbg_cmd *active_cmd;
+ struct se_session *se_sess;
+ int i = stream - &fu->stream[0];
+
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+ active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
+
+ if (active_cmd->tag == scsi_tag) {
+ cmd->tmr_rsp = RC_OVERLAPPED_TAG;
+ goto skip;
+ }
}
+ stream = &fu->stream[cmd->se_cmd.map_tag];
+ hash_add(fu->stream_hash, &stream->node, scsi_tag);
+
+ if (iu->iu_id == IU_ID_TASK_MGMT) {
+ struct task_mgmt_iu *tm_iu;
+
+ tm_iu = (struct task_mgmt_iu *)iu;
+ cmd->tmr_func = tm_iu->function;
+ goto skip;
+ }
+
+ cmd_len = (cmd_iu->len & ~0x3) + 16;
+ if (cmd_len > USBG_MAX_CMD) {
+ target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
+ hash_del(&stream->node);
+ return -EINVAL;
+ }
+ memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
+
switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
cmd->prio_attr = TCM_HEAD_TAG;
@@ -1155,15 +1452,11 @@ static int usbg_submit_command(struct f_uas *fu,
break;
}
- cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
-
+skip:
INIT_WORK(&cmd->work, usbg_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
return 0;
-err:
- usbg_release_cmd(&cmd->se_cmd);
- return -EINVAL;
}
static void bot_cmd_work(struct work_struct *work)
@@ -1172,30 +1465,40 @@ static void bot_cmd_work(struct work_struct *work)
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
+ int flags = TARGET_SCF_ACK_KREF;
int dir;
+ /*
+ * Note: each command runs in its own work item, and each stage of
+ * the command is processed sequentially. Should this no longer be
+ * the case, locking is needed.
+ */
+ if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
+ target_execute_cmd(&cmd->se_cmd);
+ cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
+ return;
+ }
+
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- __target_init_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun, NULL);
+ if (dir < 0)
goto out;
- }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0);
+ cmd->data_len, cmd->prio_attr, dir, flags);
return;
out:
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun, NULL);
transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
}
static int bot_submit_command(struct f_uas *fu,
@@ -1239,6 +1542,7 @@ static int bot_submit_command(struct f_uas *fu,
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
+ cmd->flags = 0;
INIT_WORK(&cmd->work, bot_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
@@ -1275,16 +1579,38 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
se_cmd);
struct se_session *se_sess = se_cmd->se_sess;
+ cmd->tag = 0;
kfree(cmd->data_buf);
target_free_tag(se_sess, se_cmd);
}
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
+
+ uasp_send_tm_response(cmd);
}
static void usbg_aborted_task(struct se_cmd *se_cmd)
{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
+ int ret = 0;
+
+ if (stream->req_out->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
+ else if (stream->req_in->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
+ else if (stream->req_status->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
+
+ if (ret)
+ dev_err(&gadget->dev, "Failed to abort cmd tag %d, (%d)\n",
+ cmd->tag, ret);
+
+ cmd->state = UASP_QUEUE_COMMAND;
}
static const char *usbg_check_wwn(const char *name)
@@ -1355,7 +1681,8 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
goto unref_dep;
mutex_init(&tpg->tpg_mutex);
atomic_set(&tpg->tpg_port_count, 0);
- tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
+ tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
if (!tpg->workqueue)
goto free_tpg;
@@ -1746,7 +2073,7 @@ static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
.bLength = sizeof(uasp_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
+ .bMaxBurst = 15,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
.wBytesPerInterval = 0,
};
@@ -1754,7 +2081,7 @@ static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
.bLength = sizeof(bot_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
+ .bMaxBurst = 15,
};
static struct usb_endpoint_descriptor uasp_bo_desc = {
@@ -1789,12 +2116,14 @@ static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
.bLength = sizeof(uasp_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 15,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};
static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
.bLength = sizeof(bot_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 15,
};
static struct usb_endpoint_descriptor uasp_status_desc = {
@@ -1971,43 +2300,39 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
bot_intf_desc.bInterfaceNumber = iface;
uasp_intf_desc.bInterfaceNumber = iface;
fu->iface = iface;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
- &uasp_bi_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
if (!ep)
goto ep_fail;
fu->ep_in = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
- &uasp_bo_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
if (!ep)
goto ep_fail;
fu->ep_out = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
- &uasp_status_in_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
if (!ep)
goto ep_fail;
fu->ep_status = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
- &uasp_cmd_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
if (!ep)
goto ep_fail;
fu->ep_cmd = ep;
/* Assume endpoint addresses are the same for both speeds */
- uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+ uasp_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
+ uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
uasp_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+ uasp_fs_status_desc.bEndpointAddress;
+ uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
- uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
- uasp_fs_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+ uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
+ uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
+ uasp_ss_status_desc.bEndpointAddress =
+ uasp_fs_status_desc.bEndpointAddress;
+ uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
uasp_hs_function_desc, uasp_ss_function_desc,
@@ -2051,9 +2376,14 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
static int tcm_get_alt(struct usb_function *f, unsigned intf)
{
- if (intf == bot_intf_desc.bInterfaceNumber)
+ struct f_uas *fu = to_f_uas(f);
+
+ if (fu->iface != intf)
+ return -EOPNOTSUPP;
+
+ if (fu->flags & USBG_IS_BOT)
return USB_G_ALT_INT_BBB;
- if (intf == uasp_intf_desc.bInterfaceNumber)
+ else if (fu->flags & USBG_IS_UAS)
return USB_G_ALT_INT_UAS;
return -EOPNOTSUPP;
@@ -2063,6 +2393,9 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
+ if (fu->iface != intf)
+ return -EOPNOTSUPP;
+
if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
struct guas_setup_wq *work;
@@ -2271,6 +2604,8 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
fu->tpg = tpg_instances[i].tpg;
+
+ hash_init(fu->stream_hash);
mutex_unlock(&tpg_instances_lock);
return &fu->function;
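The overlapped-tag handling above keys every active stream by its SCSI tag in fu->stream_hash: a stream is hashed in usbg_submit_command() and unhashed once its status stage completes, so a second command arriving with a live tag is caught in O(1). A minimal sketch of that lookup, using the same kernel hashtable API the patch uses (tag_is_active() and its parameters are illustrative, not part of the patch):

	static bool tag_is_active(struct f_uas *fu, struct se_session *se_sess,
				  u16 scsi_tag)
	{
		struct uas_stream *stream;

		/* Walk the bucket for this tag; buckets can collide, so also
		 * compare the owning command's tag before declaring overlap. */
		hash_for_each_possible(fu->stream_hash, stream, node, scsi_tag) {
			int i = stream - &fu->stream[0];
			struct usbg_cmd *active =
				&((struct usbg_cmd *)se_sess->sess_cmd_map)[i];

			if (active->tag == scsi_tag)
				return true;	/* overlapped tag */
		}

		return false;
	}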
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index ce5b77f89190..9b324821c93b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1185,6 +1185,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->as_in_alt = 0;
}
+ std_ac_if_desc.bNumEndpoints = 0;
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
if (!uac2->int_ep) {
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index ced5d2b09234..11ac785d5eee 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -131,7 +131,7 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
#define FSG_BUFLEN ((u32)16384)
/* Maximal number of LUNs supported in mass storage function */
-#define FSG_MAX_LUNS 16
+#define FSG_MAX_LUNS (US_BULK_MAX_LUN_LIMIT + 1)
enum fsg_buffer_state {
BUF_STATE_SENDING = -2,
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index 3cd565794ad7..009974d81d66 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -4,6 +4,7 @@
#include <linux/kref.h>
/* #include <linux/usb/uas.h> */
+#include <linux/hashtable.h>
#include <linux/usb/composite.h>
#include <linux/usb/uas.h>
#include <linux/usb/storage.h>
@@ -13,9 +14,11 @@
#define USBG_NAMELEN 32
#define fuas_to_gadget(f) (f->function.config->cdev->gadget)
-#define UASP_SS_EP_COMP_LOG_STREAMS 4
+#define UASP_SS_EP_COMP_LOG_STREAMS 5
#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
+#define USBG_NUM_CMDS (UASP_SS_EP_COMP_NUM_STREAMS + 1)
+
enum {
USB_G_STR_INT_UAS = 0,
USB_G_STR_INT_BBB,
@@ -24,7 +27,7 @@ enum {
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1
-#define USB_G_DEFAULT_SESSION_TAGS 128
+#define USB_G_DEFAULT_SESSION_TAGS USBG_NUM_CMDS
struct tcm_usbg_nexus {
struct se_session *tvn_se_sess;
@@ -72,15 +75,23 @@ struct usbg_cmd {
struct se_cmd se_cmd;
void *data_buf; /* used if no sg support available */
struct f_uas *fu;
- struct completion write_complete;
struct kref ref;
+ struct usb_request *req;
+
+ u32 flags;
+#define USBG_CMD_PENDING_DATA_WRITE BIT(0)
+
/* UAS only */
u16 tag;
u16 prio_attr;
struct sense_iu sense_iu;
+ struct response_iu response_iu;
enum uas_state state;
- struct uas_stream *stream;
+
+ int tmr_func;
+ int tmr_rsp;
+#define RC_RESPONSE_UNKNOWN 0xff
/* BOT only */
__le32 bot_tag;
@@ -93,6 +104,9 @@ struct uas_stream {
struct usb_request *req_in;
struct usb_request *req_out;
struct usb_request *req_status;
+
+ struct completion cmd_completion;
+ struct hlist_node node;
};
struct usbg_cdb {
@@ -116,15 +130,17 @@ struct f_uas {
#define USBG_USE_STREAMS (1 << 2)
#define USBG_IS_BOT (1 << 3)
#define USBG_BOT_CMD_PEND (1 << 4)
+#define USBG_BOT_WEDGED (1 << 5)
- struct usbg_cdb cmd;
+ struct usbg_cdb cmd[USBG_NUM_CMDS];
struct usb_ep *ep_in;
struct usb_ep *ep_out;
/* UAS */
struct usb_ep *ep_status;
struct usb_ep *ep_cmd;
- struct uas_stream stream[UASP_SS_EP_COMP_NUM_STREAMS];
+ struct uas_stream stream[USBG_NUM_CMDS];
+ DECLARE_HASHTABLE(stream_hash, UASP_SS_EP_COMP_LOG_STREAMS);
/* BOT */
struct bot_status bot_status;
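For reference, the sizing above works out to 1 << 5 = 32 SuperSpeed streams plus one extra slot, so USBG_NUM_CMDS is 33, and the session tag pool shrinks from 128 tags to exactly one per possible outstanding command. A compile-time check along these lines (illustrative; not part of the patch) would pin the relationship:

	/* One uas_stream and one usbg_cdb per possible outstanding command. */
	BUILD_BUG_ON(USBG_NUM_CMDS != UASP_SS_EP_COMP_NUM_STREAMS + 1);
	BUILD_BUG_ON(USB_G_DEFAULT_SESSION_TAGS != USBG_NUM_CMDS);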
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 09e2838917e2..f58590bf5e02 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link)
* There is a transfer in progress. So we trigger a remote
* wakeup to inform the host.
*/
- ether_wakeup_host(dev->port_usb);
- return;
+ if (!ether_wakeup_host(dev->port_usb))
+ return;
}
spin_lock_irqsave(&dev->lock, flags);
link->is_suspend = true;
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 53d9fc41acc5..36fff45e8c9b 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -21,6 +21,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/console.h>
@@ -1545,7 +1546,7 @@ static int __init userial_init(void)
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
MAX_U_SERIAL_PORTS,
- (MAX_U_SERIAL_PORTS == 1) ? "" : "s");
+ str_plural(MAX_U_SERIAL_PORTS));
return status;
fail:
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 79e223713d8b..fb77b0b21790 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -818,7 +818,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
return -EINVAL;
/* Allocate a kthread for asynchronous hw submit handler. */
- video->kworker = kthread_create_worker(0, "UVCG");
+ video->kworker = kthread_run_worker(0, "UVCG");
if (IS_ERR(video->kworker)) {
uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
return PTR_ERR(video->kworker);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9c7381661016..b6a30d88a800 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -20,6 +20,7 @@
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/aio.h>
@@ -1182,7 +1183,7 @@ ep0_fasync (int f, struct file *fd, int on)
{
struct dev_data *dev = fd->private_data;
// caller must F_SETOWN before signal delivery happens
- VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
+ VDEBUG(dev, "%s %s\n", __func__, str_on_off(on));
return fasync_helper (f, fd, on, &dev->fasync);
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index a63e4af60a56..02fe1a08d575 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -22,6 +22,7 @@
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/bcd.h>
#include <linux/version.h>
@@ -219,7 +220,7 @@ static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
ep->vhub->wakeup_en = is_set;
EPDBG(ep, "Hub remote wakeup %s\n",
- is_set ? "enabled" : "disabled");
+ str_enabled_disabled(is_set));
return std_req_complete;
}
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index e3af4ec3794e..aa4c61094dc6 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -131,7 +132,7 @@ static void proc_ep_show(struct seq_file *s, struct at91_ep *ep)
seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n",
csr,
(csr & 0x07ff0000) >> 16,
- (csr & (1 << 15)) ? "enabled" : "disabled",
+ str_enabled_disabled(csr & (1 << 15)),
(csr & (1 << 11)) ? "DATA1" : "DATA0",
types[(csr & 0x700) >> 8],
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index 62fce42ef2da..7e69944ef18a 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
@@ -2233,12 +2234,12 @@ static int cdns2_init_eps(struct cdns2_device *pdev)
dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
"BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
pep->name,
- (pep->endpoint.caps.type_control) ? "yes" : "no",
- (pep->endpoint.caps.type_int) ? "yes" : "no",
- (pep->endpoint.caps.type_bulk) ? "yes" : "no",
- (pep->endpoint.caps.type_iso) ? "yes" : "no",
- (pep->endpoint.caps.dir_in) ? "yes" : "no",
- (pep->endpoint.caps.dir_out) ? "yes" : "no");
+ str_yes_no(pep->endpoint.caps.type_control),
+ str_yes_no(pep->endpoint.caps.type_int),
+ str_yes_no(pep->endpoint.caps.type_bulk),
+ str_yes_no(pep->endpoint.caps.type_iso),
+ str_yes_no(pep->endpoint.caps.dir_in),
+ str_yes_no(pep->endpoint.caps.dir_out));
INIT_LIST_HEAD(&pep->pending_list);
INIT_LIST_HEAD(&pep->deferred_list);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index a6f46364be65..4b3d5075621a 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1543,8 +1543,8 @@ void usb_del_gadget(struct usb_gadget *gadget)
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
sysfs_remove_link(&udc->dev.kobj, "gadget");
- flush_work(&gadget->work);
device_del(&gadget->dev);
+ flush_work(&gadget->work);
ida_free(&gadget_id_numbers, gadget->id_number);
cancel_work_sync(&udc->vbus_work);
device_unregister(&udc->dev);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a7e8fa45776b..4f1b5db51dda 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
@@ -625,7 +626,7 @@ static int dummy_enable(struct usb_ep *_ep,
desc->bEndpointAddress & 0x0f,
(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(desc)),
- max, ep->stream_en ? "enabled" : "disabled");
+ max, str_enabled_disabled(ep->stream_en));
/* at this point real hardware should be NAKing transfers
* to that endpoint, until a buffer is queued to it.
@@ -2478,8 +2479,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2508,8 +2508,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
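The hrtimer conversion here (and in ehci-hcd.c further down) is one mechanical pattern; a before/after sketch, assuming a callback with the usual enum hrtimer_restart (*)(struct hrtimer *) signature:

	/* before: two steps, with the callback assigned after init */
	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	timer.function = my_callback;

	/* after: one call, so the timer can never be armed without a callback */
	hrtimer_setup(&timer, my_callback, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);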
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 8b7f7f961774..4dea8bc30cf6 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1181,7 +1182,7 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- dev_vdbg(&gadget->dev, "VBUS %s\n", is_active ? "on" : "off");
+ dev_vdbg(&gadget->dev, "VBUS %s\n", str_on_off(is_active));
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 698463bf697b..8902abe3ca76 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1252,7 +1253,7 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- VDBG("VBUS %s\n", is_active ? "on" : "off");
+ VDBG("VBUS %s\n", str_on_off(is_active));
udc->vbus_active = (is_active != 0);
if (cpu_is_omap15xx()) {
/* "software" detect, ignored if !VBUS_MODE_1510 */
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index f9a55d4f189f..897f53601b5b 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -20,6 +20,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
@@ -1083,7 +1084,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
is_first_req = list_empty(&ep->queue);
ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
- _req, is_first_req ? "yes" : "no",
+ _req, str_yes_no(is_first_req),
_req->length, _req->buf);
if (!ep->enabled) {
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fce5c41d9f29..89b304cf6d03 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -310,7 +310,7 @@ struct renesas_usb3_request {
struct list_head queue;
};
-#define USB3_EP_NAME_SIZE 8
+#define USB3_EP_NAME_SIZE 16
struct renesas_usb3_ep {
struct usb_ep ep;
struct renesas_usb3 *usb3;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6de79ac5e6a4..6d1d190c914d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -466,8 +466,7 @@ static int ehci_init(struct usb_hcd *hcd)
*/
ehci->need_io_watchdog = 1;
- hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ehci->hrtimer.function = ehci_hrtimer_func;
+ hrtimer_setup(&ehci->hrtimer, ehci_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index a6c20facf945..fce800ba4c61 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -2756,7 +2757,7 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
if (!HCS_PPC(oxu->hcs_params))
return;
- oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+ oxu_dbg(oxu, "...power%s ports...\n", str_up_down(is_on));
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
if (is_on)
oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1f9c1b1435d8..0404489c2f6a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
* booting from USB disk or using a usb keyboard
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
+
+	/* The LS7A EHCI controller has no extended capabilities, so the
+	 * EECP (EHCI Extended Capabilities Pointer) field of the HCCPARAMS
+	 * register should read 0x0, but it reads 0xa0 instead. Clear it to
+	 * avoid spurious error messages on boot.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
+ hcc_params &= ~(0xffL << 8);
+
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 036f5fd6d159..fa2e4badd288 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -48,6 +48,7 @@
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -98,7 +99,7 @@ static void port_power(struct sl811 *sl811, int is_on)
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
dev_dbg(hcd->self.controller, "power %s\n",
- is_on ? "on" : "off");
+ str_on_off(is_on));
sl811->board->port_power(hcd->self.controller, is_on);
}
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
index 9e94cebf4a56..f6b9a00a0ab9 100644
--- a/drivers/usb/host/xhci-caps.h
+++ b/drivers/usb/host/xhci-caps.h
@@ -83,3 +83,9 @@
#define HCC2_CIC(p) ((p) & (1 << 5))
/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
#define HCC2_ETC(p) ((p) & (1 << 6))
+/* true: HC support Extended TBC TRB Status Capability */
+#define HCC2_ETC_TSC(p) ((p) & (1 << 7))
+/* true: HC support Get/Set Extended Property Capability */
+#define HCC2_GSC(p) ((p) & (1 << 8))
+/* true: HC support Virtualization Based Trusted I/O Capability */
+#define HCC2_VTC(p) ((p) & (1 << 9))
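Like the existing HCC2_* helpers, the new macros test single bits of the read-once HCCPARAMS2 capability register; usage would look something like this (a sketch, not code from the patch):

	u32 hcc2 = readl(&xhci->cap_regs->hcc_params2);

	if (HCC2_GSC(hcc2))
		xhci_dbg(xhci, "HC supports Get/Set Extended Property commands\n");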
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 227e513867dd..fd7895b24367 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -957,7 +957,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
/* set fast poll rate if there are pending data transfers */
if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
!list_empty(&dbc->eps[BULK_IN].list_pending))
- poll_interval = 1;
+ poll_interval = 0;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index d719c16ea30b..60ed753c85bb 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -110,15 +110,74 @@ static void dbc_start_rx(struct dbc_port *port)
}
}
+/*
+ * Queue received data to tty buffer and push it.
+ *
+ * Returns the number of remaining bytes that did not fit in the tty
+ * buffer, i.e. 0 if all bytes were successfully moved. On error, returns
+ * a negative errno. Must be called with port_lock held.
+ */
+static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
+{
+ char *packet = req->buf;
+ unsigned int n, size = req->actual;
+ int count;
+
+ if (!req->actual)
+ return 0;
+
+	/* if n_read is set, the request was partially moved to the tty buffer */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(&port->port, packet, size);
+ if (count)
+ tty_flip_buffer_push(&port->port);
+ if (count != size) {
+ port->n_read += count;
+ return size - count;
+ }
+
+ port->n_read = 0;
+ return 0;
+}
+
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
+ struct tty_struct *tty;
+ int untransferred;
+
+ tty = port->port.tty;
spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+	 * Only defer copying data to the tty buffer when:
+	 * - !list_empty(&port->read_queue): there is older data still pending
+	 * - the tty is throttled
+	 * - not all data fit into the buffer; defer the remaining part
+ */
+
+ if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred == 0) {
+ list_add_tail(&req->list_pool, &port->read_pool);
+ if (req->status != -ESHUTDOWN)
+ dbc_start_rx(port);
+ goto out;
+ }
+ }
+
+ /* defer moving data from req to tty buffer to a tasklet */
list_add_tail(&req->list_pool, &port->read_queue);
tasklet_schedule(&port->push);
+out:
spin_unlock_irqrestore(&port->port_lock, flags);
}
@@ -331,10 +390,10 @@ static void dbc_rx_push(struct tasklet_struct *t)
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
- bool do_push = false;
bool disconnect = false;
struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
+ int untransferred;
spin_lock_irqsave(&port->port_lock, flags);
tty = port->port.tty;
@@ -356,42 +415,15 @@ static void dbc_rx_push(struct tasklet_struct *t)
break;
}
- if (req->actual) {
- char *packet = req->buf;
- unsigned int n, size = req->actual;
- int count;
-
- n = port->n_read;
- if (n) {
- packet += n;
- size -= n;
- }
-
- count = tty_insert_flip_string(&port->port, packet,
- size);
- if (count)
- do_push = true;
- if (count != size) {
- port->n_read += count;
- break;
- }
- port->n_read = 0;
- }
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred > 0)
+ break;
list_move_tail(&req->list_pool, &port->read_pool);
}
- if (do_push)
- tty_flip_buffer_push(&port->port);
-
- if (!list_empty(queue) && tty) {
- if (!tty_throttled(tty)) {
- if (do_push)
- tasklet_schedule(&port->push);
- else
- pr_warn("ttyDBC0: RX not scheduled?\n");
- }
- }
+ if (!list_empty(queue))
+ tasklet_schedule(&port->push);
if (!disconnect)
dbc_start_rx(port);
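Both callers of dbc_rx_push_buffer() rely on the same return convention; a sketch of how a caller interprets it (the names come from the patch, the framing is illustrative):

	int left = dbc_rx_push_buffer(port, req);

	if (left == 0) {
		/* fully consumed: recycle the request */
		list_move_tail(&req->list_pool, &port->read_pool);
	} else if (left > 0) {
		/* tty buffer full: keep the request queued; port->n_read
		 * holds the resume offset for the next tasklet pass */
	} else {
		/* negative errno: give up on this request */
	}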
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f0c1b96e208..1f5ef174abea 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -232,16 +232,7 @@ static struct xhci_file_map ring_files[] = {
static int xhci_ring_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
- f_map = &ring_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -318,16 +309,7 @@ static struct xhci_file_map context_files[] = {
static int xhci_context_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(context_files); i++) {
- f_map = &context_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -410,7 +392,8 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
int i;
for (i = 0; i < nentries; i++)
- debugfs_create_file(files[i].name, 0444, parent, data, fops);
+ debugfs_create_file_aux(files[i].name, 0444, parent,
+ data, &files[i], fops);
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
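The same conversion is applied to mtu3_debugfs.c further down: instead of re-deriving which table entry a file belongs to by comparing file_dentry(file)->d_iname strings, the entry is attached at creation time and fetched back in one call. The pairing, in sketch form (debugfs_create_file_aux()/debugfs_get_aux() are the recently added debugfs aux helpers; the surrounding names are illustrative):

	/* at creation time, attach the per-file table entry as aux data */
	debugfs_create_file_aux(entry->name, 0444, parent, data, entry, &fops);

	/* at open time, retrieve it without any string comparison */
	static int my_open(struct inode *inode, struct file *file)
	{
		const struct xhci_file_map *entry = debugfs_get_aux(file);

		return single_open(file, entry->show, inode->i_private);
	}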
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9693464c0520..69c278b64084 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/pci.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
struct xhci_port *port)
{
+ struct usb_hcd *hcd;
void __iomem *base;
u32 offset;
+	/* Don't try to probe this capability on non-Intel hosts */
+ hcd = xhci_to_hcd(xhci);
+ if (!dev_is_pci(hcd->self.controller) ||
+ to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
+ return USB_LINK_UNKNOWN;
+
base = &xhci->cap_regs->hc_capbase;
offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 92703efda1f7..fdf0c1008225 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2437,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2d1e205c14c6..54460d11f7ee 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -38,6 +38,8 @@
#define PCI_DEVICE_ID_ETRON_EJ168 0x7023
#define PCI_DEVICE_ID_ETRON_EJ188 0x7052
+#define PCI_DEVICE_ID_VIA_VL805 0x3483
+
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
@@ -418,8 +420,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
@@ -467,11 +471,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->device == 0x9203)
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->vendor == PCI_VENDOR_ID_CDNS &&
@@ -653,8 +657,8 @@ put_runtime_pm:
}
EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci");
-static const struct pci_device_id pci_ids_reject[] = {
- /* handled by xhci-pci-renesas */
+/* handled by xhci-pci-renesas if enabled */
+static const struct pci_device_id pci_ids_renesas[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
{ /* end: all zeroes */ }
@@ -662,7 +666,8 @@ static const struct pci_device_id pci_ids_reject[] = {
static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- if (pci_match_id(pci_ids_reject, dev))
+ if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
+ pci_match_id(pci_ids_renesas, dev))
return -ENODEV;
return xhci_pci_common_probe(dev, id);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index e6c9006bd568..d85ffa9ffaa7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -290,7 +290,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+ if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ||
+ (xhci->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
@@ -329,6 +330,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) {
+ xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+ xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
@@ -566,6 +569,7 @@ EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
static const struct acpi_device_id usb_xhci_acpi_match[] = {
/* XHCI-compliant USB Controller */
{ "PNP0D10", },
+ { "PNP0D15", },
{ }
};
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 09b05a62375e..965bffce301e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -55,6 +55,7 @@
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -422,7 +423,8 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- xhci_mod_cmd_timer(xhci);
+ if (cur_cmd)
+ xhci_mod_cmd_timer(xhci);
xhci_ring_cmd_db(xhci);
}
}
@@ -1649,12 +1651,13 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
NEC_FW_MINOR(le32_to_cpu(event->status)));
}
-static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 comp_code, u32 comp_param)
{
list_del(&cmd->cmd_list);
if (cmd->completion) {
- cmd->status = status;
+ cmd->status = comp_code;
+ cmd->comp_param = comp_param;
complete(cmd->completion);
} else {
kfree(cmd);
@@ -1666,7 +1669,7 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
struct xhci_command *cur_cmd, *tmp_cmd;
xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
- xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED, 0);
}
void xhci_handle_command_timeout(struct work_struct *work)
@@ -1751,6 +1754,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ u32 status = le32_to_cpu(event->status);
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
@@ -1879,7 +1883,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
}
event_handled:
- xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code, COMP_PARAM(status));
inc_deq(xhci, xhci->cmd_ring);
}
@@ -3438,8 +3442,8 @@ static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
if (xhci_interval != ep_interval) {
dev_dbg_ratelimited(&urb->dev->dev,
"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
- ep_interval, ep_interval == 1 ? "" : "s",
- xhci_interval, xhci_interval == 1 ? "" : "s");
+ ep_interval, str_plural(ep_interval),
+ xhci_interval, str_plural(xhci_interval));
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 06ae193ec874..22dc86fb5254 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -26,6 +26,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#include <linux/usb/role.h>
@@ -724,7 +725,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
if (err < 0) {
dev_err(dev,
"failed to %s LFPS detection on USB3#%u: %d\n",
- enable ? "enable" : "disable", port, err);
+ str_enable_disable(enable), port, err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
@@ -1349,7 +1350,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
u32 status;
int ret;
- dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? "on" : "off");
+ dev_dbg(tegra->dev, "host mode %s\n", str_on_off(tegra->host_mode));
mutex_lock(&tegra->lock);
@@ -1667,7 +1668,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ if (!of_property_present(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5ebde8cae4fc..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
@@ -779,8 +780,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
xhci_initialize_ring_info(ring);
/*
@@ -4523,7 +4528,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
- enable ? "enable" : "disable", port_num + 1);
+ str_enable_disable(enable), port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4914f0a10cff..779b01dee068 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -529,6 +529,7 @@ struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
+ u32 comp_param;
int slot_id;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
@@ -959,6 +960,9 @@ struct xhci_event_cmd {
__le32 flags;
};
+/* status bitmasks */
+#define COMP_PARAM(p) ((p) & 0xffffff) /* Command Completion Parameter */
+
/* Address device - disable SetAddress */
#define TRB_BSR (1<<9)
@@ -1628,7 +1632,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
-#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_TRB_OVERFETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
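In a command completion event TRB, the status dword packs the completion code into bits 31:24 and the command completion parameter into bits 23:0, which is what COMP_PARAM() masks off. Decoding both, in sketch form (GET_COMP_CODE() already exists in xhci.h; the variable names are illustrative):

	u32 status = le32_to_cpu(event->status);
	u32 code   = GET_COMP_CODE(status);	/* bits 31:24 */
	u32 param  = COMP_PARAM(status);	/* bits 23:0  */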
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f758241d9d3..934ec5310fb9 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -322,7 +322,7 @@ static inline void mts_urb_abort(struct mts_desc* desc) {
usb_kill_urb( desc->urb );
}
-static int mts_slave_alloc (struct scsi_device *s)
+static int mts_sdev_init (struct scsi_device *s)
{
s->inquiry_len = 0x24;
return 0;
@@ -626,7 +626,7 @@ static const struct scsi_host_template mts_scsi_host_template = {
.this_id = -1,
.emulated = 1,
.dma_alignment = 511,
- .slave_alloc = mts_slave_alloc,
+ .sdev_init = mts_sdev_init,
.max_sectors= 256, /* 128 K */
};
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index f0de99858353..c003049bafbf 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include "mtu3.h"
@@ -256,16 +257,7 @@ static const struct mtu3_file_map mtu3_ep_files[] = {
static int mtu3_ep_open(struct inode *inode, struct file *file)
{
- const char *file_name = file_dentry(file)->d_iname;
- const struct mtu3_file_map *f_map;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
- f_map = &mtu3_ep_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct mtu3_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -288,17 +280,8 @@ static const struct debugfs_reg32 mtu3_prb_regs[] = {
static int mtu3_probe_show(struct seq_file *sf, void *unused)
{
- const char *file_name = file_dentry(sf->file)->d_iname;
struct mtu3 *mtu = sf->private;
- const struct debugfs_reg32 *regs;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
- regs = &mtu3_prb_regs[i];
-
- if (strcmp(regs->name, file_name) == 0)
- break;
- }
+ const struct debugfs_reg32 *regs = debugfs_get_aux(sf->file);
seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset,
mtu3_readl(mtu->ippc_base, (u32)regs->offset));
@@ -314,13 +297,11 @@ static int mtu3_probe_open(struct inode *inode, struct file *file)
static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *file_name = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtu3 *mtu = sf->private;
- const struct debugfs_reg32 *regs;
+ const struct debugfs_reg32 *regs = debugfs_get_aux(file);
char buf[32];
u32 val;
- int i;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -328,12 +309,6 @@ static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
if (kstrtou32(buf, 0, &val))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
- regs = &mtu3_prb_regs[i];
-
- if (strcmp(regs->name, file_name) == 0)
- break;
- }
mtu3_writel(mtu->ippc_base, (u32)regs->offset, val);
return count;
@@ -358,8 +333,8 @@ static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
regs = &mtu3_prb_regs[i];
- debugfs_create_file(regs->name, 0644, dir_prb,
- mtu, &mtu3_probe_fops);
+ debugfs_create_file_aux(regs->name, 0644, dir_prb,
+ mtu, regs, &mtu3_probe_fops);
}
mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs,
@@ -379,8 +354,8 @@ static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep,
for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
files = &mtu3_ep_files[i];
- debugfs_create_file(files->name, 0444, dir_ep,
- mep, &mtu3_ep_fops);
+ debugfs_create_file_aux(files->name, 0444, dir_ep,
+ mep, files, &mtu3_ep_fops);
}
}
@@ -479,7 +454,7 @@ static int ssusb_vbus_show(struct seq_file *sf, void *unused)
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
- regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
+ str_on_off(regulator_is_enabled(otg_sx->vbus)));
return 0;
}
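The mtu3 debugfs rework deletes three copies of the same lookup loop, which re-derived per-file data by comparing the dentry name against a static table, and instead stores the pointer with the file. The pairing, condensed from the hunks above:

	/* at creation: attach the table entry as the file's aux pointer */
	debugfs_create_file_aux(regs->name, 0644, dir_prb,
				mtu, regs, &mtu3_probe_fops);

	/* in open/show/write: recover it with no string matching */
	const struct debugfs_reg32 *regs = debugfs_get_aux(file);

This also removes a quiet failure mode: the old loops fell through with regs or f_map pointing at the table's last entry whenever no name matched.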
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 8191b7ed3852..ffa5b9401dad 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
@@ -109,7 +110,7 @@ int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
if (!vbus)
return 0;
- dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+ dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, str_on_off(is_on));
if (is_on) {
ret = regulator_enable(vbus);
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index ad0eeac4332d..bf73fbc29976 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include "mtu3.h"
#include "mtu3_trace.h"
@@ -490,7 +491,7 @@ static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
unsigned long flags;
dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
- is_on ? "on" : "off", mtu->is_active ? "" : "in");
+ str_on_off(is_on), mtu->is_active ? "" : "in");
pm_runtime_get_sync(mtu->dev);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index f772aa272bea..26fd71a5f9b2 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
@@ -306,7 +307,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
}
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
- drvvbus ? "on" : "off",
+ str_on_off(drvvbus),
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 03b1154a6014..7f349f5e781d 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -72,6 +72,7 @@
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
@@ -1937,7 +1938,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
pm_runtime_put_sync(dev);
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
- vbus ? "on" : "off", val);
+ str_on_off(vbus), val);
}
static DEVICE_ATTR_RW(vbus);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 9589243e8951..4cde3abb7006 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -760,8 +760,8 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
if (!controller)
goto kzalloc_fail;
- hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- controller->early_tx.function = cppi41_recheck_tx_req;
+ hrtimer_setup(&controller->early_tx, cppi41_recheck_tx_req, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_LIST_HEAD(&controller->early_tx_list);
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
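hrtimer_setup() replaces the two-step hrtimer_init() plus manual callback assignment, so the timer's function pointer is installed before the timer is ever visible. The pattern, sketched with a dummy callback (demo_expire is illustrative only):

	static enum hrtimer_restart demo_expire(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;	/* one-shot */
	}

	/* old: hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *      t.function = demo_expire;
	 */
	hrtimer_setup(&t, demo_expire, CLOCK_MONOTONIC, HRTIMER_MODE_REL);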
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 2542239ec64e..f877faf5a930 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -24,6 +24,7 @@
#include <linux/usb/usb_phy_generic.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/sizes.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -378,7 +379,7 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
/* NOTE: this must complete power-on within 100 ms. */
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
- drvvbus ? "on" : "off",
+ str_on_off(drvvbus),
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c6076df0d50c..6869c58367f2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -1606,7 +1607,7 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
musb_dbg(musb, "gadget D+ pullup %s",
- is_on ? "on" : "off");
+ str_on_off(is_on));
musb_writeb(musb->mregs, MUSB_POWER, power);
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 732ba981e607..6b4481a867c5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
@@ -1028,7 +1029,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+ urb->actual_length);
musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
fifo_count,
- (fifo_count == 1) ? "" : "s",
+ str_plural(fifo_count),
fifo_dest);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 42c42e193232..40ac68e52cee 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -529,7 +530,7 @@ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on)
if (!otg->gadget || !otg->gadget->dev.parent)
return -ENODEV;
- VDBG("gadget %s\n", on ? "on" : "off");
+ VDBG("gadget %s\n", str_on_off(on));
dev = otg->gadget->dev.parent;
if (on) {
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 6c3ececf9137..8423be59ec0f 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -212,7 +212,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
- needs_clk = of_property_read_bool(node, "clocks");
+ needs_clk = of_property_present(node, "clocks");
}
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
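Same conversion as the tegra "power-domains" hunk at the top of this section: of_property_read_bool() is meant for genuinely boolean (empty) properties, while "clocks" and "power-domains" carry values, so existence testing is what of_property_present() actually expresses. The calls are drop-in:

	needs_clk = of_property_present(node, "clocks");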
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index a7a102f2e163..30d6c8840a5e 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
@@ -217,7 +218,7 @@ static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
if (!otg->gadget)
return;
- dev_info(mvotg->phy.dev, "gadget %s\n", on ? "on" : "off");
+ dev_info(mvotg->phy.dev, "gadget %s\n", str_on_off(on));
if (on)
usb_gadget_vbus_connect(otg->gadget);
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index ae7bf3ff89ee..88607d0edb01 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -18,6 +18,7 @@
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include <linux/usb/otg.h>
#include <linux/mfd/retu.h>
#include <linux/usb/gadget.h>
@@ -63,7 +64,7 @@ static ssize_t vbus_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
- return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
+ return sprintf(buf, "%s\n", str_on_off(tu->vbus_state));
}
static DEVICE_ATTR_RO(vbus);
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 1ce134505cee..e1435bc59662 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -346,13 +346,6 @@ static void devm_usb_phy_release2(struct device *dev, void *_res)
usb_put_phy(res->phy);
}
-static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
-{
- struct usb_phy **phy = res;
-
- return *phy == match_data;
-}
-
static void usb_charger_init(struct usb_phy *usb_phy)
{
usb_phy->chg_type = UNKNOWN_TYPE;
@@ -615,25 +608,6 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
EXPORT_SYMBOL_GPL(devm_usb_get_phy_by_phandle);
/**
- * devm_usb_put_phy - release the USB PHY
- * @dev: device that wants to release this phy
- * @phy: the phy returned by devm_usb_get_phy()
- *
- * destroys the devres associated with this phy and invokes usb_put_phy
- * to release the phy.
- *
- * For use by USB host and peripheral drivers.
- */
-void devm_usb_put_phy(struct device *dev, struct usb_phy *phy)
-{
- int r;
-
- r = devres_release(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
- dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
-}
-EXPORT_SYMBOL_GPL(devm_usb_put_phy);
-
-/**
* usb_put_phy - release the USB PHY
* @x: the phy returned by usb_get_phy()
*
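devm_usb_put_phy() goes away together with its devres match callback; it has no users left once the renesas_usbhs gadget code (below) takes its reference through the managed getter, which releases it automatically on unbind:

	/* mod_gadget.c, below: devres drops the reference, no put needed */
	gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);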
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 935fc496fe94..4b35ef216125 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
if (PTR_ERR(priv->clks[1]) == -ENOENT)
priv->clks[1] = NULL;
- else if (IS_ERR(priv->clks[1]))
+ else if (IS_ERR(priv->clks[1])) {
+ clk_put(priv->clks[0]);
return PTR_ERR(priv->clks[1]);
+ }
return 0;
}
@@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "usb remove\n");
+ flush_delayed_work(&priv->notify_hotplug_work);
+
/* power off */
if (!usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
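Two independent fixes here: the clock-get path stops leaking the first clock when fetching the second fails hard, and usbhs_remove() now flushes the delayed hotplug work before cutting power so the work cannot run against a dying device. The clock path, annotated:

	priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
	if (PTR_ERR(priv->clks[1]) == -ENOENT)
		priv->clks[1] = NULL;		/* second clock is optional */
	else if (IS_ERR(priv->clks[1])) {
		clk_put(priv->clks[0]);		/* undo the earlier get */
		return PTR_ERR(priv->clks[1]);
	}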
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 105132ae87ac..e8e5723f5412 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
goto usbhs_mod_gadget_probe_err_gpriv;
}
- gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+ gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
!IS_ERR(gpriv->transceiver) ? "" : "no ");
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index c58a12c147f4..30482d4cf826 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -387,8 +387,11 @@ usb_role_switch_register(struct device *parent,
dev_set_name(&sw->dev, "%s-role-switch",
desc->name ? desc->name : dev_name(parent));
+ sw->registered = true;
+
ret = device_register(&sw->dev);
if (ret) {
+ sw->registered = false;
put_device(&sw->dev);
return ERR_PTR(ret);
}
@@ -399,8 +402,6 @@ usb_role_switch_register(struct device *parent,
dev_warn(&sw->dev, "failed to add component\n");
}
- sw->registered = true;
-
/* TODO: Symlinks for the host port and the device controller. */
return sw;
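Moving the sw->registered assignment ahead of device_register() closes a window where callers that consult this flag during registration would still see the switch as unregistered; the flag is rolled back if registration fails:

	sw->registered = true;			/* visible to early callers */
	ret = device_register(&sw->dev);
	if (ret) {
		sw->registered = false;		/* undo before dropping the ref */
		put_device(&sw->dev);
		return ERR_PTR(ret);
	}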
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d10e4c4848a0..7cc36f84821f 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -63,6 +63,7 @@
#define CH341_REG_DIVISOR 0x13
#define CH341_REG_LCR 0x18
#define CH341_REG_LCR2 0x25
+#define CH341_REG_FLOW_CTL 0x27
#define CH341_NBREAK_BITS 0x01
@@ -77,6 +78,9 @@
#define CH341_LCR_CS6 0x01
#define CH341_LCR_CS5 0x00
+#define CH341_FLOW_CTL_NONE 0x00
+#define CH341_FLOW_CTL_RTSCTS 0x01
+
#define CH341_QUIRK_LIMITED_PRESCALER BIT(0)
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
@@ -478,6 +482,28 @@ err_kill_interrupt_urb:
return r;
}
+static void ch341_set_flow_control(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
+{
+ u16 flow_ctl;
+ int r;
+
+ if (C_CRTSCTS(tty))
+ flow_ctl = CH341_FLOW_CTL_RTSCTS;
+ else
+ flow_ctl = CH341_FLOW_CTL_NONE;
+
+ r = ch341_control_out(port->serial->dev,
+ CH341_REQ_WRITE_REG,
+ (CH341_REG_FLOW_CTL << 8) | CH341_REG_FLOW_CTL,
+ (flow_ctl << 8) | flow_ctl);
+ if (r < 0 && old_termios) {
+ tty->termios.c_cflag &= ~CRTSCTS;
+ tty->termios.c_cflag |= (old_termios->c_cflag & CRTSCTS);
+ }
+}
+
/* Old_termios contains the original termios settings and
* tty->termios contains the new setting to be used.
*/
@@ -546,6 +572,8 @@ static void ch341_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->mcr);
+
+ ch341_set_flow_control(tty, port, old_termios);
}
/*
@@ -632,13 +660,12 @@ restore:
static int ch341_break_ctl(struct tty_struct *tty, int break_state)
{
- const uint16_t ch341_break_reg =
- ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
+ const u16 ch341_break_reg = (CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
+ u16 reg_contents;
+ u8 break_reg[2];
int r;
- uint16_t reg_contents;
- uint8_t break_reg[2];
if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK)
return ch341_simulate_break(tty, break_state);
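CH341_REQ_WRITE_REG programs two registers per control transfer: one 16-bit argument carries the pair of register addresses, the other the two data bytes, in matching high and low halves. The new flow-control path wants a single register, so it packs the same address and value into both halves (a sketch of the idiom; exactly which USB setup field carries which pair is internal to ch341_control_out()):

	/* both halves name CH341_REG_FLOW_CTL: one register, written once */
	r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
			      (CH341_REG_FLOW_CTL << 8) | CH341_REG_FLOW_CTL,
			      (flow_ctl << 8) | flow_ctl);

On failure, and only when an old termios exists to fall back to, the handler reverts CRTSCTS so the termios state does not advertise flow control the hardware never accepted.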
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c24101f0a07a..9960ac2b10b7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -223,6 +223,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e07c5e3eb18c..9b34e23b7091 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
/* GMC devices */
{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ /* Altera USB Blaster 3 */
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5ee60ba2a73c..52be47d684ea 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1612,3 +1612,16 @@
*/
#define GMC_VID 0x1cd7
#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+
+/*
+ * Altera USB Blaster 3 (http://www.altera.com).
+ */
+#define ALTERA_VID 0x09fb
+#define ALTERA_UB3_6022_PID 0x6022
+#define ALTERA_UB3_6025_PID 0x6025
+#define ALTERA_UB3_6026_PID 0x6026
+#define ALTERA_UB3_6029_PID 0x6029
+#define ALTERA_UB3_602A_PID 0x602a
+#define ALTERA_UB3_602C_PID 0x602c
+#define ALTERA_UB3_602D_PID 0x602d
+#define ALTERA_UB3_602E_PID 0x602e
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 64317b390d22..5cd26dac2069 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
-/* MeiG Smart Technology products */
-#define MEIGSMART_VENDOR_ID 0x2dee
-/* MeiG Smart SRM825L based on Qualcomm 315 */
-#define MEIGSMART_PRODUCT_SRM825L 0x4d22
-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
-#define MEIGSMART_PRODUCT_SLM320 0x4d41
-/* MeiG Smart SLM770A based on ASR1803 */
-#define MEIGSMART_PRODUCT_SLM770A 0x4d57
-
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -1367,23 +1358,23 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
@@ -1397,12 +1388,44 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2347,6 +2370,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) }, /* MeiG Smart SLM320 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) }, /* MeiG Smart SLM770A */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) }, /* MeiG Smart SRM815 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
@@ -2403,15 +2434,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
.driver_info = NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2949, 0x8700, 0xff) }, /* Neoway N723-EA */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a317bdbd00ad..72fe83a6c978 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -503,7 +503,7 @@ static void qt2_process_read_urb(struct urb *urb)
newport = *(ch + 3);
- if (newport > serial->num_ports) {
+ if (newport >= serial->num_ports) {
dev_err(&port->dev,
"%s - port change to invalid port: %i\n",
__func__, newport);
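A classic off-by-one: port numbers from the status stream index serial->port[], which is valid for 0 through num_ports - 1, so the old '>' comparison let newport == num_ports through and read one element past the array. Condensed:

	if (newport >= serial->num_ports)	/* was '>': off by one */
		dev_err(&port->dev, "%s - port change to invalid port: %i\n",
			__func__, newport);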
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index d17b60a644ef..4be1d617d63d 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -3,8 +3,7 @@
# USB Storage driver configuration
#
-comment "NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may"
-comment "also be needed; see USB_STORAGE Help for more info"
+comment "NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; see USB_STORAGE Help for more info"
config USB_STORAGE
tristate "USB Mass Storage support"
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8c8b5e6041cc..d2f476e48d0c 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -64,7 +64,7 @@ static const char* host_info(struct Scsi_Host *host)
return us->scsi_name;
}
-static int slave_alloc (struct scsi_device *sdev)
+static int sdev_init (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
@@ -88,7 +88,7 @@ static int slave_alloc (struct scsi_device *sdev)
return 0;
}
-static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
+static int sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct us_data *us = host_to_us(sdev->host);
struct device *dev = us->pusb_dev->bus->sysdev;
@@ -127,7 +127,7 @@ static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);
/*
- * We can't put these settings in slave_alloc() because that gets
+ * We can't put these settings in sdev_init() because that gets
* called before the device type is known. Consequently these
* settings can't be overridden via the scsi devinfo mechanism.
*/
@@ -592,12 +592,9 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
if (sscanf(buf, "%hu", &ms) <= 0)
return -EINVAL;
- blk_mq_freeze_queue(sdev->request_queue);
lim = queue_limits_start_update(sdev->request_queue);
lim.max_hw_sectors = ms;
- ret = queue_limits_commit_update(sdev->request_queue, &lim);
- blk_mq_unfreeze_queue(sdev->request_queue);
-
+ ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);
if (ret)
return ret;
return count;
@@ -637,8 +634,8 @@ static const struct scsi_host_template usb_stor_host_template = {
/* unknown initiator id */
.this_id = -1,
- .slave_alloc = slave_alloc,
- .device_configure = device_configure,
+ .sdev_init = sdev_init,
+ .sdev_configure = sdev_configure,
.target_alloc = target_alloc,
/* lots of sg segments can be handled */
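usb-storage picks up the tree-wide SCSI host-template rename: .slave_alloc becomes .sdev_init and the queue_limits-aware .device_configure becomes .sdev_configure, with unchanged signatures. The max_sectors_store() hunk also switches to queue_limits_commit_update_frozen(), which freezes the queue around the commit itself and replaces the manual blk_mq_freeze_queue()/blk_mq_unfreeze_queue() bracket:

	lim = queue_limits_start_update(sdev->request_queue);
	lim.max_hw_sectors = ms;
	ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);
	if (ret)
		return ret;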
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 087c706bb315..c33cbf177e6f 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cdrom.h>
#include <scsi/scsi.h>
@@ -651,8 +652,7 @@ static int usbat_hp8200e_rw_block_test(struct us_data *us,
return USB_STOR_TRANSPORT_FAILED;
usb_stor_dbg(us, "Redoing %s\n",
- direction == DMA_TO_DEVICE
- ? "write" : "read");
+ str_write_read(direction == DMA_TO_DEVICE));
} else if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 9d767f6bf722..e6bc8ecaecbb 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1087,13 +1087,9 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
- /*
- * If we have a successful request, return the result if valid. The
- * CBW LUN field is 4 bits wide, so the value reported by the device
- * should fit into that.
- */
+ /* If we have a successful request, return the result if valid. */
if (result > 0) {
- if (us->iobuf[0] < 16) {
+ if (us->iobuf[0] <= US_BULK_MAX_LUN_LIMIT) {
return us->iobuf[0];
} else {
dev_info(&us->pusb_intf->dev,
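The Bulk-Only Transport CBW carries the LUN in a 4-bit field, so 15 is the largest value a device can legitimately report from Get Max LUN. The magic '< 16' becomes a named bound; US_BULK_MAX_LUN_LIMIT is defined in the bulk-storage headers rather than in this diff, and for the semantics to stay unchanged it must equal 15:

	if (us->iobuf[0] <= US_BULK_MAX_LUN_LIMIT)	/* i.e. 0..15 */
		return us->iobuf[0];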
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index f9ad90ce7af4..4ed0dc19afe0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -817,7 +817,7 @@ static int uas_target_alloc(struct scsi_target *starget)
return 0;
}
-static int uas_slave_alloc(struct scsi_device *sdev)
+static int uas_sdev_init(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
@@ -832,8 +832,8 @@ static int uas_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int uas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int uas_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct uas_dev_info *devinfo = sdev->hostdata;
@@ -905,8 +905,8 @@ static const struct scsi_host_template uas_host_template = {
.name = "uas",
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
- .slave_alloc = uas_slave_alloc,
- .device_configure = uas_device_configure,
+ .sdev_init = uas_sdev_init,
+ .sdev_configure = uas_sdev_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index e5ad23d86833..54f0b1c83317 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -255,6 +255,13 @@ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Added by Lubomir Rintel <lkundrak@v3.sk>, a very fine chap */
+UNUSUAL_DEV( 0x0421, 0x06c2, 0x0000, 0x0406,
+ "Nokia",
+ "Nokia 208",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 1a6b5e872b0d..7867fa7c405d 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -23,4 +23,13 @@ config TYPEC_NVIDIA_ALTMODE
To compile this driver as a module, choose M here: the
module will be called typec_nvidia.
+config TYPEC_TBT_ALTMODE
+ tristate "Thunderbolt3 Alternate Mode driver"
+ help
+ Select this option if you have Thunderbolt3 hardware on your
+ system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called typec_thunderbolt.
+
endmenu
diff --git a/drivers/usb/typec/altmodes/Makefile b/drivers/usb/typec/altmodes/Makefile
index 45717548b396..508a68351bd2 100644
--- a/drivers/usb/typec/altmodes/Makefile
+++ b/drivers/usb/typec/altmodes/Makefile
@@ -4,3 +4,5 @@ obj-$(CONFIG_TYPEC_DP_ALTMODE) += typec_displayport.o
typec_displayport-y := displayport.o
obj-$(CONFIG_TYPEC_NVIDIA_ALTMODE) += typec_nvidia.o
typec_nvidia-y := nvidia.o
+obj-$(CONFIG_TYPEC_TBT_ALTMODE) += typec_thunderbolt.o
+typec_thunderbolt-y := thunderbolt.o
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 2f03190a9873..ac84a6d64c2f 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -252,7 +252,7 @@ static void dp_altmode_work(struct work_struct *work)
case DP_STATE_ENTER:
ret = typec_altmode_enter(dp->alt, NULL);
if (ret && ret != -EBUSY)
- dev_err(&dp->alt->dev, "failed to enter mode\n");
+ dev_err(&dp->alt->dev, "failed to enter mode: %d\n", ret);
break;
case DP_STATE_ENTER_PRIME:
ret = typec_cable_altmode_enter(dp->alt, TYPEC_PLUG_SOP_P, NULL);
@@ -791,7 +791,7 @@ void dp_altmode_remove(struct typec_altmode *alt)
EXPORT_SYMBOL_GPL(dp_altmode_remove);
static const struct typec_device_id dp_typec_id[] = {
- { USB_TYPEC_DP_SID, USB_TYPEC_DP_MODE },
+ { USB_TYPEC_DP_SID },
{ },
};
MODULE_DEVICE_TABLE(typec, dp_typec_id);
diff --git a/drivers/usb/typec/altmodes/nvidia.c b/drivers/usb/typec/altmodes/nvidia.c
index fe70b36f078f..2b77d931e494 100644
--- a/drivers/usb/typec/altmodes/nvidia.c
+++ b/drivers/usb/typec/altmodes/nvidia.c
@@ -24,7 +24,7 @@ static void nvidia_altmode_remove(struct typec_altmode *alt)
}
static const struct typec_device_id nvidia_typec_id[] = {
- { USB_TYPEC_NVIDIA_VLINK_SID, TYPEC_ANY_MODE },
+ { USB_TYPEC_NVIDIA_VLINK_SID },
{ },
};
MODULE_DEVICE_TABLE(typec, nvidia_typec_id);
diff --git a/drivers/usb/typec/altmodes/thunderbolt.c b/drivers/usb/typec/altmodes/thunderbolt.c
new file mode 100644
index 000000000000..1b475b1d98e7
--- /dev/null
+++ b/drivers/usb/typec/altmodes/thunderbolt.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Type-C Thunderbolt3 Alternate Mode driver
+ *
+ * Copyright (C) 2019 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_tbt.h>
+
+enum tbt_state {
+ TBT_STATE_IDLE,
+ TBT_STATE_SOP_P_ENTER,
+ TBT_STATE_SOP_PP_ENTER,
+ TBT_STATE_ENTER,
+ TBT_STATE_EXIT,
+ TBT_STATE_SOP_PP_EXIT,
+ TBT_STATE_SOP_P_EXIT
+};
+
+struct tbt_altmode {
+ enum tbt_state state;
+ struct typec_cable *cable;
+ struct typec_altmode *alt;
+ struct typec_altmode *plug[2];
+ u32 enter_vdo;
+
+ struct work_struct work;
+ struct mutex lock; /* device lock */
+};
+
+static bool tbt_ready(struct typec_altmode *alt);
+
+static int tbt_enter_mode(struct tbt_altmode *tbt)
+{
+ struct typec_altmode *plug = tbt->plug[TYPEC_PLUG_SOP_P];
+ u32 vdo;
+
+ vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1);
+ vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0;
+ vdo |= TBT_MODE;
+
+ if (plug) {
+ if (typec_cable_is_active(tbt->cable))
+ vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo));
+ vdo |= plug->vdo & TBT_CABLE_ROUNDED;
+ vdo |= plug->vdo & TBT_CABLE_OPTICAL;
+ vdo |= plug->vdo & TBT_CABLE_RETIMER;
+ vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING;
+ } else {
+ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE);
+ }
+
+ tbt->enter_vdo = vdo;
+ return typec_altmode_enter(tbt->alt, &vdo);
+}
+
+static void tbt_altmode_work(struct work_struct *work)
+{
+ struct tbt_altmode *tbt = container_of(work, struct tbt_altmode, work);
+ int ret;
+
+ mutex_lock(&tbt->lock);
+
+ switch (tbt->state) {
+ case TBT_STATE_SOP_P_ENTER:
+ ret = typec_cable_altmode_enter(tbt->alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret) {
+ dev_dbg(&tbt->plug[TYPEC_PLUG_SOP_P]->dev,
+ "failed to enter mode (%d)\n", ret);
+ goto disable_plugs;
+ }
+ break;
+ case TBT_STATE_SOP_PP_ENTER:
+ ret = typec_cable_altmode_enter(tbt->alt, TYPEC_PLUG_SOP_PP, NULL);
+ if (ret) {
+ dev_dbg(&tbt->plug[TYPEC_PLUG_SOP_PP]->dev,
+ "failed to enter mode (%d)\n", ret);
+ goto disable_plugs;
+ }
+ break;
+ case TBT_STATE_ENTER:
+ ret = tbt_enter_mode(tbt);
+ if (ret)
+ dev_dbg(&tbt->alt->dev, "failed to enter mode (%d)\n",
+ ret);
+ break;
+ case TBT_STATE_EXIT:
+ typec_altmode_exit(tbt->alt);
+ break;
+ case TBT_STATE_SOP_PP_EXIT:
+ typec_cable_altmode_exit(tbt->alt, TYPEC_PLUG_SOP_PP);
+ break;
+ case TBT_STATE_SOP_P_EXIT:
+ typec_cable_altmode_exit(tbt->alt, TYPEC_PLUG_SOP_P);
+ break;
+ default:
+ break;
+ }
+
+ tbt->state = TBT_STATE_IDLE;
+
+ mutex_unlock(&tbt->lock);
+ return;
+
+disable_plugs:
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+
+ tbt->plug[i] = NULL;
+ }
+
+ tbt->state = TBT_STATE_ENTER;
+ schedule_work(&tbt->work);
+ mutex_unlock(&tbt->lock);
+}
+
+/*
+ * If SOP' is available, enter that first (which will trigger a VDM response
+ * that will enter SOP" if available and then the port). If entering SOP' fails,
+ * stop attempting to enter either cable altmode (probably not supported) and
+ * directly enter the port altmode.
+ */
+static int tbt_enter_modes_ordered(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int ret = 0;
+
+ lockdep_assert_held(&tbt->lock);
+
+ if (!tbt_ready(tbt->alt))
+ return -ENODEV;
+
+ if (tbt->plug[TYPEC_PLUG_SOP_P]) {
+ ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret < 0) {
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+
+ tbt->plug[i] = NULL;
+ }
+ } else {
+ return ret;
+ }
+ }
+
+ return tbt_enter_mode(tbt);
+}
+
+static int tbt_cable_altmode_vdm(struct typec_altmode *alt,
+ enum typec_plug_index sop, const u32 hdr,
+ const u32 *vdo, int count)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+
+ mutex_lock(&tbt->lock);
+
+ if (tbt->state != TBT_STATE_IDLE) {
+ mutex_unlock(&tbt->lock);
+ return -EBUSY;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /*
+ * Following the order described in USB Type-C Spec
+ * R2.0 Section 6.7.3: SOP', SOP", then port.
+ */
+ if (sop == TYPEC_PLUG_SOP_P) {
+ if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_ENTER;
+ else
+ tbt->state = TBT_STATE_ENTER;
+ } else if (sop == TYPEC_PLUG_SOP_PP)
+ tbt->state = TBT_STATE_ENTER;
+
+ break;
+ case CMD_EXIT_MODE:
+ /* Exit in opposite order: Port, SOP", then SOP'. */
+ if (sop == TYPEC_PLUG_SOP_PP)
+ tbt->state = TBT_STATE_SOP_P_EXIT;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (tbt->state != TBT_STATE_IDLE)
+ schedule_work(&tbt->work);
+
+ mutex_unlock(&tbt->lock);
+ return 0;
+}
+
+static int tbt_altmode_vdm(struct typec_altmode *alt,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ struct typec_thunderbolt_data data;
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+
+ mutex_lock(&tbt->lock);
+
+ if (tbt->state != TBT_STATE_IDLE) {
+ mutex_unlock(&tbt->lock);
+ return -EBUSY;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ /* Port altmode is last to enter and first to exit. */
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ memset(&data, 0, sizeof(data));
+
+ data.device_mode = tbt->alt->vdo;
+ data.enter_vdo = tbt->enter_vdo;
+ if (tbt->plug[TYPEC_PLUG_SOP_P])
+ data.cable_mode = tbt->plug[TYPEC_PLUG_SOP_P]->vdo;
+
+ typec_altmode_notify(alt, TYPEC_STATE_MODAL, &data);
+ break;
+ case CMD_EXIT_MODE:
+ if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_EXIT;
+ else if (tbt->plug[TYPEC_PLUG_SOP_P])
+ tbt->state = TBT_STATE_SOP_P_EXIT;
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ dev_warn(&alt->dev, "Enter Mode refused\n");
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (tbt->state != TBT_STATE_IDLE)
+ schedule_work(&tbt->work);
+
+ mutex_unlock(&tbt->lock);
+
+ return 0;
+}
+
+static int tbt_altmode_activate(struct typec_altmode *alt, int activate)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int ret;
+
+ mutex_lock(&tbt->lock);
+
+ if (activate)
+ ret = tbt_enter_modes_ordered(alt);
+ else
+ ret = typec_altmode_exit(alt);
+
+ mutex_unlock(&tbt->lock);
+
+ return ret;
+}
+
+static const struct typec_altmode_ops tbt_altmode_ops = {
+ .vdm = tbt_altmode_vdm,
+ .activate = tbt_altmode_activate
+};
+
+static const struct typec_cable_ops tbt_cable_ops = {
+ .vdm = tbt_cable_altmode_vdm,
+};
+
+static int tbt_altmode_probe(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt;
+
+ tbt = devm_kzalloc(&alt->dev, sizeof(*tbt), GFP_KERNEL);
+ if (!tbt)
+ return -ENOMEM;
+
+ INIT_WORK(&tbt->work, tbt_altmode_work);
+ mutex_init(&tbt->lock);
+ tbt->alt = alt;
+
+ alt->desc = "Thunderbolt3";
+ typec_altmode_set_drvdata(alt, tbt);
+ typec_altmode_set_ops(alt, &tbt_altmode_ops);
+
+ if (tbt_ready(alt)) {
+ if (tbt->plug[TYPEC_PLUG_SOP_P])
+ tbt->state = TBT_STATE_SOP_P_ENTER;
+ else if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_ENTER;
+ else
+ tbt->state = TBT_STATE_ENTER;
+ schedule_work(&tbt->work);
+ }
+
+ return 0;
+}
+
+static void tbt_altmode_remove(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+ }
+
+ if (tbt->cable)
+ typec_cable_put(tbt->cable);
+}
+
+static bool tbt_ready(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ struct typec_altmode *plug;
+
+ if (tbt->cable)
+ return true;
+
+ /* Thunderbolt 3 requires a cable with eMarker */
+ tbt->cable = typec_cable_get(typec_altmode2port(tbt->alt));
+ if (!tbt->cable)
+ return false;
+
+ /*
+ * We accept systems without SOP' or SOP''. This means the port altmode
+ * driver will be responsible for properly ordering entry/exit.
+ */
+ for (int i = 0; i < TYPEC_PLUG_SOP_PP + 1; i++) {
+ plug = typec_altmode_get_plug(tbt->alt, i);
+ if (IS_ERR(plug))
+ continue;
+
+ if (!plug || plug->svid != USB_TYPEC_TBT_SID)
+ break;
+
+ plug->desc = "Thunderbolt3";
+ plug->cable_ops = &tbt_cable_ops;
+ typec_altmode_set_drvdata(plug, tbt);
+
+ tbt->plug[i] = plug;
+ }
+
+ return true;
+}
+
+static const struct typec_device_id tbt_typec_id[] = {
+ { USB_TYPEC_TBT_SID },
+ { }
+};
+MODULE_DEVICE_TABLE(typec, tbt_typec_id);
+
+static struct typec_altmode_driver tbt_altmode_driver = {
+ .id_table = tbt_typec_id,
+ .probe = tbt_altmode_probe,
+ .remove = tbt_altmode_remove,
+ .driver = {
+ .name = "typec-thunderbolt",
+ }
+};
+module_typec_altmode_driver(tbt_altmode_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Thunderbolt3 USB Type-C Alternate Mode");
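The driver's state machine serializes mode entry in the order the spec requires (cable plug SOP', then SOP'', then the port partner) and exits in reverse; each ACKed VDM advances tbt->state and requeues the work item. A condensed sketch of how probe seeds the forward path, using only symbols defined above:

	if (tbt->plug[TYPEC_PLUG_SOP_P])
		tbt->state = TBT_STATE_SOP_P_ENTER;	/* cable plug first */
	else if (tbt->plug[TYPEC_PLUG_SOP_PP])
		tbt->state = TBT_STATE_SOP_PP_ENTER;
	else
		tbt->state = TBT_STATE_ENTER;		/* straight to the port */
	schedule_work(&tbt->work);

If entering SOP' fails, the plug references are dropped and the driver retries with the port altmode alone.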
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index aa879253d3b8..ae90688d23e4 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -454,8 +454,7 @@ static int typec_match(struct device *dev, const struct device_driver *driver)
const struct typec_device_id *id;
for (id = drv->id_table; id->svid; id++)
- if (id->svid == altmode->svid &&
- (id->mode == TYPEC_ANY_MODE || id->mode == altmode->mode))
+ if (id->svid == altmode->svid)
return 1;
return 0;
}
@@ -470,8 +469,7 @@ static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (add_uevent_var(env, "MODE=%u", altmode->mode))
return -ENOMEM;
- return add_uevent_var(env, "MODALIAS=typec:id%04Xm%02X",
- altmode->svid, altmode->mode);
+ return add_uevent_var(env, "MODALIAS=typec:id%04X", altmode->svid);
}
static int typec_altmode_create_links(struct altmode *alt)
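typec altmode matching now keys on the SVID alone: the mode index disappears from both the match loop and the MODALIAS string (MODE= is still exported as its own uevent variable). Driver id tables shrink to match, as the displayport, nvidia and thunderbolt tables in this section show; demo_typec_id below is illustrative:

	static const struct typec_device_id demo_typec_id[] = {
		{ USB_TYPEC_DP_SID },	/* SVID only; no mode, no TYPEC_ANY_MODE */
		{ }
	};
	MODULE_DEVICE_TABLE(typec, demo_typec_id);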
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 4b3047e055a3..9c76c3d0c6cf 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
@@ -229,21 +230,21 @@ static const char * const usb_modes[] = {
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
-static int altmode_match(struct device *dev, void *data)
+static int altmode_match(struct device *dev, const void *data)
{
struct typec_altmode *adev = to_typec_altmode(dev);
- struct typec_device_id *id = data;
+ const struct typec_device_id *id = data;
if (!is_typec_altmode(dev))
return 0;
- return ((adev->svid == id->svid) && (adev->mode == id->mode));
+ return (adev->svid == id->svid);
}
static void typec_altmode_set_partner(struct altmode *altmode)
{
struct typec_altmode *adev = &altmode->adev;
- struct typec_device_id id = { adev->svid, adev->mode, };
+ struct typec_device_id id = { adev->svid };
struct typec_port *port = typec_altmode2port(adev);
struct altmode *partner;
struct device *dev;
@@ -361,7 +362,7 @@ active_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct typec_altmode *alt = to_typec_altmode(dev);
- return sprintf(buf, "%s\n", alt->active ? "yes" : "no");
+ return sprintf(buf, "%s\n", str_yes_no(alt->active));
}
static ssize_t active_store(struct device *dev, struct device_attribute *attr,
@@ -458,7 +459,8 @@ static umode_t typec_altmode_attr_is_visible(struct kobject *kobj,
struct typec_altmode *adev = to_typec_altmode(kobj_to_dev(kobj));
if (attr == &dev_attr_active.attr)
- if (!adev->ops || !adev->ops->activate)
+ if (!is_typec_port(adev->dev.parent) &&
+ (!adev->ops || !adev->ops->activate))
return 0444;
return attr->mode;
@@ -563,7 +565,7 @@ typec_register_altmode(struct device *parent,
if (is_port) {
alt->attrs[3] = &dev_attr_supported_roles.attr;
- alt->adev.active = true; /* Enabled by default */
+ alt->adev.active = !desc->inactive; /* Enabled by default */
}
sprintf(alt->group_name, "mode%d", desc->mode);
@@ -706,7 +708,7 @@ static ssize_t supports_usb_power_delivery_show(struct device *dev,
{
struct typec_partner *p = to_typec_partner(dev);
- return sprintf(buf, "%s\n", p->usb_pd ? "yes" : "no");
+ return sprintf(buf, "%s\n", str_yes_no(p->usb_pd));
}
static DEVICE_ATTR_RO(supports_usb_power_delivery);
@@ -1282,11 +1284,6 @@ const struct device_type typec_cable_dev_type = {
.release = typec_cable_release,
};
-static int cable_match(struct device *dev, void *data)
-{
- return is_typec_cable(dev);
-}
-
/**
* typec_cable_get - Get a reference to the USB Type-C cable
* @port: The USB Type-C Port the cable is connected to
@@ -1298,7 +1295,8 @@ struct typec_cable *typec_cable_get(struct typec_port *port)
{
struct device *dev;
- dev = device_find_child(&port->dev, NULL, cable_match);
+ dev = device_find_child(&port->dev, &typec_cable_dev_type,
+ device_match_type);
if (!dev)
return NULL;
@@ -1858,7 +1856,7 @@ static ssize_t vconn_source_show(struct device *dev,
struct typec_port *port = to_typec_port(dev);
return sprintf(buf, "%s\n",
- port->vconn_role == TYPEC_SOURCE ? "yes" : "no");
+ str_yes_no(port->vconn_role == TYPEC_SOURCE));
}
static DEVICE_ATTR_RW(vconn_source);
@@ -2028,16 +2026,12 @@ const struct device_type typec_port_dev_type = {
/* --------------------------------------- */
/* Driver callbacks to report role updates */
-static int partner_match(struct device *dev, void *data)
-{
- return is_typec_partner(dev);
-}
-
static struct typec_partner *typec_get_partner(struct typec_port *port)
{
struct device *dev;
- dev = device_find_child(&port->dev, NULL, partner_match);
+ dev = device_find_child(&port->dev, &typec_partner_dev_type,
+ device_match_type);
if (!dev)
return NULL;
@@ -2170,7 +2164,9 @@ void typec_set_pwr_opmode(struct typec_port *port,
sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
- partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ partner_dev = device_find_child(&port->dev,
+ &typec_partner_dev_type,
+ device_match_type);
if (partner_dev) {
struct typec_partner *partner = to_typec_partner(partner_dev);
@@ -2334,7 +2330,9 @@ int typec_get_negotiated_svdm_version(struct typec_port *port)
enum usb_pd_svdm_ver svdm_version;
struct device *partner_dev;
- partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ partner_dev = device_find_child(&port->dev,
+ &typec_partner_dev_type,
+ device_match_type);
if (!partner_dev)
return -ENODEV;
@@ -2361,7 +2359,8 @@ int typec_get_cable_svdm_version(struct typec_port *port)
enum usb_pd_svdm_ver svdm_version;
struct device *cable_dev;
- cable_dev = device_find_child(&port->dev, NULL, cable_match);
+ cable_dev = device_find_child(&port->dev, &typec_cable_dev_type,
+ device_match_type);
if (!cable_dev)
return -ENODEV;
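The single-purpose cable_match()/partner_match() wrappers existed only to test a child's device type; with device_find_child() now taking const match data, the generic device_match_type() does the same against the exported typec_*_dev_type descriptors:

	dev = device_find_child(&port->dev, &typec_partner_dev_type,
				device_match_type);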
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index fb1242e82ffd..3ecc688dda82 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -16,10 +16,17 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
+#define HD3SS3220_REG_CN_STAT 0x08
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
#define HD3SS3220_REG_GEN_CTRL 0x0A
#define HD3SS3220_REG_DEV_REV 0xA0
+/* Register HD3SS3220_REG_CN_STAT */
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_DEFAULT 0x00
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_MID BIT(6)
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_HIGH BIT(7)
+
/* Register HD3SS3220_REG_CN_STAT_CTRL*/
#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
@@ -28,10 +35,16 @@
#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
/* Register HD3SS3220_REG_GEN_CTRL*/
+#define HD3SS3220_REG_GEN_CTRL_DISABLE_TERM BIT(0)
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_MASK (BIT(5) | BIT(4))
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DFP BIT(5)
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_UFP BIT(4)
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DRP (BIT(5) | BIT(4))
struct hd3ss3220 {
struct device *dev;
@@ -43,8 +56,96 @@ struct hd3ss3220 {
bool poll;
};
-static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+static int hd3ss3220_set_power_opmode(struct hd3ss3220 *hd3ss3220, int power_opmode)
+{
+ int current_mode;
+
+ switch (power_opmode) {
+ case TYPEC_PWR_MODE_USB:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_DEFAULT;
+ break;
+ case TYPEC_PWR_MODE_1_5A:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_MID;
+ break;
+ case TYPEC_PWR_MODE_3_0A:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_HIGH;
+ break;
+ case TYPEC_PWR_MODE_PD: /* Power delivery not supported */
+ default:
+ dev_err(hd3ss3220->dev, "bad power operation mode: %d\n", power_opmode);
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT,
+ HD3SS3220_REG_CN_STAT_CURRENT_MODE_MASK,
+ current_mode);
+}
+
+static int hd3ss3220_set_port_type(struct hd3ss3220 *hd3ss3220, int type)
+{
+ int mode_select, err;
+
+ switch (type) {
+ case TYPEC_PORT_SRC:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DFP;
+ break;
+ case TYPEC_PORT_SNK:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_UFP;
+ break;
+ case TYPEC_PORT_DRP:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DRP;
+ break;
+ default:
+ dev_err(hd3ss3220->dev, "bad port type: %d\n", type);
+ return -EINVAL;
+ }
+
+ /* Disable termination before changing MODE_SELECT as required by datasheet */
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM);
+ if (err < 0) {
+ dev_err(hd3ss3220->dev, "Failed to disable port for mode change: %d\n", err);
+ return err;
+ }
+
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_MODE_SELECT_MASK,
+ mode_select);
+ if (err < 0) {
+ dev_err(hd3ss3220->dev, "Failed to change mode: %d\n", err);
+ regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM, 0);
+ return err;
+ }
+
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM, 0);
+ if (err < 0)
+ dev_err(hd3ss3220->dev, "Failed to re-enable port after mode change: %d\n", err);
+
+ return err;
+}
+
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int prefer_role)
{
+ int src_pref;
+
+ switch (prefer_role) {
+ case TYPEC_NO_PREFERRED_ROLE:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT;
+ break;
+ case TYPEC_SINK:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ break;
+ case TYPEC_SOURCE:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ break;
+ default:
+ dev_err(hd3ss3220->dev, "bad role preference: %d\n", prefer_role);
+ return -EINVAL;
+ }
+
return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
src_pref);
@@ -76,31 +177,23 @@ static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
return attached_state;
}
-static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+static int hd3ss3220_try_role(struct typec_port *port, int role)
{
struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
- enum usb_role role_val;
- int pref, ret = 0;
- if (role == TYPEC_HOST) {
- role_val = USB_ROLE_HOST;
- pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
- } else {
- role_val = USB_ROLE_DEVICE;
- pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
- }
-
- ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
- usleep_range(10, 100);
+ return hd3ss3220_set_source_pref(hd3ss3220, role);
+}
- usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
- typec_set_data_role(hd3ss3220->port, role);
+static int hd3ss3220_port_type_set(struct typec_port *port, enum typec_port_type type)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
- return ret;
+ return hd3ss3220_set_port_type(hd3ss3220, type);
}
static const struct typec_operations hd3ss3220_ops = {
- .dr_set = hd3ss3220_dr_set
+ .try_role = hd3ss3220_try_role,
+ .port_type_set = hd3ss3220_port_type_set,
};
static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
@@ -108,9 +201,6 @@ static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
- if (role_state == USB_ROLE_NONE)
- hd3ss3220_set_source_pref(hd3ss3220,
- HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
switch (role_state) {
case USB_ROLE_HOST:
@@ -162,6 +252,67 @@ static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
return hd3ss3220_irq(hd3ss3220);
}
+static int hd3ss3220_configure_power_opmode(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector)
+{
+ /*
+ * The supported power operation mode can be configured through the device tree
+ */
+ const char *cap_str;
+ int ret, power_opmode;
+
+ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
+ if (ret)
+ return 0;
+
+ power_opmode = typec_find_pwr_opmode(cap_str);
+ return hd3ss3220_set_power_opmode(hd3ss3220, power_opmode);
+}
+
+static int hd3ss3220_configure_port_type(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector,
+ struct typec_capability *cap)
+{
+ /*
+ * The port type can be configured through the device tree
+ */
+ const char *cap_str;
+ int ret;
+
+ ret = fwnode_property_read_string(connector, "power-role", &cap_str);
+ if (ret)
+ return 0;
+
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+
+ cap->type = ret;
+ return hd3ss3220_set_port_type(hd3ss3220, cap->type);
+}
+
+static int hd3ss3220_configure_source_pref(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector,
+ struct typec_capability *cap)
+{
+ /*
+ * The preferred role can be configured through the device tree
+ */
+ const char *cap_str;
+ int ret;
+
+ ret = fwnode_property_read_string(connector, "try-power-role", &cap_str);
+ if (ret)
+ return 0;
+
+ ret = typec_find_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+
+ cap->prefer_role = ret;
+ return hd3ss3220_set_source_pref(hd3ss3220, cap->prefer_role);
+}
+
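For orientation, a sketch of how the three optional connector properties map onto the helpers above (property names follow the usb-c-connector binding; the values are illustrative, not taken from this patch):

/*
 * connector {
 *         compatible = "usb-c-connector";
 *         power-role = "dual";           maps to hd3ss3220_set_port_type(TYPEC_PORT_DRP)
 *         try-power-role = "sink";       maps to hd3ss3220_set_source_pref(TYPEC_SINK)
 *         typec-power-opmode = "1.5A";   maps to hd3ss3220_set_power_opmode(TYPEC_PWR_MODE_1_5A)
 * };
 *
 * A missing property is not an error: each helper returns 0 and the chip
 * keeps its power-on default.
 */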
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
@@ -188,8 +339,6 @@ static int hd3ss3220_probe(struct i2c_client *client)
if (IS_ERR(hd3ss3220->regmap))
return PTR_ERR(hd3ss3220->regmap);
- hd3ss3220_set_source_pref(hd3ss3220,
- HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
/* For backward compatibility check the connector child node first */
connector = device_get_named_child_node(hd3ss3220->dev, "connector");
if (connector) {
@@ -217,12 +366,24 @@ static int hd3ss3220_probe(struct i2c_client *client)
typec_cap.ops = &hd3ss3220_ops;
typec_cap.fwnode = connector;
+ ret = hd3ss3220_configure_source_pref(hd3ss3220, connector, &typec_cap);
+ if (ret < 0)
+ goto err_put_role;
+
+ ret = hd3ss3220_configure_port_type(hd3ss3220, connector, &typec_cap);
+ if (ret < 0)
+ goto err_put_role;
+
hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(hd3ss3220->port)) {
ret = PTR_ERR(hd3ss3220->port);
goto err_put_role;
}
+ ret = hd3ss3220_configure_power_opmode(hd3ss3220, connector);
+ if (ret < 0)
+ goto err_unreg_port;
+
hd3ss3220_set_role(hd3ss3220);
ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
if (ret < 0)
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 5dfe95754394..65dda9183e6f 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -718,7 +718,7 @@ DEFINE_SHOW_ATTRIBUTE(port_iom_status);
static void pmc_mux_port_debugfs_init(struct pmc_usb_port *port)
{
struct dentry *debugfs_dir;
- char name[6];
+ char name[8];
snprintf(name, sizeof(name), "port%d", port->usb3_port - 1);
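A sketch of the worst case the size bump guards against (assuming usb3_port can reach double digits):

/*
 * "port10" needs 4 + 2 + 1 (NUL) = 7 bytes; with name[6], snprintf()
 * truncated it to "port1", which could collide with the real port 1's
 * debugfs directory. name[8] leaves headroom for three-digit ports.
 */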
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index e2fe479e16ad..f15c63d3a8f4 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/typec.h>
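The str_*() helpers used throughout the hunks below come from <linux/string_choices.h>; a sketch of their semantics (note the conversion also lowercases the old "On"/"Off" strings):

/*
 * str_on_off(v)         returns v ? "on" : "off"
 * str_enable_disable(v) returns v ? "enable" : "disable"
 * str_true_false(v)     returns v ? "true" : "false"
 * str_plural(n)         returns n == 1 ? "" : "s"
 */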
@@ -733,7 +734,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
mutex_lock(&chip->lock);
if (chip->vconn_on == on) {
- fusb302_log(chip, "vconn is already %s", on ? "On" : "Off");
+ fusb302_log(chip, "vconn is already %s", str_on_off(on));
goto done;
}
if (on) {
@@ -746,7 +747,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
if (ret < 0)
goto done;
chip->vconn_on = on;
- fusb302_log(chip, "vconn := %s", on ? "On" : "Off");
+ fusb302_log(chip, "vconn := %s", str_on_off(on));
done:
mutex_unlock(&chip->lock);
@@ -761,7 +762,7 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
mutex_lock(&chip->lock);
if (chip->vbus_on == on) {
- fusb302_log(chip, "vbus is already %s", on ? "On" : "Off");
+ fusb302_log(chip, "vbus is already %s", str_on_off(on));
} else {
if (on)
ret = regulator_enable(chip->vbus);
@@ -769,15 +770,14 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
ret = regulator_disable(chip->vbus);
if (ret < 0) {
fusb302_log(chip, "cannot %s vbus regulator, ret=%d",
- on ? "enable" : "disable", ret);
+ str_enable_disable(on), ret);
goto done;
}
chip->vbus_on = on;
- fusb302_log(chip, "vbus := %s", on ? "On" : "Off");
+ fusb302_log(chip, "vbus := %s", str_on_off(on));
}
if (chip->charge_on == charge)
- fusb302_log(chip, "charge is already %s",
- charge ? "On" : "Off");
+ fusb302_log(chip, "charge is already %s", str_on_off(charge));
else
chip->charge_on = charge;
@@ -854,16 +854,16 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
ret = fusb302_pd_set_auto_goodcrc(chip, on);
if (ret < 0) {
fusb302_log(chip, "cannot turn %s auto GCRC, ret=%d",
- on ? "on" : "off", ret);
+ str_on_off(on), ret);
goto done;
}
ret = fusb302_pd_set_interrupts(chip, on);
if (ret < 0) {
fusb302_log(chip, "cannot turn %s pd interrupts, ret=%d",
- on ? "on" : "off", ret);
+ str_on_off(on), ret);
goto done;
}
- fusb302_log(chip, "pd := %s", on ? "on" : "off");
+ fusb302_log(chip, "pd := %s", str_on_off(on));
done:
mutex_unlock(&chip->lock);
@@ -1531,7 +1531,7 @@ static void fusb302_irq_work(struct work_struct *work)
if (interrupt & FUSB_REG_INTERRUPT_VBUSOK) {
vbus_present = !!(status0 & FUSB_REG_STATUS0_VBUSOK);
fusb302_log(chip, "IRQ: VBUS_OK, vbus=%s",
- vbus_present ? "On" : "Off");
+ str_on_off(vbus_present));
if (vbus_present != chip->vbus_present) {
chip->vbus_present = vbus_present;
tcpm_vbus_change(chip->tcpm_port);
@@ -1562,7 +1562,7 @@ static void fusb302_irq_work(struct work_struct *work)
if ((interrupt & FUSB_REG_INTERRUPT_COMP_CHNG) && intr_comp_chng) {
comp_result = !!(status0 & FUSB_REG_STATUS0_COMP);
fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
- comp_result ? "true" : "false");
+ str_true_false(comp_result));
if (comp_result) {
/* cc level > Rd_threshold, detach */
chip->cc1 = TYPEC_CC_OPEN;
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index 22163d8f9eb0..0cdda06592fd 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -135,7 +135,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* OVP enable */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
@@ -157,7 +157,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* Disable current source */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
if (ret < 0)
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 726423684bae..18303b34594b 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
@@ -418,7 +419,7 @@ static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
- dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
+ dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", str_on_off(on));
return ret;
}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
index df79059cda67..8fac171778da 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
@@ -38,7 +39,7 @@ static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
struct device *dev = tcpm->dev;
- dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
+ dev_dbg(dev, "set_pd_rx: %s\n", str_on_off(on));
return 0;
}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index c37dede62e12..4fc83dcfae64 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
@@ -562,7 +563,8 @@ done:
spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
dev_dbg(dev, "set_vconn: orientation %d control 0x%08x state %s cc %s vconn %s\n",
- orientation, value, on ? "on" : "off", misc_to_vconn(misc), misc_to_cc(misc));
+ orientation, value, str_on_off(on), misc_to_vconn(misc),
+ misc_to_cc(misc));
return ret;
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ed32583829be..19ab6647af70 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -27,6 +27,7 @@
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
+#define VPPS_SHUTDOWN_MIN_PERCENT 85
struct tcpci {
struct device *dev;
@@ -282,7 +283,7 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
if (cc2 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
- else
+ else if (cc2 >= TYPEC_CC_RP_DEF)
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
} else {
reg &= ~TCPC_ROLE_CTRL_CC1;
@@ -290,7 +291,7 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
if (cc1 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP);
- else
+ else if (cc1 >= TYPEC_CC_RP_DEF)
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
}
}
@@ -366,7 +367,8 @@ static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
}
static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
- bool pps_active, u32 requested_vbus_voltage_mv)
+ bool pps_active, u32 requested_vbus_voltage_mv,
+ u32 apdo_min_voltage_mv)
{
struct tcpci *tcpci = tcpc_to_tcpci(dev);
unsigned int pwr_ctrl, threshold = 0;
@@ -388,9 +390,12 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
} else if (mode == TYPEC_PWR_MODE_PD) {
if (pps_active)
- threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
- VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
- VSINKDISCONNECT_PD_MIN_PERCENT / 100;
+ /*
+ * To prevent a disconnect while the source is in Current Limit Mode,
+ * set the threshold to the lowest possible voltage, vPpsShutdown (min).
+ */
+ threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
+ VSINKPD_MIN_IR_DROP_MV;
else
threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
@@ -700,7 +705,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
tcpci->alert_mask = reg;
- return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
+ return 0;
}
irqreturn_t tcpci_irq(struct tcpci *tcpci)
@@ -923,22 +928,27 @@ static int tcpci_probe(struct i2c_client *client)
chip->data.set_orientation = err;
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ if (IS_ERR(chip->tcpci))
+ return PTR_ERR(chip->tcpci);
+
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (err < 0)
- return err;
+ goto unregister_port;
- /*
- * Disable irq while registering port. If irq is configured as an edge
- * irq this allow to keep track and process the irq as soon as it is enabled.
- */
- disable_irq(client->irq);
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- enable_irq(client->irq);
+ /* Finally, enable chip interrupts */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, chip->tcpci->alert_mask);
+ if (err < 0)
+ goto unregister_port;
- return PTR_ERR_OR_ZERO(chip->tcpci);
+ return 0;
+
+unregister_port:
+ tcpci_unregister_port(chip->tcpci);
+ return err;
}
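The resulting probe order, summarized from the hunks above (including the earlier tcpci_init() change that defers the TCPC_ALERT_MASK write):

/*
 * 1. tcpci_register_port()           tcpci_init() computes alert_mask
 *                                    but no longer writes it
 * 2. devm_request_threaded_irq()     safe: chip interrupts still masked
 * 3. tcpci_write16(TCPC_ALERT_MASK)  unmask last, so no alert can fire
 *                                    before the port exists
 *
 * Any failure after step 1 unwinds through tcpci_unregister_port().
 */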
static void tcpci_remove(struct i2c_client *client)
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
index 1479f961772d..ed822f438a09 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6370.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -11,7 +11,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 64f6dd0dc660..88c50b984e8a 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client)
{
int ret;
struct rt1711h_chip *chip;
+ const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client)
dev_name(chip->dev), chip);
if (ret < 0)
return ret;
+
+ /* Enable alert interrupts */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask);
+ if (ret < 0)
+ return ret;
+
enable_irq_wake(client->irq);
return 0;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6021eeb903fe..a99db4e025cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/pd.h>
#include <linux/usb/pd_ado.h>
@@ -185,7 +186,8 @@
S(UNSTRUCTURED_VDMS), \
S(STRUCTURED_VDMS), \
S(COUNTRY_INFO), \
- S(COUNTRY_CODES)
+ S(COUNTRY_CODES), \
+ S(REVISION_INFORMATION)
#define GENERATE_ENUM(e) e
#define GENERATE_STRING(s) #s
@@ -225,6 +227,7 @@ enum pd_msg_request {
PD_MSG_CTRL_NOT_SUPP,
PD_MSG_DATA_SINK_CAP,
PD_MSG_DATA_SOURCE_CAP,
+ PD_MSG_DATA_REV,
};
enum adev_actions {
@@ -310,6 +313,13 @@ struct pd_data {
unsigned int operating_snk_mw;
};
+struct pd_revision_info {
+ u8 rev_major;
+ u8 rev_minor;
+ u8 ver_major;
+ u8 ver_minor;
+};
+
/*
* @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
* @ps_src_wait_off_time: Deadline (in ms) for tPSSourceOff timer
@@ -567,6 +577,9 @@ struct tcpm_port {
/* Timer deadline values configured at runtime */
struct pd_timings timings;
+
+ /* Indicates maximum (revision, version) supported */
+ struct pd_revision_info pd_rev;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -880,8 +893,8 @@ static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
if (port->tcpc->enable_auto_vbus_discharge) {
ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
- tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
- ret);
+ tcpm_log_force(port, "%s vbus discharge ret:%d",
+ str_enable_disable(enable), ret);
if (!ret)
port->auto_vbus_discharge_enabled = enable;
}
@@ -1234,6 +1247,24 @@ static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_rol
}
}
+static int tcpm_pd_send_revision(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ u32 rmdo;
+
+ memset(&msg, 0, sizeof(msg));
+ rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
+ port->pd_rev.ver_major, port->pd_rev.ver_minor);
+ msg.payload[0] = cpu_to_le32(rmdo);
+ msg.header = PD_HEADER_LE(PD_DATA_REVISION,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 1);
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
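For orientation, the layout of the Revision Message Data Object built above, per the USB PD specification (the field positions are what the RMDO() macro is assumed to encode):

/*
 * B31..28 Revision.major     B27..24 Revision.minor
 * B23..20 Version.major      B19..16 Version.minor
 * B15..0  reserved, shall be zero
 *
 * e.g. RMDO(3, 1, 1, 8) describes "PD Revision 3.1, Version 1.8".
 */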
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
struct pd_message msg;
@@ -2943,10 +2974,12 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
return 0;
ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
- requested_vbus_voltage);
+ requested_vbus_voltage,
+ port->pps_data.min_volt);
tcpm_log_force(port,
- "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
- mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
+ "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
+ mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
+ port->pps_data.min_volt, ret);
return ret;
}
@@ -3537,6 +3570,17 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
break;
+ case PD_CTRL_GET_REVISION:
+ if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
+ REVISION_INFORMATION);
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ break;
default:
tcpm_pd_handle_msg(port,
port->negotiated_rev < PD_REV30 ?
@@ -3781,6 +3825,14 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
tcpm_ams_finish(port);
}
break;
+ case PD_MSG_DATA_REV:
+ ret = tcpm_pd_send_revision(port);
+ if (ret)
+ tcpm_log(port,
+ "Unable to send revision msg, ret=%d",
+ ret);
+ tcpm_ams_finish(port);
+ break;
default:
break;
}
@@ -4390,7 +4442,7 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port)
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
{
- tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
+ tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
if (port->tcpc->set_partner_usb_comm_capable)
port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
@@ -4772,7 +4824,7 @@ static void run_state_machine(struct tcpm_port *port)
port->caps_count = 0;
port->pd_capable = true;
tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
- PD_T_SEND_SOURCE_CAP);
+ PD_T_SENDER_RESPONSE);
}
break;
case SRC_SEND_CAPABILITIES_TIMEOUT:
@@ -5065,16 +5117,16 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SNK_SOFT_RESET,
- port->timings.sink_wait_cap_time);
+ upcoming_state = SNK_SOFT_RESET;
} else {
if (!port->self_powered)
upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
else
upcoming_state = hard_reset_state(port);
- tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
- port->timings.sink_wait_cap_time);
}
+
+ tcpm_set_state(port, upcoming_state,
+ port->timings.sink_wait_cap_time);
break;
case SNK_WAIT_CAPABILITIES_TIMEOUT:
/*
@@ -5539,8 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
port->pps_data.active, 0);
tcpm_set_charge(port, false);
- tcpm_set_state(port, hard_reset_state(port),
- port->timings.ps_src_off_time);
+ tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
tcpm_enable_auto_vbus_discharge(port, true);
@@ -7036,7 +7087,9 @@ static void tcpm_port_unregister_pd(struct tcpm_port *port)
static int tcpm_port_register_pd(struct tcpm_port *port)
{
- struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
+ u16 pd_revision = port->typec_caps.pd_revision;
+ u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
+ struct usb_power_delivery_desc desc = { pd_revision, pd_version };
struct usb_power_delivery_capabilities *cap;
int ret, i;
@@ -7331,6 +7384,29 @@ static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fw
return 0;
}
+static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
+{
+ int ret;
+ u8 val[4];
+
+ ret = fwnode_property_count_u8(fwnode, "pd-revision");
+ if (!ret || ret != 4) {
+ tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
+ return;
+ }
+
+ ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
+ if (ret) {
+ tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
+ return;
+ }
+
+ port->pd_rev.rev_major = val[0];
+ port->pd_rev.rev_minor = val[1];
+ port->pd_rev.ver_major = val[2];
+ port->pd_rev.ver_minor = val[3];
+}
+
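An illustrative firmware node for the new property (the four-cell u8 layout matches the parsing above; the devicetree syntax is a sketch, not quoted from a binding document):

/*
 * pd-revision = /bits/ 8 <0x03 0x01 0x01 0x08>;   PD Revision 3.1, Version 1.8
 *
 * rev_major/rev_minor feed typec_caps.pd_revision (0x0301 here) and the
 * GET_REVISION reply; ver_major/ver_minor appear only in the RMDO and in
 * the usb_power_delivery_desc version field.
 */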
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
TCPM_PSY_OFFLINE = 0,
@@ -7635,7 +7711,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
mutex_init(&port->lock);
mutex_init(&port->swap_lock);
- port->wq = kthread_create_worker(0, dev_name(dev));
+ port->wq = kthread_run_worker(0, dev_name(dev));
if (IS_ERR(port->wq))
return ERR_CAST(port->wq);
sched_set_fifo(port->wq->task);
@@ -7645,14 +7721,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
- hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->state_machine_timer.function = state_machine_timer_handler;
- hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
- hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->enable_frs_timer.function = enable_frs_timer_handler;
- hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->send_discover_timer.function = send_discover_timer_handler;
+ hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
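The timer conversion is mechanical; a sketch of the equivalence:

/*
 * Old two-step pattern:              New single call:
 *   hrtimer_init(&t, clk, mode);       hrtimer_setup(&t, handler, clk, mode);
 *   t.function = handler;
 *
 * The combined form removes the window in which a timer exists without
 * a handler attached.
 */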
spin_lock_init(&port->pd_event_lock);
@@ -7669,11 +7745,18 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_destroy_wq;
tcpm_fw_get_timings(port, tcpc->fwnode);
+ tcpm_fw_get_pd_revision(port, tcpc->fwnode);
port->try_role = port->typec_caps.prefer_role;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
- port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
+
+ if (port->pd_rev.rev_major)
+ port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
+ port->pd_rev.rev_minor;
+ else
+ port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
+
port->typec_caps.svdm_version = SVDM_VER_2_0;
port->typec_caps.driver_data = port;
port->typec_caps.ops = &tcpm_ops;
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 680e1b87b152..75559601fe8f 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -69,6 +69,19 @@ config UCSI_PMIC_GLINK
To compile the driver as a module, choose M here: the module will be
called ucsi_glink.
+config CROS_EC_UCSI
+ tristate "UCSI Driver for ChromeOS EC"
+ depends on MFD_CROS_EC_DEV
+ depends on CROS_USBPD_NOTIFY
+ depends on !EXTCON_TCSS_CROS_EC
+ default MFD_CROS_EC_DEV
+ help
+ This driver enables UCSI support for a ChromeOS EC. The EC is
+ expected to implement a PPM.
+
+ To compile the driver as a module, choose M here: the module
+ will be called cros_ec_ucsi.
+
config UCSI_LENOVO_YOGA_C630
tristate "UCSI Interface Driver for Lenovo Yoga C630"
depends on EC_LENOVO_YOGA_C630
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index aed41d23887b..be98a879104d 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
obj-$(CONFIG_UCSI_STM32G0) += ucsi_stm32g0.o
obj-$(CONFIG_UCSI_PMIC_GLINK) += ucsi_glink.o
+obj-$(CONFIG_CROS_EC_UCSI) += cros_ec_ucsi.o
obj-$(CONFIG_UCSI_LENOVO_YOGA_C630) += ucsi_yoga_c630.o
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
new file mode 100644
index 000000000000..c605c8616726
--- /dev/null
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for ChromeOS EC
+ *
+ * Copyright 2024 Google LLC.
+ */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "ucsi.h"
+
+/*
+ * Maximum size in bytes of a UCSI message between AP and EC
+ */
+#define MAX_EC_DATA_SIZE 256
+
+/*
+ * Maximum time in milliseconds the cros_ec_ucsi driver
+ * will wait for a response to a command or and ack.
+ */
+#define WRITE_TMO_MS 5000
+
+/* Number of times to attempt recovery from a write timeout before giving up. */
+#define WRITE_TMO_CTR_MAX 5
+
+struct cros_ucsi_data {
+ struct device *dev;
+ struct ucsi *ucsi;
+
+ struct cros_ec_device *ec;
+ struct notifier_block nb;
+ struct work_struct work;
+ struct delayed_work write_tmo;
+ int tmo_counter;
+
+ struct completion complete;
+ unsigned long flags;
+};
+
+static int cros_ucsi_read(struct ucsi *ucsi, unsigned int offset, void *val,
+ size_t val_len)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ struct ec_params_ucsi_ppm_get req = {
+ .offset = offset,
+ .size = val_len,
+ };
+ int ret;
+
+ if (val_len > MAX_EC_DATA_SIZE) {
+ dev_err(udata->dev, "Can't read %zu bytes. Too big.\n", val_len);
+ return -EINVAL;
+ }
+
+ ret = cros_ec_cmd(udata->ec, 0, EC_CMD_UCSI_PPM_GET,
+ &req, sizeof(req), val, val_len);
+ if (ret < 0) {
+ dev_warn(udata->dev, "Failed to send EC message UCSI_PPM_GET: error=%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int cros_ucsi_read_version(struct ucsi *ucsi, u16 *version)
+{
+ return cros_ucsi_read(ucsi, UCSI_VERSION, version, sizeof(*version));
+}
+
+static int cros_ucsi_read_cci(struct ucsi *ucsi, u32 *cci)
+{
+ return cros_ucsi_read(ucsi, UCSI_CCI, cci, sizeof(*cci));
+}
+
+static int cros_ucsi_read_message_in(struct ucsi *ucsi, void *val,
+ size_t val_len)
+{
+ return cros_ucsi_read(ucsi, UCSI_MESSAGE_IN, val, val_len);
+}
+
+static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ u8 ec_buf[sizeof(struct ec_params_ucsi_ppm_set) + sizeof(cmd)];
+ struct ec_params_ucsi_ppm_set *req = (struct ec_params_ucsi_ppm_set *) ec_buf;
+ int ret;
+
+ req->offset = UCSI_CONTROL;
+ memcpy(req->data, &cmd, sizeof(cmd));
+ ret = cros_ec_cmd(udata->ec, 0, EC_CMD_UCSI_PPM_SET,
+ req, sizeof(ec_buf), NULL, 0);
+ if (ret < 0) {
+ dev_warn(udata->dev, "Failed to send EC message UCSI_PPM_SET: error=%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_sync_control_common(ucsi, cmd);
+ switch (ret) {
+ case -EBUSY:
+ /* EC may return -EBUSY if CCI.busy is set.
+ * Convert this to a timeout.
+ */
+ case -ETIMEDOUT:
+ /* Schedule a recovery attempt when we time out or when we tried
+ * to send a command while the PPM was still busy.
+ */
+ cancel_delayed_work_sync(&udata->write_tmo);
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ break;
+ case 0:
+ /* Successful write. Cancel any pending recovery work. */
+ cancel_delayed_work_sync(&udata->write_tmo);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ucsi_operations cros_ucsi_ops = {
+ .read_version = cros_ucsi_read_version,
+ .read_cci = cros_ucsi_read_cci,
+ .read_message_in = cros_ucsi_read_message_in,
+ .async_control = cros_ucsi_async_control,
+ .sync_control = cros_ucsi_sync_control,
+};
+
+static void cros_ucsi_work(struct work_struct *work)
+{
+ struct cros_ucsi_data *udata = container_of(work, struct cros_ucsi_data, work);
+ u32 cci;
+
+ if (cros_ucsi_read_cci(udata->ucsi, &cci))
+ return;
+
+ ucsi_notify_common(udata->ucsi, cci);
+}
+
+static void cros_ucsi_write_timeout(struct work_struct *work)
+{
+ struct cros_ucsi_data *udata =
+ container_of(work, struct cros_ucsi_data, write_tmo.work);
+ u32 cci;
+ u64 cmd;
+
+ if (cros_ucsi_read(udata->ucsi, UCSI_CCI, &cci, sizeof(cci))) {
+ dev_err(udata->dev,
+ "Reading CCI failed; no write timeout recovery possible.\n");
+ return;
+ }
+
+ if (cci & UCSI_CCI_BUSY) {
+ udata->tmo_counter++;
+
+ if (udata->tmo_counter <= WRITE_TMO_CTR_MAX)
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ else
+ dev_err(udata->dev,
+ "PPM unresponsive - too many write timeouts.\n");
+
+ return;
+ }
+
+ /* No longer busy means we can reset our timeout counter. */
+ udata->tmo_counter = 0;
+
+ /* Need to ack previous command which may have timed out. */
+ if (cci & UCSI_CCI_COMMAND_COMPLETE) {
+ cmd = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+ cros_ucsi_async_control(udata->ucsi, cmd);
+
+ /* Check again after a few seconds that the system has
+ * recovered to make sure our async write above was successful.
+ */
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ return;
+ }
+
+ /* We recovered from a previous timeout. Treat this as a recovery from
+ * suspend and call resume.
+ */
+ ucsi_resume(udata->ucsi);
+}
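A sketch of the recovery flow the handler above implements:

/*
 * write timeout fires, read CCI:
 *   CCI busy             re-arm the timer; give up after WRITE_TMO_CTR_MAX
 *   command complete     async-ACK the stale command, then re-arm the
 *                        timer to verify the ack took effect
 *   neither              PPM has recovered; ucsi_resume() resyncs state
 */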
+
+static int cros_ucsi_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_ucsi_data *udata = container_of(nb, struct cros_ucsi_data, nb);
+
+ if (!(host_event & PD_EVENT_PPM))
+ return NOTIFY_OK;
+
+ dev_dbg(udata->dev, "UCSI notification received\n");
+ flush_work(&udata->work);
+ schedule_work(&udata->work);
+
+ return NOTIFY_OK;
+}
+
+static void cros_ucsi_destroy(struct cros_ucsi_data *udata)
+{
+ cros_usbpd_unregister_notify(&udata->nb);
+ cancel_delayed_work_sync(&udata->write_tmo);
+ cancel_work_sync(&udata->work);
+ ucsi_destroy(udata->ucsi);
+}
+
+static int cros_ucsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_data = dev_get_drvdata(dev->parent);
+ struct cros_ucsi_data *udata;
+ int ret;
+
+ udata = devm_kzalloc(dev, sizeof(*udata), GFP_KERNEL);
+ if (!udata)
+ return -ENOMEM;
+
+ udata->dev = dev;
+
+ udata->ec = ec_data->ec_dev;
+ if (!udata->ec)
+ return dev_err_probe(dev, -ENODEV, "couldn't find parent EC device\n");
+
+ platform_set_drvdata(pdev, udata);
+
+ INIT_WORK(&udata->work, cros_ucsi_work);
+ INIT_DELAYED_WORK(&udata->write_tmo, cros_ucsi_write_timeout);
+ init_completion(&udata->complete);
+
+ udata->ucsi = ucsi_create(dev, &cros_ucsi_ops);
+ if (IS_ERR(udata->ucsi))
+ return dev_err_probe(dev, PTR_ERR(udata->ucsi), "failed to allocate UCSI instance\n");
+
+ ucsi_set_drvdata(udata->ucsi, udata);
+
+ udata->nb.notifier_call = cros_ucsi_event;
+ ret = cros_usbpd_register_notify(&udata->nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register notifier\n");
+ ucsi_destroy(udata->ucsi);
+ return ret;
+ }
+
+ ret = ucsi_register(udata->ucsi);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register UCSI\n");
+ cros_ucsi_destroy(udata);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cros_ucsi_remove(struct platform_device *dev)
+{
+ struct cros_ucsi_data *udata = platform_get_drvdata(dev);
+
+ ucsi_unregister(udata->ucsi);
+ cros_ucsi_destroy(udata);
+}
+
+static int __maybe_unused cros_ucsi_suspend(struct device *dev)
+{
+ struct cros_ucsi_data *udata = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&udata->write_tmo);
+ cancel_work_sync(&udata->work);
+
+ return 0;
+}
+
+static void __maybe_unused cros_ucsi_complete(struct device *dev)
+{
+ struct cros_ucsi_data *udata = dev_get_drvdata(dev);
+
+ ucsi_resume(udata->ucsi);
+}
+
+/*
+ * The UCSI protocol is also used on ChromeOS platforms that rely on the
+ * cros_ec_lpc.c driver for communication with the embedded controller (EC).
+ * On such platforms communication with the EC is not available until the
+ * .complete() callback of the cros_ec_lpc driver has run. For this reason
+ * we delay ucsi_resume() until the .complete() stage; otherwise the UCSI
+ * SET_NOTIFICATION_ENABLE command would fail and we would not receive any
+ * UCSI notifications from the EC, where the PPM is implemented.
+ */
+static const struct dev_pm_ops cros_ucsi_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = cros_ucsi_suspend,
+ .complete = cros_ucsi_complete,
+#endif
+};
+
+static const struct platform_device_id cros_ucsi_id[] = {
+ { KBUILD_MODNAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ucsi_id);
+
+static struct platform_driver cros_ucsi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &cros_ucsi_pm_ops,
+ },
+ .id_table = cros_ucsi_id,
+ .probe = cros_ucsi_probe,
+ .remove = cros_ucsi_remove,
+};
+
+module_platform_driver(cros_ucsi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UCSI driver for ChromeOS EC");
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index fcf499cc9458..2a2915b0a645 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -25,7 +25,7 @@
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
-#define UCSI_TIMEOUT_MS 5000
+#define UCSI_TIMEOUT_MS 10000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
@@ -1346,7 +1346,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
@@ -1364,7 +1364,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
if (cci & UCSI_CCI_COMMAND_COMPLETE)
@@ -1393,7 +1393,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
/* Give the PPM time to process a reset before reading CCI */
msleep(20);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret)
goto out;
@@ -1825,11 +1825,11 @@ static int ucsi_init(struct ucsi *ucsi)
err_unregister:
for (con = connector; con->port; con++) {
+ if (con->wq)
+ destroy_workqueue(con->wq);
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
usb_power_delivery_unregister_capabilities(con->port_sink_caps);
con->port_sink_caps = NULL;
@@ -1929,8 +1929,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
struct ucsi *ucsi;
if (!ops ||
- !ops->read_version || !ops->read_cci || !ops->read_message_in ||
- !ops->sync_control || !ops->async_control)
+ !ops->read_version || !ops->read_cci || !ops->poll_cci ||
+ !ops->read_message_in || !ops->sync_control || !ops->async_control)
return ERR_PTR(-EINVAL);
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
@@ -2013,10 +2013,6 @@ void ucsi_unregister(struct ucsi *ucsi)
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
if (ucsi->connector[i].wq) {
struct ucsi_work *uwork;
@@ -2032,6 +2028,11 @@ void ucsi_unregister(struct ucsi *ucsi)
destroy_workqueue(ucsi->connector[i].wq);
}
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
ucsi->connector[i].port_sink_caps = NULL;
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 5ff369c24a2f..28780acc4af2 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -30,6 +30,7 @@ struct dentry;
#define UCSIv2_MESSAGE_OUT 272
/* UCSI versions */
+#define UCSI_VERSION_1_0 0x0100
#define UCSI_VERSION_1_1 0x0110
#define UCSI_VERSION_1_2 0x0120
#define UCSI_VERSION_2_0 0x0200
@@ -61,6 +62,7 @@ struct dentry;
* struct ucsi_operations - UCSI I/O operations
* @read_version: Read implemented UCSI version
* @read_cci: Read CCI register
+ * @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
@@ -75,6 +77,7 @@ struct dentry;
struct ucsi_operations {
int (*read_version)(struct ucsi *ucsi, u16 *version);
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
+ int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
int (*sync_control)(struct ucsi *ucsi, u64 command);
int (*async_control)(struct ucsi *ucsi, u64 command);
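Backends that need no special polling path can alias the new hook to their existing CCI read, as the ccg, glink, stm32g0 and yoga_c630 hunks below do; a sketch:

/*
 * static const struct ucsi_operations foo_ops = {
 *         .read_cci = foo_read_cci,
 *         .poll_cci = foo_read_cci,    same accessor serves both paths
 *         ...
 * };
 *
 * In this series only ucsi_acpi needs a distinct poll_cci, because its
 * polled reads must be preceded by a _DSM call.
 */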
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 5c5515551963..ac1ebb5d9527 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
- }
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
return 0;
}
+static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ return ucsi_acpi_read_cci(ucsi, cci);
+}
+
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_acpi_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
@@ -142,6 +148,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_gram_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index fcb8e61136cf..4b1668733a4b 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -646,7 +646,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
- goto unlock;
+ goto err_put;
}
con = &uc->ucsi->connector[con_index - 1];
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
@@ -654,8 +654,8 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ret = ucsi_sync_control_common(ucsi, command);
+err_put:
pm_runtime_put_sync(uc->dev);
-unlock:
mutex_unlock(&uc->lock);
return ret;
@@ -664,6 +664,7 @@ unlock:
static const struct ucsi_operations ucsi_ccg_ops = {
.read_version = ucsi_ccg_read_version,
.read_cci = ucsi_ccg_read_cci,
+ .poll_cci = ucsi_ccg_read_cci,
.read_message_in = ucsi_ccg_read_message_in,
.sync_control = ucsi_ccg_sync_control,
.async_control = ucsi_ccg_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 90948cd6d297..8af79101a2fc 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -185,6 +185,11 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
int orientation;
+ if (!UCSI_CONSTAT(con, CONNECTED)) {
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_NONE);
+ return;
+ }
+
if (con->num > PMIC_GLINK_MAX_PORTS ||
!ucsi->port_orientation[con->num - 1])
return;
@@ -201,6 +206,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
static const struct ucsi_operations pmic_glink_ucsi_ops = {
.read_version = pmic_glink_ucsi_read_version,
.read_cci = pmic_glink_ucsi_read_cci,
+ .poll_cci = pmic_glink_ucsi_read_cci,
.read_message_in = pmic_glink_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = pmic_glink_ucsi_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 6923fad31d79..57ef7d83a412 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
static const struct ucsi_operations ucsi_stm32g0_ops = {
.read_version = ucsi_stm32g0_read_version,
.read_cci = ucsi_stm32g0_read_cci,
+ .poll_cci = ucsi_stm32g0_read_cci,
.read_message_in = ucsi_stm32g0_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_stm32g0_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index f3a5e24ea84d..d33e3f2dd1d8 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -71,9 +71,10 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
return yoga_c630_ec_ucsi_write(uec->ec, (u8*)&command);
}
-const struct ucsi_operations yoga_c630_ucsi_ops = {
+static const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
+ .poll_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = yoga_c630_ucsi_async_control,
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 6338d818bc8b..9aa30ef76f3b 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -269,7 +269,7 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
return 0;
}
- usbip_dbg_stub_rx("seqnum %d is not pending\n",
+ usbip_dbg_stub_rx("seqnum %u is not pending\n",
pdu->u.cmd_unlink.seqnum);
/*
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b1c2f6781cb3..7eb2e074012a 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -201,7 +201,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
pdu_header.base.seqnum);
if (priv->sgl) {
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index b03e5021c25b..e70fba9f55d6 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "usbip_common.h"
#include "vhci.h"
@@ -675,7 +676,7 @@ static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
spin_lock_irqsave(&vdev->priv_lock, flags);
- priv->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
+ priv->seqnum = (u32)atomic_inc_return(&vhci_hcd->seqnum);
if (priv->seqnum == 0xffff)
dev_info(&urb->dev->dev, "seqnum max\n");
@@ -1161,12 +1162,8 @@ static int vhci_setup(struct usb_hcd *hcd)
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
}
- /*
- * Support SG.
- * sg_tablesize is an arbitrary value to alleviate memory pressure
- * on the host.
- */
- hcd->self.sg_tablesize = 32;
+ /* accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
hcd->self.no_sg_constraint = 1;
return 0;
@@ -1453,7 +1450,7 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
if (connected > 0) {
dev_info(&pdev->dev,
"We have %d active connection%s. Do not suspend.\n",
- connected, (connected == 1 ? "" : "s"));
+ connected, str_plural(connected));
ret = -EBUSY;
} else {
dev_info(&pdev->dev, "suspend vhci_hcd");
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 7f2d1c241559..a75f4a898a41 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -66,7 +66,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
- pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+ pr_err("cannot find a urb of seqnum %u max seqnum %u\n",
pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
@@ -162,10 +162,10 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
* already received the result of its submit result and gave
* back the URB.
*/
- pr_info("the urb (seqnum %d) was already given back\n",
+ pr_info("the urb (seqnum %u) was already given back\n",
pdu->base.seqnum);
} else {
- usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 907a43a00896..2aae3edfc813 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -67,7 +67,7 @@ out:
* Exposes device descriptor from the gadget driver.
*/
static ssize_t dev_desc_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *out,
+ const struct bin_attribute *attr, char *out,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -88,7 +88,7 @@ unlock:
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
-static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
+static const BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
static ssize_t usbip_sockfd_store(struct device *dev,
struct device_attribute *attr,
@@ -252,14 +252,14 @@ static struct attribute *dev_attrs[] = {
NULL,
};
-static struct bin_attribute *dev_bin_attrs[] = {
+static const struct bin_attribute *const dev_bin_attrs[] = {
&bin_attr_dev_desc,
NULL,
};
static const struct attribute_group vudc_attr_group = {
.attrs = dev_attrs,
- .bin_attrs = dev_bin_attrs,
+ .bin_attrs_new = dev_bin_attrs,
};
const struct attribute_group *vudc_groups[] = {
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 3ccb17c3e840..30c11bf9f4e7 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -107,7 +107,7 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
- usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5f581e71e201..36099047560d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1952,7 +1952,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
goto out_free;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter);
+ dests[1].counter = node->ucast_counter.counter;
#endif
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->ucast_rule)) {
@@ -1961,7 +1961,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter);
+ dests[1].counter = node->mcast_counter.counter;
#endif
memset(dmac_c, 0, ETH_ALEN);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa.h b/drivers/vdpa/octeon_ep/octep_vdpa.h
index 046710ec4d42..53b020b019f7 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa.h
+++ b/drivers/vdpa/octeon_ep/octep_vdpa.h
@@ -8,6 +8,7 @@
#include <linux/pci_regs.h>
#include <linux/vdpa.h>
#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/virtio_crypto.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_blk.h>
#include <uapi/linux/virtio_config.h>
@@ -29,12 +30,12 @@
#define OCTEP_EPF_RINFO(x) (0x000209f0 | ((x) << 25))
#define OCTEP_VF_MBOX_DATA(x) (0x00010210 | ((x) << 17))
#define OCTEP_PF_MBOX_DATA(x) (0x00022000 | ((x) << 4))
-
-#define OCTEP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
-#define OCTEP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0x7F)
+#define OCTEP_VF_IN_CTRL(x) (0x00010000 | ((x) << 17))
+#define OCTEP_VF_IN_CTRL_RPVF(val) (((val) >> 48) & 0xF)
#define OCTEP_FW_READY_SIGNATURE0 0xFEEDFEED
#define OCTEP_FW_READY_SIGNATURE1 0x3355ffaa
+#define OCTEP_MAX_CB_INTR 8
enum octep_vdpa_dev_status {
OCTEP_VDPA_DEV_STATUS_INVALID,
@@ -48,9 +49,26 @@ enum octep_vdpa_dev_status {
struct octep_vring_info {
struct vdpa_callback cb;
void __iomem *notify_addr;
- u32 __iomem *cb_notify_addr;
+ void __iomem *cb_notify_addr;
phys_addr_t notify_pa;
- char msix_name[256];
+};
+
+enum octep_pci_vndr_cfg_type {
+ OCTEP_PCI_VNDR_CFG_TYPE_VIRTIO_ID,
+ OCTEP_PCI_VNDR_CFG_TYPE_MAX,
+};
+
+struct octep_pci_vndr_data {
+ struct virtio_pci_vndr_data hdr;
+ u8 id;
+ u8 bar;
+ union {
+ u64 data;
+ struct {
+ u32 offset;
+ u32 length;
+ };
+ };
};
struct octep_hw {
@@ -68,7 +86,9 @@ struct octep_hw {
u64 features;
u16 nr_vring;
u32 config_size;
- int irq;
+ int nb_irqs;
+ int *irqs;
+ u8 dev_id;
};
u8 octep_hw_get_status(struct octep_hw *oct_hw);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
index 1d4767b33315..74240101c505 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2024 Marvell. */
#include <linux/iopoll.h>
+#include <linux/build_bug.h>
#include "octep_vdpa.h"
@@ -358,7 +359,14 @@ u16 octep_get_vq_size(struct octep_hw *oct_hw)
static u32 octep_get_config_size(struct octep_hw *oct_hw)
{
- return sizeof(struct virtio_net_config);
+ switch (oct_hw->dev_id) {
+ case VIRTIO_ID_NET:
+ return sizeof(struct virtio_net_config);
+ case VIRTIO_ID_CRYPTO:
+ return sizeof(struct virtio_crypto_config);
+ default:
+ return 0;
+ }
}
static void __iomem *octep_get_cap_addr(struct octep_hw *oct_hw, struct virtio_pci_cap *cap)
@@ -416,8 +424,25 @@ static int octep_pci_signature_verify(struct octep_hw *oct_hw)
return 0;
}
+static void octep_vndr_data_process(struct octep_hw *oct_hw,
+ struct octep_pci_vndr_data *vndr_data)
+{
+ BUILD_BUG_ON(sizeof(struct octep_pci_vndr_data) % 4 != 0);
+
+ switch (vndr_data->id) {
+ case OCTEP_PCI_VNDR_CFG_TYPE_VIRTIO_ID:
+ oct_hw->dev_id = (u8)vndr_data->data;
+ break;
+ default:
+ dev_err(&oct_hw->pdev->dev, "Invalid vendor data id %u\n",
+ vndr_data->id);
+ break;
+ }
+}
+
int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
{
+ struct octep_pci_vndr_data vndr_data;
struct octep_mbox __iomem *mbox;
struct device *dev = &pdev->dev;
struct virtio_pci_cap cap;
@@ -466,6 +491,15 @@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
case VIRTIO_PCI_CAP_ISR_CFG:
oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
break;
+ case VIRTIO_PCI_CAP_VENDOR_CFG:
+ octep_pci_caps_read(oct_hw, &vndr_data, sizeof(vndr_data), pos);
+ if (vndr_data.hdr.vendor_id != PCI_VENDOR_ID_CAVIUM) {
+ dev_err(dev, "Invalid vendor data\n");
+ return -EINVAL;
+ }
+
+ octep_vndr_data_process(oct_hw, &vndr_data);
+ break;
}
pos = cap.cap_next;
@@ -495,8 +529,6 @@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
if (!oct_hw->vqs)
return -ENOMEM;
- oct_hw->irq = -1;
-
dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features);
dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
index cd55b1aac151..f3d4dda4e04c 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
@@ -49,58 +49,89 @@ static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
struct octep_hw *oct_hw = data;
int i;
- for (i = 0; i < oct_hw->nr_vring; i++) {
- if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
- /* Acknowledge the per queue notification to the device */
- iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
- oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ /* Each device has multiple interrupts (nb_irqs) shared among rings
+ * (nr_vring). Device interrupts are mapped to the rings in a
+ * round-robin fashion.
+ *
+ * For example, if nb_irqs = 8 and nr_vring = 64:
+ * 0 -> 0, 8, 16, 24, 32, 40, 48, 56;
+ * 1 -> 1, 9, 17, 25, 33, 41, 49, 57;
+ * ...
+ * 7 -> 7, 15, 23, 31, 39, 47, 55, 63;
+ */
+
+ for (i = irq - oct_hw->irqs[0]; i < oct_hw->nr_vring; i += oct_hw->nb_irqs) {
+ if (ioread8(oct_hw->vqs[i].cb_notify_addr)) {
+ /* Acknowledge the per ring notification to the device */
+ iowrite8(0, oct_hw->vqs[i].cb_notify_addr);
+
+ if (likely(oct_hw->vqs[i].cb.callback))
+ oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ break;
}
}
+ /* Check for config interrupt. Config uses the first interrupt */
+ if (unlikely(irq == oct_hw->irqs[0] && ioread8(oct_hw->isr))) {
+ iowrite8(0, oct_hw->isr);
+
+ if (oct_hw->config_cb.callback)
+ oct_hw->config_cb.callback(oct_hw->config_cb.private);
+ }
+
return IRQ_HANDLED;
}
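A worked example of the dispatch above (values assumed for illustration):

/*
 * Suppose irqs[0] = 100, nb_irqs = 8 and nr_vring = 64. An interrupt on
 * vector 103 starts the scan at ring i = 103 - 100 = 3 and visits rings
 * 3, 11, 19, ..., 59, servicing the first one whose notify byte is set.
 * Vector 100 (irqs[0]) additionally checks the ISR byte for a config
 * interrupt.
 */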
static void octep_free_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
+ int irq;
- if (oct_hw->irq != -1) {
- devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
- oct_hw->irq = -1;
+ if (!oct_hw->irqs)
+ return;
+
+ for (irq = 0; irq < oct_hw->nb_irqs; irq++) {
+ if (!oct_hw->irqs[irq])
+ break;
+
+ devm_free_irq(&pdev->dev, oct_hw->irqs[irq], oct_hw);
}
+
pci_free_irq_vectors(pdev);
+ devm_kfree(&pdev->dev, oct_hw->irqs);
+ oct_hw->irqs = NULL;
}
static int octep_request_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
- int ret, irq;
+ int ret, irq, idx;
- /* Currently HW device provisions one IRQ per VF, hence
- * allocate one IRQ for all virtqueues call interface.
- */
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ oct_hw->irqs = devm_kcalloc(&pdev->dev, oct_hw->nb_irqs, sizeof(int), GFP_KERNEL);
+ if (!oct_hw->irqs)
+ return -ENOMEM;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, oct_hw->nb_irqs, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to alloc msix vector");
return ret;
}
- snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
- OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));
-
- irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
- oct_hw->vqs->msix_name, oct_hw);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register interrupt handler\n");
- goto free_irq_vec;
+ for (idx = 0; idx < oct_hw->nb_irqs; idx++) {
+ irq = pci_irq_vector(pdev, idx);
+ ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
+ dev_name(&pdev->dev), oct_hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register interrupt handler\n");
+ goto free_irqs;
+ }
+ oct_hw->irqs[idx] = irq;
}
- oct_hw->irq = irq;
return 0;
-free_irq_vec:
- pci_free_irq_vectors(pdev);
+free_irqs:
+ octep_free_irqs(oct_hw);
return ret;
}
@@ -271,7 +302,9 @@ static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
- return VIRTIO_ID_NET;
+ struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
+
+ return oct_hw->dev_id;
}
static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
@@ -559,6 +592,7 @@ static void octep_vdpa_setup_task(struct work_struct *work)
struct device *dev = &pdev->dev;
struct octep_hw *oct_hw;
unsigned long timeout;
+ u64 val;
int ret;
oct_hw = &mgmt_dev->oct_hw;
@@ -590,6 +624,13 @@ static void octep_vdpa_setup_task(struct work_struct *work)
if (ret)
return;
+ val = readq(oct_hw->base[OCTEP_HW_MBOX_BAR] + OCTEP_VF_IN_CTRL(0));
+ oct_hw->nb_irqs = OCTEP_VF_IN_CTRL_RPVF(val);
+ if (!oct_hw->nb_irqs || oct_hw->nb_irqs > OCTEP_MAX_CB_INTR) {
+ dev_err(dev, "Invalid number of interrupts %d\n", oct_hw->nb_irqs);
+ goto unmap_region;
+ }
+
ret = octep_hw_caps_read(oct_hw, pdev);
if (ret < 0)
goto unmap_region;
@@ -768,12 +809,6 @@ static int octep_vdpa_pf_setup(struct octep_pf *octpf)
return -EINVAL;
}
- if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
- val &= ~GENMASK_ULL(35, 32);
- val |= BIT_ULL(32);
- writeq(val, addr + OCTEP_EPF_RINFO(0));
- }
-
len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);
octpf->vf_stride = len / totalvfs;
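The round-robin mapping documented in octep_vdpa_intr_handler() above can be sketched in isolation; this is an illustrative walk, not the driver code, with parameter names mirroring the driver's fields:

#include <linux/printk.h>

/* For interrupt index k, visit rings k, k + nb_irqs, k + 2 * nb_irqs, ...
 * exactly as the handler's for-loop does. */
static void print_rings_for_irq(unsigned int irq_idx, unsigned int nb_irqs,
				unsigned int nr_vring)
{
	unsigned int ring;

	for (ring = irq_idx; ring < nr_vring; ring += nb_irqs)
		pr_info("irq %u -> ring %u\n", irq_idx, ring);
}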
diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c
index c8b74980dbd1..55ec51c17ab3 100644
--- a/drivers/vdpa/solidrun/snet_main.c
+++ b/drivers/vdpa/solidrun/snet_main.c
@@ -556,36 +556,38 @@ static const struct vdpa_config_ops snet_config_ops = {
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
char *name;
- int ret, i, mask = 0;
+ unsigned short i;
+ bool bars_found = false;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
/* We don't know which BAR will be used to communicate..
* We will map every bar with len > 0.
*
* Later, we will discover the BAR and unmap all other BARs.
*/
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i))
- mask |= (1 << i);
- }
+ void __iomem *io;
- /* No BAR can be used.. */
- if (!mask) {
- SNET_ERR(pdev, "Failed to find a PCI BAR\n");
- return -ENODEV;
- }
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
- if (!name)
- return -ENOMEM;
+ io = pcim_iomap_region(pdev, i, name);
+ if (IS_ERR(io)) {
+ SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
+ return PTR_ERR(io);
+ }
- ret = pcim_iomap_regions(pdev, mask, name);
- if (ret) {
- SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
- return ret;
+ psnet->bars[i] = io;
+ bars_found = true;
}
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (mask & (1 << i))
- psnet->bars[i] = pcim_iomap_table(pdev)[i];
+ /* No BAR can be used.. */
+ if (!bars_found) {
+ SNET_ERR(pdev, "Failed to find a PCI BAR\n");
+ return -ENODEV;
}
return 0;
@@ -594,20 +596,20 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
char *name;
- int ret;
+ void __iomem *io;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev));
if (!name)
return -ENOMEM;
/* Request and map BAR */
- ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
- if (ret) {
+ io = pcim_iomap_region(pdev, snet->psnet->cfg.vf_bar, name);
+ if (IS_ERR(io)) {
SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
- return ret;
+ return PTR_ERR(io);
}
- snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];
+ snet->bar = io;
return 0;
}
@@ -656,15 +658,12 @@ static int psnet_detect_bar(struct psnet *psnet, u32 off)
static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
- int i, mask = 0;
+ unsigned short i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (psnet->bars[i] && i != psnet->barno)
- mask |= (1 << i);
+ pcim_iounmap_region(pdev, i);
}
-
- if (mask)
- pcim_iounmap_regions(pdev, mask);
}
/* Read SNET config from PCI BAR */
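The per-BAR pattern adopted above generalizes as the sketch below, assuming only the pcim_iomap_region()/IS_ERR() contract visible in the hunk:

#include <linux/pci.h>

/* Map every populated standard BAR with the managed per-region API;
 * pcim_iomap_region() returns an ERR_PTR() on failure, not NULL. */
static int map_all_bars(struct pci_dev *pdev,
			void __iomem *bars[PCI_STD_NUM_BARS],
			const char *name)
{
	unsigned short i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		void __iomem *io;

		if (!pci_resource_len(pdev, i))
			continue;

		io = pcim_iomap_region(pdev, i, name);
		if (IS_ERR(io))
			return PTR_ERR(io);

		bars[i] = io;
	}

	return 0;
}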
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 8ffea8430f95..c204fc8e471a 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -229,7 +229,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
dev = &vdpasim->vdpa.dev;
kthread_init_work(&vdpasim->work, vdpasim_work_fn);
- vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
+ vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
dev_attr->name);
if (IS_ERR(vdpasim->worker))
goto err_iommu;
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 16380764275e..8787407f75b0 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -367,6 +367,14 @@ static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}
+static void vp_vdpa_kick_vq_with_data(struct vdpa_device *vdpa, u32 data)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ u16 qid = data & 0xFFFF;
+
+ vp_iowrite32(data, vp_vdpa->vring[qid].notify);
+}
+
static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -472,6 +480,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.get_vq_size = vp_vdpa_get_vq_size,
.set_vq_address = vp_vdpa_set_vq_address,
.kick_vq = vp_vdpa_kick_vq,
+ .kick_vq_with_data = vp_vdpa_kick_vq_with_data,
.get_generation = vp_vdpa_get_generation,
.get_device_id = vp_vdpa_get_device_id,
.get_vendor_id = vp_vdpa_get_vendor_id,
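A short sketch of the layout vp_vdpa_kick_vq_with_data() relies on: under VIRTIO_F_NOTIFICATION_DATA the queue index sits in the low 16 bits of the driver-supplied value, which is why data & 0xFFFF recovers the qid before the full 32-bit value is written to that queue's notify register.

#include <linux/types.h>

/* The vq index lives in bits 15:0; the remaining bits hold ring
 * position details that the device consumes as-is. */
static inline u16 notification_data_to_qid(u32 data)
{
	return data & 0xFFFF;
}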
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 49559605177e..c321d442f0da 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -266,24 +266,12 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
if (ret)
goto err_free;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- df, O_RDWR);
+ filep = anon_inode_getfile_fmode("[vfio-device]", &vfio_device_fops,
+ df, O_RDWR, FMODE_PREAD | FMODE_PWRITE);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
}
-
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
-
/*
* Use the pseudo fs inode on the device to link all mmaps
* to the same address space, allowing us to unmap all vmas
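As a hedged sketch of the new helper in use; the argument order (name, fops, private data, open flags, extra fmode bits) is taken from the hunk above and should be treated as an assumption outside this tree:

#include <linux/anon_inodes.h>
#include <linux/fs.h>

static struct file *open_example_file(const struct file_operations *fops,
				      void *priv)
{
	/* Extra fmode bits are applied at file creation instead of
	 * being OR'ed into filep->f_mode after the fact; signature
	 * per the hunk above, verify against your tree. */
	return anon_inode_getfile_fmode("[example]", fops, priv, O_RDWR,
					FMODE_PREAD | FMODE_PWRITE);
}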
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index ed4737de4528..f2e686f8f1ef 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -76,7 +76,7 @@ int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
if (ret)
return ret;
- ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
+ ret = class_compat_create_link(mdev_bus_compat_class, dev);
if (ret)
dev_warn(dev, "Failed to create compatibility class link\n");
@@ -98,7 +98,7 @@ void mdev_unregister_parent(struct mdev_parent *parent)
dev_info(parent->dev, "MDEV: Unregistering\n");
down_write(&parent->unreg_sem);
- class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
+ class_compat_remove_link(mdev_bus_compat_class, parent->dev);
device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
up_write(&parent->unreg_sem);
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index eb7387ee6ebd..11eda6b207f1 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
buf->dma_dir, 0);
}
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
@@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
return -ENOMEM;
do {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+ page_list);
if (!filled) {
ret = -ENOMEM;
goto err;
@@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
{
int i;
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for (i = 0; i < recv_buf->npages; i++)
__free_page(recv_buf->page_list[i]);
@@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
return -ENOMEM;
for (;;) {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
- npages - done,
- recv_buf->page_list + done);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
+ npages - done,
+ recv_buf->page_list + done);
if (!filled)
goto err;
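The rename above is mechanical, but the calling convention is worth spelling out: alloc_pages_bulk() may fill fewer slots than requested, which is why every call site loops. A minimal sketch of that contract (error unwinding left to the caller):

#include <linux/gfp.h>
#include <linux/errno.h>

/* Loop until the array is fully populated; on failure the caller
 * frees pages[0..done). */
static int fill_page_array(struct page **pages, unsigned long npages)
{
	unsigned long done = 0, filled;

	while (done < npages) {
		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, npages - done,
					  pages + done);
		if (!filled)
			return -ENOMEM;

		done += filled;
	}

	return 0;
}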
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index a467085038f0..e5ac39c4cc6b 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -5,6 +5,8 @@
#include <linux/sizes.h>
#include <linux/vfio_pci_core.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
/*
* The device memory usable to the workloads running in the VM is cached
@@ -17,12 +19,21 @@
#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
-/* Memory size expected as non cached and reserved by the VM driver */
-#define RESMEM_SIZE SZ_1G
-
/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
#define MEMBLK_SIZE SZ_512M
+#define DVSEC_BITMAP_OFFSET 0xA
+#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0)
+
+#define GPU_CAP_DVSEC_REGISTER 3
+
+#define C2C_LINK_BAR0_OFFSET 0x1498
+#define HBM_TRAINING_BAR0_OFFSET 0x200BC
+#define STATUS_READY 0xFF
+
+#define POLL_QUANTUM_MS 1000
+#define POLL_TIMEOUT_MS (30 * 1000)
+
/*
* The state of the two device memory region - resmem and usemem - is
* saved as struct mem_region.
@@ -46,6 +57,7 @@ struct nvgrace_gpu_pci_core_device {
struct mem_region resmem;
/* Lock to control device memory kernel mapping */
struct mutex remap_lock;
+ bool has_mig_hw_bug;
};
static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
@@ -66,7 +78,7 @@ nvgrace_gpu_memregion(int index,
if (index == USEMEM_REGION_INDEX)
return &nvdev->usemem;
- if (index == RESMEM_REGION_INDEX)
+ if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX)
return &nvdev->resmem;
return NULL;
@@ -751,40 +763,67 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
u64 memphys, u64 memlength)
{
int ret = 0;
+ u64 resmem_size = 0;
/*
- * The VM GPU device driver needs a non-cacheable region to support
- * the MIG feature. Since the device memory is mapped as NORMAL cached,
- * carve out a region from the end with a different NORMAL_NC
- * property (called as reserved memory and represented as resmem). This
- * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while
- * exposing the rest (termed as usable memory and represented using usemem)
- * as cacheable 64b BAR (region 4 and 5).
+ * On Grace Hopper systems, the VM GPU device driver needs a non-cacheable
+ * region to support the MIG feature owing to a hardware bug. Since the
+ * device memory is mapped as NORMAL cached, carve out a region from the end
+ * with a different NORMAL_NC property (called reserved memory and
+ * represented as resmem). This region is then exposed as a 64b BAR
+ * (region 2 and 3) to the VM, while exposing the rest (termed as usable
+ * memory and represented using usemem) as cacheable 64b BAR (region 4 and 5).
*
* devmem (memlength)
* |-------------------------------------------------|
* | |
* usemem.memphys resmem.memphys
+ *
+ * This hardware bug is fixed on the Grace Blackwell platforms and the
+ * presence of the bug can be determined through nvdev->has_mig_hw_bug.
+ * Thus on systems with the hardware fix, there is no need to partition
+ * the GPU device memory and the entire memory is usable and mapped as
+ * NORMAL cached (i.e. resmem size is 0).
*/
+ if (nvdev->has_mig_hw_bug)
+ resmem_size = SZ_1G;
+
nvdev->usemem.memphys = memphys;
/*
* The device memory exposed to the VM is added to the kernel by the
- * VM driver module in chunks of memory block size. Only the usable
- * memory (usemem) is added to the kernel for usage by the VM
- * workloads. Make the usable memory size memblock aligned.
+ * VM driver module in chunks of memory block size. Note that only the
+ * usable memory (usemem) is added to the kernel for usage by the VM
+ * workloads.
*/
- if (check_sub_overflow(memlength, RESMEM_SIZE,
+ if (check_sub_overflow(memlength, resmem_size,
&nvdev->usemem.memlength)) {
ret = -EOVERFLOW;
goto done;
}
/*
- * The USEMEM part of the device memory has to be MEMBLK_SIZE
- * aligned. This is a hardwired ABI value between the GPU FW and
- * VFIO driver. The VM device driver is also aware of it and make
- * use of the value for its calculation to determine USEMEM size.
+ * The usemem region is exposed as a 64b BAR composed of region 4 and 5.
+ * Calculate and save the BAR size for the region.
+ */
+ nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
+
+ /*
+ * If the hardware has the fix for MIG, there is no requirement
+ * for splitting the device memory to create RESMEM. The entire
+ * device memory is usable and will be USEMEM. Return here for
+ * such a case.
+ */
+ if (!nvdev->has_mig_hw_bug)
+ goto done;
+
+ /*
+ * When the device memory is split to work around the MIG bug on
+ * Grace Hopper, the USEMEM part of the device memory has to be
+ * MEMBLK_SIZE aligned. This is a hardwired ABI value between the
+ * GPU FW and VFIO driver. The VM device driver is also aware of it
+ * and makes use of the value for its calculation to determine USEMEM
+ * size. Note that the device memory may not be 512M aligned.
*/
nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
MEMBLK_SIZE);
@@ -803,15 +842,93 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
}
/*
- * The memory regions are exposed as BARs. Calculate and save
- * the BAR size for them.
+ * The resmem region is exposed as a 64b BAR composed of region 2 and 3
+ * for Grace Hopper. Calculate and save the BAR size for the region.
*/
- nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
done:
return ret;
}
+static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
+{
+ int pcie_dvsec;
+ u16 dvsec_ctrl16;
+
+ pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
+ GPU_CAP_DVSEC_REGISTER);
+
+ if (pcie_dvsec) {
+ pci_read_config_word(pdev,
+ pcie_dvsec + DVSEC_BITMAP_OFFSET,
+ &dvsec_ctrl16);
+
+ if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * To reduce the system bootup time, the HBM training has
+ * been moved out of the UEFI on the Grace-Blackwell systems.
+ *
+ * The onus of checking whether the HBM training has completed
+ * thus falls on the module. The HBM training status can be
+ * determined from a BAR0 register.
+ *
+ * Similarly, another BAR0 register exposes the status of the
+ * CPU-GPU chip-to-chip (C2C) cache coherent interconnect.
+ *
+ * Poll these registers for up to 30s. If the HBM training is
+ * not complete or if the C2C link is not ready, fail the probe.
+ *
+ * While the wait is not required on Grace Hopper systems, it
+ * is beneficial to perform the check to ensure the device is in
+ * the expected state.
+ *
+ * Ensure that the BAR0 region is enabled before accessing the
+ * registers.
+ */
+static int nvgrace_gpu_wait_device_ready(struct pci_dev *pdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);
+ void __iomem *io;
+ int ret = -ETIME;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_selected_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (ret)
+ goto request_region_exit;
+
+ io = pci_iomap(pdev, 0, 0);
+ if (!io) {
+ ret = -ENOMEM;
+ goto iomap_exit;
+ }
+
+ do {
+ if ((ioread32(io + C2C_LINK_BAR0_OFFSET) == STATUS_READY) &&
+ (ioread32(io + HBM_TRAINING_BAR0_OFFSET) == STATUS_READY)) {
+ ret = 0;
+ goto reg_check_exit;
+ }
+ msleep(POLL_QUANTUM_MS);
+ } while (!time_after(jiffies, timeout));
+
+reg_check_exit:
+ pci_iounmap(pdev, io);
+iomap_exit:
+ pci_release_selected_regions(pdev, 1 << 0);
+request_region_exit:
+ pci_disable_device(pdev);
+ return ret;
+}
+
static int nvgrace_gpu_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -820,6 +937,10 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
u64 memphys, memlength;
int ret;
+ ret = nvgrace_gpu_wait_device_ready(pdev);
+ if (ret)
+ return ret;
+
ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
if (!ret)
ops = &nvgrace_gpu_pci_ops;
@@ -832,6 +953,8 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
dev_set_drvdata(&pdev->dev, &nvdev->core_device);
if (ops == &nvgrace_gpu_pci_ops) {
+ nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);
+
/*
* Device memory properties are identified in the host ACPI
* table. Set the nvgrace_gpu_pci_core_device structure.
@@ -868,6 +991,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = {
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) },
/* GH200 SKU */
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) },
+ /* GB200 SKU */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2941) },
{}
};
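The readiness wait added above follows a standard bounded-poll shape. As a sketch, with check_ready() standing in for the driver's two ioread32() status comparisons:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_until_ready(bool (*check_ready)(void))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(30 * 1000);

	do {
		/* check_ready() stands in for comparing the C2C link
		 * and HBM training registers against STATUS_READY. */
		if (check_ready())
			return 0;

		msleep(1000);
	} while (!time_after(jiffies, timeout));

	return -ETIME;
}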
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index ea2745c1ac5e..94142581c98c 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -511,13 +511,13 @@ static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
*vbar &= cpu_to_le32((u32)mask);
- } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW) {
- mask = ~(0x20000 - 1);
+ } else if (pdev->rom && pdev->romlen) {
+ mask = ~(roundup_pow_of_two(pdev->romlen) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
*vbar &= cpu_to_le32((u32)mask);
- } else
+ } else {
*vbar = 0;
+ }
vdev->bardirty = false;
}
@@ -1389,11 +1389,12 @@ static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epo
switch (ecap) {
case PCI_EXT_CAP_ID_VNDR:
- ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
+ ret = pci_read_config_dword(pdev, epos + PCI_VNDR_HEADER,
+ &dword);
if (ret)
return pcibios_err_to_errno(ret);
- return dword >> PCI_VSEC_HDR_LEN_SHIFT;
+ return PCI_VNDR_HEADER_LEN(dword);
case PCI_EXT_CAP_ID_VC:
case PCI_EXT_CAP_ID_VC9:
case PCI_EXT_CAP_ID_MFVC:
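The mask arithmetic in vfio_bar_fixup() above encodes standard BAR sizing, sketched below: a BAR's size must be a power of two, so an arbitrary shadow-ROM length is rounded up and only the address bits above that size stay writable.

#include <linux/log2.h>
#include <linux/types.h>

/* Round the ROM length up to a power of two and mask off the bits
 * below it, as BAR address decoding requires. */
static u32 rom_bar_mask(unsigned long romlen)
{
	return ~(u32)(roundup_pow_of_two(romlen) - 1);
}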
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1a4ed5a357d3..586e49efb81b 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1054,31 +1054,27 @@ static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
+ info.size = 0;
- /* Report the BAR size, not the ROM size */
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- /* Shadow ROMs appear as PCI option ROMs */
- if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW)
- info.size = 0x20000;
- else
- break;
- }
-
- /*
- * Is it really there? Enable memory decode for implicit access
- * in pci_map_rom().
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- io = pci_map_rom(pdev, &size);
- if (io) {
+ if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
+ /*
+ * Check ROM content is valid. Need to enable memory
+ * decode for ROM access in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ /* Report the BAR size, not the ROM size. */
+ info.size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ pci_unmap_rom(pdev, io);
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ } else if (pdev->rom && pdev->romlen) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
- pci_unmap_rom(pdev, io);
- } else {
- info.size = 0;
+ /* Report BAR size as power of two. */
+ info.size = roundup_pow_of_two(pdev->romlen);
}
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
break;
}
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 66b72c289284..6192788c8ba3 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "vfio_pci_priv.h"
@@ -61,9 +62,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_iowrite##size);
VFIO_IOWRITE(8)
VFIO_IOWRITE(16)
VFIO_IOWRITE(32)
-#ifdef iowrite64
VFIO_IOWRITE(64)
-#endif
#define VFIO_IOREAD(size) \
int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
@@ -89,9 +88,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_ioread##size);
VFIO_IOREAD(8)
VFIO_IOREAD(16)
VFIO_IOREAD(32)
-#ifdef ioread64
VFIO_IOREAD(64)
-#endif
#define VFIO_IORDWR(size) \
static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\
@@ -127,9 +124,7 @@ static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\
VFIO_IORDWR(8)
VFIO_IORDWR(16)
VFIO_IORDWR(32)
-#if defined(ioread64) && defined(iowrite64)
VFIO_IORDWR(64)
-#endif
/*
* Read or write from an __iomem region (MMIO or I/O port) with an excluded
@@ -155,7 +150,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
else
fillable = 0;
-#if defined(ioread64) && defined(iowrite64)
if (fillable >= 8 && !(off % 8)) {
ret = vfio_pci_iordwr64(vdev, iswrite, test_mem,
io, buf, off, &filled);
@@ -163,7 +157,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
return ret;
} else
-#endif
if (fillable >= 4 && !(off % 4)) {
ret = vfio_pci_iordwr32(vdev, iswrite, test_mem,
io, buf, off, &filled);
@@ -244,9 +237,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
if (pci_resource_start(pdev, bar))
end = pci_resource_len(pdev, bar);
- else if (bar == PCI_ROM_RESOURCE &&
- pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
- end = 0x20000;
+ else if (bar == PCI_ROM_RESOURCE && pdev->rom && pdev->romlen)
+ end = roundup_pow_of_two(pdev->romlen);
else
return -EINVAL;
@@ -261,11 +253,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
* excluded range at the end of the actual ROM. This makes
* filling large ROM BARs much faster.
*/
- io = pci_map_rom(pdev, &x_start);
- if (!io) {
- done = -ENOMEM;
- goto out;
+ if (pci_resource_start(pdev, bar)) {
+ io = pci_map_rom(pdev, &x_start);
+ } else {
+ io = ioremap(pdev->rom, pdev->romlen);
+ x_start = pdev->romlen;
}
+ if (!io)
+ return -ENOMEM;
x_end = end;
} else {
int ret = vfio_pci_core_setup_barmap(vdev, bar);
@@ -288,8 +283,13 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
if (done >= 0)
*ppos += done;
- if (bar == PCI_ROM_RESOURCE)
- pci_unmap_rom(pdev, io);
+ if (bar == PCI_ROM_RESOURCE) {
+ if (pci_resource_start(pdev, bar))
+ pci_unmap_rom(pdev, io);
+ else
+ iounmap(io);
+ }
+
out:
return done;
}
@@ -381,12 +381,10 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
vfio_pci_core_iowrite32(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
-#ifdef iowrite64
case 8:
vfio_pci_core_iowrite64(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
-#endif
}
}
@@ -440,10 +438,8 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
pos >= vdev->msix_offset + vdev->msix_size))
return -EINVAL;
-#ifndef iowrite64
if (count == 8)
return -EINVAL;
-#endif
ret = vfio_pci_core_setup_barmap(vdev, bar);
if (ret)
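The #ifdef removals above are safe because <linux/io-64-nonatomic-lo-hi.h> guarantees ioread64()/iowrite64() exist on every platform, falling back to split 32-bit accesses where native 64-bit MMIO is missing. Roughly, the write-side fallback behaves like this sketch:

#include <linux/io.h>

/* Low word first, then high word, mirroring lo_hi_writeq(). The pair
 * of accesses is not atomic with respect to the device. */
static inline void iowrite64_lo_hi_sketch(u64 val, void __iomem *addr)
{
	iowrite32(val, addr);
	iowrite32(val >> 32, addr + 4);
}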
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ee54f4c17857..ba92bb4e9af9 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
return -ENOMEM;
do {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+ page_list);
if (!filled) {
ret = -ENOMEM;
goto err;
@@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf)
{
struct sg_page_iter sg_iter;
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e53757d1d095..3bf1043cd795 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -388,6 +388,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
{
unsigned int done = 0;
+ if (off >= reg->size)
+ return -EINVAL;
+
+ count = min_t(size_t, count, reg->size - off);
+
if (!reg->ioaddr) {
reg->ioaddr =
ioremap(reg->addr, reg->size);
@@ -467,6 +472,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
{
unsigned int done = 0;
+ if (off >= reg->size)
+ return -EINVAL;
+
+ count = min_t(size_t, count, reg->size - off);
+
if (!reg->ioaddr) {
reg->ioaddr =
ioremap(reg->addr, reg->size);
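Both MMIO paths above gain the same guard; a reusable sketch of the check-then-clamp pattern:

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Reject offsets past the region, then clamp the count so the access
 * can never run off the end of the mapping. Returns the usable count
 * or a negative errno. */
static ssize_t clamp_region_access(loff_t off, size_t count,
				   resource_size_t size)
{
	if (off >= size)
		return -EINVAL;

	return min_t(size_t, count, size - off);
}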
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9ad37c012189..b9b9e9d40951 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1107,6 +1107,7 @@ static void handle_rx(struct vhost_net *net)
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
bool busyloop_intr = false;
+ bool set_num_buffers;
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
@@ -1129,6 +1130,8 @@ static void handle_rx(struct vhost_net *net)
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
+ set_num_buffers = mergeable ||
+ vhost_has_feature(vq, VIRTIO_F_VERSION_1);
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
@@ -1205,7 +1208,7 @@ static void handle_rx(struct vhost_net *net)
/* TODO: Should check and handle checksum. */
num_buffers = cpu_to_vhost16(vq, headcount);
- if (likely(mergeable) &&
+ if (likely(set_num_buffers) &&
copy_to_iter(&num_buffers, sizeof num_buffers,
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ac25d08f473..63612faeab72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
worker, name);
- if (!vtsk)
+ if (IS_ERR(vtsk))
goto free_worker;
mutex_init(&worker->mutex);
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 36bfb6deb8ab..d866608da8d1 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2199,7 +2199,7 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2211,7 +2211,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2227,7 +2227,7 @@ static const struct bin_attribute edid1_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid1,
+ .read_new = radeon_show_edid1,
};
static const struct bin_attribute edid2_attr = {
@@ -2236,7 +2236,7 @@ static const struct bin_attribute edid2_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid2,
+ .read_new = radeon_show_edid2,
};
static int radeonfb_pci_register(struct pci_dev *pdev,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 20517448487e..0e1bd3dba255 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -275,7 +275,7 @@ static const struct fb_ops efifb_ops = {
.fb_setcolreg = efifb_setcolreg,
};
-static int efifb_setup(struct screen_info *si, char *options)
+static void efifb_setup(struct screen_info *si, char *options)
{
char *this_opt;
@@ -299,8 +299,6 @@ static int efifb_setup(struct screen_info *si, char *options)
use_bgrt = false;
}
}
-
- return 0;
}
static inline bool fb_base_is_valid(struct screen_info *si)
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 7fdb5edd7e2e..75338ffc703f 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -282,6 +282,8 @@ static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
+static void hvfb_putmem(struct fb_info *info);
+
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
struct synthvid_msg *msg)
@@ -863,6 +865,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width,
}
/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup related
+ * to the framebuffer here.
+ */
+static void hvfb_destroy(struct fb_info *info)
+{
+ hvfb_putmem(info);
+ framebuffer_release(info);
+}
+
+/*
* TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
* driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
*/
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = {
.fb_set_par = hvfb_set_par,
.fb_setcolreg = hvfb_setcolreg,
.fb_blank = hvfb_blank,
+ .fb_destroy = hvfb_destroy,
};
/* Get options from kernel parameter "video=" */
@@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
}
/* Release contiguous physical memory */
-static void hvfb_release_phymem(struct hv_device *hdev,
+static void hvfb_release_phymem(struct device *device,
phys_addr_t paddr, unsigned int size)
{
unsigned int order = get_order(size);
@@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
- dma_free_coherent(&hdev->device,
+ dma_free_coherent(device,
round_up(size, PAGE_SIZE),
phys_to_virt(paddr),
paddr);
@@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
+ aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
/*
* For Gen 1 VM, we can directly use the contiguous memory
@@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+ aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
}
/*
- * Cannot use the contiguous physical memory.
- * Allocate mmio space for framebuffer.
+ * Cannot use contiguous physical memory, so allocate MMIO space for
+ * the framebuffer. At this point in the function, conflicting devices
+ * that might have claimed the framebuffer MMIO space based on
+ * screen_info.lfb_base must have already been removed so that
+ * vmbus_allocate_mmio() does not allocate different MMIO space. If the
+ * kdump image were to be loaded using kexec_file_load(), the
+ * framebuffer location in the kdump image would be set from
+ * screen_info.lfb_base at the time that kdump is enabled. If the
+ * framebuffer has moved elsewhere, this could be the wrong location,
+ * causing kdump to hang when efifb (for example) loads.
*/
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
@@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- if (base && size)
- aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
- else
- aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
-
if (!gen2vm)
pci_dev_put(pdev);
@@ -1074,16 +1094,16 @@ err1:
}
/* Release the framebuffer */
-static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
+static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
if (par->need_docopy) {
vfree(par->dio_vp);
- iounmap(info->screen_base);
+ iounmap(par->mmio_vp);
vmbus_free_mmio(par->mem->start, screen_fb_size);
} else {
- hvfb_release_phymem(hdev, info->fix.smem_start,
+ hvfb_release_phymem(info->device, info->fix.smem_start,
screen_fb_size);
}
@@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev,
if (ret)
goto error;
- ret = register_framebuffer(info);
+ ret = devm_register_framebuffer(&hdev->device, info);
if (ret) {
pr_err("Unable to register framebuffer\n");
goto error;
@@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev,
error:
fb_deferred_io_cleanup(info);
- hvfb_putmem(hdev, info);
+ hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
error1:
@@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev)
fb_deferred_io_cleanup(info);
- unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
-
- hvfb_putmem(hdev, info);
- framebuffer_release(info);
}
static int hvfb_suspend(struct hv_device *hdev)
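The remove-path deletions above follow from the fb_destroy hookup: once the last put_fb_info() (reached via the devm-managed unregister or fb_release()) invokes fb_ops.fb_destroy, that callback owns the teardown, so remove() must not free the framebuffer or the fb_info a second time. A sketch of the resulting split, with release_fb_memory() as a hypothetical stand-in:

#include <linux/fb.h>

/* Hypothetical stand-in for the driver-specific teardown
 * (hvfb_putmem() above). */
static void release_fb_memory(struct fb_info *info)
{
}

static void example_fb_destroy(struct fb_info *info)
{
	release_fb_memory(info);	/* was previously done in remove() */
	framebuffer_release(info);	/* the last reference is gone here */
}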
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
index f85817635a8c..0da23c57e475 100644
--- a/drivers/video/fbdev/omap/lcd_dma.c
+++ b/drivers/video/fbdev/omap/lcd_dma.c
@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
spin_lock_init(&lcd_dma.lock);
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
- "LCD DMA", NULL);
+ r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
+ IRQF_ONESHOT, "LCD DMA", NULL);
if (r != 0)
pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index c3329c8b4c16..ccb96a5be07e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -3933,18 +3933,13 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- if (np && of_property_read_bool(np, "syscon-pol")) {
- dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (np && of_property_present(np, "syscon-pol")) {
+ dispc.syscon_pol = syscon_regmap_lookup_by_phandle_args(np, "syscon-pol",
+ 1, &dispc.syscon_pol_offset);
if (IS_ERR(dispc.syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
return PTR_ERR(dispc.syscon_pol);
}
-
- if (of_property_read_u32_index(np, "syscon-pol", 1,
- &dispc.syscon_pol_offset)) {
- dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
- return -EINVAL;
- }
}
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index c04cbe0ef173..7c636db79882 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -36,6 +36,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
np = of_get_next_parent(np);
}
+ of_node_put(np);
return NULL;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index b33f62c5cb22..bb7fe54dd019 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -567,23 +567,6 @@ static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
-{
- void __iomem *base = core->base;
-
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
-
- return 0;
-}
-
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
index 192c9b6e2f7b..493857374a15 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
@@ -283,7 +283,6 @@ struct csc_table {
int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 935cd8413ed5..4715dcb59811 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2123,11 +2123,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
{
struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
- int brightness = bdev->props.brightness;
-
- if (bdev->props.power != BACKLIGHT_POWER_ON ||
- bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(bdev);
ch->bl_brightness = brightness;
return ch->cfg->bl_info.set_brightness(brightness);
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 86ecbb2d86db..7734377b2d87 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -27,6 +27,7 @@
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
@@ -1712,8 +1713,8 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
BUG();
}
- dev_info(info->dev, "fb %s %sabled at start\n",
- fbname, enable ? "en" : "dis");
+ dev_info(info->dev, "fb %s %s at start\n",
+ fbname, str_enabled_disabled(enable));
/* check to see if our routing allows this */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 71ac9e36f67c..acadf0eb450c 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1416,7 +1416,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
static ssize_t edid_show(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1438,7 +1438,7 @@ static ssize_t edid_show(
static ssize_t edid_store(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *src, loff_t src_off, size_t src_size) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1482,8 +1482,8 @@ static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0666,
.size = EDID_LENGTH,
- .read = edid_show,
- .write = edid_store
+ .read_new = edid_show,
+ .write_new = edid_store
};
static const struct device_attribute fb_device_attrs[] = {
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index fce0f5db7ba3..eedab14c7d51 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -185,9 +185,10 @@ static inline void setindex(int index)
/* Check if the video mode is supported by the driver */
static inline int check_mode_supported(const struct screen_info *si)
{
+ unsigned int type = screen_info_video_type(si);
+
/* only EGA and VGA in 16 color graphic mode are supported */
- if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
- si->orig_video_isVGA != VIDEO_TYPE_VGAC)
+ if (type != VIDEO_TYPE_EGAC && type != VIDEO_TYPE_VGAC)
return -ENODEV;
if (si->orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
@@ -1338,7 +1339,7 @@ static int vga16fb_probe(struct platform_device *dev)
printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
par = info->par;
- par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
+ par->isVGA = screen_info_video_type(si) == VIDEO_TYPE_VGAC;
par->palette_blanked = 0;
par->vesa_blanked = 0;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index ba301f3f4951..45b42f14a750 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -895,34 +895,6 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
}
/**
- * hdmi_infoframe_check() - check a HDMI infoframe
- * @frame: HDMI infoframe
- *
- * Validates that the infoframe is consistent and updates derived fields
- * (eg. length) based on other fields.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int
-hdmi_infoframe_check(union hdmi_infoframe *frame)
-{
- switch (frame->any.type) {
- case HDMI_INFOFRAME_TYPE_AVI:
- return hdmi_avi_infoframe_check(&frame->avi);
- case HDMI_INFOFRAME_TYPE_SPD:
- return hdmi_spd_infoframe_check(&frame->spd);
- case HDMI_INFOFRAME_TYPE_AUDIO:
- return hdmi_audio_infoframe_check(&frame->audio);
- case HDMI_INFOFRAME_TYPE_VENDOR:
- return hdmi_vendor_any_infoframe_check(&frame->vendor);
- default:
- WARN(1, "Bad infoframe type %d\n", frame->any.type);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(hdmi_infoframe_check);
-
-/**
* hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
index c24036c4e51e..e4e196abdaac 100644
--- a/drivers/virt/acrn/hsm.c
+++ b/drivers/virt/acrn/hsm.c
@@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
switch (cmd & PMCMD_TYPE_MASK) {
case ACRN_PMCMD_GET_PX_CNT:
case ACRN_PMCMD_GET_CX_CNT:
- pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ pm_info = kzalloc(sizeof(u64), GFP_KERNEL);
if (!pm_info)
return -ENOMEM;
@@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(pm_info);
break;
case ACRN_PMCMD_GET_PX_DATA:
- px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ px_data = kzalloc(sizeof(*px_data), GFP_KERNEL);
if (!px_data)
return -ENOMEM;
@@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(px_data);
break;
case ACRN_PMCMD_GET_CX_DATA:
- cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL);
if (!cx_data)
return -ENOMEM;
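The kmalloc()-to-kzalloc() switches above close a potential information leak: each buffer is later copied to userspace, and a partially filled allocation would otherwise expose stale heap bytes. A sketch of the pattern:

#include <linux/slab.h>
#include <linux/uaccess.h>

static int copy_result_to_user(void __user *uptr, size_t len)
{
	void *buf = kzalloc(len, GFP_KERNEL);	/* zeroed, unlike kmalloc() */
	int ret = 0;

	if (!buf)
		return -ENOMEM;

	/* ... buffer is (possibly only partially) filled here ... */

	if (copy_to_user(uptr, buf, len))
		ret = -EFAULT;

	kfree(buf);
	return ret;
}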
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
index 488153879ec9..87f162736b2e 100644
--- a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -6,6 +6,7 @@
#include <linux/arm-smccc.h>
#include <linux/cc_platform.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/tsm.h>
@@ -219,6 +220,13 @@ static void __exit arm_cca_guest_exit(void)
}
module_exit(arm_cca_guest_exit);
+/* modalias, so userspace can autoload this module when RSI is available */
+static const struct platform_device_id arm_cca_match[] __maybe_unused = {
+ { RSI_PDEV_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, arm_cca_match);
MODULE_AUTHOR("Sami Mujawar <sami.mujawar@arm.com>");
MODULE_DESCRIPTION("Arm CCA Guest TSM Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
index 0b772bd921d8..a6405ab6c2c3 100644
--- a/drivers/virt/coco/sev-guest/Kconfig
+++ b/drivers/virt/coco/sev-guest/Kconfig
@@ -2,7 +2,6 @@ config SEV_GUEST
tristate "AMD SEV Guest driver"
default m
depends on AMD_MEM_ENCRYPT
- select CRYPTO_LIB_AESGCM
select TSM_REPORTS
help
SEV-SNP firmware provides the guest a mechanism to communicate with
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index b699771be029..cf3fb61f4d5b 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -23,6 +23,7 @@
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
+#include <linux/mm.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
@@ -31,9 +32,6 @@
#define DEVICE_NAME "sev-guest"
-#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
-#define SNP_REQ_RETRY_DELAY (2*HZ)
-
#define SVSM_MAX_RETRIES 3
struct snp_guest_dev {
@@ -41,12 +39,6 @@ struct snp_guest_dev {
struct miscdevice misc;
struct snp_msg_desc *msg_desc;
-
- union {
- struct snp_report_req report;
- struct snp_derived_key_req derived_key;
- struct snp_ext_report_req ext_report;
- } req;
};
/*
@@ -60,86 +52,6 @@ static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
-/* Mutex to serialize the shared buffer access and command handling. */
-static DEFINE_MUTEX(snp_cmd_mutex);
-
-static bool is_vmpck_empty(struct snp_msg_desc *mdesc)
-{
- char zero_key[VMPCK_KEY_LEN] = {0};
-
- if (mdesc->vmpck)
- return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN);
-
- return true;
-}
-
-/*
- * If an error is received from the host or AMD Secure Processor (ASP) there
- * are two options. Either retry the exact same encrypted request or discontinue
- * using the VMPCK.
- *
- * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
- * encrypt the requests. The IV for this scheme is the sequence number. GCM
- * cannot tolerate IV reuse.
- *
- * The ASP FW v1.51 only increments the sequence numbers on a successful
- * guest<->ASP back and forth and only accepts messages at its exact sequence
- * number.
- *
- * So if the sequence number were to be reused the encryption scheme is
- * vulnerable. If the sequence number were incremented for a fresh IV the ASP
- * will reject the request.
- */
-static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
-{
- pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
- vmpck_id);
- memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
- mdesc->vmpck = NULL;
-}
-
-static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
-{
- u64 count;
-
- lockdep_assert_held(&snp_cmd_mutex);
-
- /* Read the current message sequence counter from secrets pages */
- count = *mdesc->os_area_msg_seqno;
-
- return count + 1;
-}
-
-/* Return a non-zero on success */
-static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
-{
- u64 count = __snp_get_msg_seqno(mdesc);
-
- /*
- * The message sequence counter for the SNP guest request is a 64-bit
- * value but the version 2 of GHCB specification defines a 32-bit storage
- * for it. If the counter exceeds the 32-bit value then return zero.
- * The caller should check the return value, but if the caller happens to
- * not check the value and use it, then the firmware treats zero as an
- * invalid number and will fail the message request.
- */
- if (count >= UINT_MAX) {
- pr_err("request message sequence counter overflow\n");
- return 0;
- }
-
- return count;
-}
-
-static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
-{
- /*
- * The counter is also incremented by the PSP, so increment it by 2
- * and save in secrets page.
- */
- *mdesc->os_area_msg_seqno += 2;
-}
-
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
struct miscdevice *dev = file->private_data;
@@ -147,242 +59,6 @@ static inline struct snp_guest_dev *to_snp_dev(struct file *file)
return container_of(dev, struct snp_guest_dev, misc);
}
-static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
-{
- struct aesgcm_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
- if (!ctx)
- return NULL;
-
- if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
- pr_err("Crypto context initialization failed\n");
- kfree(ctx);
- return NULL;
- }
-
- return ctx;
-}
-
-static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
-{
- struct snp_guest_msg *resp_msg = &mdesc->secret_response;
- struct snp_guest_msg *req_msg = &mdesc->secret_request;
- struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
- struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
- struct aesgcm_ctx *ctx = mdesc->ctx;
- u8 iv[GCM_AES_IV_SIZE] = {};
-
- pr_debug("response [seqno %lld type %d version %d sz %d]\n",
- resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
- resp_msg_hdr->msg_sz);
-
- /* Copy response from shared memory to encrypted memory. */
- memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));
-
- /* Verify that the sequence counter is incremented by 1 */
- if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
- return -EBADMSG;
-
- /* Verify response message type and version number. */
- if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
- resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
- return -EBADMSG;
-
- /*
- * If the message size is greater than our buffer length then return
- * an error.
- */
- if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
- return -EBADMSG;
-
- /* Decrypt the payload */
- memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
- if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
- &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
- return -EBADMSG;
-
- return 0;
-}
-
-static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
-{
- struct snp_guest_msg *msg = &mdesc->secret_request;
- struct snp_guest_msg_hdr *hdr = &msg->hdr;
- struct aesgcm_ctx *ctx = mdesc->ctx;
- u8 iv[GCM_AES_IV_SIZE] = {};
-
- memset(msg, 0, sizeof(*msg));
-
- hdr->algo = SNP_AEAD_AES_256_GCM;
- hdr->hdr_version = MSG_HDR_VER;
- hdr->hdr_sz = sizeof(*hdr);
- hdr->msg_type = req->msg_type;
- hdr->msg_version = req->msg_version;
- hdr->msg_seqno = seqno;
- hdr->msg_vmpck = req->vmpck_id;
- hdr->msg_sz = req->req_sz;
-
- /* Verify the sequence number is non-zero */
- if (!hdr->msg_seqno)
- return -ENOSR;
-
- pr_debug("request [seqno %lld type %d version %d sz %d]\n",
- hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
-
- if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
- return -EBADMSG;
-
- memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
- aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
- AAD_LEN, iv, hdr->authtag);
-
- return 0;
-}
-
-static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
- struct snp_guest_request_ioctl *rio)
-{
- unsigned long req_start = jiffies;
- unsigned int override_npages = 0;
- u64 override_err = 0;
- int rc;
-
-retry_request:
- /*
- * Call firmware to process the request. In this function the encrypted
- * message enters shared memory with the host. So after this call the
- * sequence number must be incremented or the VMPCK must be deleted to
- * prevent reuse of the IV.
- */
- rc = snp_issue_guest_request(req, &mdesc->input, rio);
- switch (rc) {
- case -ENOSPC:
- /*
- * If the extended guest request fails due to having too
- * small of a certificate data buffer, retry the same
- * guest request without the extended data request in
- * order to increment the sequence number and thus avoid
- * IV reuse.
- */
- override_npages = mdesc->input.data_npages;
- req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
-
- /*
- * Override the error to inform callers the given extended
- * request buffer size was too small and give the caller the
- * required buffer size.
- */
- override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
-
- /*
- * If this call to the firmware succeeds, the sequence number can
- * be incremented allowing for continued use of the VMPCK. If
- * there is an error reflected in the return value, this value
- * is checked further down and the result will be the deletion
- * of the VMPCK and the error code being propagated back to the
- * user as an ioctl() return code.
- */
- goto retry_request;
-
- /*
- * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
- * throttled. Retry in the driver to avoid returning and reusing the
- * message sequence number on a different message.
- */
- case -EAGAIN:
- if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
- rc = -ETIMEDOUT;
- break;
- }
- schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
- goto retry_request;
- }
-
- /*
- * Increment the message sequence number. There is no harm in doing
- * this now because decryption uses the value stored in the response
- * structure and any failure will wipe the VMPCK, preventing further
- * use anyway.
- */
- snp_inc_msg_seqno(mdesc);
-
- if (override_err) {
- rio->exitinfo2 = override_err;
-
- /*
- * If an extended guest request was issued and the supplied certificate
- * buffer was not large enough, a standard guest request was issued to
- * prevent IV reuse. If the standard request was successful, return -EIO
- * back to the caller as would have originally been returned.
- */
- if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
- rc = -EIO;
- }
-
- if (override_npages)
- mdesc->input.data_npages = override_npages;
-
- return rc;
-}
-
-static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
- struct snp_guest_request_ioctl *rio)
-{
- u64 seqno;
- int rc;
-
- guard(mutex)(&snp_cmd_mutex);
-
- /* Check if the VMPCK is not empty */
- if (is_vmpck_empty(mdesc)) {
- pr_err_ratelimited("VMPCK is disabled\n");
- return -ENOTTY;
- }
-
- /* Get message sequence and verify that its a non-zero */
- seqno = snp_get_msg_seqno(mdesc);
- if (!seqno)
- return -EIO;
-
- /* Clear shared memory's response for the host to populate. */
- memset(mdesc->response, 0, sizeof(struct snp_guest_msg));
-
- /* Encrypt the userspace provided payload in mdesc->secret_request. */
- rc = enc_payload(mdesc, seqno, req);
- if (rc)
- return rc;
-
- /*
- * Write the fully encrypted request to the shared unencrypted
- * request page.
- */
- memcpy(mdesc->request, &mdesc->secret_request,
- sizeof(mdesc->secret_request));
-
- rc = __handle_guest_request(mdesc, req, rio);
- if (rc) {
- if (rc == -EIO &&
- rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
- return rc;
-
- pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
- rc, rio->exitinfo2);
-
- snp_disable_vmpck(mdesc);
- return rc;
- }
-
- rc = verify_and_dec_payload(mdesc, req);
- if (rc) {
- pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
- snp_disable_vmpck(mdesc);
- return rc;
- }
-
- return 0;
-}
-
struct snp_req_resp {
sockptr_t req_data;
sockptr_t resp_data;
@@ -390,7 +66,7 @@ struct snp_req_resp {
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_report_req *report_req = &snp_dev->req.report;
+ struct snp_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
@@ -399,6 +75,10 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -414,7 +94,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = report_req;
req.req_sz = sizeof(*report_req);
req.resp_buf = report_resp->data;
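
The report_req conversion above leans on scope-based cleanup from <linux/cleanup.h>: a local annotated with __free(kfree) is handed to kfree() automatically when it goes out of scope, so every early return is covered without a goto label. A minimal sketch of the idiom (the function and its parameters are illustrative):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static int demo_copy_in(void __user *src, size_t len)
	{
		/* Must be initialized; freed automatically on every return. */
		void *buf __free(kfree) = NULL;

		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_user(buf, src, len))
			return -EFAULT;	/* no explicit kfree() needed */

		/* ... consume buf ... */
		return 0;
	}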
@@ -435,7 +115,7 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
+ struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
@@ -455,13 +135,17 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
if (sizeof(buf) < resp_len)
return -ENOMEM;
+ derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
+ if (!derived_key_req)
+ return -ENOMEM;
+
if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
sizeof(*derived_key_req)))
return -EFAULT;
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_KEY_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = derived_key_req;
req.req_sz = sizeof(*derived_key_req);
req.resp_buf = buf;
@@ -487,16 +171,21 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
struct snp_req_resp *io)
{
- struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
+ struct snp_ext_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
int ret, npages = 0, resp_len;
sockptr_t certs_address;
+ struct page *page;
if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -522,8 +211,20 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
* the host. If the host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(mdesc->certs_data, 0, report_req->certs_len);
npages = report_req->certs_len >> PAGE_SHIFT;
+ page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ get_order(report_req->certs_len));
+ if (!page)
+ return -ENOMEM;
+
+ req.certs_data = page_address(page);
+ ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
+ if (ret) {
+ pr_err("failed to mark page shared, ret=%d\n", ret);
+ __free_pages(page, get_order(report_req->certs_len));
+ return -EFAULT;
+ }
+
cmd:
/*
* The intermediate response buffer is used while decrypting the
@@ -532,14 +233,16 @@ cmd:
*/
resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!report_resp)
- return -ENOMEM;
+ if (!report_resp) {
+ ret = -ENOMEM;
+ goto e_free_data;
+ }
- mdesc->input.data_npages = npages;
+ req.input.data_npages = npages;
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
- req.vmpck_id = vmpck_id;
+ req.vmpck_id = mdesc->vmpck_id;
req.req_buf = &report_req->data;
req.req_sz = sizeof(report_req->data);
req.resp_buf = report_resp->data;
@@ -550,7 +253,7 @@ cmd:
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;
+ report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
ret = -EFAULT;
@@ -559,7 +262,7 @@ cmd:
if (ret)
goto e_free;
- if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
@@ -569,6 +272,13 @@ cmd:
e_free:
kfree(report_resp);
+e_free_data:
+ if (npages) {
+ if (set_memory_encrypted((unsigned long)req.certs_data, npages))
+			WARN_ONCE(1, "failed to restore encryption mask (leak it)\n");
+ else
+ __free_pages(page, get_order(report_req->certs_len));
+ }
return ret;
}
@@ -616,76 +326,11 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
return ret;
}
-static void free_shared_pages(void *buf, size_t sz)
-{
- unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
- int ret;
-
- if (!buf)
- return;
-
- ret = set_memory_encrypted((unsigned long)buf, npages);
- if (ret) {
- WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
- return;
- }
-
- __free_pages(virt_to_page(buf), get_order(sz));
-}
-
-static void *alloc_shared_pages(struct device *dev, size_t sz)
-{
- unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
- struct page *page;
- int ret;
-
- page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
- if (!page)
- return NULL;
-
- ret = set_memory_decrypted((unsigned long)page_address(page), npages);
- if (ret) {
- dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
- __free_pages(page, get_order(sz));
- return NULL;
- }
-
- return page_address(page);
-}
-
static const struct file_operations snp_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = snp_guest_ioctl,
};
-static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
-{
- u8 *key = NULL;
-
- switch (id) {
- case 0:
- *seqno = &secrets->os_area.msg_seqno_0;
- key = secrets->vmpck0;
- break;
- case 1:
- *seqno = &secrets->os_area.msg_seqno_1;
- key = secrets->vmpck1;
- break;
- case 2:
- *seqno = &secrets->os_area.msg_seqno_2;
- key = secrets->vmpck2;
- break;
- case 3:
- *seqno = &secrets->os_area.msg_seqno_3;
- key = secrets->vmpck3;
- break;
- default:
- break;
- }
-
- return key;
-}
-
struct snp_msg_report_resp_hdr {
u32 status;
u32 report_size;
@@ -979,13 +624,10 @@ static void unregister_sev_tsm(void *data)
static int __init sev_guest_probe(struct platform_device *pdev)
{
- struct sev_guest_platform_data *data;
- struct snp_secrets_page *secrets;
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct snp_msg_desc *mdesc;
struct miscdevice *misc;
- void __iomem *mapping;
int ret;
BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
@@ -993,115 +635,57 @@ static int __init sev_guest_probe(struct platform_device *pdev)
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;
- if (!dev->platform_data)
- return -ENODEV;
-
- data = (struct sev_guest_platform_data *)dev->platform_data;
- mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
- if (!mapping)
- return -ENODEV;
-
- secrets = (__force void *)mapping;
-
- ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
- goto e_unmap;
-
- mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL);
- if (!mdesc)
- goto e_unmap;
-
- /* Adjust the default VMPCK key based on the executing VMPL level */
- if (vmpck_id == -1)
- vmpck_id = snp_vmpl;
+ return -ENOMEM;
- ret = -EINVAL;
- mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno);
- if (!mdesc->vmpck) {
- dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
- goto e_unmap;
- }
+ mdesc = snp_msg_alloc();
+ if (IS_ERR_OR_NULL(mdesc))
+ return -ENOMEM;
- /* Verify that VMPCK is not zero. */
- if (is_vmpck_empty(mdesc)) {
- dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
- goto e_unmap;
- }
+ ret = snp_msg_init(mdesc, vmpck_id);
+ if (ret)
+ goto e_msg_init;
platform_set_drvdata(pdev, snp_dev);
snp_dev->dev = dev;
- mdesc->secrets = secrets;
-
- /* Allocate the shared page used for the request and response message. */
- mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
- if (!mdesc->request)
- goto e_unmap;
-
- mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
- if (!mdesc->response)
- goto e_free_request;
-
- mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
- if (!mdesc->certs_data)
- goto e_free_response;
-
- ret = -EIO;
- mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
- if (!mdesc->ctx)
- goto e_free_cert_data;
misc = &snp_dev->misc;
misc->minor = MISC_DYNAMIC_MINOR;
misc->name = DEVICE_NAME;
misc->fops = &snp_guest_fops;
- /* Initialize the input addresses for guest request */
- mdesc->input.req_gpa = __pa(mdesc->request);
- mdesc->input.resp_gpa = __pa(mdesc->response);
- mdesc->input.data_gpa = __pa(mdesc->certs_data);
-
/* Set the privlevel_floor attribute based on the vmpck_id */
- sev_tsm_ops.privlevel_floor = vmpck_id;
+ sev_tsm_ops.privlevel_floor = mdesc->vmpck_id;
ret = tsm_register(&sev_tsm_ops, snp_dev);
if (ret)
- goto e_free_cert_data;
+ goto e_msg_init;
ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
if (ret)
- goto e_free_cert_data;
+ goto e_msg_init;
ret = misc_register(misc);
if (ret)
- goto e_free_ctx;
+ goto e_msg_init;
snp_dev->msg_desc = mdesc;
- dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
+ dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n",
+ mdesc->vmpck_id);
return 0;
-e_free_ctx:
- kfree(mdesc->ctx);
-e_free_cert_data:
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
-e_free_response:
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
-e_free_request:
- free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
-e_unmap:
- iounmap(mapping);
+e_msg_init:
+ snp_msg_free(mdesc);
+
return ret;
}
static void __exit sev_guest_remove(struct platform_device *pdev)
{
struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
- struct snp_msg_desc *mdesc = snp_dev->msg_desc;
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
- free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
- kfree(mdesc->ctx);
+ snp_msg_free(snp_dev->msg_desc);
misc_deregister(&snp_dev->misc);
}
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index cc329887bfae..eaba28c95e73 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on X86 && PCI && INPUT
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI && INPUT
+ depends on HAS_IOPORT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9095751e43b..ba37665188b5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
+/*
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+ unsigned int irq_vec)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = {
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
+ .irq_get_affinity = virtio_irq_get_affinity,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
@@ -527,29 +546,7 @@ void unregister_virtio_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
-#ifdef CONFIG_PM_SLEEP
-int virtio_device_freeze(struct virtio_device *dev)
-{
- struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- int ret;
-
- virtio_config_core_disable(dev);
-
- dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
-
- if (drv && drv->freeze) {
- ret = drv->freeze(dev);
- if (ret) {
- virtio_config_core_enable(dev);
- return ret;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(virtio_device_freeze);
-
-int virtio_device_restore(struct virtio_device *dev)
+static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
@@ -580,8 +577,14 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (drv->restore) {
- ret = drv->restore(dev);
+ if (restore) {
+ if (drv->restore) {
+ ret = drv->restore(dev);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = drv->reset_done(dev);
if (ret)
goto err;
}
@@ -598,9 +601,69 @@ err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
}
+
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_freeze);
+
+int virtio_device_restore(struct virtio_device *dev)
+{
+ return virtio_device_restore_priv(dev, true);
+}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
+int virtio_device_reset_prepare(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ if (!drv || !drv->reset_prepare)
+ return -EOPNOTSUPP;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ ret = drv->reset_prepare(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);
+
+int virtio_device_reset_done(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ if (!drv || !drv->reset_done)
+ return -EOPNOTSUPP;
+
+ return virtio_device_restore_priv(dev, false);
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_done);
+
static int virtio_init(void)
{
if (bus_register(&virtio_bus) != 0)
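
On the driver side, the freeze/restore pair reshuffled above keeps its existing contract: freeze() runs after the core has disabled config-change notifications, and restore() runs after the core has re-initialized the device and its virtqueues. A hedged sketch of a driver wiring these up (demo_stop_io()/demo_start_io() are hypothetical helpers standing in for driver-specific logic):

	#include <linux/virtio.h>

	static int demo_freeze(struct virtio_device *vdev)
	{
		demo_stop_io(vdev->priv);	/* hypothetical: quiesce the device */
		return 0;
	}

	static int demo_restore(struct virtio_device *vdev)
	{
		/* Virtqueues are already re-created by the core restore path. */
		return demo_start_io(vdev->priv);	/* hypothetical: resume I/O */
	}

	static struct virtio_driver demo_driver = {
		/* ... id_table, feature_table, probe, remove ... */
		.freeze	 = demo_freeze,
		.restore = demo_restore,
	};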
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index b36d2803674e..89da052f4f68 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -251,7 +251,7 @@ static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
for (num_pfns = 0; num_pfns < num;
num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_alloc();
+ page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index b0b871441578..56d0dbe62163 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -133,6 +133,8 @@ struct virtio_mem {
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
+ /* Usable region size in bytes. */
+ uint64_t usable_region_size;
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
@@ -2368,7 +2370,7 @@ static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
- uint64_t new_plugged_size, usable_region_size, end_addr;
+ uint64_t new_plugged_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
@@ -2378,8 +2380,8 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
- end_addr = min(vm->addr + usable_region_size - 1,
+ usable_region_size, &vm->usable_region_size);
+ end_addr = min(vm->addr + vm->usable_region_size - 1,
pluggable_range.end);
if (vm->in_sbm) {
@@ -2648,6 +2650,7 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
if (rc)
goto out_unreg_pm;
+ virtio_device_ready(vm->vdev);
return 0;
out_unreg_pm:
unregister_pm_notifier(&vm->pm_notifier);
@@ -2725,13 +2728,103 @@ static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
mutex_unlock(&vm->hotplug_mutex);
return is_ram;
}
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+static int virtio_mem_vmcore_add_device_ram(struct virtio_mem *vm,
+ struct list_head *list, uint64_t start, uint64_t end)
+{
+ int rc;
+
+ rc = vmcore_alloc_add_range(list, start, end - start);
+ if (rc)
+ dev_err(&vm->vdev->dev,
+ "Error adding device RAM range: %d\n", rc);
+ return rc;
+}
+
+static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
+ struct list_head *list)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ const uint64_t device_start = vm->addr;
+ const uint64_t device_end = vm->addr + vm->usable_region_size;
+ uint64_t chunk_size, cur_start, cur_end, plugged_range_start = 0;
+ LIST_HEAD(tmp_list);
+ int rc;
+
+ if (!vm->plugged_size)
+ return 0;
+
+ /* Process memory sections, unless the device block size is bigger. */
+ chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
+ vm->device_block_size);
+
+ mutex_lock(&vm->hotplug_mutex);
+
+ /*
+ * We process larger chunks and indicate the complete chunk if any
+ * block in there is plugged. This reduces the number of pfn_is_ram()
+	 * callbacks and mimics what is effectively being done when the old
+ * kernel would add complete memory sections/blocks to the elfcore hdr.
+ */
+ cur_start = device_start;
+ for (cur_start = device_start; cur_start < device_end; cur_start = cur_end) {
+ cur_end = ALIGN_DOWN(cur_start + chunk_size, chunk_size);
+ cur_end = min_t(uint64_t, cur_end, device_end);
+
+ rc = virtio_mem_send_state_request(vm, cur_start,
+ cur_end - cur_start);
+
+ if (rc < 0) {
+ dev_err(&vm->vdev->dev,
+ "Error querying block states: %d\n", rc);
+ goto out;
+ } else if (rc != VIRTIO_MEM_STATE_UNPLUGGED) {
+ /* Merge ranges with plugged memory. */
+ if (!plugged_range_start)
+ plugged_range_start = cur_start;
+ continue;
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start) {
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+ if (rc)
+ goto out;
+ plugged_range_start = 0;
+ }
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start)
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+out:
+ mutex_unlock(&vm->hotplug_mutex);
+ if (rc < 0) {
+ vmcore_free_ranges(&tmp_list);
+ return rc;
+ }
+ list_splice_tail(&tmp_list, list);
+ return 0;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
#endif /* CONFIG_PROC_VMCORE */
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
+ /* We must be prepared to receive a callback immediately. */
+ virtio_device_ready(vm->vdev);
#ifdef CONFIG_PROC_VMCORE
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+ vm->vmcore_cb.get_device_ram = virtio_mem_vmcore_get_device_ram;
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
register_vmcore_cb(&vm->vmcore_cb);
return 0;
#else /* CONFIG_PROC_VMCORE */
@@ -2760,6 +2853,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, usable_region_size,
+ &vm->usable_region_size);
/* Determine the nid for the device based on the lowest address. */
if (vm->nid == NUMA_NO_NODE)
@@ -2855,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
@@ -2870,8 +2965,6 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
- virtio_device_ready(vdev);
-
/* trigger a config update to start processing the requested_size */
if (!vm->in_kdump) {
atomic_set(&vm->config_changed, 1);
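
The vmcore callback above is essentially a run-length pass: walk the device range in fixed-size chunks, extend the current run while chunks report plugged memory, and flush the run when a gap appears (plus one final flush). The same structure in isolation (names are illustrative; like the driver, this uses 0 to mean "no open run", which assumes the range does not start at physical address 0):

	#include <linux/minmax.h>
	#include <linux/types.h>

	static int report_runs(u64 start, u64 end, u64 chunk,
			       bool (*plugged)(u64 s, u64 e),
			       int (*emit)(u64 s, u64 e))
	{
		u64 cur, cur_end, run_start = 0;
		int rc;

		for (cur = start; cur < end; cur = cur_end) {
			cur_end = min(cur + chunk, end);
			if (plugged(cur, cur_end)) {
				if (!run_start)
					run_start = cur;	/* open a run */
				continue;
			}
			if (run_start) {
				rc = emit(run_start, cur);	/* flush on gap */
				if (rc)
					return rc;
				run_start = 0;
			}
		}
		return run_start ? emit(run_start, end) : 0;	/* final flush */
	}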
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 88074451dd61..d6d79af44569 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -794,6 +794,46 @@ static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
return num_vfs;
}
+static void virtio_pci_reset_prepare(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret = 0;
+
+ ret = virtio_device_reset_prepare(&vp_dev->vdev);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset prepare failure: %d",
+ ret);
+ return;
+ }
+
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void virtio_pci_reset_done(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ if (pci_is_enabled(pci_dev))
+ return;
+
+ ret = pci_enable_device(pci_dev);
+ if (!ret) {
+ pci_set_master(pci_dev);
+ ret = virtio_device_reset_done(&vp_dev->vdev);
+ }
+
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset done failure: %d", ret);
+}
+
+static const struct pci_error_handlers virtio_pci_err_handler = {
+ .reset_prepare = virtio_pci_reset_prepare,
+ .reset_done = virtio_pci_reset_done,
+};
+
static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
@@ -803,6 +843,7 @@ static struct pci_driver virtio_pci_driver = {
.driver.pm = &virtio_pci_pm_ops,
#endif
.sriov_configure = virtio_pci_sriov_configure,
+ .err_handler = &virtio_pci_err_handler,
};
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
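
With the err_handler wired up, a PCI-induced reset (an FLR, for instance) flows as virtio_pci_reset_prepare() -> virtio_device_reset_prepare() -> drv->reset_prepare(), and after the reset virtio_pci_reset_done() re-enables the PCI device and replays the restore path via drv->reset_done(). A hedged sketch of the callbacks a virtio driver would provide to opt in (the quiesce/resume helpers are hypothetical):

	static int demo_reset_prepare(struct virtio_device *vdev)
	{
		/* The device is about to be reset: stop submitting work. */
		demo_quiesce(vdev->priv);	/* hypothetical helper */
		return 0;
	}

	static int demo_reset_done(struct virtio_device *vdev)
	{
		/* The core has re-initialized device state and virtqueues. */
		return demo_resume(vdev->priv);	/* hypothetical helper */
	}

	static struct virtio_driver demo_driver = {
		/* ... */
		.reset_prepare = demo_reset_prepare,
		.reset_done    = demo_reset_done,
	};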
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index a2ecbb863c57..e2a568c9a43a 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -7,7 +7,7 @@
* It is a I2C to 1-wire bridge.
* There are two variations: -100 and -800, which have 1 or 8 1-wire ports.
* The complete datasheet can be obtained from MAXIM's website at:
- * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/4382
+ * https://www.analog.com/en/products/ds2482-100.html
*/
#include <linux/module.h>
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#include <linux/w1.h>
@@ -445,17 +446,20 @@ static int ds2482_probe(struct i2c_client *client)
int err = -ENODEV;
int temp1;
int idx;
+ int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
- data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct ds2482_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(&client->dev, "vcc");
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to enable regulator\n");
data->client = client;
i2c_set_clientdata(client, data);
@@ -463,7 +467,7 @@ static int ds2482_probe(struct i2c_client *client)
/* Reset the device (sets the read_ptr to status) */
if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) {
dev_warn(&client->dev, "DS2482 reset failed.\n");
- goto exit_free;
+ return err;
}
/* Sleep at least 525ns to allow the reset to complete */
@@ -474,7 +478,7 @@ static int ds2482_probe(struct i2c_client *client)
if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) {
dev_warn(&client->dev, "DS2482 reset status "
"0x%02X - not a DS2482\n", temp1);
- goto exit_free;
+ return err;
}
/* Detect the 8-port version */
@@ -516,9 +520,6 @@ exit_w1_remove:
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -532,9 +533,6 @@ static void ds2482_remove(struct i2c_client *client)
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
-
- /* Free the memory */
- kfree(data);
}
/*
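
The ds2482 conversion trades manual kzalloc()/kfree() for device-managed allocation, and picks up a managed regulator enable on the way, so the error paths and remove() need no cleanup code at all. A minimal probe skeleton using the same devres pattern (struct demo_data and the "vcc" supply name mirror the patch; the rest is illustrative):

	#include <linux/device.h>
	#include <linux/i2c.h>
	#include <linux/regulator/consumer.h>

	struct demo_data { struct i2c_client *client; };

	static int demo_probe(struct i2c_client *client)
	{
		struct demo_data *data;
		int ret;

		/* Freed automatically when the device unbinds. */
		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		/* Enabled now, disabled automatically on unbind. */
		ret = devm_regulator_get_enable(&client->dev, "vcc");
		if (ret)
			return dev_err_probe(&client->dev, ret,
					     "Failed to enable regulator\n");

		data->client = client;
		i2c_set_clientdata(client, data);
		return 0;
	}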
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 2f5926859b8b..1cae9b243ff8 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -24,7 +24,7 @@
static ssize_t w1_f12_read_state(
struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
@@ -61,7 +61,7 @@ static ssize_t w1_f12_read_state(
static ssize_t w1_f12_write_output(
struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -95,14 +95,14 @@ static ssize_t w1_f12_write_output(
}
#define NB_SYSFS_BIN_FILES 2
-static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+static const struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
{
.attr = {
.name = "state",
.mode = 0444,
},
.size = 1,
- .read = w1_f12_read_state,
+ .read_new = w1_f12_read_state,
},
{
.attr = {
@@ -110,7 +110,7 @@ static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
.mode = 0664,
},
.size = 1,
- .write = w1_f12_write_output,
+ .write_new = w1_f12_write_output,
}
};
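
The pattern above repeats across the w1 slaves that follow: the read_new/write_new callbacks take a const struct bin_attribute *, and groups publish the attributes through .bin_attrs_new, which lets the attribute objects themselves live in rodata. A condensed sketch of one such conversion (names are illustrative):

	#include <linux/sysfs.h>

	static ssize_t demo_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
	{
		/* ... fill buf from the device ... */
		return count;
	}

	static const struct bin_attribute demo_attr = {
		.attr = { .name = "demo", .mode = 0444 },
		.size = 1,
		.read_new = demo_read,		/* const-taking callback */
	};

	static const struct bin_attribute *const demo_bin_attrs[] = {
		&demo_attr,
		NULL,
	};

	static const struct attribute_group demo_group = {
		.bin_attrs_new = demo_bin_attrs,	/* const-friendly member */
	};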
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 56f822a1dfdb..beccd2912d2a 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -65,8 +65,8 @@ static int _read_reg(struct w1_slave *sl, u8 address, unsigned char *buf)
}
static ssize_t state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -77,7 +77,7 @@ static ssize_t state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t output_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
@@ -90,7 +90,7 @@ static ssize_t output_read(struct file *filp, struct kobject *kobj,
}
static ssize_t activity_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
@@ -103,8 +103,8 @@ static ssize_t activity_read(struct file *filp, struct kobject *kobj,
}
static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -117,7 +117,7 @@ static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
static ssize_t cond_search_polarity_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
@@ -127,8 +127,8 @@ static ssize_t cond_search_polarity_read(struct file *filp,
}
static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
@@ -160,7 +160,7 @@ static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
#endif
static ssize_t output_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -210,7 +210,7 @@ out:
* Writing to the activity file resets the activity latches.
*/
static ssize_t activity_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -240,8 +240,8 @@ error:
}
static ssize_t status_control_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[4];
@@ -310,14 +310,14 @@ out:
return res;
}
-static BIN_ATTR_RO(state, 1);
-static BIN_ATTR_RW(output, 1);
-static BIN_ATTR_RW(activity, 1);
-static BIN_ATTR_RO(cond_search_mask, 1);
-static BIN_ATTR_RO(cond_search_polarity, 1);
-static BIN_ATTR_RW(status_control, 1);
+static const BIN_ATTR_RO(state, 1);
+static const BIN_ATTR_RW(output, 1);
+static const BIN_ATTR_RW(activity, 1);
+static const BIN_ATTR_RO(cond_search_mask, 1);
+static const BIN_ATTR_RO(cond_search_polarity, 1);
+static const BIN_ATTR_RW(status_control, 1);
-static struct bin_attribute *w1_f29_bin_attrs[] = {
+static const struct bin_attribute *const w1_f29_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
&bin_attr_activity,
@@ -328,7 +328,7 @@ static struct bin_attribute *w1_f29_bin_attrs[] = {
};
static const struct attribute_group w1_f29_group = {
- .bin_attrs = w1_f29_bin_attrs,
+ .bin_attrs_new = w1_f29_bin_attrs,
};
static const struct attribute_group *w1_f29_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index 739009806467..5fa46017ca7c 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -25,8 +25,8 @@
#define W1_F3A_INVALID_PIO_STATE 0xFF
static ssize_t state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F3A_RETRIES;
@@ -78,10 +78,10 @@ out:
return bytes_read;
}
-static BIN_ATTR_RO(state, 1);
+static const BIN_ATTR_RO(state, 1);
static ssize_t output_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -128,16 +128,16 @@ out:
return bytes_written;
}
-static BIN_ATTR(output, 0664, NULL, output_write, 1);
+static const BIN_ATTR(output, 0664, NULL, output_write, 1);
-static struct bin_attribute *w1_f3a_bin_attrs[] = {
+static const struct bin_attribute *const w1_f3a_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
NULL,
};
static const struct attribute_group w1_f3a_group = {
- .bin_attrs = w1_f3a_bin_attrs,
+ .bin_attrs_new = w1_f3a_bin_attrs,
};
static const struct attribute_group *w1_f3a_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
index 0ea7d779d17a..ff56e2e68e58 100644
--- a/drivers/w1/slaves/w1_ds2430.c
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -95,7 +95,7 @@ static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -202,7 +202,7 @@ retry:
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -263,15 +263,15 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
-static struct bin_attribute *w1_f14_bin_attrs[] = {
+static const struct bin_attribute *const w1_f14_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f14_group = {
- .bin_attrs = w1_f14_bin_attrs,
+ .bin_attrs_new = w1_f14_bin_attrs,
};
static const struct attribute_group *w1_f14_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 6856b1c29e17..27b390fb59da 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -95,7 +95,7 @@ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -201,7 +201,7 @@ retry:
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -262,15 +262,15 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
-static struct bin_attribute *w1_f2d_bin_attrs[] = {
+static const struct bin_attribute *const w1_f2d_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f2d_group = {
- .bin_attrs = w1_f2d_bin_attrs,
+ .bin_attrs_new = w1_f2d_bin_attrs,
};
static const struct attribute_group *w1_f2d_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 250b7f7ec429..22331d840ec1 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -110,7 +110,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -224,7 +224,7 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -274,27 +274,27 @@ out_up:
return count;
}
-static struct bin_attribute bin_attr_f23_eeprom = {
+static const struct bin_attribute bin_attr_f23_eeprom = {
.attr = { .name = "eeprom", .mode = 0644 },
- .read = eeprom_read,
- .write = eeprom_write,
+ .read_new = eeprom_read,
+ .write_new = eeprom_write,
.size = W1_EEPROM_DS2433_SIZE,
};
-static struct bin_attribute bin_attr_f43_eeprom = {
+static const struct bin_attribute bin_attr_f43_eeprom = {
.attr = { .name = "eeprom", .mode = 0644 },
- .read = eeprom_read,
- .write = eeprom_write,
+ .read_new = eeprom_read,
+ .write_new = eeprom_write,
.size = W1_EEPROM_DS28EC20_SIZE,
};
-static struct bin_attribute *w1_f23_bin_attributes[] = {
+static const struct bin_attribute *const w1_f23_bin_attributes[] = {
&bin_attr_f23_eeprom,
NULL,
};
static const struct attribute_group w1_f23_group = {
- .bin_attrs = w1_f23_bin_attributes,
+ .bin_attrs_new = w1_f23_bin_attributes,
};
static const struct attribute_group *w1_f23_groups[] = {
@@ -302,13 +302,13 @@ static const struct attribute_group *w1_f23_groups[] = {
NULL,
};
-static struct bin_attribute *w1_f43_bin_attributes[] = {
+static const struct bin_attribute *const w1_f43_bin_attributes[] = {
&bin_attr_f43_eeprom,
NULL,
};
static const struct attribute_group w1_f43_group = {
- .bin_attrs = w1_f43_bin_attributes,
+ .bin_attrs_new = w1_f43_bin_attributes,
};
static const struct attribute_group *w1_f43_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index e008c27b3db9..630a6db5045e 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -288,7 +288,7 @@ static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
}
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -310,7 +310,7 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
}
static ssize_t iad_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -331,7 +331,7 @@ static ssize_t iad_read(struct file *filp, struct kobject *kobj,
}
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -361,7 +361,7 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
}
static ssize_t page1_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -391,7 +391,7 @@ static ssize_t page1_read(struct file *filp, struct kobject *kobj,
}
static ssize_t offset_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -410,7 +410,7 @@ static ssize_t offset_write(struct file *filp, struct kobject *kobj,
}
static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -431,7 +431,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vad_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -452,7 +452,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -472,15 +472,15 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR_RW(iad, 0);
-static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
-static BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
-static BIN_ATTR_WO(offset, 2);
-static BIN_ATTR_RO(temperature, 0/* real length varies */);
-static BIN_ATTR_RO(vad, 0/* real length varies */);
-static BIN_ATTR_RO(vdd, 0/* real length varies */);
+static const BIN_ATTR_RW(iad, 0);
+static const BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
+static const BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
+static const BIN_ATTR_WO(offset, 2);
+static const BIN_ATTR_RO(temperature, 0/* real length varies */);
+static const BIN_ATTR_RO(vad, 0/* real length varies */);
+static const BIN_ATTR_RO(vdd, 0/* real length varies */);
-static struct bin_attribute *w1_ds2438_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2438_bin_attrs[] = {
&bin_attr_iad,
&bin_attr_page0,
&bin_attr_page1,
@@ -492,7 +492,7 @@ static struct bin_attribute *w1_ds2438_bin_attrs[] = {
};
static const struct attribute_group w1_ds2438_group = {
- .bin_attrs = w1_ds2438_bin_attrs,
+ .bin_attrs_new = w1_ds2438_bin_attrs,
};
static const struct attribute_group *w1_ds2438_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 3cde1bb1886b..ba7beb7b01f9 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -87,7 +87,7 @@ int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -95,15 +95,15 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
return w1_ds2780_io(dev, buf, off, count, 0);
}
-static BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
-static struct bin_attribute *w1_ds2780_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2780_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2780_group = {
- .bin_attrs = w1_ds2780_bin_attrs,
+ .bin_attrs_new = w1_ds2780_bin_attrs,
};
static const struct attribute_group *w1_ds2780_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index e418484b4a49..acd04ee96e81 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -84,7 +84,7 @@ int w1_ds2781_eeprom_cmd(struct device *dev, int addr, int cmd)
EXPORT_SYMBOL(w1_ds2781_eeprom_cmd);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -92,15 +92,15 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
return w1_ds2781_io(dev, buf, off, count, 0);
}
-static BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
-static struct bin_attribute *w1_ds2781_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2781_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2781_group = {
- .bin_attrs = w1_ds2781_bin_attrs,
+ .bin_attrs_new = w1_ds2781_bin_attrs,
};
static const struct attribute_group *w1_ds2781_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
index 4c1a2c515317..6ee895640d4a 100644
--- a/drivers/w1/slaves/w1_ds2805.c
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -92,7 +92,7 @@ static int w1_f0d_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t w1_f0d_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -200,7 +200,7 @@ retry:
}
static ssize_t w1_f0d_write_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -261,14 +261,14 @@ out_up:
return count;
}
-static struct bin_attribute w1_f0d_bin_attr = {
+static const struct bin_attribute w1_f0d_bin_attr = {
.attr = {
.name = "eeprom",
.mode = 0644,
},
.size = W1_F0D_EEPROM_SIZE,
- .read = w1_f0d_read_bin,
- .write = w1_f0d_write_bin,
+ .read_new = w1_f0d_read_bin,
+ .write_new = w1_f0d_write_bin,
};
static int w1_f0d_add_slave(struct w1_slave *sl)
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 2854b8b9e93f..d99ffadbe29b 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -112,7 +112,7 @@ static int w1_f1C_read(struct w1_slave *sl, int addr, int len, char *data)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -223,7 +223,7 @@ static int w1_f1C_write(struct w1_slave *sl, int addr, int len, const u8 *data)
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
@@ -276,10 +276,10 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
static ssize_t pio_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
+ const struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
@@ -298,8 +298,8 @@ static ssize_t pio_read(struct file *filp, struct kobject *kobj,
}
static ssize_t pio_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -337,7 +337,7 @@ static ssize_t pio_write(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RW(pio, 1);
+static const BIN_ATTR_RW(pio, 1);
static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -363,7 +363,7 @@ static struct attribute *w1_f1C_attrs[] = {
NULL,
};
-static struct bin_attribute *w1_f1C_bin_attrs[] = {
+static const struct bin_attribute *const w1_f1C_bin_attrs[] = {
&bin_attr_eeprom,
&bin_attr_pio,
NULL,
@@ -371,7 +371,7 @@ static struct bin_attribute *w1_f1C_bin_attrs[] = {
static const struct attribute_group w1_f1C_group = {
.attrs = w1_f1C_attrs,
- .bin_attrs = w1_f1C_bin_attrs,
+ .bin_attrs_new = w1_f1C_bin_attrs,
};
static const struct attribute_group *w1_f1C_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
index 52261b54d842..5738cbce1a37 100644
--- a/drivers/w1/slaves/w1_ds28e17.c
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -583,7 +583,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
return result;
/* Return current speed value. */
- return sprintf(buf, "%d\n", result);
+ return sysfs_emit(buf, "%d\n", result);
}
static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
@@ -633,7 +633,7 @@ static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
struct w1_f19_data *data = sl->family_data;
/* Return current stretch value. */
- return sprintf(buf, "%d\n", data->stretch);
+ return sysfs_emit(buf, "%d\n", data->stretch);
}
static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index d82e86d3ddf6..29f200bbab41 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -111,7 +111,7 @@ ATTRIBUTE_GROUPS(w1_slave);
/* Default family */
static ssize_t rw_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
+ const struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -130,8 +130,8 @@ out_up:
}
static ssize_t rw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -141,15 +141,15 @@ static ssize_t rw_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RW(rw, PAGE_SIZE);
+static const BIN_ATTR_RW(rw, PAGE_SIZE);
-static struct bin_attribute *w1_slave_bin_attrs[] = {
+static const struct bin_attribute *const w1_slave_bin_attrs[] = {
&bin_attr_rw,
NULL,
};
static const struct attribute_group w1_slave_default_group = {
- .bin_attrs = w1_slave_bin_attrs,
+ .bin_attrs_new = w1_slave_bin_attrs,
};
static const struct attribute_group *w1_slave_default_groups[] = {
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index d708c091bf1b..77039f2f0be5 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -135,7 +135,11 @@ static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
}
static const struct watchdog_info da9052_wdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_CARDRESET |
+ WDIOF_OVERHEAT |
+ WDIOF_POWERUNDER,
.identity = "DA9052 Watchdog",
};
@@ -169,6 +173,13 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
+ if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR)
+ da9052_wdt->bootstatus |= WDIOF_CARDRESET;
+ if (da9052->fault_log & DA9052_FAULTLOG_TEMPOVER)
+ da9052_wdt->bootstatus |= WDIOF_OVERHEAT;
+ if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT)
+ da9052_wdt->bootstatus |= WDIOF_POWERUNDER;
+
ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
DA9052_CONTROLD_TWDSCALE, 0);
if (ret < 0) {
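
Those bootstatus bits surface to userspace through the standard watchdog chardev: WDIOC_GETBOOTSTATUS returns the accumulated WDIOF_* flags. A small userspace sketch reading them (the device path is an assumption; note that opening the node arms most watchdogs, hence the magic-close write):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog0", O_WRONLY);	/* assumed path */
		int status = 0;

		if (fd < 0)
			return 1;
		if (ioctl(fd, WDIOC_GETBOOTSTATUS, &status) == 0)
			printf("bootstatus 0x%x: cardreset=%d overheat=%d powerunder=%d\n",
			       status, !!(status & WDIOF_CARDRESET),
			       !!(status & WDIOF_OVERHEAT),
			       !!(status & WDIOF_POWERUNDER));
		write(fd, "V", 1);	/* magic close: disarm on close() */
		close(fd);
		return 0;
	}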
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 33835c0b06de..d3ced783a5f4 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -25,7 +25,6 @@ static bool nowayout = WATCHDOG_NOWAYOUT;
/**
* struct max77620_variant - Data specific to a chip variant
- * @wdt_info: watchdog descriptor
* @reg_onoff_cnfg2: ONOFF_CNFG2 register offset
* @reg_cnfg_glbl2: CNFG_GLBL2 register offset
* @reg_cnfg_glbl3: CNFG_GLBL3 register offset
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 58c9445c0f88..d1f9ce4100a8 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -273,7 +273,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdd->status);
time_left_ms = rti_wdt_get_timeleft_ms(wdd);
- heartbeat_ms = readl(wdt->base + RTIDWDPRLD);
+ /* AM62x TRM: texp = (RTIDWDPRLD + 1) * (2^13) / RTICLK1 */
+ heartbeat_ms = readl(wdt->base + RTIDWDPRLD) + 1;
heartbeat_ms <<= WDT_PRELOAD_SHIFT;
heartbeat_ms *= 1000;
do_div(heartbeat_ms, wdt->freq);
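
Worked through with assumed numbers: for RTICLK1 = 32768 Hz and RTIDWDPRLD = 4095, the TRM formula gives texp = (4095 + 1) * 2^13 / 32768 = 1024 s. The code computes the same value in milliseconds: (4095 + 1) << 13 = 33554432, times 1000 and divided by a freq of 32768 yields 1024000 ms. The off-by-one being fixed here is visible in the same numbers: without the "+ 1" the result would be 4095 * 8192 * 1000 / 32768 = 1023750 ms, a quarter second short.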
@@ -301,6 +302,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "No memory address assigned to the region.\n");
goto err_iomap;
diff --git a/drivers/watchdog/rzv2h_wdt.c b/drivers/watchdog/rzv2h_wdt.c
index 1d1b17312747..8defd0241213 100644
--- a/drivers/watchdog/rzv2h_wdt.c
+++ b/drivers/watchdog/rzv2h_wdt.c
@@ -217,24 +217,24 @@ static int rzv2h_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->pclk = devm_clk_get_prepared(&pdev->dev, "pclk");
+ priv->pclk = devm_clk_get_prepared(dev, "pclk");
if (IS_ERR(priv->pclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
+ return dev_err_probe(dev, PTR_ERR(priv->pclk), "no pclk");
- priv->oscclk = devm_clk_get_prepared(&pdev->dev, "oscclk");
+ priv->oscclk = devm_clk_get_prepared(dev, "oscclk");
if (IS_ERR(priv->oscclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->oscclk), "no oscclk");
+ return dev_err_probe(dev, PTR_ERR(priv->oscclk), "no oscclk");
- priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ priv->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(priv->rstc))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get cpg reset");
priv->wdev.max_hw_heartbeat_ms = (MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256) /
clk_get_rate(priv->oscclk);
dev_dbg(dev, "max hw timeout of %dms\n", priv->wdev.max_hw_heartbeat_ms);
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
@@ -251,7 +251,7 @@ static int rzv2h_wdt_probe(struct platform_device *pdev)
if (ret)
dev_warn(dev, "Specified timeout invalid, using default");
- return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+ return devm_watchdog_register_device(dev, &priv->wdev);
}
static const struct of_device_id rzv2h_wdt_ids[] = {
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 7a1096265f18..0820e35ad2e3 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -187,14 +187,12 @@ static int __init softdog_init(void)
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
- hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- softdog_ticktock.function = softdog_fire;
+ hrtimer_setup(&softdog_ticktock, softdog_fire, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) {
softdog_info.options |= WDIOF_PRETIMEOUT;
- hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- softdog_preticktock.function = softdog_pretimeout;
+ hrtimer_setup(&softdog_preticktock, softdog_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
if (soft_active_on_boot)
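
hrtimer_setup(), used in this and the following watchdog conversions, folds hrtimer_init() and the open-coded callback assignment into one call, so a timer can no longer be initialized without its expiry function. A minimal sketch of the new idiom (the timer name and period are illustrative):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_fire(struct hrtimer *t)
	{
		/* ... handle expiry ... */
		return HRTIMER_NORESTART;
	}

	static void demo_arm(void)
	{
		/* Replaces hrtimer_init(&t, ...); t.function = demo_fire; */
		hrtimer_setup(&demo_timer, demo_fire, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
	}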
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 109e2e37e8f0..c2125f204a13 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -62,7 +62,6 @@
* @clk: (optional) clock structure of wdt
* @rate: (optional) clock rate when provided via properties
* @adev: amba device structure of wdt
- * @status: current status of wdt
* @load_val: load value to be set for current timeout
*/
struct sp805_wdt {
@@ -128,7 +127,7 @@ static unsigned int wdt_timeleft(struct watchdog_device *wdd)
/*If the interrupt is inactive then time left is WDTValue + WDTLoad. */
if (!(readl_relaxed(wdt->base + WDTRIS) & INT_MASK))
- load += wdt->load_val + 1;
+ load += (u64)wdt->load_val + 1;
spin_unlock(&wdt->lock);
return div_u64(load, wdt->rate);
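
The (u64) cast matters because load_val is a 32-bit quantity: C performs the addition in the operands' own type first, so a u32 value of 0xFFFFFFFF plus 1 wraps to 0 before it is widened for the u64 accumulation. In miniature:

	u32 v = 0xFFFFFFFFu;
	u64 a = v + 1;		/* 32-bit add wraps: a == 0 */
	u64 b = (u64)v + 1;	/* widened first:  b == 0x100000000 */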
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 4190cb800cc4..8369fd94fc1a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1051,8 +1051,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
}
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- wd_data->timer.function = watchdog_timer_expired;
+ hrtimer_setup(&wd_data->timer, watchdog_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
watchdog_hrtimer_pretimeout_init(wdd);
if (wdd->id == 0) {
@@ -1229,7 +1229,7 @@ int __init watchdog_dev_init(void)
{
int err;
- watchdog_kworker = kthread_create_worker(0, "watchdogd");
+ watchdog_kworker = kthread_run_worker(0, "watchdogd");
if (IS_ERR(watchdog_kworker)) {
pr_err("Failed to create watchdog kworker\n");
return PTR_ERR(watchdog_kworker);
diff --git a/drivers/watchdog/watchdog_hrtimer_pretimeout.c b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
index 940b53718a91..fbc7eecd8b20 100644
--- a/drivers/watchdog/watchdog_hrtimer_pretimeout.c
+++ b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
@@ -23,8 +23,8 @@ void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
+ hrtimer_setup(&wd_data->pretimeout_timer, watchdog_hrtimer_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 528395133b4f..163f7f1d70f1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -84,7 +84,7 @@ module_param(balloon_boot_timeout, uint, 0444);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;
-static struct ctl_table balloon_table[] = {
+static const struct ctl_table balloon_table[] = {
{
.procname = "hotplug_unpopulated",
.data = &xen_hotplug_unpopulated,
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 985e155ebe4b..41309d38f78c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -722,12 +722,6 @@ static struct irq_info *xen_irq_init(unsigned int irq)
INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
- /*
- * Interrupt affinity setting can be immediate. No point
- * in delaying it until an interrupt is handled.
- */
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-
INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
}
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index c63f317e3df3..093ad4a08672 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -105,7 +105,7 @@ static ssize_t online_show(struct device *dev,
return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}
-static ssize_t __ref online_store(struct device *dev,
+static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index b72ee9379d77..4926d4badc57 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -341,6 +341,7 @@ int pvcalls_front_socket(struct socket *sock)
pvcalls_exit();
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_socket);
static void free_active_ring(struct sock_mapping *map)
{
@@ -486,6 +487,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_connect);
static int __write_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
@@ -581,6 +583,7 @@ again:
pvcalls_exit_sock(sock);
return tot_sent;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_sendmsg);
static int __read_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
@@ -666,6 +669,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_recvmsg);
int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
@@ -719,6 +723,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
pvcalls_exit_sock(sock);
return 0;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_bind);
int pvcalls_front_listen(struct socket *sock, int backlog)
{
@@ -768,8 +773,10 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_listen);
-int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
+int pvcalls_front_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
@@ -788,7 +795,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
}
- nonblock = flags & SOCK_NONBLOCK;
+ nonblock = arg->flags & SOCK_NONBLOCK;
/*
* Backend only supports 1 inflight accept request, will return
* errors for the others
@@ -904,6 +911,7 @@ received:
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_accept);
static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
@@ -1004,6 +1012,7 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_poll);
int pvcalls_front_release(struct socket *sock)
{
@@ -1087,6 +1096,7 @@ int pvcalls_front_release(struct socket *sock)
pvcalls_exit();
return 0;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_release);
static const struct xenbus_device_id pvcalls_front_ids[] = {
{ "pvcalls" },
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index f694ad77379f..881ef14660bc 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -12,7 +12,7 @@ int pvcalls_front_bind(struct socket *sock,
int pvcalls_front_listen(struct socket *sock, int backlog);
int pvcalls_front_accept(struct socket *sock,
struct socket *newsock,
- int flags);
+ struct proto_accept_arg *arg);
int pvcalls_front_sendmsg(struct socket *sock,
struct msghdr *msg,
size_t len);
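The accept change tracks the core socket API, where the old int flags parameter was replaced by struct proto_accept_arg so callers can pass and receive more than a flag word. A hedged caller-side sketch (field names assumed from include/net/sock.h at the time of this series):

struct proto_accept_arg arg = {
	.flags = SOCK_NONBLOCK,		/* replaces the old flags argument */
};
int err = pvcalls_front_accept(sock, newsock, &arg);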
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a337edcf8faf..1f65795cf5d7 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+ return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
- phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
- /* If buffer is physically aligned, ensure DMA alignment. */
- if (IS_ALIGNED(p, algn) &&
- !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
- return 1;
-
for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 1;
@@ -111,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
}
#ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
*dma_handle = xen_phys_to_dma(dev, phys);
if (*dma_handle + size - 1 > dma_mask ||
- range_straddles_page_boundary(phys, size)) {
+ range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)) {
if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
dma_handle) != 0)
goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
- WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)))
return;
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
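Factoring the alignment test into range_requires_alignment() lets the allocate and free paths apply the identical check: the buffer needs help when its physical address meets the natural alignment for the allocation order but its backing machine (bus) address does not. A worked example, assuming 4 KiB pages:

/*
 * size = 8192  ->  get_order(8192) = 1  ->  algn = 1ULL << (1 + 12) = 8192
 *
 * p        = 0x10000   IS_ALIGNED(p, 8192)        -> true
 * bus_addr = 0x11000   IS_ALIGNED(bus_addr, 8192) -> false
 *
 * The helper returns true, so xen_swiotlb_alloc_coherent() reroutes the
 * buffer through xen_create_contiguous_region() for an aligned machine
 * range, and xen_swiotlb_free_coherent() warns on the same condition.
 */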
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 3d34dba9bb2d..10aedcd21363 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -81,7 +81,7 @@ static struct attribute *zorro_device_attrs[] = {
};
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
@@ -98,23 +98,23 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
-static struct bin_attribute zorro_config_attr = {
+static const struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO,
},
.size = sizeof(struct ConfigDev),
- .read = zorro_read_config,
+ .read_new = zorro_read_config,
};
-static struct bin_attribute *zorro_device_bin_attrs[] = {
+static const struct bin_attribute *const zorro_device_bin_attrs[] = {
&zorro_config_attr,
NULL
};
static const struct attribute_group zorro_device_attr_group = {
.attrs = zorro_device_attrs,
- .bin_attrs = zorro_device_bin_attrs,
+ .bin_attrs_new = zorro_device_bin_attrs,
};
const struct attribute_group *zorro_device_attribute_groups[] = {
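The zorro conversion rides the sysfs constification transition: read_new and bin_attrs_new are the transitional member names that take const-qualified callbacks and attribute arrays until the legacy fields can be retired. A hedged sketch of the full pattern:

static ssize_t my_read(struct file *filp, struct kobject *kobj,
		       const struct bin_attribute *attr,
		       char *buf, loff_t off, size_t count);

static const struct bin_attribute my_bin_attr = {
	.attr		= { .name = "config", .mode = 0444 },
	.size		= 64,	/* arbitrary for the sketch */
	.read_new	= my_read,
};

static const struct bin_attribute *const my_bin_attrs[] = {
	&my_bin_attr,
	NULL
};

static const struct attribute_group my_group = {
	.bin_attrs_new	= my_bin_attrs,
};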